| repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M ⌀) |
|---|---|---|---|---|
neumerance/deploy
|
refs/heads/master
|
.venv/lib/python2.7/site-packages/django/core/context_processors.py
|
232
|
"""
A set of request processors that return dictionaries to be merged into a
template context. Each function takes the request object as its only parameter
and returns a dictionary to add to the context.
These are referenced from the setting TEMPLATE_CONTEXT_PROCESSORS and used by
RequestContext.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.middleware.csrf import get_token
from django.utils import six
from django.utils.encoding import smart_text
from django.utils.functional import lazy
def csrf(request):
"""
Context processor that provides a CSRF token, or the string 'NOTPROVIDED' if
it has not been provided by either a view decorator or the middleware
"""
def _get_val():
token = get_token(request)
if token is None:
# In order to be able to provide debugging info in the
# case of misconfiguration, we use a sentinel value
# instead of returning an empty dict.
return 'NOTPROVIDED'
else:
return smart_text(token)
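# Wrapping with lazy() below defers token generation until the template
# actually renders {{ csrf_token }}, so responses that never use the token
# pay no cost for it.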
_get_val = lazy(_get_val, six.text_type)
return {'csrf_token': _get_val() }
def debug(request):
"Returns context variables helpful for debugging."
context_extras = {}
if settings.DEBUG and request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS:
context_extras['debug'] = True
from django.db import connection
context_extras['sql_queries'] = connection.queries
return context_extras
def i18n(request):
from django.utils import translation
context_extras = {}
context_extras['LANGUAGES'] = settings.LANGUAGES
context_extras['LANGUAGE_CODE'] = translation.get_language()
context_extras['LANGUAGE_BIDI'] = translation.get_language_bidi()
return context_extras
def tz(request):
from django.utils import timezone
return {'TIME_ZONE': timezone.get_current_timezone_name()}
def static(request):
"""
Adds static-related context variables to the context.
"""
return {'STATIC_URL': settings.STATIC_URL}
def media(request):
"""
Adds media-related context variables to the context.
"""
return {'MEDIA_URL': settings.MEDIA_URL}
def request(request):
return {'request': request}
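# A minimal settings sketch showing how these processors are wired up via
# TEMPLATE_CONTEXT_PROCESSORS (module paths match this vendored Django 1.x copy;
# the exact list is illustrative):
#
#   TEMPLATE_CONTEXT_PROCESSORS = (
#       'django.core.context_processors.debug',
#       'django.core.context_processors.i18n',
#       'django.core.context_processors.media',
#       'django.core.context_processors.static',
#   )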
|
makinacorpus/odoo
|
refs/heads/8.0
|
addons/sale_service/__openerp__.py
|
260
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Create Tasks on SO',
'version': '1.0',
'category': 'Project Management',
'description': """
Automatically creates project tasks from procurement lines.
===========================================================
This module will automatically create a new task for each procurement order line
(e.g. for sale order lines), if the corresponding product meets the following
characteristics:
* Product Type = Service
* Procurement Method (Order fulfillment) = MTO (Make to Order)
* Supply/Procurement Method = Manufacture
If, on top of that, a project is specified on the product form (in the Procurement
tab), then the new task will be created in that specific project. Otherwise, the
new task will not belong to any project, and may be added to a project manually
later.
When the project task is completed or cancelled, the corresponding procurement
is updated accordingly. For example, if this procurement corresponds to a sale
order line, the sale order line will be considered delivered when the task is
completed.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/crm',
'depends': ['project', 'procurement', 'sale', 'procurement_jit'],
'data': ['views/sale_service_view.xml'],
'demo': ['demo/sale_service_demo.xml'],
'test': ['test/project_task_procurement.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
NeovaHealth/odoo
|
refs/heads/8.0
|
addons/sale_crm/wizard/crm_make_sale.py
|
223
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class crm_make_sale(osv.osv_memory):
""" Make sale order for crm """
_name = "crm.make.sale"
_description = "Make sales"
def _selectPartner(self, cr, uid, context=None):
"""
This function gets default value for partner_id field.
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param context: A standard dictionary for contextual values
@return: default value of partner_id field.
"""
if context is None:
context = {}
lead_obj = self.pool.get('crm.lead')
active_id = context and context.get('active_id', False) or False
if not active_id:
return False
lead = lead_obj.read(cr, uid, [active_id], ['partner_id'], context=context)[0]
return lead['partner_id'][0] if lead['partner_id'] else False
def view_init(self, cr, uid, fields_list, context=None):
return super(crm_make_sale, self).view_init(cr, uid, fields_list, context=context)
def makeOrder(self, cr, uid, ids, context=None):
"""
This function creates a Quotation for the given case.
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm make sales' ids
@param context: A standard dictionary for contextual values
@return: Dictionary value of created sales order.
"""
# update context: if coming from a phonecall, default state values can make the quote crash (lp:1017353)
context = dict(context or {})
context.pop('default_state', False)
case_obj = self.pool.get('crm.lead')
sale_obj = self.pool.get('sale.order')
partner_obj = self.pool.get('res.partner')
data = context and context.get('active_ids', []) or []
for make in self.browse(cr, uid, ids, context=context):
partner = make.partner_id
partner_addr = partner_obj.address_get(cr, uid, [partner.id],
['default', 'invoice', 'delivery', 'contact'])
pricelist = partner.property_product_pricelist.id
fpos = partner.property_account_position and partner.property_account_position.id or False
payment_term = partner.property_payment_term and partner.property_payment_term.id or False
new_ids = []
for case in case_obj.browse(cr, uid, data, context=context):
if not partner and case.partner_id:
partner = case.partner_id
fpos = partner.property_account_position and partner.property_account_position.id or False
payment_term = partner.property_payment_term and partner.property_payment_term.id or False
partner_addr = partner_obj.address_get(cr, uid, [partner.id],
['default', 'invoice', 'delivery', 'contact'])
pricelist = partner.property_product_pricelist.id
if False in partner_addr.values():
raise osv.except_osv(_('Insufficient Data!'), _('No address(es) defined for this customer.'))
vals = {
'origin': _('Opportunity: %s') % str(case.id),
'section_id': case.section_id and case.section_id.id or False,
'categ_ids': [(6, 0, [categ_id.id for categ_id in case.categ_ids])],
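# (6, 0, ids) is the ORM many2many "replace" command: it makes the
# relation contain exactly the given ids.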
'partner_id': partner.id,
'pricelist_id': pricelist,
'partner_invoice_id': partner_addr['invoice'],
'partner_shipping_id': partner_addr['delivery'],
'date_order': fields.datetime.now(),
'fiscal_position': fpos,
'payment_term':payment_term,
'note': sale_obj.get_salenote(cr, uid, [case.id], partner.id, context=context),
}
if partner.id:
vals['user_id'] = partner.user_id and partner.user_id.id or uid
new_id = sale_obj.create(cr, uid, vals, context=context)
sale_order = sale_obj.browse(cr, uid, new_id, context=context)
case_obj.write(cr, uid, [case.id], {'ref': 'sale.order,%s' % new_id})
new_ids.append(new_id)
message = _("Opportunity has been <b>converted</b> to the quotation <em>%s</em>.") % (sale_order.name)
case.message_post(body=message)
if make.close:
case_obj.case_mark_won(cr, uid, data, context=context)
if not new_ids:
return {'type': 'ir.actions.act_window_close'}
if len(new_ids)<=1:
value = {
'domain': str([('id', 'in', new_ids)]),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'sale.order',
'view_id': False,
'type': 'ir.actions.act_window',
'name' : _('Quotation'),
'res_id': new_ids and new_ids[0]
}
else:
value = {
'domain': str([('id', 'in', new_ids)]),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'sale.order',
'view_id': False,
'type': 'ir.actions.act_window',
'name' : _('Quotation'),
'res_id': new_ids
}
return value
_columns = {
'partner_id': fields.many2one('res.partner', 'Customer', required=True, domain=[('customer','=',True)]),
'close': fields.boolean('Mark Won', help='Check this to close the opportunity after having created the sales order.'),
}
_defaults = {
'close': False,
'partner_id': _selectPartner,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Vogeltak/pauselan
|
refs/heads/master
|
lib/python3.4/site-packages/sqlalchemy/orm/evaluator.py
|
60
|
# orm/evaluator.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import operator
from ..sql import operators
class UnevaluatableError(Exception):
pass
_straight_ops = set(getattr(operators, op)
for op in ('add', 'mul', 'sub',
'div',
'mod', 'truediv',
'lt', 'le', 'ne', 'gt', 'ge', 'eq'))
_notimplemented_ops = set(getattr(operators, op)
for op in ('like_op', 'notlike_op', 'ilike_op',
'notilike_op', 'between_op', 'in_op',
'notin_op', 'endswith_op', 'concat_op'))
class EvaluatorCompiler(object):
def __init__(self, target_cls=None):
self.target_cls = target_cls
def process(self, clause):
meth = getattr(self, "visit_%s" % clause.__visit_name__, None)
if not meth:
raise UnevaluatableError(
"Cannot evaluate %s" % type(clause).__name__)
return meth(clause)
def visit_grouping(self, clause):
return self.process(clause.element)
def visit_null(self, clause):
return lambda obj: None
def visit_false(self, clause):
return lambda obj: False
def visit_true(self, clause):
return lambda obj: True
def visit_column(self, clause):
if 'parentmapper' in clause._annotations:
parentmapper = clause._annotations['parentmapper']
if self.target_cls and not issubclass(
self.target_cls, parentmapper.class_):
raise UnevaluatableError(
"Can't evaluate criteria against alternate class %s" %
parentmapper.class_
)
key = parentmapper._columntoproperty[clause].key
else:
key = clause.key
get_corresponding_attr = operator.attrgetter(key)
return lambda obj: get_corresponding_attr(obj)
def visit_clauselist(self, clause):
evaluators = list(map(self.process, clause.clauses))
if clause.operator is operators.or_:
def evaluate(obj):
has_null = False
for sub_evaluate in evaluators:
value = sub_evaluate(obj)
if value:
return True
has_null = has_null or value is None
if has_null:
return None
return False
elif clause.operator is operators.and_:
def evaluate(obj):
for sub_evaluate in evaluators:
value = sub_evaluate(obj)
if not value:
if value is None:
return None
return False
return True
else:
raise UnevaluatableError(
"Cannot evaluate clauselist with operator %s" %
clause.operator)
return evaluate
def visit_binary(self, clause):
eval_left, eval_right = list(map(self.process,
[clause.left, clause.right]))
operator = clause.operator
if operator is operators.is_:
def evaluate(obj):
return eval_left(obj) == eval_right(obj)
elif operator is operators.isnot:
def evaluate(obj):
return eval_left(obj) != eval_right(obj)
elif operator in _straight_ops:
def evaluate(obj):
left_val = eval_left(obj)
right_val = eval_right(obj)
if left_val is None or right_val is None:
return None
return operator(left_val, right_val)
else:
raise UnevaluatableError(
"Cannot evaluate %s with operator %s" %
(type(clause).__name__, clause.operator))
return evaluate
def visit_unary(self, clause):
eval_inner = self.process(clause.element)
if clause.operator is operators.inv:
def evaluate(obj):
value = eval_inner(obj)
if value is None:
return None
return not value
return evaluate
raise UnevaluatableError(
"Cannot evaluate %s with operator %s" %
(type(clause).__name__, clause.operator))
def visit_bindparam(self, clause):
val = clause.value
return lambda obj: val
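# A minimal usage sketch (the mapped class and criterion are illustrative);
# this is the mechanism behind Query.update(..., synchronize_session='evaluate'):
#
#   fn = EvaluatorCompiler().process(User.__table__.c.name == 'ed')
#   fn(some_user)   # True, False, or None under SQL-style null semantics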
|
NeCTAR-RC/murano
|
refs/heads/master
|
murano/api/v1/services.py
|
3
|
# Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools as func
from oslo_log import log as logging
from webob import exc
from murano.api.v1 import request_statistics
from murano.common.helpers import token_sanitizer
from murano.common.i18n import _
from murano.common import wsgi
from murano.db.services import core_services
from murano import utils
LOG = logging.getLogger(__name__)
API_NAME = 'Services'
def normalize_path(f):
@func.wraps(f)
def f_normalize_path(*args, **kwargs):
if 'path' in kwargs:
if kwargs['path']:
kwargs['path'] = '/services/' + kwargs['path']
else:
kwargs['path'] = '/services'
return f(*args, **kwargs)
return f_normalize_path
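# e.g. a handler called with path='web/1' receives path='/services/web/1',
# and one called with an empty path receives path='/services'.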
class Controller(object):
@request_statistics.stats_count(API_NAME, 'Index')
@utils.verify_env
@normalize_path
def get(self, request, environment_id, path):
LOG.debug('Services:Get <EnvId: {env_id}, '
'Path: {path}>'.format(env_id=environment_id, path=path))
session_id = None
if hasattr(request, 'context') and request.context.session:
session_id = request.context.session
try:
result = core_services.CoreServices.get_data(environment_id,
path,
session_id)
except (KeyError, ValueError, AttributeError):
raise exc.HTTPNotFound
return result
@request_statistics.stats_count(API_NAME, 'Create')
@utils.verify_session
@utils.verify_env
@normalize_path
def post(self, request, environment_id, path, body=None):
if not body:
msg = _('Request body is empty: please provide '
'an application object model')
LOG.error(msg)
raise exc.HTTPBadRequest(msg)
secure_data = token_sanitizer.TokenSanitizer().sanitize(body)
LOG.debug('Services:Post <EnvId: {env_id}, Path: {path}, '
'Body: {body}>'.format(env_id=environment_id,
body=secure_data, path=path))
post_data = core_services.CoreServices.post_data
session_id = request.context.session
try:
result = post_data(environment_id, session_id, body, path)
except (KeyError, ValueError):
raise exc.HTTPNotFound
return result
@request_statistics.stats_count(API_NAME, 'Update')
@utils.verify_session
@utils.verify_env
@normalize_path
def put(self, request, environment_id, path, body=None):
if not body:
msg = _('Request body is empty: please provide '
'an application object model')
LOG.error(msg)
raise exc.HTTPBadRequest(msg)
LOG.debug('Services:Put <EnvId: {0}, Path: {2}, '
'Body: {1}>'.format(environment_id, body, path))
put_data = core_services.CoreServices.put_data
session_id = request.context.session
try:
result = put_data(environment_id, session_id, body, path)
except (KeyError, ValueError):
raise exc.HTTPNotFound
return result
@request_statistics.stats_count(API_NAME, 'Delete')
@utils.verify_session
@utils.verify_env
@normalize_path
def delete(self, request, environment_id, path):
LOG.debug('Services:Delete <EnvId: {0}, '
'Path: {1}>'.format(environment_id, path))
delete_data = core_services.CoreServices.delete_data
session_id = request.context.session
try:
delete_data(environment_id, session_id, path)
except (KeyError, ValueError):
raise exc.HTTPNotFound
def create_resource():
return wsgi.Resource(Controller())
|
epssy/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/django/contrib/sites/managers.py
|
118
|
from django.conf import settings
from django.db import models
from django.db.models.fields import FieldDoesNotExist
class CurrentSiteManager(models.Manager):
"Use this to limit objects to those associated with the current site."
def __init__(self, field_name=None):
super(CurrentSiteManager, self).__init__()
self.__field_name = field_name
self.__is_validated = False
def _validate_field_name(self):
field_names = self.model._meta.get_all_field_names()
# If a custom name is provided, make sure the field exists on the model
if self.__field_name is not None and self.__field_name not in field_names:
raise ValueError("%s couldn't find a field named %s in %s." % \
(self.__class__.__name__, self.__field_name, self.model._meta.object_name))
# Otherwise, see if there is a field called either 'site' or 'sites'
else:
for potential_name in ['site', 'sites']:
if potential_name in field_names:
self.__field_name = potential_name
self.__is_validated = True
break
# Now do a type check on the field (FK or M2M only)
try:
field = self.model._meta.get_field(self.__field_name)
if not isinstance(field, (models.ForeignKey, models.ManyToManyField)):
raise TypeError("%s must be a ForeignKey or ManyToManyField." % self.__field_name)
except FieldDoesNotExist:
raise ValueError("%s couldn't find a field named %s in %s." % \
(self.__class__.__name__, self.__field_name, self.model._meta.object_name))
self.__is_validated = True
def get_queryset(self):
if not self.__is_validated:
self._validate_field_name()
return super(CurrentSiteManager, self).get_queryset().filter(**{self.__field_name + '__id__exact': settings.SITE_ID})
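# A minimal usage sketch (the model below is illustrative, not part of this file):
#
#   from django.contrib.sites.models import Site
#   class Article(models.Model):
#       site = models.ForeignKey(Site)
#       on_site = CurrentSiteManager()  # Article.on_site filters by settings.SITE_ID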
|
jasonwee/asus-rt-n14uhp-mrtg
|
refs/heads/master
|
tmp/ve_asus-rt-n14uhp-mrtg/lib/python3.4/site-packages/django/contrib/gis/geos/error.py
|
508
|
class GEOSException(Exception):
"The base GEOS exception, indicates a GEOS-related error."
pass
|
tdautc19841202/micropython
|
refs/heads/master
|
tests/basics/set_binop.py
|
22
|
sets = [set(), {1}, {1, 2}, {1, 2, 3}, {2, 3}, {2, 3, 5}, {5}, {7}]
for s in sets:
for t in sets:
print(sorted(s), '|', sorted(t), '=', sorted(s | t))
print(sorted(s), '^', sorted(t), '=', sorted(s ^ t))
print(sorted(s), '&', sorted(t), '=', sorted(s & t))
print(sorted(s), '-', sorted(t), '=', sorted(s - t))
u = s.copy()
u |= t
print(sorted(s), "|=", sorted(t), '-->', sorted(u))
u = s.copy()
u ^= t
print(sorted(s), "^=", sorted(t), '-->', sorted(u))
u = s.copy()
u &= t
print(sorted(s), "&=", sorted(t), "-->", sorted(u))
u = s.copy()
u -= t
print(sorted(s), "-=", sorted(t), "-->", sorted(u))
print(sorted(s), '==', sorted(t), '=', s == t)
print(sorted(s), '!=', sorted(t), '=', s != t)
print(sorted(s), '>', sorted(t), '=', s > t)
print(sorted(s), '>=', sorted(t), '=', s >= t)
print(sorted(s), '<', sorted(t), '=', s < t)
print(sorted(s), '<=', sorted(t), '=', s <= t)
|
michigraber/scikit-learn
|
refs/heads/master
|
sklearn/externals/joblib/func_inspect.py
|
239
|
"""
My own variation on function-specific inspect-like features.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
from itertools import islice
import inspect
import warnings
import re
import os
from ._compat import _basestring
from .logger import pformat
from ._memory_helpers import open_py_source
def get_func_code(func):
""" Attempts to retrieve a reliable function code hash.
The reason we don't use inspect.getsource is that it caches the
source, whereas we want this to be modified on the fly when the
function is modified.
Returns
-------
func_code: string
The function code
source_file: string
The path to the file in which the function is defined.
first_line: int
The first line of the code in the source file.
Notes
------
This function does a bit more magic than inspect, and is thus
more robust.
"""
source_file = None
try:
code = func.__code__
source_file = code.co_filename
if not os.path.exists(source_file):
# Use inspect for lambda functions and functions defined in an
# interactive shell, or in doctests
source_code = ''.join(inspect.getsourcelines(func)[0])
line_no = 1
if source_file.startswith('<doctest '):
source_file, line_no = re.match(
'\<doctest (.*\.rst)\[(.*)\]\>',
source_file).groups()
line_no = int(line_no)
source_file = '<doctest %s>' % source_file
return source_code, source_file, line_no
# Try to retrieve the source code.
with open_py_source(source_file) as source_file_obj:
first_line = code.co_firstlineno
# All the lines after the function definition:
source_lines = list(islice(source_file_obj, first_line - 1, None))
return ''.join(inspect.getblock(source_lines)), source_file, first_line
except:
# If the source code fails, we use the hash. This is fragile and
# might change from one session to another.
if hasattr(func, '__code__'):
# Python 3.X
return str(func.__code__.__hash__()), source_file, -1
else:
# Weird objects like numpy ufunc don't have __code__
# This is fragile, as quite often the id of the object is
# in the repr, so it might not persist across sessions,
# however it will work for ufuncs.
return repr(func), source_file, -1
def _clean_win_chars(string):
"""Windows cannot encode some characters in filename."""
import urllib
if hasattr(urllib, 'quote'):
quote = urllib.quote
else:
# In Python 3, quote is elsewhere
import urllib.parse
quote = urllib.parse.quote
for char in ('<', '>', '!', ':', '\\'):
string = string.replace(char, quote(char))
return string
def get_func_name(func, resolv_alias=True, win_characters=True):
""" Return the function import path (as a list of module names), and
a name for the function.
Parameters
----------
func: callable
The func to inspect
resolv_alias: boolean, optional
If true, possible local aliases are indicated.
win_characters: boolean, optional
If true, substitute special characters using urllib.quote
This is useful in Windows, as it cannot encode some filenames
"""
if hasattr(func, '__module__'):
module = func.__module__
else:
try:
module = inspect.getmodule(func)
except TypeError:
if hasattr(func, '__class__'):
module = func.__class__.__module__
else:
module = 'unknown'
if module is None:
# Happens in doctests, eg
module = ''
if module == '__main__':
try:
filename = os.path.abspath(inspect.getsourcefile(func))
except:
filename = None
if filename is not None:
# mangling of full path to filename
parts = filename.split(os.sep)
if parts[-1].startswith('<ipython-input'):
# function is defined in an IPython session. The filename
# will change with every new kernel instance. This hack
# always returns the same filename
parts[-1] = '__ipython-input__'
filename = '-'.join(parts)
if filename.endswith('.py'):
filename = filename[:-3]
module = module + '-' + filename
module = module.split('.')
if hasattr(func, 'func_name'):
name = func.func_name
elif hasattr(func, '__name__'):
name = func.__name__
else:
name = 'unknown'
# Hack to detect functions not defined at the module-level
if resolv_alias:
# TODO: Maybe add a warning here?
if hasattr(func, 'func_globals') and name in func.func_globals:
if not func.func_globals[name] is func:
name = '%s-alias' % name
if inspect.ismethod(func):
# We need to add the name of the class
if hasattr(func, 'im_class'):
klass = func.im_class
module.append(klass.__name__)
if os.name == 'nt' and win_characters:
# Stupid windows can't encode certain characters in filenames
name = _clean_win_chars(name)
module = [_clean_win_chars(s) for s in module]
return module, name
def filter_args(func, ignore_lst, args=(), kwargs=dict()):
""" Filters the given args and kwargs using a list of arguments to
ignore, and a function specification.
Parameters
----------
func: callable
Function giving the argument specification
ignore_lst: list of strings
List of arguments to ignore (either a name of an argument
in the function spec, or '*', or '**')
*args: list
Positional arguments passed to the function.
**kwargs: dict
Keyword arguments passed to the function
Returns
-------
filtered_args: list
List of filtered positional and keyword arguments.
"""
args = list(args)
if isinstance(ignore_lst, _basestring):
# Catch a common mistake
raise ValueError('ignore_lst must be a list of parameters to ignore '
'%s (type %s) was given' % (ignore_lst, type(ignore_lst)))
# Special case for functools.partial objects
if (not inspect.ismethod(func) and not inspect.isfunction(func)):
if ignore_lst:
warnings.warn('Cannot inspect object %s, ignore list will '
'not work.' % func, stacklevel=2)
return {'*': args, '**': kwargs}
arg_spec = inspect.getargspec(func)
# getargspec returns a namedtuple on recent Pythons and a plain tuple on
# older ones; handle both.
if hasattr(arg_spec, 'args'):
arg_names = arg_spec.args
arg_defaults = arg_spec.defaults
arg_keywords = arg_spec.keywords
arg_varargs = arg_spec.varargs
else:
arg_names, arg_varargs, arg_keywords, arg_defaults = arg_spec
arg_defaults = arg_defaults or {}
if inspect.ismethod(func):
# First argument is 'self', it has been removed by Python
# we need to add it back:
args = [func.__self__, ] + args
# XXX: Maybe I need an inspect.isbuiltin to detect C-level methods, such
# as on ndarrays.
_, name = get_func_name(func, resolv_alias=False)
arg_dict = dict()
arg_position = -1
for arg_position, arg_name in enumerate(arg_names):
if arg_position < len(args):
# Positional argument or keyword argument given as positional
arg_dict[arg_name] = args[arg_position]
else:
position = arg_position - len(arg_names)
if arg_name in kwargs:
arg_dict[arg_name] = kwargs.pop(arg_name)
else:
try:
arg_dict[arg_name] = arg_defaults[position]
except (IndexError, KeyError):
# Missing argument
raise ValueError('Wrong number of arguments for %s%s:\n'
' %s(%s, %s) was called.'
% (name,
inspect.formatargspec(*inspect.getargspec(func)),
name,
repr(args)[1:-1],
', '.join('%s=%s' % (k, v)
for k, v in kwargs.items())
)
)
varkwargs = dict()
for arg_name, arg_value in sorted(kwargs.items()):
if arg_name in arg_dict:
arg_dict[arg_name] = arg_value
elif arg_keywords is not None:
varkwargs[arg_name] = arg_value
else:
raise TypeError("Ignore list for %s() contains an unexpected "
"keyword argument '%s'" % (name, arg_name))
if arg_keywords is not None:
arg_dict['**'] = varkwargs
if arg_varargs is not None:
varargs = args[arg_position + 1:]
arg_dict['*'] = varargs
# Now remove the arguments to be ignored
for item in ignore_lst:
if item in arg_dict:
arg_dict.pop(item)
else:
raise ValueError("Ignore list: argument '%s' is not defined for "
"function %s%s" %
(item, name,
inspect.formatargspec(arg_names,
arg_varargs,
arg_keywords,
arg_defaults,
)))
# XXX: Return a sorted list of pairs?
return arg_dict
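# A minimal sketch (the function below is illustrative, not from this module):
#
#   def f(x, y=0, *args, **kw):
#       pass
#   filter_args(f, ['y', '*'], (1, 2, 3), {'z': 4})
#   # -> {'x': 1, '**': {'z': 4}}   ('y' and the extra positionals are dropped)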
def format_signature(func, *args, **kwargs):
# XXX: Should this use inspect.formatargvalues/formatargspec?
module, name = get_func_name(func)
module = [m for m in module if m]
if module:
module.append(name)
module_path = '.'.join(module)
else:
module_path = name
arg_str = list()
previous_length = 0
for arg in args:
arg = pformat(arg, indent=2)
if len(arg) > 1500:
arg = '%s...' % arg[:700]
if previous_length > 80:
arg = '\n%s' % arg
previous_length = len(arg)
arg_str.append(arg)
arg_str.extend(['%s=%s' % (v, pformat(i)) for v, i in kwargs.items()])
arg_str = ', '.join(arg_str)
signature = '%s(%s)' % (name, arg_str)
return module_path, signature
def format_call(func, args, kwargs, object_name="Memory"):
""" Returns a nicely formatted statement displaying the function
call with the given arguments.
"""
path, signature = format_signature(func, *args, **kwargs)
msg = '%s\n[%s] Calling %s...\n%s' % (80 * '_', object_name,
path, signature)
return msg
# XXX: Not using logging framework
#self.debug(msg)
|
rch/flask-extdirect
|
refs/heads/master
|
flask_sencha/touch.py
|
12133432
| |
meletakis/collato
|
refs/heads/master
|
lib/python2.7/site-packages/django/contrib/sessions/__init__.py
|
12133432
| |
atopuzov/nitro-python
|
refs/heads/master
|
nssrc/com/citrix/netscaler/nitro/resource/config/network/vlan_interface_binding.py
|
3
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vlan_interface_binding(base_resource) :
""" Binding class showing the interface that can be bound to vlan.
"""
def __init__(self) :
self._ifnum = ""
self._tagged = False
self._id = 0
self.___count = 0
@property
def id(self) :
ur"""Specifies the virtual LAN ID.<br/>Minimum value = 1<br/>Maximum value = 4094.
"""
try :
return self._id
except Exception as e:
raise e
@id.setter
def id(self, id) :
ur"""Specifies the virtual LAN ID.<br/>Minimum value = 1<br/>Maximum value = 4094
"""
try :
self._id = id
except Exception as e:
raise e
@property
def ifnum(self) :
ur"""The interface to be bound to the VLAN, specified in slot/port notation (for example, 1/3).<br/>Minimum length = 1.
"""
try :
return self._ifnum
except Exception as e:
raise e
@ifnum.setter
def ifnum(self, ifnum) :
ur"""The interface to be bound to the VLAN, specified in slot/port notation (for example, 1/3).<br/>Minimum length = 1
"""
try :
self._ifnum = ifnum
except Exception as e:
raise e
@property
def tagged(self) :
ur"""Make the interface an 802.1q tagged interface. Packets sent on this interface on this VLAN have an additional 4-byte 802.1q tag, which identifies the VLAN. To use 802.1q tagging, you must also configure the switch connected to the appliance's interfaces.
"""
try :
return self._tagged
except Exception as e:
raise e
@tagged.setter
def tagged(self, tagged) :
ur"""Make the interface an 802.1q tagged interface. Packets sent on this interface on this VLAN have an additional 4-byte 802.1q tag, which identifies the VLAN. To use 802.1q tagging, you must also configure the switch connected to the appliance's interfaces.
"""
try :
self._tagged = tagged
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(vlan_interface_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vlan_interface_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.id is not None :
return str(self.id)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = vlan_interface_binding()
updateresource.id = resource.id
updateresource.ifnum = resource.ifnum
updateresource.tagged = resource.tagged
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [vlan_interface_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].id = resource[i].id
updateresources[i].ifnum = resource[i].ifnum
updateresources[i].tagged = resource[i].tagged
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = vlan_interface_binding()
deleteresource.id = resource.id
deleteresource.ifnum = resource.ifnum
deleteresource.tagged = resource.tagged
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [vlan_interface_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].id = resource[i].id
deleteresources[i].ifnum = resource[i].ifnum
deleteresources[i].tagged = resource[i].tagged
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, id) :
ur""" Use this API to fetch vlan_interface_binding resources.
"""
try :
obj = vlan_interface_binding()
obj.id = id
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, id, filter_) :
ur""" Use this API to fetch filtered set of vlan_interface_binding resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = vlan_interface_binding()
obj.id = id
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, id) :
ur""" Use this API to count vlan_interface_binding resources configued on NetScaler.
"""
try :
obj = vlan_interface_binding()
obj.id = id
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, id, filter_) :
ur""" Use this API to count the filtered set of vlan_interface_binding resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = vlan_interface_binding()
obj.id = id
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class vlan_interface_binding_response(base_response) :
def __init__(self, length=1) :
self.vlan_interface_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.vlan_interface_binding = [vlan_interface_binding() for _ in range(length)]
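# A minimal usage sketch (the nitro client/session setup is assumed, not shown here):
#
#   bindings = vlan_interface_binding.get(client, 2)   # interfaces bound to VLAN 2
#   total = vlan_interface_binding.count(client, 2)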
|
jordiclariana/ansible
|
refs/heads/devel
|
hacking/get_library.py
|
216
|
#!/usr/bin/env python
# (c) 2014, Will Thames <will@thames.id.au>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import ansible.constants as C
import sys
def main():
print(C.DEFAULT_MODULE_PATH)
return 0
if __name__ == '__main__':
sys.exit(main())
|
coder-han/hugula
|
refs/heads/master
|
Client/tools/site-packages/xlwt/Cell.py
|
87
|
# -*- coding: windows-1252 -*-
from struct import unpack, pack
import BIFFRecords
class StrCell(object):
__slots__ = ["rowx", "colx", "xf_idx", "sst_idx"]
def __init__(self, rowx, colx, xf_idx, sst_idx):
self.rowx = rowx
self.colx = colx
self.xf_idx = xf_idx
self.sst_idx = sst_idx
def get_biff_data(self):
# return BIFFRecords.LabelSSTRecord(self.rowx, self.colx, self.xf_idx, self.sst_idx).get()
return pack('<5HL', 0x00FD, 10, self.rowx, self.colx, self.xf_idx, self.sst_idx)
class BlankCell(object):
__slots__ = ["rowx", "colx", "xf_idx"]
def __init__(self, rowx, colx, xf_idx):
self.rowx = rowx
self.colx = colx
self.xf_idx = xf_idx
def get_biff_data(self):
# return BIFFRecords.BlankRecord(self.rowx, self.colx, self.xf_idx).get()
return pack('<5H', 0x0201, 6, self.rowx, self.colx, self.xf_idx)
class MulBlankCell(object):
__slots__ = ["rowx", "colx1", "colx2", "xf_idx"]
def __init__(self, rowx, colx1, colx2, xf_idx):
self.rowx = rowx
self.colx1 = colx1
self.colx2 = colx2
self.xf_idx = xf_idx
def get_biff_data(self):
return BIFFRecords.MulBlankRecord(self.rowx,
self.colx1, self.colx2, self.xf_idx).get()
class NumberCell(object):
__slots__ = ["rowx", "colx", "xf_idx", "number"]
def __init__(self, rowx, colx, xf_idx, number):
self.rowx = rowx
self.colx = colx
self.xf_idx = xf_idx
self.number = float(number)
def get_encoded_data(self):
rk_encoded = 0
num = self.number
# The four possible kinds of RK encoding are *not* mutually exclusive.
# The 30-bit integer variety picks up the most.
# In the code below, the four varieties are checked in descending order
# of bangs per buck, or not at all.
# SJM 2007-10-01
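# Worked example: 42 survives the int round-trip, so it encodes as
# (42 << 2) | 2 == 170; 3.14 fails that test, but 3.14*100 rounds to 314
# and decodes back exactly, so it encodes as (314 << 2) | 3.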
if -0x20000000 <= num < 0x20000000: # fits in 30-bit *signed* int
inum = int(num)
if inum == num: # survives round-trip
# print "30-bit integer RK", inum, hex(inum)
rk_encoded = 2 | (inum << 2)
return 1, rk_encoded
temp = num * 100
if -0x20000000 <= temp < 0x20000000:
# That was step 1: the coded value will fit in
# a 30-bit signed integer.
itemp = int(round(temp, 0))
# That was step 2: "itemp" is the best candidate coded value.
# Now for step 3: simulate the decoding,
# to check for round-trip correctness.
if itemp / 100.0 == num:
# print "30-bit integer RK*100", itemp, hex(itemp)
rk_encoded = 3 | (itemp << 2)
return 1, rk_encoded
if 0: # Cost of extra pack+unpack not justified by tiny yield.
packed = pack('<d', num)
w01, w23 = unpack('<2i', packed)
if not w01 and not(w23 & 3):
# 34 lsb are 0
# print "float RK", w23, hex(w23)
return 1, w23
packed100 = pack('<d', temp)
w01, w23 = unpack('<2i', packed100)
if not w01 and not(w23 & 3):
# 34 lsb are 0
# print "float RK*100", w23, hex(w23)
return 1, w23 | 1
#print "Number"
#print
return 0, pack('<5Hd', 0x0203, 14, self.rowx, self.colx, self.xf_idx, num)
def get_biff_data(self):
isRK, value = self.get_encoded_data()
if isRK:
return pack('<5Hi', 0x27E, 10, self.rowx, self.colx, self.xf_idx, value)
return value # NUMBER record already packed
class BooleanCell(object):
__slots__ = ["rowx", "colx", "xf_idx", "number"]
def __init__(self, rowx, colx, xf_idx, number):
self.rowx = rowx
self.colx = colx
self.xf_idx = xf_idx
self.number = number
def get_biff_data(self):
return BIFFRecords.BoolErrRecord(self.rowx,
self.colx, self.xf_idx, self.number, 0).get()
error_code_map = {
0x00: 0, # Intersection of two cell ranges is empty
0x07: 7, # Division by zero
0x0F: 15, # Wrong type of operand
0x17: 23, # Illegal or deleted cell reference
0x1D: 29, # Wrong function or range name
0x24: 36, # Value range overflow
0x2A: 42, # Argument or function not available
'#NULL!' : 0, # Intersection of two cell ranges is empty
'#DIV/0!': 7, # Division by zero
'#VALUE!': 15, # Wrong type of operand
'#REF!' : 23, # Illegal or deleted cell reference
'#NAME?' : 29, # Wrong function or range name
'#NUM!' : 36, # Value range overflow
'#N/A!' : 42, # Argument or function not available
}
class ErrorCell(object):
__slots__ = ["rowx", "colx", "xf_idx", "number"]
def __init__(self, rowx, colx, xf_idx, error_string_or_code):
self.rowx = rowx
self.colx = colx
self.xf_idx = xf_idx
try:
self.number = error_code_map[error_string_or_code]
except KeyError:
raise Exception('Illegal error value (%r)' % error_string_or_code)
def get_biff_data(self):
return BIFFRecords.BoolErrRecord(self.rowx,
self.colx, self.xf_idx, self.number, 1).get()
class FormulaCell(object):
__slots__ = ["rowx", "colx", "xf_idx", "frmla", "calc_flags"]
def __init__(self, rowx, colx, xf_idx, frmla, calc_flags=0):
self.rowx = rowx
self.colx = colx
self.xf_idx = xf_idx
self.frmla = frmla
self.calc_flags = calc_flags
def get_biff_data(self):
return BIFFRecords.FormulaRecord(self.rowx,
self.colx, self.xf_idx, self.frmla.rpn(), self.calc_flags).get()
# module-level function for *internal* use by the Row module
def _get_cells_biff_data_mul(rowx, cell_items):
# Return the BIFF data for all cell records in the row.
# Adjacent BLANK|RK records are combined into MUL(BLANK|RK) records.
pieces = []
nitems = len(cell_items)
i = 0
while i < nitems:
icolx, icell = cell_items[i]
if isinstance(icell, NumberCell):
isRK, value = icell.get_encoded_data()
if not isRK:
pieces.append(value) # pre-packed NUMBER record
i += 1
continue
muldata = [(value, icell.xf_idx)]
target = NumberCell
elif isinstance(icell, BlankCell):
muldata = [icell.xf_idx]
target = BlankCell
else:
pieces.append(icell.get_biff_data())
i += 1
continue
lastcolx = icolx
j = i
packed_record = ''
for j in xrange(i+1, nitems):
jcolx, jcell = cell_items[j]
if jcolx != lastcolx + 1:
nexti = j
break
if not isinstance(jcell, target):
nexti = j
break
if target == NumberCell:
isRK, value = jcell.get_encoded_data()
if not isRK:
packed_record = value
nexti = j + 1
break
muldata.append((value, jcell.xf_idx))
else:
muldata.append(jcell.xf_idx)
lastcolx = jcolx
else:
nexti = j + 1
if target == NumberCell:
if lastcolx == icolx:
# RK record
value, xf_idx = muldata[0]
pieces.append(pack('<5Hi', 0x027E, 10, rowx, icolx, xf_idx, value))
else:
# MULRK record
nc = lastcolx - icolx + 1
pieces.append(pack('<4H', 0x00BD, 6 * nc + 6, rowx, icolx))
pieces.append(''.join([pack('<Hi', xf_idx, value) for value, xf_idx in muldata]))
pieces.append(pack('<H', lastcolx))
else:
if lastcolx == icolx:
# BLANK record
xf_idx = muldata[0]
pieces.append(pack('<5H', 0x0201, 6, rowx, icolx, xf_idx))
else:
# MULBLANK record
nc = lastcolx - icolx + 1
pieces.append(pack('<4H', 0x00BE, 2 * nc + 6, rowx, icolx))
pieces.append(''.join([pack('<H', xf_idx) for xf_idx in muldata]))
pieces.append(pack('<H', lastcolx))
if packed_record:
pieces.append(packed_record)
i = nexti
return ''.join(pieces)
|
miniconfig/python-plexapi
|
refs/heads/master
|
plexapi/myplex.py
|
5
|
"""
PlexAPI MyPlex
"""
import plexapi, requests
from plexapi import TIMEOUT, log
from plexapi.exceptions import BadRequest, NotFound, Unauthorized
from plexapi.utils import cast, toDatetime
from requests.status_codes import _codes as codes
from threading import Thread
from xml.etree import ElementTree
class MyPlexUser:
""" Logs into my.plexapp.com to fetch account and token information. This
is useful to get a token if not on the local network.
"""
SIGNIN = 'https://my.plexapp.com/users/sign_in.xml'
def __init__(self, data, initpath=None):
self.initpath = initpath
self.email = data.attrib.get('email')
self.id = data.attrib.get('id')
self.thumb = data.attrib.get('thumb')
self.username = data.attrib.get('username')
self.title = data.attrib.get('title')
self.cloudSyncDevice = data.attrib.get('cloudSyncDevice')
self.authenticationToken = data.attrib.get('authenticationToken')
self.queueEmail = data.attrib.get('queueEmail')
self.queueUid = data.attrib.get('queueUid')
def resources(self):
return MyPlexResource.fetch_resources(self.authenticationToken)
def getResource(self, search, port=32400):
""" Searches server.name, server.sourceTitle and server.host:server.port
from the list of resources available for this PlexUser.
"""
return _findResource(self.resources(), search, port)
@classmethod
def signin(cls, username, password):
if 'X-Plex-Token' in plexapi.BASE_HEADERS:
del plexapi.BASE_HEADERS['X-Plex-Token']
auth = (username, password)
log.info('POST %s', cls.SIGNIN)
response = requests.post(cls.SIGNIN, headers=plexapi.BASE_HEADERS, auth=auth, timeout=TIMEOUT)
if response.status_code != requests.codes.created:
codename = codes.get(response.status_code)[0]
if response.status_code == 401:
raise Unauthorized('(%s) %s' % (response.status_code, codename))
raise BadRequest('(%s) %s' % (response.status_code, codename))
data = ElementTree.fromstring(response.text.encode('utf8'))
return cls(data)
class MyPlexAccount:
""" Represents myPlex account if you already have a connection to a server. """
def __init__(self, server, data):
self.authToken = data.attrib.get('authToken')
self.username = data.attrib.get('username')
self.mappingState = data.attrib.get('mappingState')
self.mappingError = data.attrib.get('mappingError')
self.mappingErrorMessage = data.attrib.get('mappingErrorMessage')
self.signInState = data.attrib.get('signInState')
self.publicAddress = data.attrib.get('publicAddress')
self.publicPort = data.attrib.get('publicPort')
self.privateAddress = data.attrib.get('privateAddress')
self.privatePort = data.attrib.get('privatePort')
self.subscriptionFeatures = data.attrib.get('subscriptionFeatures')
self.subscriptionActive = data.attrib.get('subscriptionActive')
self.subscriptionState = data.attrib.get('subscriptionState')
def resources(self):
return MyPlexResource.fetch_resources(self.authToken)
def getResource(self, search, port=32400):
""" Searches server.name, server.sourceTitle and server.host:server.port
from the list of resources available for this PlexAccount.
"""
return _findResource(self.resources(), search, port)
class MyPlexResource:
RESOURCES = 'https://plex.tv/api/resources?includeHttps=1'
SSLTESTS = [(True, 'uri'), (False, 'http_uri')]
def __init__(self, data):
self.name = data.attrib.get('name')
self.accessToken = data.attrib.get('accessToken')
self.product = data.attrib.get('product')
self.productVersion = data.attrib.get('productVersion')
self.platform = data.attrib.get('platform')
self.platformVersion = data.attrib.get('platformVersion')
self.device = data.attrib.get('device')
self.clientIdentifier = data.attrib.get('clientIdentifier')
self.createdAt = toDatetime(data.attrib.get('createdAt'))
self.lastSeenAt = toDatetime(data.attrib.get('lastSeenAt'))
self.provides = data.attrib.get('provides')
self.owned = cast(bool, data.attrib.get('owned'))
self.home = cast(bool, data.attrib.get('home'))
self.synced = cast(bool, data.attrib.get('synced'))
self.presence = cast(bool, data.attrib.get('presence'))
self.connections = [ResourceConnection(elem) for elem in data if elem.tag == 'Connection']
def __repr__(self):
return '<%s:%s>' % (self.__class__.__name__, self.name.encode('utf8'))
def connect(self, ssl=None):
# Only check non-local connections unless we own the resource
connections = sorted(self.connections, key=lambda c:c.local, reverse=True)
if not self.owned:
connections = [c for c in connections if c.local is False]
# Try connecting to all known resource connections in parallel, but
# only return the first server (in order) that provides a response.
threads, results = [], []
for testssl, attr in self.SSLTESTS:
if ssl in [None, testssl]:
for i in range(len(connections)):
uri = getattr(connections[i], attr)
args = (uri, results, len(results))
results.append(None)
threads.append(Thread(target=self._connect, args=args))
threads[-1].start()
for thread in threads:
thread.join()
# At this point we have a list of result tuples containing (uri, PlexServer)
# or (uri, None) in the case a connection could not be established.
for uri, result in results:
log.info('Testing connection: %s %s', uri, 'OK' if result else 'ERR')
results = list(filter(None, [r[1] for r in results if r]))
if not results:
raise NotFound('Unable to connect to resource: %s' % self.name)
log.info('Connecting to server: %s', results[0])
return results[0]
def _connect(self, uri, results, i):
try:
from plexapi.server import PlexServer
results[i] = (uri, PlexServer(uri, self.accessToken))
except NotFound:
results[i] = (uri, None)
@classmethod
def fetch_resources(cls, token):
headers = plexapi.BASE_HEADERS
headers['X-Plex-Token'] = token
log.info('GET %s?X-Plex-Token=%s', cls.RESOURCES, token)
response = requests.get(cls.RESOURCES, headers=headers, timeout=TIMEOUT)
data = ElementTree.fromstring(response.text.encode('utf8'))
return [MyPlexResource(elem) for elem in data]
class ResourceConnection:
def __init__(self, data):
self.protocol = data.attrib.get('protocol')
self.address = data.attrib.get('address')
self.port = cast(int, data.attrib.get('port'))
self.uri = data.attrib.get('uri')
self.local = cast(bool, data.attrib.get('local'))
@property
def http_uri(self):
return 'http://%s:%s' % (self.address, self.port)
def __repr__(self):
return '<%s:%s>' % (self.__class__.__name__, self.uri.encode('utf8'))
def _findResource(resources, search, port=32400):
""" Searches server.name """
search = search.lower()
log.info('Looking for server: %s', search)
for server in resources:
if search == server.name.lower():
log.info('Server found: %s', server)
return server
log.info('Unable to find server: %s', search)
raise NotFound('Unable to find server: %s' % search)
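# A minimal usage sketch (credentials and server name are placeholders):
#
#   user = MyPlexUser.signin('email', 'password')
#   plex = user.getResource('myserver').connect()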
|
paulmartel/voltdb
|
refs/heads/master
|
tests/testrunner-ng/testsupport/runner.py
|
1
|
# This file is part of VoltDB.
# Copyright (C) 2008-2016 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from threading import Timer
import os
class Runner:
def __init__(self, testname, timestamp, timeout, task, safety, cores):
self.name = testname
self.timestamp = timestamp
self.timeout = timeout
self.task = task
self.safety = safety
self.cores = cores
self.failures = 0
self.testsrun = 0
self.duration = 0.0
self.didtimeout = False
|
youdonghai/intellij-community
|
refs/heads/master
|
python/testData/formatter/blanksBetweenImportsPreservedWithoutOptimizeImports_after.py
|
86
|
import os
import uuid
from django.db import models
from django.utils.translation import ugettext_lazy as _
from project import something
print(something, _, models, os, uuid)
|
dionysius07/flame-detection
|
refs/heads/master
|
flameDetect3.py
|
1
|
from picamera import PiCamera
from picamera.array import PiRGBArray
from time import sleep
import cv2
import numpy as np
# Camera Setup:
cam = PiCamera()
cam.resolution = (320,240)
cam.framerate = 32
raw = PiRGBArray(cam)
# Camera Warmup:
sleep(0.1)
for frame in cam.capture_continuous(raw,format='bgr',use_video_port=True):
img = frame.array
# Pre-processing:
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
_,thresh1 = cv2.threshold(gray,127,255,cv2.THRESH_BINARY)
edge = cv2.Canny(thresh1,30,200)
# Defining central ROI:
roiH,roiW,_ = img.shape
cv2.rectangle(img,(2*roiW/5,roiH),(3*roiW/5,0),(255,128,0),2)
pointSetROI = [2*roiW/5,roiH,3*roiW/5,0]
# Define centroids:
ROI_Xcent = pointSetROI[0]+abs(pointSetROI[0]-pointSetROI[2])/2
ROI_Ycent = pointSetROI[1]+abs(pointSetROI[1]-pointSetROI[3])/2
# Flame detection
_,contours,_ = cv2.findContours(edge,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
(x,y,w,h) = cv2.boundingRect(cnt)
cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
# Flame centroid:
F_Xcent = x+(w/2)
# Orientation with respect to detected flame:
#if F_Xcent < ROI_Xcent:
# print "left!"
#elif F_Xcent > ROI_Xcent:
# print "right!"
#else:
# print "forward!"
cv2.imshow('img',img)
# Clear current frame:
raw.truncate(0)
# Exit loop:
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
cv2.waitKey(1)
cv2.waitKey(1)
cv2.waitKey(1)
cv2.waitKey(1)
|
scanon/narrative
|
refs/heads/master
|
src/biokbase/narrative/common/util.py
|
4
|
"""
Utility functions for dealing with KBase services, etc.
"""
__author__ = 'Dan Gunter <dkgunter@lbl.gov>'
__date__ = '1/6/14'
import json
import os
import re
import requests
from setuptools import Command
import time
from .kvp import KVP_EXPR, parse_kvp
from biokbase.workspace.client import Workspace as WS2
from biokbase.workspace.client import ServerError, URLError
def kbase_debug_mode():
return bool(os.environ.get('KBASE_DEBUG', None))
class _KBaseEnv(object):
"""Single place to get/set KBase environment variables.
Also act as a dict for LogAdapter's "extra" arg.
Use global variable `kbase_env` instead of this class.
"""
# Environment variables.
# Each is associated with an attribute that is the
# same as the variable name without the 'env_' prefix.
env_auth_token = "KB_AUTH_TOKEN"
env_narrative = "KB_NARRATIVE"
env_session = "KB_SESSION"
env_client_ip = "KB_CLIENT_IP"
env_user = None
_defaults = {'auth_token': 'none',
'narrative': 'none',
'session': 'none',
'client_ip': '0.0.0.0',
'user': 'anonymous'}
def __getattr__(self, name):
ename = "env_" + name
if ename in _KBaseEnv.__dict__:
if ename == 'env_user':
return self._user()
else:
return os.environ.get(getattr(self.__class__, ename),
self._defaults[name])
else:
raise KeyError("kbase_env:{}".format(name))
def __setattr__(self, name, value):
ename = "env_" + name
if ename in _KBaseEnv.__dict__:
if ename != 'env_user':
os.environ[getattr(self.__class__, ename)] = value
# Dict emulation
def __iter__(self):
return self.iterkeys()
def __getitem__(self, name):
return getattr(self, name)
def __contains__(self, name):
return name in self._defaults
def keys(self):
return self._defaults.keys()
def iterkeys(self):
return self._defaults.iterkeys()
def __str__(self):
return ', '.join(['{}: {}'.format(k, self[k])
for k in self.keys()])
def _user(self):
token = self.auth_token
if not token:
return self._defaults['user']
m = re.search('un=([^|]+)', token)
return m.group(1) if m else self._defaults['user']
# Get/set KBase environment variables by getting/setting
# attributes of this object.
kbase_env = _KBaseEnv()
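# e.g. kbase_env.user parses 'un=<name>' out of KB_AUTH_TOKEN, and
# kbase_env.session = 'abc123' sets the KB_SESSION environment variable.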
class AweTimeoutError(Exception):
def __init__(self, jobid, timeout):
Exception.__init__(self, "AWE job ({}) timed out after {:d} seconds".format(jobid, timeout))
class AweJob(object):
URL = None
def __init__(self, meth=None, started="Starting method", running="Method"):
"""Create a new AweJob wrapper.
:param meth: Python service method state (optional)
:type meth: narrative.common.service.ServiceMethod
:param started str: Name of method started event
:param running str: Name of method, running, event
"""
self._meth = meth
self._sname, self._rname = started, running
def run(self, jid, stage_fun=None, timeout=3600):
"""Run synchronously, optionally invoking a callback at each completed stage, until
all sub-jobs have completed.
:param jid: AWE Job ID
:param stage_fun: Stage callback function.
If present, invoked with the (jid, completed, total_jobs)
:param timeout int: Timeout, in seconds
:return int: Number of jobs run
:raises: AweTimeoutError on timeout.
"""
t0 = time.time()
self._cb = stage_fun
self._jid = jid
njobs = self.job_count()
self._started()
completed = 0
self._add_jobs(njobs)
while completed < njobs:
time.sleep(5)
if time.time() - t0 > timeout:
raise AweTimeoutError(self._jid, timeout)
remaining = self.job_count()
while completed < (njobs - remaining):
completed += 1
self._advance(completed, njobs)
return njobs
def _started(self):
if self._meth:
self._meth.advance(self._sname)
def _add_jobs(self, n):
if self._meth:
self._meth.stages += n
def _advance(self, completed, njobs):
if self._meth:
self._meth.advance("{}: {:d}/{:d} jobs completed".format(self._rname, completed, njobs))
if self._cb:
self._cb(self._jid, completed, njobs)
def job_count(self):
"""Get count of jobs remaining in AWE.
"""
headers = {"Authorization": "OAuth {}".format(self._meth.token)}
url = self.URL + "/job/" + self._jid
r = requests.get(url, headers=headers)
response = json.loads(r.text)
remain_tasks = response.get("data", dict()).get("remaintasks")
return remain_tasks
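# Example usage (a sketch; the URL and job id are hypothetical, and `meth`
# must be a ServiceMethod carrying a valid `token`):
# AweJob.URL = 'https://kbase.us/services/awe-api'
# def on_stage(jid, completed, total):
#     print('{}: {:d}/{:d} stages done'.format(jid, completed, total))
# AweJob(meth).run('job-1234', stage_fun=on_stage, timeout=600)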
class WorkspaceException(Exception):
"""More friendly workspace exception messages.
"""
def __init__(self, command, params, err):
fmt_params = ", ".join(["{nm}='{val}'".format(nm=k, val=params[k]) for k in sorted(params.keys())])
oper = "{}({})".format(command, fmt_params)
msg = "Workspace.{o}: {e}".format(o=oper, e=err)
Exception.__init__(self, msg)
class Workspace2(WS2):
"""Simple wrapper for KBase workspace-2 service.
"""
    all_digits = re.compile(r'\d+')
#: Encoding to use for output strings
encoding = 'utf-8'
def __init__(self, url=None, token=None, user_id=None, password=None, wsid=None,
create=False, **create_kw):
"""Constructor.
:param url: URL of remote WS service
:type url: str
:param token: Authorization token, overrides 'user_id'/'password'
:type token: str
:param user_id: User ID for authentication (overridden by 'token')
:type user_id: str
        :param password: Password for authentication (overridden by 'token')
        :type password: str
:param wsid: Workspace ID or name
:type wsid: str
:param create_kw: Any extra keywords to create a new workspace
:type create_kw: None or dict
:raise: ValueError if workspace id is empty, KeyError if there is no workspace by that name,
WorkspaceException if creation of the workspace fails.
"""
WS2.__init__(self, url=url, user_id=user_id, password=password, token=token)
self.has_wsid = False
if wsid is not None:
self._init_ws(wsid, create_kw)
self.has_wsid = True
def set_ws(self, wsid, create_kw):
"""Set workspace.
:param wsid: Workspace ID or name
:type wsid: str
:param create_kw: Any extra keywords to create a new workspace
:type create_kw: None or dict
:return: None
"""
self._init_ws(wsid, create_kw)
self.has_wsid = True
def _init_ws(self, wsid, create_kw):
"""Set up workspace.
"""
if self.is_name(wsid):
self._ws_name, self._ws_id = wsid, None
else:
self._ws_name, self._ws_id = None, wsid
self._wsi = {'workspace': self._ws_name, 'id': self._ws_id}
# No workspace; create it or error.
if self._get_ws() is None:
if create_kw is None:
raise KeyError("No such workspace: '{}'".format(wsid))
if not self.is_name(wsid):
raise ValueError("Create new workspace: Workspace identifier cannot be an ID")
create_kw['workspace'] = wsid
self._create_ws(create_kw)
def _get_ws(self):
"""Get existing workspace, or None
"""
try:
result = self.get_workspace_info(self._wsi)
except (ServerError, URLError) as err:
if "No workspace" in str(err):
return None
raise WorkspaceException("get_workspace_info", self._wsi, err)
return result
@property
def workspace(self):
return self._wsi
def _create_ws(self, create_params):
"""Create new workspace, or raise WorkspaceException.
"""
try:
self.create_workspace(create_params)
except (ServerError, URLError) as err:
raise WorkspaceException("create_workspace", create_params, err)
def is_name(self, id_):
"""Is this an object ID or name?
:return: True for name, False for numeric ID
:rtype: bool
:raise: ValueError if it isn't a string, or it is length 0,
or if it starts with a digit but has non-digits.
"""
if not id_:
raise ValueError("Empty value")
try:
            # If it does not start with a digit, it is a name
            if not id_[0].isdigit():
                return True
        except (IndexError, TypeError):
            raise ValueError("Cannot get first letter")
# Make sure it is *all* digits
if not self.all_digits.match(id_).span() == (0, len(id_)):
raise ValueError("Starts with a digit, but not all digits")
return False
def get(self, objid, instance=None, data_only=True):
"""Get an object in the workspace.
If there are multiple objects, the first one is returned.
Returns just the object data, not all the associated info.
:param objid: Object name or ID
:type objid: str
:param instance: Instance (version) identifier, None for 'latest'
:type instance: str or None
:param data_only: Return the values for the 'data' key (True) or the whole result dict (False)
:type data_only: bool
:return: whatever the JSON for the object data parsed to, probably a dict,
or None if there are no objects by that name or ID, or workspace is not set.
:raise: WorkspaceException, if command fails
"""
if not self.has_wsid:
#_log.error("No workspace set")
return None
params = self._make_oid_params(objid, ver=instance)
try:
for result in self.get_objects(params):
return result['data'] if data_only else result
except (URLError, ServerError) as err:
if "No object with" in str(err):
return None
raise WorkspaceException("get_objects", {'object_ids': params}, err)
def _make_oid_params(self, objid, ver=None):
"""Build params for an 'object_identity'.
"""
# Set object ID/name.
if self.is_name(objid):
obj_id, obj_name = None, objid
else:
obj_id, obj_name = objid, None
        # Fill in and return parameter values.
#return {'object_ids':
return [{
# note, one of these will always be None
'workspace': self._ws_name,
'wsid': self._ws_id,
'objid': obj_id,
'name': obj_name,
'ver': ver
}]
def types(self, module=None, strip_version=True, info_keys=None):
"""Get a list of all types in a given module, or all modules.
:param module: Module (namespace) to operate in
:type module: str or None
:param strip_version: If True (the default), strip "-x.y" version off end of type name
:type strip_version: bool
:param info_keys: If None, return type name only. Otherwise return dict (see return type).
:type info_keys: None or list of str
:return: Type names, or details, depending on info_keys
        :rtype: For all modules, a dict keyed by module name with a list of type-name
                strings for each; if info_keys is non-empty, a dict
                {<type_name>: {key: value, ..}} per type instead of the list.
                For a single module, just the value for that module.
"""
modules = [module] if module else self.list_modules({})
result = {}
for m in modules:
try:
modinfo = self.get_module_info({'mod': m})
if info_keys is None:
# get list of types, stripping redundant module prefix
types = [t[t.find('.') + 1:] for t in modinfo['types'].iterkeys()]
# optionally strip trailing version
if strip_version:
types = [t.split("-", 1)[0] for t in types]
# encode in UTF-8
types = [s.encode(self.encoding) for s in types]
# map to module in result
else:
types = {}
for t, raw_meta in modinfo['types'].iteritems():
name = t[t.find('.') + 1:]
if strip_version:
name = name.split("-", 1)[0]
meta = json.loads(raw_meta)
types[name] = {k: meta[k] for k in info_keys}
result[m.encode(self.encoding)] = types
except ServerError as err:
if "Module wasn't uploaded" in str(err):
pass
#_log.warn("list_types: module '{}' not uploaded".format(m))
else:
#_log.error("list_types: server.error={}".format(err))
pass
continue
return result.values()[0] if module else result
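# Example usage (a sketch; the URL, token and names are hypothetical):
# ws = Workspace2(url='https://kbase.us/services/ws', token=token,
#                 wsid='my_workspace')
# data = ws.get('MyObject')                # object data dict, or None if absent
# names = ws.types(module='KBaseGenomes')  # list of type names for one module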
class BuildDocumentation(Command):
"""Setuptools command hook to build Sphinx docs
"""
description = "build Sphinx documentation"
user_options = []
def initialize_options(self):
self.doc_dir = "biokbase-doc"
def finalize_options(self):
pass
def run(self):
filedir = os.path.dirname(os.path.realpath(__file__))
p = filedir.find("/biokbase/")
top = filedir[:p + 1]
doc = top + self.doc_dir
os.chdir(doc)
os.system("make html")
|
scottpurdy/nupic
|
refs/heads/master
|
examples/opf/experiments/spatial_classification/category_1/description.py
|
10
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config = \
{
'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
'../datasets/category_1.csv'),
'errorMetric': 'avg_err',
'modelParams': {
'sensorParams': { 'verbosity': 0},
'clParams': {
'verbosity': 0,
},
}
}
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
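# The importBaseDescription() call above merges `config` into the base
# experiment; a hypothetical variant could override further keys the same way:
# config['modelParams']['sensorParams']['verbosity'] = 1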
|
sojo21/ToughRADIUS
|
refs/heads/master
|
toughradius/tools/__init__.py
|
12133432
| |
hpcloud-mon/tempest
|
refs/heads/master
|
tempest/stress/actions/__init__.py
|
12133432
| |
bdelliott/wordgame
|
refs/heads/master
|
web/django/conf/locale/id/__init__.py
|
12133432
| |
zzzeek/sqlalchemy
|
refs/heads/master
|
lib/sqlalchemy/ext/instrumentation.py
|
3
|
"""Extensible class instrumentation.
The :mod:`sqlalchemy.ext.instrumentation` package provides for alternate
systems of class instrumentation within the ORM. Class instrumentation
refers to how the ORM places attributes on the class which maintain
data and track changes to that data, as well as event hooks installed
on the class.
.. note::
The extension package is provided for the benefit of integration
with other object management packages, which already perform
their own instrumentation. It is not intended for general use.
For examples of how the instrumentation extension is used,
see the example :ref:`examples_instrumentation`.
"""
import weakref
from .. import util
from ..orm import attributes
from ..orm import base as orm_base
from ..orm import collections
from ..orm import exc as orm_exc
from ..orm import instrumentation as orm_instrumentation
from ..orm.instrumentation import _default_dict_getter
from ..orm.instrumentation import _default_manager_getter
from ..orm.instrumentation import _default_state_getter
from ..orm.instrumentation import ClassManager
from ..orm.instrumentation import InstrumentationFactory
INSTRUMENTATION_MANAGER = "__sa_instrumentation_manager__"
"""Attribute, elects custom instrumentation when present on a mapped class.
Allows a class to specify a slightly or wildly different technique for
tracking changes made to mapped attributes and collections.
Only one instrumentation implementation is allowed in a given object
inheritance hierarchy.
The value of this attribute must be a callable and will be passed a class
object. The callable must return one of:
- An instance of an :class:`.InstrumentationManager` or subclass
- An object implementing all or some of InstrumentationManager (TODO)
- A dictionary of callables, implementing all or some of the above (TODO)
- An instance of a :class:`.ClassManager` or subclass
This attribute is consulted by SQLAlchemy instrumentation
resolution, once the :mod:`sqlalchemy.ext.instrumentation` module
has been imported. If custom finders are installed in the global
instrumentation_finders list, they may or may not choose to honor this
attribute.
"""
def find_native_user_instrumentation_hook(cls):
"""Find user-specified instrumentation management for a class."""
return getattr(cls, INSTRUMENTATION_MANAGER, None)
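# A minimal sketch of electing custom instrumentation via the attribute above
# (MyManager stands for any InstrumentationManager subclass, like the one
# defined later in this module):
# class MyEntity(object):
#     __sa_instrumentation_manager__ = MyManager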
instrumentation_finders = [find_native_user_instrumentation_hook]
"""An extensible sequence of callables which return instrumentation
implementations.
When a class is registered, each callable will be passed a class object.
If None is returned, the
next finder in the sequence is consulted. Otherwise the return must be an
instrumentation factory that follows the same guidelines as
sqlalchemy.ext.instrumentation.INSTRUMENTATION_MANAGER.
By default, the only finder is find_native_user_instrumentation_hook, which
searches for INSTRUMENTATION_MANAGER. If all finders return None, standard
ClassManager instrumentation is used.
"""
class ExtendedInstrumentationRegistry(InstrumentationFactory):
"""Extends :class:`.InstrumentationFactory` with additional
bookkeeping, to accommodate multiple types of
class managers.
"""
_manager_finders = weakref.WeakKeyDictionary()
_state_finders = weakref.WeakKeyDictionary()
_dict_finders = weakref.WeakKeyDictionary()
_extended = False
def _locate_extended_factory(self, class_):
for finder in instrumentation_finders:
factory = finder(class_)
if factory is not None:
manager = self._extended_class_manager(class_, factory)
return manager, factory
else:
return None, None
def _check_conflicts(self, class_, factory):
existing_factories = self._collect_management_factories_for(
class_
).difference([factory])
if existing_factories:
raise TypeError(
"multiple instrumentation implementations specified "
"in %s inheritance hierarchy: %r"
% (class_.__name__, list(existing_factories))
)
def _extended_class_manager(self, class_, factory):
manager = factory(class_)
if not isinstance(manager, ClassManager):
manager = _ClassInstrumentationAdapter(class_, manager)
if factory != ClassManager and not self._extended:
# somebody invoked a custom ClassManager.
# reinstall global "getter" functions with the more
# expensive ones.
self._extended = True
_install_instrumented_lookups()
self._manager_finders[class_] = manager.manager_getter()
self._state_finders[class_] = manager.state_getter()
self._dict_finders[class_] = manager.dict_getter()
return manager
def _collect_management_factories_for(self, cls):
"""Return a collection of factories in play or specified for a
hierarchy.
Traverses the entire inheritance graph of a cls and returns a
collection of instrumentation factories for those classes. Factories
are extracted from active ClassManagers, if available, otherwise
instrumentation_finders is consulted.
"""
hierarchy = util.class_hierarchy(cls)
factories = set()
for member in hierarchy:
manager = self.manager_of_class(member)
if manager is not None:
factories.add(manager.factory)
else:
for finder in instrumentation_finders:
factory = finder(member)
if factory is not None:
break
else:
factory = None
factories.add(factory)
factories.discard(None)
return factories
def unregister(self, class_):
super(ExtendedInstrumentationRegistry, self).unregister(class_)
if class_ in self._manager_finders:
del self._manager_finders[class_]
del self._state_finders[class_]
del self._dict_finders[class_]
def manager_of_class(self, cls):
if cls is None:
return None
try:
finder = self._manager_finders.get(cls, _default_manager_getter)
except TypeError:
# due to weakref lookup on invalid object
return None
else:
return finder(cls)
def state_of(self, instance):
if instance is None:
raise AttributeError("None has no persistent state.")
return self._state_finders.get(
instance.__class__, _default_state_getter
)(instance)
def dict_of(self, instance):
if instance is None:
raise AttributeError("None has no persistent state.")
return self._dict_finders.get(
instance.__class__, _default_dict_getter
)(instance)
orm_instrumentation._instrumentation_factory = (
_instrumentation_factory
) = ExtendedInstrumentationRegistry()
orm_instrumentation.instrumentation_finders = instrumentation_finders
class InstrumentationManager(object):
"""User-defined class instrumentation extension.
:class:`.InstrumentationManager` can be subclassed in order
to change
how class instrumentation proceeds. This class exists for
the purposes of integration with other object management
frameworks which would like to entirely modify the
instrumentation methodology of the ORM, and is not intended
for regular usage. For interception of class instrumentation
events, see :class:`.InstrumentationEvents`.
The API for this class should be considered as semi-stable,
and may change slightly with new releases.
"""
# r4361 added a mandatory (cls) constructor to this interface.
# given that, perhaps class_ should be dropped from all of these
# signatures.
def __init__(self, class_):
pass
def manage(self, class_, manager):
setattr(class_, "_default_class_manager", manager)
def unregister(self, class_, manager):
delattr(class_, "_default_class_manager")
def manager_getter(self, class_):
def get(cls):
return cls._default_class_manager
return get
def instrument_attribute(self, class_, key, inst):
pass
def post_configure_attribute(self, class_, key, inst):
pass
def install_descriptor(self, class_, key, inst):
setattr(class_, key, inst)
def uninstall_descriptor(self, class_, key):
delattr(class_, key)
def install_member(self, class_, key, implementation):
setattr(class_, key, implementation)
def uninstall_member(self, class_, key):
delattr(class_, key)
def instrument_collection_class(self, class_, key, collection_class):
return collections.prepare_instrumentation(collection_class)
def get_instance_dict(self, class_, instance):
return instance.__dict__
def initialize_instance_dict(self, class_, instance):
pass
def install_state(self, class_, instance, state):
setattr(instance, "_default_state", state)
def remove_state(self, class_, instance):
delattr(instance, "_default_state")
def state_getter(self, class_):
return lambda instance: getattr(instance, "_default_state")
def dict_getter(self, class_):
return lambda inst: self.get_instance_dict(class_, inst)
class _ClassInstrumentationAdapter(ClassManager):
"""Adapts a user-defined InstrumentationManager to a ClassManager."""
def __init__(self, class_, override):
self._adapted = override
self._get_state = self._adapted.state_getter(class_)
self._get_dict = self._adapted.dict_getter(class_)
ClassManager.__init__(self, class_)
def manage(self):
self._adapted.manage(self.class_, self)
def unregister(self):
self._adapted.unregister(self.class_, self)
def manager_getter(self):
return self._adapted.manager_getter(self.class_)
def instrument_attribute(self, key, inst, propagated=False):
ClassManager.instrument_attribute(self, key, inst, propagated)
if not propagated:
self._adapted.instrument_attribute(self.class_, key, inst)
def post_configure_attribute(self, key):
super(_ClassInstrumentationAdapter, self).post_configure_attribute(key)
self._adapted.post_configure_attribute(self.class_, key, self[key])
def install_descriptor(self, key, inst):
self._adapted.install_descriptor(self.class_, key, inst)
def uninstall_descriptor(self, key):
self._adapted.uninstall_descriptor(self.class_, key)
def install_member(self, key, implementation):
self._adapted.install_member(self.class_, key, implementation)
def uninstall_member(self, key):
self._adapted.uninstall_member(self.class_, key)
def instrument_collection_class(self, key, collection_class):
return self._adapted.instrument_collection_class(
self.class_, key, collection_class
)
def initialize_collection(self, key, state, factory):
delegate = getattr(self._adapted, "initialize_collection", None)
if delegate:
return delegate(key, state, factory)
else:
return ClassManager.initialize_collection(
self, key, state, factory
)
def new_instance(self, state=None):
instance = self.class_.__new__(self.class_)
self.setup_instance(instance, state)
return instance
def _new_state_if_none(self, instance):
"""Install a default InstanceState if none is present.
A private convenience method used by the __init__ decorator.
"""
if self.has_state(instance):
return False
else:
return self.setup_instance(instance)
def setup_instance(self, instance, state=None):
self._adapted.initialize_instance_dict(self.class_, instance)
if state is None:
state = self._state_constructor(instance, self)
# the given instance is assumed to have no state
self._adapted.install_state(self.class_, instance, state)
return state
def teardown_instance(self, instance):
self._adapted.remove_state(self.class_, instance)
def has_state(self, instance):
try:
self._get_state(instance)
except orm_exc.NO_STATE:
return False
else:
return True
def state_getter(self):
return self._get_state
def dict_getter(self):
return self._get_dict
def _install_instrumented_lookups():
"""Replace global class/object management functions
with ExtendedInstrumentationRegistry implementations, which
allow multiple types of class managers to be present,
at the cost of performance.
This function is called only by ExtendedInstrumentationRegistry
and unit tests specific to this behavior.
The _reinstall_default_lookups() function can be called
after this one to re-establish the default functions.
"""
_install_lookups(
dict(
instance_state=_instrumentation_factory.state_of,
instance_dict=_instrumentation_factory.dict_of,
manager_of_class=_instrumentation_factory.manager_of_class,
)
)
def _reinstall_default_lookups():
"""Restore simplified lookups."""
_install_lookups(
dict(
instance_state=_default_state_getter,
instance_dict=_default_dict_getter,
manager_of_class=_default_manager_getter,
)
)
_instrumentation_factory._extended = False
def _install_lookups(lookups):
global instance_state, instance_dict, manager_of_class
instance_state = lookups["instance_state"]
instance_dict = lookups["instance_dict"]
manager_of_class = lookups["manager_of_class"]
orm_base.instance_state = (
attributes.instance_state
) = orm_instrumentation.instance_state = instance_state
orm_base.instance_dict = (
attributes.instance_dict
) = orm_instrumentation.instance_dict = instance_dict
orm_base.manager_of_class = (
attributes.manager_of_class
) = orm_instrumentation.manager_of_class = manager_of_class
|
Maximilian-Reuter/SickRage
|
refs/heads/master
|
lib/tornado/test/resolve_test_helper.py
|
188
|
from __future__ import absolute_import, division, print_function, with_statement
from tornado.ioloop import IOLoop
from tornado.netutil import ThreadedResolver
from tornado.util import u
# When this module is imported, it runs getaddrinfo on a thread. Since
# the hostname is unicode, getaddrinfo attempts to import encodings.idna
# but blocks on the import lock. Verify that ThreadedResolver avoids
# this deadlock.
resolver = ThreadedResolver()
IOLoop.current().run_sync(lambda: resolver.resolve(u('localhost'), 80))
|
eahneahn/free
|
refs/heads/master
|
lib/python2.7/site-packages/django/contrib/formtools/wizard/views.py
|
30
|
import re
from django import forms
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from django.forms import formsets, ValidationError
from django.views.generic import TemplateView
from django.utils.datastructures import SortedDict
from django.utils.decorators import classonlymethod
from django.utils.translation import ugettext as _
from django.utils import six
from django.contrib.formtools.wizard.storage import get_storage
from django.contrib.formtools.wizard.storage.exceptions import NoFileStorageConfigured
from django.contrib.formtools.wizard.forms import ManagementForm
def normalize_name(name):
"""
    Converts camel-case style names into underscore-separated words. Example::
>>> normalize_name('oneTwoThree')
'one_two_three'
>>> normalize_name('FourFiveSix')
'four_five_six'
"""
new = re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\1', name)
return new.lower().strip('_')
class StepsHelper(object):
def __init__(self, wizard):
self._wizard = wizard
def __dir__(self):
return self.all
def __len__(self):
return self.count
def __repr__(self):
return '<StepsHelper for %s (steps: %s)>' % (self._wizard, self.all)
@property
def all(self):
"Returns the names of all steps/forms."
return list(self._wizard.get_form_list())
@property
def count(self):
"Returns the total number of steps/forms in this the wizard."
return len(self.all)
@property
def current(self):
"""
Returns the current step. If no current step is stored in the
storage backend, the first step will be returned.
"""
return self._wizard.storage.current_step or self.first
@property
def first(self):
"Returns the name of the first step."
return self.all[0]
@property
def last(self):
"Returns the name of the last step."
return self.all[-1]
@property
def next(self):
"Returns the next step."
return self._wizard.get_next_step()
@property
def prev(self):
"Returns the previous step."
return self._wizard.get_prev_step()
@property
def index(self):
"Returns the index for the current step."
return self._wizard.get_step_index()
@property
def step0(self):
return int(self.index)
@property
def step1(self):
return int(self.index) + 1
class WizardView(TemplateView):
"""
The WizardView is used to create multi-page forms and handles all the
storage and validation stuff. The wizard is based on Django's generic
class based views.
"""
storage_name = None
form_list = None
initial_dict = None
instance_dict = None
condition_dict = None
template_name = 'formtools/wizard/wizard_form.html'
def __repr__(self):
return '<%s: forms: %s>' % (self.__class__.__name__, self.form_list)
@classonlymethod
def as_view(cls, *args, **kwargs):
"""
This method is used within urls.py to create unique wizardview
instances for every request. We need to override this method because
we add some kwargs which are needed to make the wizardview usable.
"""
initkwargs = cls.get_initkwargs(*args, **kwargs)
return super(WizardView, cls).as_view(**initkwargs)
@classmethod
def get_initkwargs(cls, form_list=None, initial_dict=None,
instance_dict=None, condition_dict=None, *args, **kwargs):
"""
Creates a dict with all needed parameters for the form wizard instances.
* `form_list` - is a list of forms. The list entries can be single form
classes or tuples of (`step_name`, `form_class`). If you pass a list
of forms, the wizardview will convert the class list to
(`zero_based_counter`, `form_class`). This is needed to access the
form for a specific step.
* `initial_dict` - contains a dictionary of initial data dictionaries.
The key should be equal to the `step_name` in the `form_list` (or
          the str of the zero-based counter, if no step names were added in the
`form_list`)
* `instance_dict` - contains a dictionary whose values are model
instances if the step is based on a ``ModelForm`` and querysets if
the step is based on a ``ModelFormSet``. The key should be equal to
the `step_name` in the `form_list`. Same rules as for `initial_dict`
apply.
* `condition_dict` - contains a dictionary of boolean values or
          callables. If the value for a specific `step_name` is callable, it
will be called with the wizardview instance as the only argument.
If the return value is true, the step's form will be used.
"""
kwargs.update({
'initial_dict': initial_dict or kwargs.pop('initial_dict',
getattr(cls, 'initial_dict', None)) or {},
'instance_dict': instance_dict or kwargs.pop('instance_dict',
getattr(cls, 'instance_dict', None)) or {},
'condition_dict': condition_dict or kwargs.pop('condition_dict',
getattr(cls, 'condition_dict', None)) or {}
})
form_list = form_list or kwargs.pop('form_list',
getattr(cls, 'form_list', None)) or []
computed_form_list = SortedDict()
assert len(form_list) > 0, 'at least one form is needed'
# walk through the passed form list
for i, form in enumerate(form_list):
if isinstance(form, (list, tuple)):
                # if the element is a tuple, add the tuple to the newly
                # created sorted dictionary.
computed_form_list[six.text_type(form[0])] = form[1]
else:
# if not, add the form with a zero based counter as unicode
computed_form_list[six.text_type(i)] = form
        # walk through the newly created list of forms
for form in six.itervalues(computed_form_list):
if issubclass(form, formsets.BaseFormSet):
# if the element is based on BaseFormSet (FormSet/ModelFormSet)
# we need to override the form variable.
form = form.form
# check if any form contains a FileField, if yes, we need a
# file_storage added to the wizardview (by subclassing).
for field in six.itervalues(form.base_fields):
if (isinstance(field, forms.FileField) and
not hasattr(cls, 'file_storage')):
raise NoFileStorageConfigured(
"You need to define 'file_storage' in your "
"wizard view in order to handle file uploads.")
# build the kwargs for the wizardview instances
kwargs['form_list'] = computed_form_list
return kwargs
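    # Example wiring in a urls.py (a sketch; the form classes are hypothetical):
    # url(r'^contact/$', ContactWizard.as_view([ContactForm1, ContactForm2]))
    # or with named steps:
    # ContactWizard.as_view([('contact', ContactForm1), ('confirm', ContactForm2)])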
def get_prefix(self, *args, **kwargs):
# TODO: Add some kind of unique id to prefix
return normalize_name(self.__class__.__name__)
def get_form_list(self):
"""
This method returns a form_list based on the initial form list but
checks if there is a condition method/value in the condition_list.
If an entry exists in the condition list, it will call/read the value
and respect the result. (True means add the form, False means ignore
the form)
        The form_list is always generated on the fly because condition methods
        could use data from other (maybe previous) forms.
"""
form_list = SortedDict()
for form_key, form_class in six.iteritems(self.form_list):
# try to fetch the value from condition list, by default, the form
# gets passed to the new list.
condition = self.condition_dict.get(form_key, True)
if callable(condition):
# call the value if needed, passes the current instance.
condition = condition(self)
if condition:
form_list[form_key] = form_class
return form_list
def dispatch(self, request, *args, **kwargs):
"""
This method gets called by the routing engine. The first argument is
`request` which contains a `HttpRequest` instance.
The request is stored in `self.request` for later use. The storage
instance is stored in `self.storage`.
After processing the request using the `dispatch` method, the
response gets updated by the storage engine (for example add cookies).
"""
# add the storage engine to the current wizardview instance
self.prefix = self.get_prefix(*args, **kwargs)
self.storage = get_storage(self.storage_name, self.prefix, request,
getattr(self, 'file_storage', None))
self.steps = StepsHelper(self)
response = super(WizardView, self).dispatch(request, *args, **kwargs)
# update the response (e.g. adding cookies)
self.storage.update_response(response)
return response
def get(self, request, *args, **kwargs):
"""
This method handles GET requests.
If a GET request reaches this point, the wizard assumes that the user
just starts at the first step or wants to restart the process.
        The data of the wizard will be reset before rendering the first step.
"""
self.storage.reset()
# reset the current step to the first step.
self.storage.current_step = self.steps.first
return self.render(self.get_form())
def post(self, *args, **kwargs):
"""
This method handles POST requests.
        The wizard will render either the current step (if form validation
        wasn't successful), the next step (if the current step was stored
        successfully) or the done view (if no more steps are available).
"""
# Look for a wizard_goto_step element in the posted data which
# contains a valid step name. If one was found, render the requested
# form. (This makes stepping back a lot easier).
wizard_goto_step = self.request.POST.get('wizard_goto_step', None)
if wizard_goto_step and wizard_goto_step in self.get_form_list():
return self.render_goto_step(wizard_goto_step)
# Check if form was refreshed
management_form = ManagementForm(self.request.POST, prefix=self.prefix)
if not management_form.is_valid():
raise ValidationError(
                _('ManagementForm data is missing or has been tampered with.'),
code='missing_management_form',
)
form_current_step = management_form.cleaned_data['current_step']
if (form_current_step != self.steps.current and
self.storage.current_step is not None):
# form refreshed, change current step
self.storage.current_step = form_current_step
# get the form for the current step
form = self.get_form(data=self.request.POST, files=self.request.FILES)
# and try to validate
if form.is_valid():
# if the form is valid, store the cleaned data and files.
self.storage.set_step_data(self.steps.current, self.process_step(form))
self.storage.set_step_files(self.steps.current, self.process_step_files(form))
# check if the current step is the last step
if self.steps.current == self.steps.last:
# no more steps, render done view
return self.render_done(form, **kwargs)
else:
# proceed to the next step
return self.render_next_step(form)
return self.render(form)
def render_next_step(self, form, **kwargs):
"""
This method gets called when the next step/form should be rendered.
`form` contains the last/current form.
"""
# get the form instance based on the data from the storage backend
# (if available).
next_step = self.steps.next
new_form = self.get_form(next_step,
data=self.storage.get_step_data(next_step),
files=self.storage.get_step_files(next_step))
# change the stored current step
self.storage.current_step = next_step
return self.render(new_form, **kwargs)
def render_goto_step(self, goto_step, **kwargs):
"""
This method gets called when the current step has to be changed.
`goto_step` contains the requested step to go to.
"""
self.storage.current_step = goto_step
form = self.get_form(
data=self.storage.get_step_data(self.steps.current),
files=self.storage.get_step_files(self.steps.current))
return self.render(form)
def render_done(self, form, **kwargs):
"""
        This method gets called when all forms have passed. The method should
        also re-validate all steps to prevent manipulation. If any form doesn't
        validate, `render_revalidation_failure` should get called.
If everything is fine call `done`.
"""
final_form_list = []
# walk through the form list and try to validate the data again.
for form_key in self.get_form_list():
form_obj = self.get_form(step=form_key,
data=self.storage.get_step_data(form_key),
files=self.storage.get_step_files(form_key))
if not form_obj.is_valid():
return self.render_revalidation_failure(form_key, form_obj, **kwargs)
final_form_list.append(form_obj)
# render the done view and reset the wizard before returning the
# response. This is needed to prevent from rendering done with the
# same data twice.
done_response = self.done(final_form_list, **kwargs)
self.storage.reset()
return done_response
def get_form_prefix(self, step=None, form=None):
"""
Returns the prefix which will be used when calling the actual form for
the given step. `step` contains the step-name, `form` the form which
will be called with the returned prefix.
If no step is given, the form_prefix will determine the current step
automatically.
"""
if step is None:
step = self.steps.current
return str(step)
def get_form_initial(self, step):
"""
Returns a dictionary which will be passed to the form for `step`
        as `initial`. If no initial data was provided while initializing the
        form wizard, an empty dictionary will be returned.
"""
return self.initial_dict.get(step, {})
def get_form_instance(self, step):
"""
        Returns an object which will be passed to the form for `step`
        as `instance`. If no instance object was provided while initializing
        the form wizard, None will be returned.
"""
return self.instance_dict.get(step, None)
def get_form_kwargs(self, step=None):
"""
Returns the keyword arguments for instantiating the form
(or formset) on the given step.
"""
return {}
def get_form(self, step=None, data=None, files=None):
"""
Constructs the form for a given `step`. If no `step` is defined, the
current step will be determined automatically.
The form will be initialized using the `data` argument to prefill the
new form. If needed, instance or queryset (for `ModelForm` or
`ModelFormSet`) will be added too.
"""
if step is None:
step = self.steps.current
# prepare the kwargs for the form instance.
kwargs = self.get_form_kwargs(step)
kwargs.update({
'data': data,
'files': files,
'prefix': self.get_form_prefix(step, self.form_list[step]),
'initial': self.get_form_initial(step),
})
if issubclass(self.form_list[step], forms.ModelForm):
# If the form is based on ModelForm, add instance if available
# and not previously set.
kwargs.setdefault('instance', self.get_form_instance(step))
elif issubclass(self.form_list[step], forms.models.BaseModelFormSet):
# If the form is based on ModelFormSet, add queryset if available
            # and not previously set.
kwargs.setdefault('queryset', self.get_form_instance(step))
return self.form_list[step](**kwargs)
def process_step(self, form):
"""
This method is used to postprocess the form data. By default, it
returns the raw `form.data` dictionary.
"""
return self.get_form_step_data(form)
def process_step_files(self, form):
"""
This method is used to postprocess the form files. By default, it
returns the raw `form.files` dictionary.
"""
return self.get_form_step_files(form)
def render_revalidation_failure(self, step, form, **kwargs):
"""
Gets called when a form doesn't validate when rendering the done
        view. By default, it changes the current step to the failing form's
        step and renders the form.
"""
self.storage.current_step = step
return self.render(form, **kwargs)
def get_form_step_data(self, form):
"""
Is used to return the raw form data. You may use this method to
manipulate the data.
"""
return form.data
def get_form_step_files(self, form):
"""
Is used to return the raw form files. You may use this method to
manipulate the data.
"""
return form.files
def get_all_cleaned_data(self):
"""
Returns a merged dictionary of all step cleaned_data dictionaries.
If a step contains a `FormSet`, the key will be prefixed with
'formset-' and contain a list of the formset cleaned_data dictionaries.
"""
cleaned_data = {}
for form_key in self.get_form_list():
form_obj = self.get_form(
step=form_key,
data=self.storage.get_step_data(form_key),
files=self.storage.get_step_files(form_key)
)
if form_obj.is_valid():
if isinstance(form_obj.cleaned_data, (tuple, list)):
cleaned_data.update({
'formset-%s' % form_key: form_obj.cleaned_data
})
else:
cleaned_data.update(form_obj.cleaned_data)
return cleaned_data
def get_cleaned_data_for_step(self, step):
"""
Returns the cleaned data for a given `step`. Before returning the
cleaned data, the stored values are revalidated through the form.
If the data doesn't validate, None will be returned.
"""
if step in self.form_list:
form_obj = self.get_form(step=step,
data=self.storage.get_step_data(step),
files=self.storage.get_step_files(step))
if form_obj.is_valid():
return form_obj.cleaned_data
return None
def get_next_step(self, step=None):
"""
Returns the next step after the given `step`. If no more steps are
available, None will be returned. If the `step` argument is None, the
current step will be determined automatically.
"""
if step is None:
step = self.steps.current
form_list = self.get_form_list()
key = form_list.keyOrder.index(step) + 1
if len(form_list.keyOrder) > key:
return form_list.keyOrder[key]
return None
def get_prev_step(self, step=None):
"""
Returns the previous step before the given `step`. If there are no
steps available, None will be returned. If the `step` argument is
None, the current step will be determined automatically.
"""
if step is None:
step = self.steps.current
form_list = self.get_form_list()
key = form_list.keyOrder.index(step) - 1
if key >= 0:
return form_list.keyOrder[key]
return None
def get_step_index(self, step=None):
"""
Returns the index for the given `step` name. If no step is given,
the current step will be used to get the index.
"""
if step is None:
step = self.steps.current
return self.get_form_list().keyOrder.index(step)
def get_context_data(self, form, **kwargs):
"""
Returns the template context for a step. You can overwrite this method
to add more data for all or some steps. This method returns a
dictionary containing the rendered form step. Available template
context variables are:
* all extra data stored in the storage backend
* `form` - form instance of the current step
* `wizard` - the wizard instance itself
Example:
.. code-block:: python
class MyWizard(WizardView):
def get_context_data(self, form, **kwargs):
context = super(MyWizard, self).get_context_data(form=form, **kwargs)
if self.steps.current == 'my_step_name':
context.update({'another_var': True})
return context
"""
context = super(WizardView, self).get_context_data(form=form, **kwargs)
context.update(self.storage.extra_data)
context['wizard'] = {
'form': form,
'steps': self.steps,
'management_form': ManagementForm(prefix=self.prefix, initial={
'current_step': self.steps.current,
}),
}
return context
def render(self, form=None, **kwargs):
"""
Returns a ``HttpResponse`` containing all needed context data.
"""
form = form or self.get_form()
context = self.get_context_data(form=form, **kwargs)
return self.render_to_response(context)
def done(self, form_list, **kwargs):
"""
        This method must be overridden by a subclass to process the form data
        after all steps have been completed.
"""
raise NotImplementedError("Your %s class has not defined a done() "
"method, which is required." % self.__class__.__name__)
class SessionWizardView(WizardView):
"""
A WizardView with pre-configured SessionStorage backend.
"""
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
class CookieWizardView(WizardView):
"""
A WizardView with pre-configured CookieStorage backend.
"""
storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
class NamedUrlWizardView(WizardView):
"""
A WizardView with URL named steps support.
"""
url_name = None
done_step_name = None
@classmethod
def get_initkwargs(cls, *args, **kwargs):
"""
We require a url_name to reverse URLs later. Additionally users can
pass a done_step_name to change the URL name of the "done" view.
"""
assert 'url_name' in kwargs, 'URL name is needed to resolve correct wizard URLs'
extra_kwargs = {
'done_step_name': kwargs.pop('done_step_name', 'done'),
'url_name': kwargs.pop('url_name'),
}
initkwargs = super(NamedUrlWizardView, cls).get_initkwargs(*args, **kwargs)
initkwargs.update(extra_kwargs)
assert initkwargs['done_step_name'] not in initkwargs['form_list'], \
'step name "%s" is reserved for "done" view' % initkwargs['done_step_name']
return initkwargs
def get_step_url(self, step):
return reverse(self.url_name, kwargs={'step': step})
def get(self, *args, **kwargs):
"""
        This renders the form or, if needed, performs the HTTP redirects.
"""
step_url = kwargs.get('step', None)
if step_url is None:
if 'reset' in self.request.GET:
self.storage.reset()
self.storage.current_step = self.steps.first
if self.request.GET:
query_string = "?%s" % self.request.GET.urlencode()
else:
query_string = ""
return redirect(self.get_step_url(self.steps.current)
+ query_string)
# is the current step the "done" name/view?
elif step_url == self.done_step_name:
last_step = self.steps.last
return self.render_done(self.get_form(step=last_step,
data=self.storage.get_step_data(last_step),
files=self.storage.get_step_files(last_step)
), **kwargs)
        elif step_url == self.steps.current:
            # URL step name and storage step name are equal, render!
            return self.render(self.get_form(
                data=self.storage.current_step_data,
                files=self.storage.current_step_files,
            ), **kwargs)
        # is the url step name not equal to the step in the storage?
        # if yes, change the step in the storage (if the name exists)
        elif step_url in self.get_form_list():
self.storage.current_step = step_url
return self.render(self.get_form(
data=self.storage.current_step_data,
files=self.storage.current_step_files,
), **kwargs)
# invalid step name, reset to first and redirect.
else:
self.storage.current_step = self.steps.first
return redirect(self.get_step_url(self.steps.first))
def post(self, *args, **kwargs):
"""
        Do a redirect if the user presses the previous-step button. The rest
        of this is super'd from WizardView.
"""
wizard_goto_step = self.request.POST.get('wizard_goto_step', None)
if wizard_goto_step and wizard_goto_step in self.get_form_list():
return self.render_goto_step(wizard_goto_step)
return super(NamedUrlWizardView, self).post(*args, **kwargs)
def get_context_data(self, form, **kwargs):
"""
NamedUrlWizardView provides the url_name of this wizard in the context
dict `wizard`.
"""
context = super(NamedUrlWizardView, self).get_context_data(form=form, **kwargs)
context['wizard']['url_name'] = self.url_name
return context
def render_next_step(self, form, **kwargs):
"""
When using the NamedUrlWizardView, we have to redirect to update the
browser's URL to match the shown step.
"""
next_step = self.get_next_step()
self.storage.current_step = next_step
return redirect(self.get_step_url(next_step))
def render_goto_step(self, goto_step, **kwargs):
"""
This method gets called when the current step has to be changed.
`goto_step` contains the requested step to go to.
"""
self.storage.current_step = goto_step
return redirect(self.get_step_url(goto_step))
def render_revalidation_failure(self, failed_step, form, **kwargs):
"""
When a step fails, we have to redirect the user to the first failing
step.
"""
self.storage.current_step = failed_step
return redirect(self.get_step_url(failed_step))
def render_done(self, form, **kwargs):
"""
When rendering the done view, we have to redirect first (if the URL
name doesn't fit).
"""
if kwargs.get('step', None) != self.done_step_name:
return redirect(self.get_step_url(self.done_step_name))
return super(NamedUrlWizardView, self).render_done(form, **kwargs)
class NamedUrlSessionWizardView(NamedUrlWizardView):
"""
A NamedUrlWizardView with pre-configured SessionStorage backend.
"""
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
class NamedUrlCookieWizardView(NamedUrlWizardView):
"""
    A NamedUrlWizardView with pre-configured CookieStorage backend.
"""
storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
|
vmindru/ansible
|
refs/heads/devel
|
test/units/cli/test_playbook.py
|
29
|
# (c) 2016, Adrian Likins <alikins@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from units.mock.loader import DictDataLoader
from ansible import context
from ansible.inventory.manager import InventoryManager
from ansible.vars.manager import VariableManager
from ansible.cli.playbook import PlaybookCLI
class TestPlaybookCLI(unittest.TestCase):
def test_flush_cache(self):
cli = PlaybookCLI(args=["ansible-playbook", "--flush-cache", "foobar.yml"])
cli.parse()
self.assertTrue(context.CLIARGS['flush_cache'])
variable_manager = VariableManager()
fake_loader = DictDataLoader({'foobar.yml': ""})
inventory = InventoryManager(loader=fake_loader, sources='testhost,')
variable_manager.set_host_facts(inventory.get_host('testhost'), {'canary': True})
self.assertTrue('testhost' in variable_manager._fact_cache)
cli._flush_cache(inventory, variable_manager)
self.assertFalse('testhost' in variable_manager._fact_cache)
|
suutari-ai/shoop
|
refs/heads/master
|
shuup_tests/xtheme/test_theme_selection.py
|
3
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from contextlib import contextmanager
import pytest
from django.template import TemplateDoesNotExist
from shuup.apps.provides import get_provide_objects, override_provides
from shuup.xtheme import set_current_theme
from shuup.xtheme.models import ThemeSettings
from shuup.xtheme.testing import override_current_theme_class
from shuup_tests.xtheme.utils import get_jinja2_engine
@contextmanager
def noop():
yield
@pytest.mark.django_db
def test_theme_selection():
"""
Test that a theme with a `template_dir` actually affects template directory selection.
"""
with override_current_theme_class(), override_provides("xtheme", [
"shuup_tests.xtheme.utils:FauxTheme",
"shuup_tests.xtheme.utils:FauxTheme2",
"shuup_tests.xtheme.utils:H2G2Theme",
]):
ThemeSettings.objects.all().delete()
for theme in get_provide_objects("xtheme"):
set_current_theme(theme.identifier)
je = get_jinja2_engine()
wrapper = (noop() if theme.identifier == "h2g2" else pytest.raises(TemplateDoesNotExist))
with wrapper:
t = je.get_template("42.jinja")
content = t.render().strip()
assert "a slice of lemon wrapped around a large gold brick" in content.replace("\n", " ")
|
naousse/odoo
|
refs/heads/8.0
|
addons/account_bank_statement_extensions/report/__init__.py
|
415
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import bank_statement_balance_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ZLLab-Mooc/edx-platform
|
refs/heads/named-release/dogwood.rc
|
common/lib/xmodule/xmodule/contentstore/__init__.py
|
12133432
| |
Petrole/MaturePyRobots
|
refs/heads/master
|
WebPyRobot/backend/__init__.py
|
12133432
| |
rohinkumar/galsurveystudy
|
refs/heads/master
|
APTest/setupaptestdt.py
|
2
|
from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
setup(name="APzdtheta", ext_modules=cythonize('aptestmetricdt.pyx'),)
#ext_modules=[Extension("demo",sources=["demo.pyx"],libraries=["m"] # Unix-like specific)]
#setup( name = "Demos",ext_modules = cythonize(ext_modules))
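# Build the extension in place with, e.g.:
# python setupaptestdt.py build_ext --inplace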
|
larrybradley/astropy
|
refs/heads/remote-tests
|
astropy/timeseries/periodograms/lombscargle/implementations/tests/test_utils.py
|
5
|
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from astropy.timeseries.periodograms.lombscargle.implementations.utils import extirpolate, bitceil, trig_sum
@pytest.mark.parametrize('N', 2 ** np.arange(1, 12))
@pytest.mark.parametrize('offset', [-1, 0, 1])
def test_bitceil(N, offset):
assert_equal(bitceil(N + offset),
int(2 ** np.ceil(np.log2(N + offset))))
@pytest.fixture
def extirpolate_data():
rng = np.random.default_rng(0)
x = 100 * rng.random(50)
y = np.sin(x)
f = lambda x: np.sin(x / 10)
return x, y, f
@pytest.mark.parametrize('N', [100, None])
@pytest.mark.parametrize('M', [5])
def test_extirpolate(N, M, extirpolate_data):
x, y, f = extirpolate_data
y_hat = extirpolate(x, y, N, M)
x_hat = np.arange(len(y_hat))
assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat), rtol=1.5e-5)
@pytest.fixture
def extirpolate_int_data():
rng = np.random.default_rng(0)
x = 100 * rng.random(50)
x[:25] = x[:25].astype(int)
y = np.sin(x)
f = lambda x: np.sin(x / 10)
return x, y, f
@pytest.mark.parametrize('N', [100, None])
@pytest.mark.parametrize('M', [5])
def test_extirpolate_with_integers(N, M, extirpolate_int_data):
x, y, f = extirpolate_int_data
y_hat = extirpolate(x, y, N, M)
x_hat = np.arange(len(y_hat))
assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat), rtol=1.7e-5)
@pytest.fixture
def trig_sum_data():
rng = np.random.default_rng(0)
t = 10 * rng.random(50)
h = np.sin(t)
return t, h
@pytest.mark.parametrize('f0', [0, 1])
@pytest.mark.parametrize('adjust_t', [True, False])
@pytest.mark.parametrize('freq_factor', [1, 2])
@pytest.mark.parametrize('df', [0.1])
def test_trig_sum(f0, adjust_t, freq_factor, df, trig_sum_data):
t, h = trig_sum_data
tfit = t - t.min() if adjust_t else t
S1, C1 = trig_sum(tfit, h, df, N=1000, use_fft=True,
f0=f0, freq_factor=freq_factor, oversampling=10)
S2, C2 = trig_sum(tfit, h, df, N=1000, use_fft=False,
f0=f0, freq_factor=freq_factor, oversampling=10)
assert_allclose(S1, S2, atol=1E-2)
assert_allclose(C1, C2, atol=1E-2)
|
espenhgn/iCSD
|
refs/heads/master
|
test_icsd.py
|
1
|
# -*- coding: utf-8 -*-
'''icsd testing suite'''
import os
import numpy as np
import numpy.testing as nt
import quantities as pq
import scipy.integrate as si
from scipy.interpolate import interp1d
import icsd
import unittest
#patch quantities with the SI unit Siemens if it does not exist
for symbol, prefix, definition, u_symbol in zip(
['siemens', 'S', 'mS', 'uS', 'nS', 'pS'],
['', '', 'milli', 'micro', 'nano', 'pico'],
[pq.A/pq.V, pq.A/pq.V, 'S', 'mS', 'uS', 'nS'],
[None, None, None, None, u'µS', None]):
    if type(definition) is str:
        # derive this unit from the one defined on the previous iteration
        # (e.g. 'mS' becomes siemens/1000, 'uS' becomes mS/1000, and so on)
        definition = lastdefinition / 1000
if not hasattr(pq, symbol):
setattr(pq, symbol, pq.UnitQuantity(
prefix + 'siemens',
definition,
symbol=symbol,
u_symbol=u_symbol))
lastdefinition = definition
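# After the patch the derived units behave like any other quantity, e.g.:
# (5 * pq.mS).rescale(pq.S)   # -> 0.005 S (illustrative)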
def potential_of_plane(z_j, z_i=0.*pq.m,
C_i=1*pq.A/pq.m**2,
sigma=0.3*pq.S/pq.m):
'''
Return potential of infinite horizontal plane with constant
current source density at a vertical offset z_j.
Arguments
---------
z_j : float*pq.m
distance perpendicular to source layer
z_i : float*pq.m
z-position of source layer
C_i : float*pq.A/pq.m**2
current source density on circular disk in units of charge per area
sigma : float*pq.S/pq.m
conductivity of medium in units of S/m
Notes
-----
    The potential is referenced to 0 at the plane itself, since for an
    infinite plane it diverges at large distances
'''
try:
assert(z_j.units == z_i.units)
except AssertionError as ae:
print('units of z_j ({}) and z_i ({}) not equal'.format(z_j.units,
z_i.units))
raise ae
return -C_i/(2*sigma)*abs(z_j-z_i).simplified
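# Example (a sketch): potential 1 mm above a unit-CSD plane at z = 0:
# potential_of_plane(1E-3*pq.m, 0*pq.m, 1*pq.A/pq.m**2, 0.3*pq.S/pq.m)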
def potential_of_disk(z_j,
z_i=0.*pq.m,
C_i=1*pq.A/pq.m**2,
R_i=1E-3*pq.m,
sigma=0.3*pq.S/pq.m):
'''
Return potential of circular disk in horizontal plane with constant
current source density at a vertical offset z_j.
Arguments
---------
z_j : float*pq.m
distance perpendicular to center of disk
z_i : float*pq.m
z_j-position of source disk
C_i : float*pq.A/pq.m**2
current source density on circular disk in units of charge per area
R_i : float*pq.m
radius of disk source
sigma : float*pq.S/pq.m
conductivity of medium in units of S/m
'''
try:
assert(z_j.units == z_i.units == R_i.units)
except AssertionError as ae:
print('units of z_j ({}), z_i ({}) and R_i ({}) not equal'.format(
z_j.units, z_i.units, R_i.units))
raise ae
return C_i/(2*sigma)*(np.sqrt((z_j-z_i)**2 + R_i**2) - abs(z_j-z_i)).simplified
def potential_of_cylinder(z_j,
z_i=0.*pq.m,
C_i=1*pq.A/pq.m**3,
R_i=1E-3*pq.m,
h_i=0.1*pq.m,
sigma=0.3*pq.S/pq.m,
):
'''
Return potential of cylinder in horizontal plane with constant homogeneous
current source density at a vertical offset z_j.
Arguments
---------
z_j : float*pq.m
distance perpendicular to center of disk
z_i : float*pq.m
z-position of center of source cylinder
h_i : float*pq.m
thickness of cylinder
C_i : float*pq.A/pq.m**3
current source density on circular disk in units of charge per area
R_i : float*pq.m
radius of disk source
sigma : float*pq.S/pq.m
conductivity of medium in units of S/m
Notes
-----
    Sympy can't deal with eq. 11 in Pettersen et al. 2006, J. Neurosci.
    Methods, so we evaluate it numerically in this function.
Tested with
>>>from sympy import *
>>>C_i, z_i, h, z_j, z_j, sigma, R = symbols('C_i z_i h z z_j sigma R')
>>>C_i*integrate(1/(2*sigma)*(sqrt((z-z_j)**2 + R**2) - abs(z-z_j)), (z, z_i-h/2, z_i+h/2))
'''
try:
assert(z_j.units == z_i.units == R_i.units == h_i.units)
except AssertionError as ae:
print('units of z_j ({}), z_i ({}), R_i ({}) and h ({}) not equal'.format(
z_j.units, z_i.units, R_i.units, h_i.units))
raise ae
#speed up tests by stripping units
_sigma = float(sigma)
_R_i = float(R_i)
_z_i = float(z_i)
_z_j = float(z_j)
    #integrate the disk potential kernel over the cylinder height with quad;
    #quad returns the tuple (value, abserr)
    def integrand(z):
        return 1/(2*_sigma)*(np.sqrt((z-_z_j)**2 + _R_i**2) - abs(z-_z_j))
    phi_j, abserr = C_i*si.quad(integrand, z_i-h_i/2, z_i+h_i/2)
    #reattach the units stripped above: m**2/(S/m) combined with A/m**3
    #yields volts
    return (phi_j * z_i.units**2 / sigma.units)
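#Consistency check (illustrative): for a thin cylinder (h_i -> 0) the product
#C_i*h_i plays the role of the disk's surface density, so
#potential_of_cylinder should approach potential_of_disk with C_i*h_i:
#
# a = potential_of_cylinder(2E-3*pq.m, h_i=1E-6*pq.m)
# b = potential_of_disk(2E-3*pq.m, C_i=1E-6*pq.A/pq.m**2)
# np.testing.assert_allclose(float(a), float(b), rtol=1E-4)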
def get_lfp_of_planes(z_j=np.arange(21)*1E-4*pq.m,
z_i=np.array([8E-4, 10E-4, 12E-4])*pq.m,
C_i=np.array([-.5, 1., -.5])*pq.A/pq.m**2,
sigma=0.3*pq.S/pq.m,
plot=True):
'''
    Compute the LFP of spatially separated planes with a given current
    source density
'''
phi_j = np.zeros(z_j.size)*pq.V
for i, (zi, Ci) in enumerate(zip(z_i, C_i)):
for j, zj in enumerate(z_j):
phi_j[j] += potential_of_plane(zj, zi, Ci, sigma)
#test plot
if plot:
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(121)
ax = plt.gca()
ax.plot(np.zeros(z_j.size), z_j, 'r-o')
for i, C in enumerate(C_i):
ax.plot((0, C), (z_i[i], z_i[i]), 'r-o')
ax.set_ylim(z_j.min(), z_j.max())
ax.set_ylabel('z_j ({})'.format(z_j.units))
ax.set_xlabel('C_i ({})'.format(C_i.units))
ax.set_title('planar CSD')
plt.subplot(122)
ax = plt.gca()
ax.plot(phi_j, z_j, 'r-o')
ax.set_ylim(z_j.min(), z_j.max())
ax.set_xlabel('phi_j ({})'.format(phi_j.units))
ax.set_title('LFP')
return phi_j, C_i
def get_lfp_of_disks(z_j=np.arange(21)*1E-4*pq.m,
z_i=np.array([8E-4, 10E-4, 12E-4])*pq.m,
C_i=np.array([-.5, 1., -.5])*pq.A/pq.m**2,
R_i = np.array([1, 1, 1])*1E-3*pq.m,
sigma=0.3*pq.S/pq.m,
plot=True):
'''
    Compute the LFP of spatially separated disks with a given
    current source density
'''
phi_j = np.zeros(z_j.size)*pq.V
for i, (zi, Ci, Ri) in enumerate(zip(z_i, C_i, R_i)):
for j, zj in enumerate(z_j):
phi_j[j] += potential_of_disk(zj, zi, Ci, Ri, sigma)
#test plot
if plot:
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(121)
ax = plt.gca()
ax.plot(np.zeros(z_j.size), z_j, 'r-o')
for i, C in enumerate(C_i):
ax.plot((0, C), (z_i[i], z_i[i]), 'r-o')
ax.set_ylim(z_j.min(), z_j.max())
ax.set_ylabel('z_j ({})'.format(z_j.units))
ax.set_xlabel('C_i ({})'.format(C_i.units))
ax.set_title('disk CSD\nR={}'.format(R_i))
plt.subplot(122)
ax = plt.gca()
ax.plot(phi_j, z_j, 'r-o')
ax.set_ylim(z_j.min(), z_j.max())
ax.set_xlabel('phi_j ({})'.format(phi_j.units))
ax.set_title('LFP')
return phi_j, C_i
def get_lfp_of_cylinders(z_j=np.arange(21)*1E-4*pq.m,
z_i=np.array([8E-4, 10E-4, 12E-4])*pq.m,
C_i=np.array([-.5, 1., -.5])*pq.A/pq.m**3,
R_i = np.array([1, 1, 1])*1E-3*pq.m,
h_i=np.array([1, 1, 1])*1E-4*pq.m,
sigma=0.3*pq.S/pq.m,
plot=True):
'''
    Compute the LFP of spatially separated cylinders with a given
    current source density
'''
phi_j = np.zeros(z_j.size)*pq.V
for i, (zi, Ci, Ri, hi) in enumerate(zip(z_i, C_i, R_i, h_i)):
for j, zj in enumerate(z_j):
phi_j[j] += potential_of_cylinder(zj, zi, Ci, Ri, hi, sigma)
#test plot
if plot:
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(121)
ax = plt.gca()
ax.plot(np.zeros(z_j.size), z_j, 'r-o')
ax.barh(np.asarray(z_i-h_i/2),
np.asarray(C_i),
np.asarray(h_i), color='r')
ax.set_ylim(z_j.min(), z_j.max())
ax.set_ylabel('z_j ({})'.format(z_j.units))
ax.set_xlabel('C_i ({})'.format(C_i.units))
ax.set_title('cylinder CSD\nR={}'.format(R_i))
plt.subplot(122)
ax = plt.gca()
ax.plot(phi_j, z_j, 'r-o')
ax.set_ylim(z_j.min(), z_j.max())
ax.set_xlabel('phi_j ({})'.format(phi_j.units))
ax.set_title('LFP')
return phi_j, C_i
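#Illustrative end-to-end sketch (mirrors what the unit tests below do):
#build a ground-truth LFP and recover the CSD with one of the estimators:
#
# phi, csd_true = get_lfp_of_planes(plot=False)
# est = icsd.StandardCSD(lfp=phi, coord_electrode=np.arange(21)*1E-4*pq.m,
#                        sigma=0.3*pq.S/pq.m, f_type='gaussian',
#                        f_order=(3, 1))
# csd_est = est.get_csd()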
class TestICSD(unittest.TestCase):
'''
Set of test functions for each CSD estimation method comparing
estimate to LFPs calculated with known ground truth CSD
'''
def test_StandardCSD_00(self):
'''test using standard SI units'''
#set some parameters for ground truth csd and csd estimates.
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**2
#uniform conductivity
sigma = 0.3*pq.S/pq.m
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_planes(z_j, z_i, C_i, sigma, plot)
std_input = {
'lfp' : phi_j,
'coord_electrode' : z_j,
'sigma' : sigma,
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
std_csd = icsd.StandardCSD(**std_input)
csd = std_csd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_StandardCSD_01(self):
'''test using non-standard SI units 1'''
#set some parameters for ground truth csd and csd estimates.
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5])*1E3*pq.A/pq.m**2
#uniform conductivity
sigma = 0.3*pq.S/pq.m
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_planes(z_j, z_i, C_i, sigma, plot)
std_input = {
'lfp' : phi_j*1E3*pq.mV/pq.V,
'coord_electrode' : z_j,
'sigma' : sigma,
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
std_csd = icsd.StandardCSD(**std_input)
csd = std_csd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_StandardCSD_02(self):
'''test using non-standard SI units 2'''
#set some parameters for ground truth csd and csd estimates.
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**2
#uniform conductivity
sigma = 0.3*pq.S/pq.m
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_planes(z_j, z_i, C_i, sigma, plot)
std_input = {
'lfp' : phi_j,
'coord_electrode' : z_j*1E3*pq.mm/pq.m,
'sigma' : sigma,
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
std_csd = icsd.StandardCSD(**std_input)
csd = std_csd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_StandardCSD_03(self):
'''test using non-standard SI units 3'''
#set some parameters for ground truth csd and csd estimates.
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**2
#uniform conductivity
sigma = 0.3*pq.mS/pq.m
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_planes(z_j, z_i, C_i, sigma, plot)
std_input = {
'lfp' : phi_j,
'coord_electrode' : z_j,
'sigma' : sigma*1E3*pq.mS/pq.S,
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
std_csd = icsd.StandardCSD(**std_input)
csd = std_csd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_DeltaiCSD_00(self):
'''test using standard SI units'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**2
#source radius (delta, step)
R_i = np.ones(z_i.size)*1E-3*pq.m
#conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3*pq.S/pq.m
sigma_top = sigma
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_disks(z_j, z_i, C_i, R_i, sigma,
plot)
delta_input = {
'lfp' : phi_j,
'coord_electrode' : z_j,
'diam' : R_i.mean()*2, # source diameter
'sigma' : sigma, # extracellular conductivity
'sigma_top' : sigma_top, # conductivity on top of cortex
'f_type' : 'gaussian', # gaussian filter
'f_order' : (3, 1), # 3-point filter, sigma = 1.
}
delta_icsd = icsd.DeltaiCSD(**delta_input)
csd = delta_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_DeltaiCSD_01(self):
'''test using non-standard SI units 1'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**2
#source radius (delta, step)
R_i = np.ones(z_i.size)*1E-3*pq.m
#conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3*pq.S/pq.m
sigma_top = sigma
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_disks(z_j, z_i, C_i, R_i, sigma,
plot)
delta_input = {
'lfp' : phi_j*1E3*pq.mV/pq.V,
'coord_electrode' : z_j,
'diam' : R_i.mean()*2, # source diameter
'sigma' : sigma, # extracellular conductivity
'sigma_top' : sigma_top, # conductivity on top of cortex
'f_type' : 'gaussian', # gaussian filter
'f_order' : (3, 1), # 3-point filter, sigma = 1.
}
delta_icsd = icsd.DeltaiCSD(**delta_input)
csd = delta_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_DeltaiCSD_02(self):
'''test using non-standard SI units 2'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**2
#source radius (delta, step)
R_i = np.ones(z_i.size)*1E-3*pq.m
#conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3*pq.S/pq.m
sigma_top = sigma
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_disks(z_j, z_i, C_i, R_i, sigma,
plot)
delta_input = {
'lfp' : phi_j,
'coord_electrode' : z_j*1E3*pq.mm/pq.m,
'diam' : R_i.mean()*2*1E3*pq.mm/pq.m, # source diameter
'sigma' : sigma, # extracellular conductivity
'sigma_top' : sigma_top, # conductivity on top of cortex
'f_type' : 'gaussian', # gaussian filter
'f_order' : (3, 1), # 3-point filter, sigma = 1.
}
delta_icsd = icsd.DeltaiCSD(**delta_input)
csd = delta_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_DeltaiCSD_03(self):
'''test using non-standard SI units 3'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**2
#source radius (delta, step)
R_i = np.ones(z_i.size)*1E-3*pq.m
#conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3*pq.S/pq.m
sigma_top = sigma
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_disks(z_j, z_i, C_i, R_i, sigma,
plot)
delta_input = {
'lfp' : phi_j,
'coord_electrode' : z_j,
'diam' : R_i.mean()*2, # source diameter
'sigma' : sigma*1E3*pq.mS/pq.S, # extracellular conductivity
'sigma_top' : sigma_top*1E3*pq.mS/pq.S, # conductivity on top of cortex
'f_type' : 'gaussian', # gaussian filter
'f_order' : (3, 1), # 3-point filter, sigma = 1.
}
delta_icsd = icsd.DeltaiCSD(**delta_input)
csd = delta_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_DeltaiCSD_04(self):
        '''test non-continuous z_j array (one contact removed)'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**2
#source radius (delta, step)
R_i = np.ones(z_j.size)*1E-3*pq.m
#conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3*pq.S/pq.m
sigma_top = sigma
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_disks(z_j, z_i, C_i, R_i, sigma,
plot)
inds = np.delete(np.arange(21), 5)
delta_input = {
'lfp' : phi_j[inds],
'coord_electrode' : z_j[inds],
'diam' : R_i[inds]*2, # source diameter
'sigma' : sigma, # extracellular conductivity
'sigma_top' : sigma_top, # conductivity on top of cortex
'f_type' : 'gaussian', # gaussian filter
'f_order' : (3, 1), # 3-point filter, sigma = 1.
}
delta_icsd = icsd.DeltaiCSD(**delta_input)
csd = delta_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i[inds], csd)
    def test_StepiCSD_00(self):
'''test using standard SI units'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3
#source radius (delta, step)
R_i = np.ones(z_i.size)*1E-3*pq.m
#source height (cylinder)
h_i = np.ones(z_i.size)*1E-4*pq.m
#conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3*pq.S/pq.m
sigma_top = sigma
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i, C_i, R_i, h_i,
sigma, plot)
step_input = {
'lfp' : phi_j,
'coord_electrode' : z_j,
'diam' : R_i.mean()*2,
'sigma' : sigma,
'sigma_top' : sigma,
'h' : h_i,
'tol' : 1E-12, # Tolerance in numerical integration
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
step_icsd = icsd.StepiCSD(**step_input)
csd = step_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_StepiCSD_01(self):
'''test using non-standard SI units 1'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3
#source radius (delta, step)
R_i = np.ones(z_i.size)*1E-3*pq.m
#source height (cylinder)
h_i = np.ones(z_i.size)*1E-4*pq.m
#conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3*pq.S/pq.m
sigma_top = sigma
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i, C_i, R_i, h_i,
sigma, plot)
step_input = {
'lfp' : phi_j*1E3*pq.mV/pq.V,
'coord_electrode' : z_j,
'diam' : R_i.mean()*2,
'sigma' : sigma,
'sigma_top' : sigma,
'h' : h_i,
'tol' : 1E-12, # Tolerance in numerical integration
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
step_icsd = icsd.StepiCSD(**step_input)
csd = step_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_StepiCSD_02(self):
'''test using non-standard SI units 2'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3
#source radius (delta, step)
R_i = np.ones(z_i.size)*1E-3*pq.m
#source height (cylinder)
h_i = np.ones(z_i.size)*1E-4*pq.m
#conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3*pq.S/pq.m
sigma_top = sigma
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i, C_i, R_i, h_i,
sigma, plot)
step_input = {
'lfp' : phi_j,
'coord_electrode' : z_j*1E3*pq.mm/pq.m,
'diam' : R_i.mean()*2*1E3*pq.mm/pq.m,
'sigma' : sigma,
'sigma_top' : sigma,
'h' : h_i*1E3*pq.mm/pq.m,
'tol' : 1E-12, # Tolerance in numerical integration
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
step_icsd = icsd.StepiCSD(**step_input)
csd = step_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_StepiCSD_03(self):
'''test using non-standard SI units 3'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3
#source radius (delta, step)
R_i = np.ones(z_i.size)*1E-3*pq.m
#source height (cylinder)
h_i = np.ones(z_i.size)*1E-4*pq.m
#conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3*pq.S/pq.m
sigma_top = sigma
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i, C_i, R_i, h_i,
sigma, plot)
step_input = {
'lfp' : phi_j,
'coord_electrode' : z_j,
'diam' : R_i.mean()*2,
'sigma' : sigma*1E3*pq.mS/pq.S,
'sigma_top' : sigma*1E3*pq.mS/pq.S,
'h' : h_i,
'tol' : 1E-12, # Tolerance in numerical integration
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
step_icsd = icsd.StepiCSD(**step_input)
csd = step_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
    def test_StepiCSD_04(self):
        '''test non-continuous z_j array (one contact removed)'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3
#source radius (delta, step)
R_i = np.ones(z_i.size)*1E-3*pq.m
#source height (cylinder)
h_i = np.ones(z_i.size)*1E-4*pq.m
#conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3*pq.S/pq.m
sigma_top = sigma
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i, C_i, R_i, h_i,
sigma, plot)
inds = np.delete(np.arange(21), 5)
step_input = {
'lfp' : phi_j[inds],
'coord_electrode' : z_j[inds],
'diam' : R_i[inds]*2,
'sigma' : sigma,
'sigma_top' : sigma,
'h' : h_i[inds],
'tol' : 1E-12, # Tolerance in numerical integration
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
step_icsd = icsd.StepiCSD(**step_input)
csd = step_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i[inds], csd)
def test_SplineiCSD_00(self):
'''test using standard SI units'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3
#source radius (delta, step)
R_i = np.ones(z_i.size)*1E-3*pq.m
#source height (cylinder)
h_i = np.ones(z_i.size)*1E-4*pq.m
#conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3*pq.S/pq.m
sigma_top = sigma
        #construct interpolators; the spline method assumes the underlying
        #source pattern generating the LFP is a cubic spline interpolation
        #between contacts, so we generate CSD data under the same assumption
f_C = interp1d(z_i, C_i, kind='cubic')
f_R = interp1d(z_i, R_i)
num_steps = 201
z_i_i = np.linspace(float(z_i[0]), float(z_i[-1]), num_steps)*z_i.units
C_i_i = f_C(np.asarray(z_i_i))*C_i.units
R_i_i = f_R(z_i_i)*R_i.units
h_i_i = np.ones(z_i_i.size)*np.diff(z_i_i).min()
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i,
sigma, plot)
spline_input = {
'lfp' : phi_j,
'coord_electrode' : z_j,
'diam' : R_i*2,
'sigma' : sigma,
'sigma_top' : sigma,
'num_steps' : num_steps,
'tol' : 1E-12, # Tolerance in numerical integration
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
spline_icsd = icsd.SplineiCSD(**spline_input)
csd = spline_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd, decimal=3)
def test_SplineiCSD_01(self):
'''test using standard SI units, deep electrode coordinates'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
#contact point coordinates
z_j = np.arange(10, 31)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3
#source radius (delta, step)
R_i = np.ones(z_i.size)*1E-3*pq.m
#source height (cylinder)
h_i = np.ones(z_i.size)*1E-4*pq.m
#conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3*pq.S/pq.m
sigma_top = sigma
        #construct interpolators; the spline method assumes the underlying
        #source pattern generating the LFP is a cubic spline interpolation
        #between contacts, so we generate CSD data under the same assumption
f_C = interp1d(z_i, C_i, kind='cubic')
f_R = interp1d(z_i, R_i)
num_steps = 201
z_i_i = np.linspace(float(z_i[0]), float(z_i[-1]), num_steps)*z_i.units
C_i_i = f_C(np.asarray(z_i_i))*C_i.units
R_i_i = f_R(z_i_i)*R_i.units
h_i_i = np.ones(z_i_i.size)*np.diff(z_i_i).min()
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i,
sigma, plot)
spline_input = {
'lfp' : phi_j,
'coord_electrode' : z_j,
'diam' : R_i*2,
'sigma' : sigma,
'sigma_top' : sigma,
'num_steps' : num_steps,
'tol' : 1E-12, # Tolerance in numerical integration
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
spline_icsd = icsd.SplineiCSD(**spline_input)
csd = spline_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd, decimal=3)
def test_SplineiCSD_02(self):
        '''test using non-standard SI units 1 (LFP in mV)'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3
#source radius (delta, step)
R_i = np.ones(z_i.size)*1E-3*pq.m
#source height (cylinder)
h_i = np.ones(z_i.size)*1E-4*pq.m
#conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3*pq.S/pq.m
sigma_top = sigma
        #construct interpolators; the spline method assumes the underlying
        #source pattern generating the LFP is a cubic spline interpolation
        #between contacts, so we generate CSD data under the same assumption
f_C = interp1d(z_i, C_i, kind='cubic')
f_R = interp1d(z_i, R_i)
num_steps = 201
z_i_i = np.linspace(float(z_i[0]), float(z_i[-1]), num_steps)*z_i.units
C_i_i = f_C(np.asarray(z_i_i))*C_i.units
R_i_i = f_R(z_i_i)*R_i.units
h_i_i = np.ones(z_i_i.size)*np.diff(z_i_i).min()
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i,
sigma, plot)
spline_input = {
'lfp' : phi_j*1E3*pq.mV/pq.V,
'coord_electrode' : z_j,
'diam' : R_i*2,
'sigma' : sigma,
'sigma_top' : sigma,
'num_steps' : num_steps,
'tol' : 1E-12, # Tolerance in numerical integration
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
spline_icsd = icsd.SplineiCSD(**spline_input)
csd = spline_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd, decimal=3)
def test_SplineiCSD_03(self):
        '''test using non-standard SI units 2 (spatial scale in mm)'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3
#source radius (delta, step)
R_i = np.ones(z_i.size)*1E-3*pq.m
#source height (cylinder)
h_i = np.ones(z_i.size)*1E-4*pq.m
#conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3*pq.S/pq.m
sigma_top = sigma
        #construct interpolators; the spline method assumes the underlying
        #source pattern generating the LFP is a cubic spline interpolation
        #between contacts, so we generate CSD data under the same assumption
f_C = interp1d(z_i, C_i, kind='cubic')
f_R = interp1d(z_i, R_i)
num_steps = 201
z_i_i = np.linspace(float(z_i[0]), float(z_i[-1]), num_steps)*z_i.units
C_i_i = f_C(np.asarray(z_i_i))*C_i.units
R_i_i = f_R(z_i_i)*R_i.units
h_i_i = np.ones(z_i_i.size)*np.diff(z_i_i).min()
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i,
sigma, plot)
spline_input = {
'lfp' : phi_j,
'coord_electrode' : z_j*1E3*pq.mm/pq.m,
'diam' : R_i*2*1E3*pq.mm/pq.m,
'sigma' : sigma,
'sigma_top' : sigma,
'num_steps' : num_steps,
'tol' : 1E-12, # Tolerance in numerical integration
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
spline_icsd = icsd.SplineiCSD(**spline_input)
csd = spline_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd, decimal=3)
def test_SplineiCSD_04(self):
        '''test using non-standard SI units 3 (conductivity in mS/m)'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
#contact point coordinates
z_j = np.arange(21)*1E-4*pq.m
#source coordinates
z_i = z_j
#current source density magnitude
C_i = np.zeros(z_i.size)*pq.A/pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3
#source radius (delta, step)
R_i = np.ones(z_i.size)*1E-3*pq.m
#source height (cylinder)
h_i = np.ones(z_i.size)*1E-4*pq.m
#conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3*pq.S/pq.m
sigma_top = sigma
        #construct interpolators; the spline method assumes the underlying
        #source pattern generating the LFP is a cubic spline interpolation
        #between contacts, so we generate CSD data under the same assumption
f_C = interp1d(z_i, C_i, kind='cubic')
f_R = interp1d(z_i, R_i)
num_steps = 201
z_i_i = np.linspace(float(z_i[0]), float(z_i[-1]), num_steps)*z_i.units
C_i_i = f_C(np.asarray(z_i_i))*C_i.units
R_i_i = f_R(z_i_i)*R_i.units
h_i_i = np.ones(z_i_i.size)*np.diff(z_i_i).min()
#flag for debug plots
plot = False
#get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i,
sigma, plot)
spline_input = {
'lfp' : phi_j,
'coord_electrode' : z_j,
'diam' : R_i*2,
'sigma' : sigma*1E3*pq.mS/pq.S,
'sigma_top' : sigma*1E3*pq.mS/pq.S,
'num_steps' : num_steps,
'tol' : 1E-12, # Tolerance in numerical integration
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
spline_icsd = icsd.SplineiCSD(**spline_input)
csd = spline_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd, decimal=3)
def suite():
suite = unittest.makeSuite(TestICSD, 'test')
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
|
leorochael/odoo
|
refs/heads/8.0
|
addons/crm_helpdesk/__init__.py
|
442
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_helpdesk
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
smarter-travel-media/warthog
|
refs/heads/master
|
test/test_cli.py
|
1
|
# -*- coding: utf-8 -*-
from click.testing import CliRunner
import pytest
import click
import requests
import warthog.cli
import warthog.exceptions
def test_main_no_command():
runner = CliRunner()
result = runner.invoke(warthog.cli.main)
assert 0 == result.exit_code, "Expected zero exit code"
def test_main_bad_command():
runner = CliRunner()
result = runner.invoke(warthog.cli.main, args=['foo'])
assert 0 != result.exit_code, "Expected non-zero exit code"
def test_main_default_config():
runner = CliRunner()
result = runner.invoke(warthog.cli.main, args=['default-config'])
assert 0 == result.exit_code, 'Expected zero exit code'
assert '[warthog]' in result.output, 'Expected "[warthog]" header in default config'
def test_main_config_path():
runner = CliRunner()
result = runner.invoke(warthog.cli.main, args=['config-path'])
assert 0 == result.exit_code, 'Expected zero exit code'
assert len(result.output), 'Expected at least some output'
def test_error_wrapper_no_such_node():
@warthog.cli.error_wrapper
def my_test_func(*_):
raise warthog.exceptions.WarthogNoSuchNodeError(
'No such server!', server='app1.example.com')
with pytest.raises(click.BadParameter):
my_test_func('something')
def test_error_wrapper_auth_failure():
@warthog.cli.error_wrapper
def my_test_func(*_):
raise warthog.exceptions.WarthogAuthFailureError(
'Authentication failed for some reason!')
with pytest.raises(click.ClickException):
my_test_func('something')
def test_error_wrapper_connection_error():
@warthog.cli.error_wrapper
def my_test_func(*_):
raise requests.ConnectionError('No such LB host!')
with pytest.raises(click.ClickException):
my_test_func('something')
|
jayme-github/headphones
|
refs/heads/master
|
lib/cherrypy/_cpserver.py
|
58
|
"""Manage HTTP servers with CherryPy."""
import warnings
import cherrypy
from cherrypy.lib import attributes
from cherrypy._cpcompat import basestring, py3k
# We import * because we want to export check_port
# et al as attributes of this module.
from cherrypy.process.servers import *
class Server(ServerAdapter):
"""An adapter for an HTTP server.
You can set attributes (like socket_host and socket_port)
on *this* object (which is probably cherrypy.server), and call
quickstart. For example::
cherrypy.server.socket_port = 80
cherrypy.quickstart()
"""
socket_port = 8080
"""The TCP port on which to listen for connections."""
_socket_host = '127.0.0.1'
def _get_socket_host(self):
return self._socket_host
def _set_socket_host(self, value):
if value == '':
raise ValueError("The empty string ('') is not an allowed value. "
"Use '0.0.0.0' instead to listen on all active "
"interfaces (INADDR_ANY).")
self._socket_host = value
socket_host = property(
_get_socket_host,
_set_socket_host,
doc="""The hostname or IP address on which to listen for connections.
Host values may be any IPv4 or IPv6 address, or any valid hostname.
The string 'localhost' is a synonym for '127.0.0.1' (or '::1', if
your hosts file prefers IPv6). The string '0.0.0.0' is a special
IPv4 entry meaning "any active interface" (INADDR_ANY), and '::'
is the similar IN6ADDR_ANY for IPv6. The empty string or None are
not allowed.""")
socket_file = None
"""If given, the name of the UNIX socket to use instead of TCP/IP.
When this option is not None, the `socket_host` and `socket_port` options
are ignored."""
socket_queue_size = 5
"""The 'backlog' argument to socket.listen(); specifies the maximum number
of queued connections (default 5)."""
socket_timeout = 10
"""The timeout in seconds for accepted connections (default 10)."""
accepted_queue_size = -1
"""The maximum number of requests which will be queued up before
the server refuses to accept it (default -1, meaning no limit)."""
accepted_queue_timeout = 10
"""The timeout in seconds for attempting to add a request to the
queue when the queue is full (default 10)."""
shutdown_timeout = 5
"""The time to wait for HTTP worker threads to clean up."""
protocol_version = 'HTTP/1.1'
"""The version string to write in the Status-Line of all HTTP responses,
for example, "HTTP/1.1" (the default). Depending on the HTTP server used,
this should also limit the supported features used in the response."""
thread_pool = 10
"""The number of worker threads to start up in the pool."""
thread_pool_max = -1
"""The maximum size of the worker-thread pool. Use -1 to indicate no limit.
"""
max_request_header_size = 500 * 1024
"""The maximum number of bytes allowable in the request headers.
If exceeded, the HTTP server should return "413 Request Entity Too Large".
"""
max_request_body_size = 100 * 1024 * 1024
"""The maximum number of bytes allowable in the request body. If exceeded,
the HTTP server should return "413 Request Entity Too Large"."""
instance = None
"""If not None, this should be an HTTP server instance (such as
CPWSGIServer) which cherrypy.server will control. Use this when you need
more control over object instantiation than is available in the various
configuration options."""
ssl_context = None
"""When using PyOpenSSL, an instance of SSL.Context."""
ssl_certificate = None
"""The filename of the SSL certificate to use."""
ssl_certificate_chain = None
"""When using PyOpenSSL, the certificate chain to pass to
Context.load_verify_locations."""
ssl_private_key = None
"""The filename of the private key to use with SSL."""
if py3k:
ssl_module = 'builtin'
"""The name of a registered SSL adaptation module to use with
the builtin WSGI server. Builtin options are: 'builtin' (to
use the SSL library built into recent versions of Python).
You may also register your own classes in the
wsgiserver.ssl_adapters dict."""
else:
ssl_module = 'pyopenssl'
"""The name of a registered SSL adaptation module to use with the
builtin WSGI server. Builtin options are 'builtin' (to use the SSL
library built into recent versions of Python) and 'pyopenssl' (to
use the PyOpenSSL project, which you must install separately). You
may also register your own classes in the wsgiserver.ssl_adapters
dict."""
statistics = False
"""Turns statistics-gathering on or off for aware HTTP servers."""
nodelay = True
"""If True (the default since 3.1), sets the TCP_NODELAY socket option."""
wsgi_version = (1, 0)
"""The WSGI version tuple to use with the builtin WSGI server.
The provided options are (1, 0) [which includes support for PEP 3333,
which declares it covers WSGI version 1.0.1 but still mandates the
wsgi.version (1, 0)] and ('u', 0), an experimental unicode version.
You may create and register your own experimental versions of the WSGI
protocol by adding custom classes to the wsgiserver.wsgi_gateways dict."""
def __init__(self):
self.bus = cherrypy.engine
self.httpserver = None
self.interrupt = None
self.running = False
def httpserver_from_self(self, httpserver=None):
"""Return a (httpserver, bind_addr) pair based on self attributes."""
if httpserver is None:
httpserver = self.instance
if httpserver is None:
from cherrypy import _cpwsgi_server
httpserver = _cpwsgi_server.CPWSGIServer(self)
if isinstance(httpserver, basestring):
# Is anyone using this? Can I add an arg?
httpserver = attributes(httpserver)(self)
return httpserver, self.bind_addr
def start(self):
"""Start the HTTP server."""
if not self.httpserver:
self.httpserver, self.bind_addr = self.httpserver_from_self()
ServerAdapter.start(self)
start.priority = 75
def _get_bind_addr(self):
if self.socket_file:
return self.socket_file
if self.socket_host is None and self.socket_port is None:
return None
return (self.socket_host, self.socket_port)
def _set_bind_addr(self, value):
if value is None:
self.socket_file = None
self.socket_host = None
self.socket_port = None
elif isinstance(value, basestring):
self.socket_file = value
self.socket_host = None
self.socket_port = None
else:
try:
self.socket_host, self.socket_port = value
self.socket_file = None
except ValueError:
raise ValueError("bind_addr must be a (host, port) tuple "
"(for TCP sockets) or a string (for Unix "
"domain sockets), not %r" % value)
bind_addr = property(
_get_bind_addr,
_set_bind_addr,
doc='A (host, port) tuple for TCP sockets or '
'a str for Unix domain sockets.')
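    # Illustrative usage: bind_addr accepts either form, e.g.
    #   cherrypy.server.bind_addr = ('127.0.0.1', 8080)    # TCP socket
    #   cherrypy.server.bind_addr = '/tmp/cherrypy.sock'   # UNIX socket (example path)
    # and assigning None clears socket_file, socket_host and socket_port.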
def base(self):
"""Return the base (scheme://host[:port] or sock file) for this server.
"""
if self.socket_file:
return self.socket_file
host = self.socket_host
if host in ('0.0.0.0', '::'):
# 0.0.0.0 is INADDR_ANY and :: is IN6ADDR_ANY.
# Look up the host name, which should be the
# safest thing to spit out in a URL.
import socket
host = socket.gethostname()
port = self.socket_port
if self.ssl_certificate:
scheme = "https"
if port != 443:
host += ":%s" % port
else:
scheme = "http"
if port != 80:
host += ":%s" % port
return "%s://%s" % (scheme, host)
|
msumit/qds-sdk-py
|
refs/heads/master
|
tests/test_role.py
|
6
|
import sys
import os
if sys.version_info > (2, 7, 0):
import unittest
else:
import unittest2 as unittest
from mock import *
sys.path.append(os.path.join(os.path.dirname(__file__), '../bin'))
import qds
from qds_sdk.connection import Connection
from test_base import print_command
from test_base import QdsCliTestCase
class TestRoleCheck(QdsCliTestCase):
def test_list(self):
sys.argv = ['qds.py', 'role', 'list']
print_command()
Connection._api_call = Mock(return_value={'roles': []})
qds.main()
Connection._api_call.assert_called_with("GET", "roles", params=None)
def test_view(self):
sys.argv = ['qds.py', 'role', 'view', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with(
"GET", "roles/123", params=None)
def test_view_neg(self):
sys.argv = ['qds.py', 'role', 'view']
print_command()
with self.assertRaises(SystemExit):
qds.main()
def test_update_name(self):
sys.argv = ['qds.py', 'role', 'update', '123', '--name', 'test']
print_command()
Connection._api_call = Mock(return_value={'roles': []})
qds.main()
Connection._api_call.assert_called_with(
"PUT", "roles/123", {'name': 'test'})
def test_update_policy(self):
sys.argv = ['qds.py', 'role', 'update', '123', '--policy',
'[{\"access\":\"allow\", \"resource\": \"all\"}]']
print_command()
Connection._api_call = Mock(return_value={'roles': []})
qds.main()
Connection._api_call.assert_called_with("PUT", "roles/123", {
'policies': '[{\"access\":\"allow\", \"resource\": \"all\"}]'})
def test_delete(self):
sys.argv = ['qds.py', 'role', 'delete', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("DELETE", "roles/123", None)
def test_delete_neg(self):
sys.argv = ['qds.py', 'role', 'delete']
print_command()
with self.assertRaises(SystemExit):
qds.main()
def test_duplicate(self):
sys.argv = ['qds.py', 'role', 'duplicate', '123']
print_command()
Connection._api_call = Mock(return_value={'roles': []})
qds.main()
Connection._api_call.assert_called_with(
"POST", "roles/123/duplicate", {})
def test_duplicate_with_name(self):
sys.argv = ['qds.py', 'role', 'duplicate', '123', '--name', 'duplicate']
print_command()
Connection._api_call = Mock(return_value={'roles': []})
qds.main()
Connection._api_call.assert_called_with(
"POST", "roles/123/duplicate", {'name': 'duplicate'})
def test_duplicate_with_policy(self):
sys.argv = ['qds.py', 'role', 'duplicate', '123', '--policy',
'[{\"access\":\"allow\", \"resource\": \"all\"}]']
print_command()
Connection._api_call = Mock(return_value={'roles': []})
qds.main()
Connection._api_call.assert_called_with(
"POST", "roles/123/duplicate",
{'policy': '[{\"access\":\"allow\", \"resource\": \"all\"}]'})
def test_duplicate_neg(self):
sys.argv = ['qds.py', 'role', 'duplicate']
print_command()
with self.assertRaises(SystemExit):
qds.main()
def test_assign_role(self):
sys.argv = ['qds.py', 'role', 'assign-role', '123', '--group-id', '456']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with(
"PUT", "groups/456/roles/123/assign", None)
def test_assign_role_neg(self):
sys.argv = ['qds.py', 'role', 'assign-role', '123', '--group-id']
print_command()
Connection._api_call = Mock(return_value={})
with self.assertRaises(SystemExit):
qds.main()
def test_unassign_role(self):
sys.argv = ['qds.py', 'role', 'unassign-role', '123', "--group-id",
"456"]
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with(
"PUT", "groups/456/roles/123/unassign", None)
def test_unassign_role_neg(self):
sys.argv = ['qds.py', 'role', 'unassign-role', '123', "--group-id"]
print_command()
Connection._api_call = Mock(return_value={})
with self.assertRaises(SystemExit):
qds.main()
def test_list_groups(self):
sys.argv = ['qds.py', 'role', 'list-groups', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with(
"GET", "roles/123/groups", params=None)
def test_list_groups_neg(self):
sys.argv = ['qds.py', 'role', 'list-groups']
print_command()
Connection._api_call = Mock(return_value={})
with self.assertRaises(SystemExit):
qds.main()
if __name__ == '__main__':
unittest.main()
|
michalliu/OpenWrt-Firefly-Libraries
|
refs/heads/master
|
staging_dir/host/lib/python3.4/test/test_bz2.py
|
8
|
from test import support
from test.support import bigmemtest, _4G
import unittest
from io import BytesIO
import os
import pickle
import random
import subprocess
import sys
from test.support import unlink
try:
import threading
except ImportError:
threading = None
# Skip tests if the bz2 module doesn't exist.
bz2 = support.import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
class BaseTest(unittest.TestCase):
"Base for other testcases."
TEXT_LINES = [
b'root:x:0:0:root:/root:/bin/bash\n',
b'bin:x:1:1:bin:/bin:\n',
b'daemon:x:2:2:daemon:/sbin:\n',
b'adm:x:3:4:adm:/var/adm:\n',
b'lp:x:4:7:lp:/var/spool/lpd:\n',
b'sync:x:5:0:sync:/sbin:/bin/sync\n',
b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\n',
b'halt:x:7:0:halt:/sbin:/sbin/halt\n',
b'mail:x:8:12:mail:/var/spool/mail:\n',
b'news:x:9:13:news:/var/spool/news:\n',
b'uucp:x:10:14:uucp:/var/spool/uucp:\n',
b'operator:x:11:0:operator:/root:\n',
b'games:x:12:100:games:/usr/games:\n',
b'gopher:x:13:30:gopher:/usr/lib/gopher-data:\n',
b'ftp:x:14:50:FTP User:/var/ftp:/bin/bash\n',
b'nobody:x:65534:65534:Nobody:/home:\n',
b'postfix:x:100:101:postfix:/var/spool/postfix:\n',
b'niemeyer:x:500:500::/home/niemeyer:/bin/bash\n',
b'postgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\n',
b'mysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\n',
b'www:x:103:104::/var/www:/bin/false\n',
]
TEXT = b''.join(TEXT_LINES)
DATA = b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
EMPTY_DATA = b'BZh9\x17rE8P\x90\x00\x00\x00\x00'
BAD_DATA = b'this is not a valid bzip2 file'
def setUp(self):
self.filename = support.TESTFN
def tearDown(self):
if os.path.isfile(self.filename):
os.unlink(self.filename)
if sys.platform == "win32":
# bunzip2 isn't available to run on Windows.
def decompress(self, data):
return bz2.decompress(data)
else:
def decompress(self, data):
pop = subprocess.Popen("bunzip2", shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
pop.stdin.write(data)
pop.stdin.close()
ret = pop.stdout.read()
pop.stdout.close()
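            # the external bunzip2 failed (e.g. not installed); fall back
            # to the bz2 module so the helper still returns a result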
if pop.wait() != 0:
ret = bz2.decompress(data)
return ret
class BZ2FileTest(BaseTest):
"Test the BZ2File class."
def createTempFile(self, streams=1, suffix=b""):
with open(self.filename, "wb") as f:
f.write(self.DATA * streams)
f.write(suffix)
def testBadArgs(self):
self.assertRaises(TypeError, BZ2File, 123.456)
self.assertRaises(ValueError, BZ2File, "/dev/null", "z")
self.assertRaises(ValueError, BZ2File, "/dev/null", "rx")
self.assertRaises(ValueError, BZ2File, "/dev/null", "rbt")
self.assertRaises(ValueError, BZ2File, "/dev/null", compresslevel=0)
self.assertRaises(ValueError, BZ2File, "/dev/null", compresslevel=10)
def testRead(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(), self.TEXT)
def testReadBadFile(self):
self.createTempFile(streams=0, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertRaises(OSError, bz2f.read)
def testReadMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testReadMonkeyMultiStream(self):
# Test BZ2File.read() on a multi-stream archive where a stream
# boundary coincides with the end of the raw read buffer.
buffer_size = bz2._BUFFER_SIZE
bz2._BUFFER_SIZE = len(self.DATA)
try:
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(), self.TEXT * 5)
finally:
bz2._BUFFER_SIZE = buffer_size
def testReadTrailingJunk(self):
self.createTempFile(suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT)
def testReadMultiStreamTrailingJunk(self):
self.createTempFile(streams=5, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testRead0(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(0), b"")
def testReadChunk10(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT)
def testReadChunk10MultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT * 5)
def testRead100(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(100), self.TEXT[:100])
def testPeek(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testReadInto(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
n = 128
b = bytearray(n)
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b, self.TEXT[:n])
n = len(self.TEXT) - n
b = bytearray(len(self.TEXT))
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b[:n], self.TEXT[-n:])
def testReadLine(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES:
self.assertEqual(bz2f.readline(), line)
def testReadLineMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES * 5:
self.assertEqual(bz2f.readline(), line)
def testReadLines(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES)
def testReadLinesMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES * 5)
def testIterator(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES)
def testIteratorMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES * 5)
def testClosedIteratorDeadlock(self):
# Issue #3309: Iteration on a closed BZ2File should release the lock.
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.close()
self.assertRaises(ValueError, next, bz2f)
# This call will deadlock if the above call failed to release the lock.
self.assertRaises(ValueError, bz2f.readlines)
def testWrite(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT)
def testWriteChunks10(self):
with BZ2File(self.filename, "w") as bz2f:
n = 0
while True:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
bz2f.write(str)
n += 1
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT)
def testWriteNonDefaultCompressLevel(self):
expected = bz2.compress(self.TEXT, compresslevel=5)
with BZ2File(self.filename, "w", compresslevel=5) as bz2f:
bz2f.write(self.TEXT)
with open(self.filename, "rb") as f:
self.assertEqual(f.read(), expected)
def testWriteLines(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.writelines)
bz2f.writelines(self.TEXT_LINES)
# Issue #1535500: Calling writelines() on a closed BZ2File
# should raise an exception.
self.assertRaises(ValueError, bz2f.writelines, ["a"])
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT)
def testWriteMethodsOnReadOnlyFile(self):
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(b"abc")
with BZ2File(self.filename, "r") as bz2f:
self.assertRaises(OSError, bz2f.write, b"a")
self.assertRaises(OSError, bz2f.writelines, [b"a"])
def testAppend(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with BZ2File(self.filename, "a") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT * 2)
def testSeekForward(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekForwardAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(len(self.TEXT) + 150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwards(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def testSeekBackwardsAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
readto = len(self.TEXT) + 100
while readto > 0:
readto -= len(bz2f.read(readto))
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[100-150:] + self.TEXT)
def testSeekBackwardsFromEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150, 2)
self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])
def testSeekBackwardsFromEndAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-1000, 2)
self.assertEqual(bz2f.read(), (self.TEXT * 2)[-1000:])
def testSeekPostEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwice(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwiceMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPreStart(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT)
def testSeekPreStartMultiStream(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT * 2)
def testFileno(self):
self.createTempFile()
with open(self.filename, 'rb') as rawf:
bz2f = BZ2File(rawf)
try:
self.assertEqual(bz2f.fileno(), rawf.fileno())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.fileno)
def testSeekable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.seekable())
bz2f.read()
self.assertTrue(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
src = BytesIO(self.DATA)
src.seekable = lambda: False
bz2f = BZ2File(src)
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
def testReadable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.readable())
bz2f.read()
self.assertTrue(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
def testWritable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertFalse(bz2f.writable())
bz2f.read()
self.assertFalse(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertTrue(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
def testOpenDel(self):
self.createTempFile()
for i in range(10000):
o = BZ2File(self.filename)
del o
def testOpenNonexistent(self):
self.assertRaises(OSError, BZ2File, "/non/existent")
def testReadlinesNoNewline(self):
# Issue #1191043: readlines() fails on a file containing no newline.
data = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
with open(self.filename, "wb") as f:
f.write(data)
with BZ2File(self.filename) as bz2f:
lines = bz2f.readlines()
self.assertEqual(lines, [b'Test'])
with BZ2File(self.filename) as bz2f:
xlines = list(bz2f.readlines())
self.assertEqual(xlines, [b'Test'])
def testContextProtocol(self):
f = None
with BZ2File(self.filename, "wb") as f:
f.write(b"xxx")
f = BZ2File(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with BZ2File(self.filename, "wb") as f:
1/0
except ZeroDivisionError:
pass
else:
self.fail("1/0 didn't raise an exception")
@unittest.skipUnless(threading, 'Threading required for this test.')
def testThreading(self):
# Issue #7205: Using a BZ2File from several threads shouldn't deadlock.
data = b"1" * 2**20
nthreads = 10
with BZ2File(self.filename, 'wb') as f:
def comp():
for i in range(5):
f.write(data)
threads = [threading.Thread(target=comp) for i in range(nthreads)]
for t in threads:
t.start()
for t in threads:
t.join()
def testWithoutThreading(self):
module = support.import_fresh_module("bz2", blocked=("threading",))
with module.BZ2File(self.filename, "wb") as f:
f.write(b"abc")
with module.BZ2File(self.filename, "rb") as f:
self.assertEqual(f.read(), b"abc")
def testMixedIterationAndReads(self):
self.createTempFile()
linelen = len(self.TEXT_LINES[0])
halflen = linelen // 2
with BZ2File(self.filename) as bz2f:
bz2f.read(halflen)
self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:])
self.assertEqual(bz2f.read(), self.TEXT[linelen:])
with BZ2File(self.filename) as bz2f:
bz2f.readline()
self.assertEqual(next(bz2f), self.TEXT_LINES[1])
self.assertEqual(bz2f.readline(), self.TEXT_LINES[2])
with BZ2File(self.filename) as bz2f:
bz2f.readlines()
self.assertRaises(StopIteration, next, bz2f)
self.assertEqual(bz2f.readlines(), [])
def testMultiStreamOrdering(self):
# Test the ordering of streams when reading a multi-stream archive.
data1 = b"foo" * 1000
data2 = b"bar" * 1000
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(data1)
with BZ2File(self.filename, "a") as bz2f:
bz2f.write(data2)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), data1 + data2)
def testOpenBytesFilename(self):
str_filename = self.filename
try:
bytes_filename = str_filename.encode("ascii")
except UnicodeEncodeError:
self.skipTest("Temporary file name needs to be ASCII")
with BZ2File(bytes_filename, "wb") as f:
f.write(self.DATA)
with BZ2File(bytes_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
# Sanity check that we are actually operating on the right file.
with BZ2File(str_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
# Tests for a BZ2File wrapping another file object:
def testReadBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(), self.TEXT)
self.assertFalse(bio.closed)
def testPeekBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testWriteBytesIO(self):
with BytesIO() as bio:
with BZ2File(bio, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
self.assertEqual(self.decompress(bio.getvalue()), self.TEXT)
self.assertFalse(bio.closed)
def testSeekForwardBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwardsBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def test_read_truncated(self):
# Drop the eos_magic field (6 bytes) and CRC (4 bytes).
truncated = self.DATA[:-10]
with BZ2File(BytesIO(truncated)) as f:
self.assertRaises(EOFError, f.read)
with BZ2File(BytesIO(truncated)) as f:
self.assertEqual(f.read(len(self.TEXT)), self.TEXT)
self.assertRaises(EOFError, f.read, 1)
# Incomplete 4-byte file header, and block header of at least 146 bits.
for i in range(22):
with BZ2File(BytesIO(truncated[:i])) as f:
self.assertRaises(EOFError, f.read, 1)
class BZ2CompressorTest(BaseTest):
def testCompress(self):
bz2c = BZ2Compressor()
self.assertRaises(TypeError, bz2c.compress)
data = bz2c.compress(self.TEXT)
data += bz2c.flush()
self.assertEqual(self.decompress(data), self.TEXT)
def testCompressEmptyString(self):
bz2c = BZ2Compressor()
data = bz2c.compress(b'')
data += bz2c.flush()
self.assertEqual(data, self.EMPTY_DATA)
def testCompressChunks10(self):
bz2c = BZ2Compressor()
n = 0
data = b''
while True:
            chunk = self.TEXT[n*10:(n+1)*10]
            if not chunk:
                break
            data += bz2c.compress(chunk)
n += 1
data += bz2c.flush()
self.assertEqual(self.decompress(data), self.TEXT)
@bigmemtest(size=_4G + 100, memuse=2)
def testCompress4G(self, size):
# "Test BZ2Compressor.compress()/flush() with >4GiB input"
bz2c = BZ2Compressor()
data = b"x" * size
try:
compressed = bz2c.compress(data)
compressed += bz2c.flush()
finally:
data = None # Release memory
data = bz2.decompress(compressed)
try:
self.assertEqual(len(data), size)
self.assertEqual(len(data.strip(b"x")), 0)
finally:
data = None
def testPickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Compressor(), proto)
class BZ2DecompressorTest(BaseTest):
def test_Constructor(self):
self.assertRaises(TypeError, BZ2Decompressor, 42)
def testDecompress(self):
bz2d = BZ2Decompressor()
self.assertRaises(TypeError, bz2d.decompress)
text = bz2d.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressChunks10(self):
bz2d = BZ2Decompressor()
text = b''
n = 0
while True:
            chunk = self.DATA[n*10:(n+1)*10]
            if not chunk:
                break
            text += bz2d.decompress(chunk)
n += 1
self.assertEqual(text, self.TEXT)
def testDecompressUnusedData(self):
bz2d = BZ2Decompressor()
unused_data = b"this is unused data"
text = bz2d.decompress(self.DATA+unused_data)
self.assertEqual(text, self.TEXT)
self.assertEqual(bz2d.unused_data, unused_data)
def testEOFError(self):
bz2d = BZ2Decompressor()
text = bz2d.decompress(self.DATA)
self.assertRaises(EOFError, bz2d.decompress, b"anything")
self.assertRaises(EOFError, bz2d.decompress, b"")
@bigmemtest(size=_4G + 100, memuse=3.3)
def testDecompress4G(self, size):
# "Test BZ2Decompressor.decompress() with >4GiB input"
blocksize = 10 * 1024 * 1024
block = random.getrandbits(blocksize * 8).to_bytes(blocksize, 'little')
try:
data = block * (size // blocksize + 1)
compressed = bz2.compress(data)
bz2d = BZ2Decompressor()
decompressed = bz2d.decompress(compressed)
self.assertTrue(decompressed == data)
finally:
data = None
compressed = None
decompressed = None
def testPickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Decompressor(), proto)
class CompressDecompressTest(BaseTest):
def testCompress(self):
data = bz2.compress(self.TEXT)
self.assertEqual(self.decompress(data), self.TEXT)
def testCompressEmptyString(self):
text = bz2.compress(b'')
self.assertEqual(text, self.EMPTY_DATA)
def testDecompress(self):
text = bz2.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressEmpty(self):
text = bz2.decompress(b"")
self.assertEqual(text, b"")
def testDecompressToEmptyString(self):
text = bz2.decompress(self.EMPTY_DATA)
self.assertEqual(text, b'')
def testDecompressIncomplete(self):
self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])
def testDecompressBadData(self):
self.assertRaises(OSError, bz2.decompress, self.BAD_DATA)
def testDecompressMultiStream(self):
text = bz2.decompress(self.DATA * 5)
self.assertEqual(text, self.TEXT * 5)
def testDecompressTrailingJunk(self):
text = bz2.decompress(self.DATA + self.BAD_DATA)
self.assertEqual(text, self.TEXT)
def testDecompressMultiStreamTrailingJunk(self):
text = bz2.decompress(self.DATA * 5 + self.BAD_DATA)
self.assertEqual(text, self.TEXT * 5)
class OpenTest(BaseTest):
"Test the open function."
def open(self, *args, **kwargs):
return bz2.open(*args, **kwargs)
def test_binary_modes(self):
for mode in ("wb", "xb"):
if mode == "xb":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = self.decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "rb") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "ab") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = self.decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_implicit_binary_modes(self):
# Test implicit binary modes (no "b" or "t" in mode string).
for mode in ("w", "x"):
if mode == "x":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = self.decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "a") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = self.decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_text_modes(self):
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
for mode in ("wt", "xt"):
if mode == "xt":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = self.decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt") as f:
self.assertEqual(f.read(), text)
with self.open(self.filename, "at") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = self.decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol * 2)
def test_x_mode(self):
for mode in ("x", "xb", "xt"):
unlink(self.filename)
with self.open(self.filename, mode) as f:
pass
with self.assertRaises(FileExistsError):
with self.open(self.filename, mode) as f:
pass
def test_fileobj(self):
with self.open(BytesIO(self.DATA), "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(BytesIO(self.DATA), "rb") as f:
self.assertEqual(f.read(), self.TEXT)
text = self.TEXT.decode("ascii")
with self.open(BytesIO(self.DATA), "rt") as f:
self.assertEqual(f.read(), text)
def test_bad_params(self):
# Test invalid parameter combinations.
self.assertRaises(ValueError,
self.open, self.filename, "wbt")
self.assertRaises(ValueError,
self.open, self.filename, "xbt")
self.assertRaises(ValueError,
self.open, self.filename, "rb", encoding="utf-8")
self.assertRaises(ValueError,
self.open, self.filename, "rb", errors="ignore")
self.assertRaises(ValueError,
self.open, self.filename, "rb", newline="\n")
def test_encoding(self):
# Test non-default encoding.
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
with self.open(self.filename, "wt", encoding="utf-16-le") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = self.decompress(f.read()).decode("utf-16-le")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt", encoding="utf-16-le") as f:
self.assertEqual(f.read(), text)
def test_encoding_error_handler(self):
# Test with non-default encoding error handler.
with self.open(self.filename, "wb") as f:
f.write(b"foo\xffbar")
with self.open(self.filename, "rt", encoding="ascii", errors="ignore") \
as f:
self.assertEqual(f.read(), "foobar")
def test_newline(self):
# Test with explicit newline (universal newline mode disabled).
text = self.TEXT.decode("ascii")
with self.open(self.filename, "wt", newline="\n") as f:
f.write(text)
with self.open(self.filename, "rt", newline="\r") as f:
self.assertEqual(f.readlines(), [text])
def test_main():
support.run_unittest(
BZ2FileTest,
BZ2CompressorTest,
BZ2DecompressorTest,
CompressDecompressTest,
OpenTest,
)
support.reap_children()
if __name__ == '__main__':
test_main()
|
huaxz1986/git_book
|
refs/heads/master
|
chapters/PreProcessing/onehot_encode.py
|
1
|
# -*- coding: utf-8 -*-
"""
Data preprocessing
~~~~~~~~~~~~~~~~~~
One-hot encoding
:copyright: (c) 2016 by the huaxz1986.
:license: lgpl-3.0, see LICENSE for more details.
"""
from sklearn.preprocessing import OneHotEncoder
def test_OneHotEncoder():
'''
    Test the usage of OneHotEncoder
:return: None
'''
    X=[ [1,2,3,4,5],
        [5,4,3,2,1],
        [3,3,3,3,3],
        [1,1,1,1,1] ]
print("before transform:",X)
encoder=OneHotEncoder(sparse=False)
encoder.fit(X)
print("active_features_:",encoder.active_features_)
print("feature_indices_:",encoder.feature_indices_)
print("n_values_:",encoder.n_values_)
print("after transform:",encoder.transform( [[1,2,3,4,5]]))
if __name__=='__main__':
    test_OneHotEncoder() # call test_OneHotEncoder
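# A minimal sketch of what the printed attributes mean, assuming the older
# scikit-learn OneHotEncoder API used above (inputs treated as non-negative
# integer categories): n_values_ holds max+1 per input column,
# feature_indices_ holds the cumulative offsets of each column in the
# expanded indicator matrix, and active_features_ lists the indicator
# columns actually observed during fit. transform([[1,2,3,4,5]]) therefore
# returns a single row containing exactly five 1s, one per original feature.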
|
yongshengwang/builthue
|
refs/heads/master
|
desktop/core/ext-py/Pygments-1.3.1/pygments/__init__.py
|
56
|
# -*- coding: utf-8 -*-
"""
Pygments
~~~~~~~~
Pygments is a syntax highlighting package written in Python.
It is a generic syntax highlighter for general use in all kinds of software
such as forum systems, wikis or other applications that need to prettify
source code. Highlights are:
* a wide range of common languages and markup formats is supported
* special attention is paid to details, increasing quality by a fair amount
* support for new languages and formats are added easily
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
formats that PIL supports, and ANSI sequences
* it is usable as a command-line tool and as a library
* ... and it highlights even Brainfuck!
The `Pygments tip`_ is installable with ``easy_install Pygments==dev``.
.. _Pygments tip:
http://dev.pocoo.org/hg/pygments-main/archive/tip.tar.gz#egg=Pygments-dev
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
__version__ = '1.3.1'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
import sys
from pygments.util import StringIO, BytesIO
def lex(code, lexer):
"""
Lex ``code`` with ``lexer`` and return an iterable of tokens.
"""
try:
return lexer.get_tokens(code)
except TypeError, err:
if isinstance(err.args[0], str) and \
'unbound method get_tokens' in err.args[0]:
raise TypeError('lex() argument must be a lexer instance, '
'not a class')
raise
def format(tokens, formatter, outfile=None):
"""
Format a tokenlist ``tokens`` with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
try:
if not outfile:
realoutfile = formatter.encoding and BytesIO() or StringIO()
formatter.format(tokens, realoutfile)
return realoutfile.getvalue()
else:
formatter.format(tokens, outfile)
except TypeError, err:
if isinstance(err.args[0], str) and \
'unbound method format' in err.args[0]:
raise TypeError('format() argument must be a formatter instance, '
'not a class')
raise
def highlight(code, lexer, formatter, outfile=None):
"""
Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
return format(lex(code, lexer), formatter, outfile)
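# Example usage (a sketch; PythonLexer and HtmlFormatter ship with Pygments):
#
#   from pygments.lexers import PythonLexer
#   from pygments.formatters import HtmlFormatter
#   html_snippet = highlight('print "hi"', PythonLexer(), HtmlFormatter())
#
# With no outfile argument, highlight() returns the formatted result as a
# string (bytes or text depending on the formatter's encoding).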
if __name__ == '__main__':
from pygments.cmdline import main
sys.exit(main(sys.argv))
|
SuperDARNCanada/placeholderOS
|
refs/heads/master
|
experiments/testing_archive/test_txbw_not_divisible.py
|
2
|
#!/usr/bin/python
# write an experiment that raises an exception
import sys
import os
BOREALISPATH = os.environ['BOREALISPATH']
sys.path.append(BOREALISPATH)
import experiments.superdarn_common_fields as scf
from experiment_prototype.experiment_prototype import ExperimentPrototype
from utils.experiment_options.experimentoptions import ExperimentOptions as eo
from experiment_prototype.decimation_scheme.decimation_scheme import \
DecimationScheme, DecimationStage, create_firwin_filter_by_attenuation
class TestExperiment(ExperimentPrototype):
def __init__(self):
cpid = 1
        # should fail because tx_bandwidth is not an integer divisor of the USRP master clock rate
super(TestExperiment, self).__init__(cpid, tx_bandwidth=3.14159e6)
if scf.IS_FORWARD_RADAR:
beams_to_use = scf.STD_16_FORWARD_BEAM_ORDER
else:
beams_to_use = scf.STD_16_REVERSE_BEAM_ORDER
if scf.opts.site_id in ["cly", "rkn", "inv"]:
num_ranges = scf.POLARDARN_NUM_RANGES
if scf.opts.site_id in ["sas", "pgr"]:
num_ranges = scf.STD_NUM_RANGES
slice_1 = { # slice_id = 0, there is only one slice.
"pulse_sequence": scf.SEQUENCE_7P,
"tau_spacing": scf.TAU_SPACING_7P,
"pulse_len": scf.PULSE_LEN_45KM,
"num_ranges": num_ranges,
"first_range": scf.STD_FIRST_RANGE,
"intt": 3500, # duration of an integration, in ms
"beam_angle": scf.STD_16_BEAM_ANGLE,
"beam_order": beams_to_use,
"scanbound": [i * 3.5 for i in range(len(beams_to_use))], #1 min scan
"txfreq" : scf.COMMON_MODE_FREQ_1, #kHz
"acf": True,
"xcf": True, # cross-correlation processing
"acfint": True, # interferometer acfs
}
self.add_slice(slice_1)
|
djbaldey/django
|
refs/heads/master
|
tests/utils_tests/test_autoreload.py
|
139
|
import os
import tempfile
from importlib import import_module
from django import conf
from django.contrib import admin
from django.test import SimpleTestCase, override_settings
from django.test.utils import extend_sys_path
from django.utils._os import npath, upath
from django.utils.autoreload import gen_filenames
LOCALE_PATH = os.path.join(os.path.dirname(__file__), 'locale')
class TestFilenameGenerator(SimpleTestCase):
def setUp(self):
# Empty cached variables
from django.utils import autoreload
autoreload._cached_modules = set()
autoreload._cached_filenames = []
def test_django_locales(self):
"""
Test that gen_filenames() also yields the built-in django locale files.
"""
filenames = list(gen_filenames())
self.assertIn(os.path.join(os.path.dirname(conf.__file__), 'locale',
'nl', 'LC_MESSAGES', 'django.mo'),
filenames)
@override_settings(LOCALE_PATHS=[LOCALE_PATH])
def test_locale_paths_setting(self):
"""
Test that gen_filenames also yields from LOCALE_PATHS locales.
"""
filenames = list(gen_filenames())
self.assertIn(os.path.join(LOCALE_PATH, 'nl', 'LC_MESSAGES', 'django.mo'),
filenames)
@override_settings(INSTALLED_APPS=[])
def test_project_root_locale(self):
"""
Test that gen_filenames also yields from the current directory (project
root).
"""
old_cwd = os.getcwd()
os.chdir(os.path.dirname(__file__))
try:
filenames = list(gen_filenames())
self.assertIn(
os.path.join(LOCALE_PATH, 'nl', 'LC_MESSAGES', 'django.mo'),
filenames)
finally:
os.chdir(old_cwd)
@override_settings(INSTALLED_APPS=['django.contrib.admin'])
def test_app_locales(self):
"""
Test that gen_filenames also yields from locale dirs in installed apps.
"""
filenames = list(gen_filenames())
self.assertIn(
os.path.join(os.path.dirname(upath(admin.__file__)), 'locale', 'nl', 'LC_MESSAGES', 'django.mo'),
filenames
)
@override_settings(USE_I18N=False)
def test_no_i18n(self):
"""
        If the i18n machinery is disabled, there is no need to watch the
        locale files.
"""
filenames = list(gen_filenames())
self.assertNotIn(
os.path.join(os.path.dirname(upath(conf.__file__)), 'locale', 'nl', 'LC_MESSAGES', 'django.mo'),
filenames
)
def test_only_new_files(self):
"""
        When gen_filenames() is called a second time with only_new=True,
        only filenames from newly loaded modules should be yielded.
"""
list(gen_filenames())
from fractions import Fraction # NOQA
filenames2 = list(gen_filenames(only_new=True))
self.assertEqual(len(filenames2), 1)
self.assertTrue(filenames2[0].endswith('fractions.py'))
self.assertFalse(any(f.endswith('.pyc') for f in gen_filenames()))
def test_deleted_removed(self):
dirname = tempfile.mkdtemp()
filename = os.path.join(dirname, 'test_deleted_removed_module.py')
with open(filename, 'w'):
pass
with extend_sys_path(dirname):
import_module('test_deleted_removed_module')
self.assertIn(npath(filename), gen_filenames())
os.unlink(filename)
self.assertNotIn(filename, gen_filenames())
|
wemanuel/smry
|
refs/heads/master
|
server-auth/ls/google-cloud-sdk/lib/requests/sessions.py
|
156
|
# -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
from collections import Mapping
from datetime import datetime
from .auth import _basic_auth_str
from .compat import cookielib, OrderedDict, urljoin, urlparse
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from .utils import to_key_val_list, default_headers, to_native_string
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .packages.urllib3._collections import RecentlyUsedContainer
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
REDIRECT_CACHE_SIZE = 1000
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
"""
    Determines the appropriate setting for a given request, taking into
    account the explicit setting on that request and the setting in the
    session. If a setting is a dictionary, the two are merged using
    `dict_class`.
"""
if session_setting is None:
return request_setting
if request_setting is None:
return session_setting
# Bypass if not a dictionary (e.g. verify)
if not (
isinstance(session_setting, Mapping) and
isinstance(request_setting, Mapping)
):
return request_setting
merged_setting = dict_class(to_key_val_list(session_setting))
merged_setting.update(to_key_val_list(request_setting))
    # Remove keys that are set to None. Extract the keys first to avoid
    # changing the size of the dictionary during iteration, and keep the
    # dict_class (e.g. a case-insensitive mapping) rather than rebuilding
    # a plain dict, which would discard it.
    none_keys = [k for (k, v) in merged_setting.items() if v is None]
    for key in none_keys:
        del merged_setting[key]
    return merged_setting
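# Example (a sketch): merge_setting({'a': 1, 'b': None}, {'a': 0, 'c': 2})
# returns {'a': 1, 'c': 2}: the request-level value wins for 'a', and the
# explicit None removes 'b' from the merged result.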
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
"""
Properly merges both requests and session hooks.
This is necessary because when request_hooks == {'response': []}, the
merge breaks Session hooks entirely.
"""
if session_hooks is None or session_hooks.get('response') == []:
return request_hooks
if request_hooks is None or request_hooks.get('response') == []:
return session_hooks
return merge_setting(request_hooks, session_hooks, dict_class)
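# Example (a sketch, with a hypothetical log_hook callable):
# merge_hooks({'response': []}, {'response': [log_hook]}) returns the
# session hooks unchanged, since an empty 'response' list on the request
# side would otherwise wipe out the session's hooks in a plain merge.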
class SessionRedirectMixin(object):
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None):
"""Receives a Response. Returns a generator of Responses."""
i = 0
hist = [] # keep track of history
while resp.is_redirect:
prepared_request = req.copy()
if i > 0:
# Update history and keep track of redirects.
hist.append(resp)
new_hist = list(hist)
resp.history = new_hist
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if i >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects)
# Release the connection back into the pool.
resp.close()
url = resp.headers['location']
method = req.method
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (parsed_rurl.scheme, url)
# The scheme should be lower case...
parsed = urlparse(url)
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
# Cache the url, unless it redirects to itself.
if resp.is_permanent_redirect and req.url != prepared_request.url:
self.redirect_cache[req.url] = prepared_request.url
# http://tools.ietf.org/html/rfc7231#section-6.4.4
if (resp.status_code == codes.see_other and
method != 'HEAD'):
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if resp.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if resp.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
# https://github.com/kennethreitz/requests/issues/1084
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
if 'Content-Length' in prepared_request.headers:
del prepared_request.headers['Content-Length']
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
prepared_request._cookies.update(self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# Override the original request.
req = prepared_request
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
i += 1
yield resp
def rebuild_auth(self, prepared_request, response):
"""
When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
"""
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers:
# If we get redirected to a new host, we should strip out any
# authentication headers.
original_parsed = urlparse(response.request.url)
redirect_parsed = urlparse(url)
if (original_parsed.hostname != redirect_parsed.hostname):
del headers['Authorization']
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
return
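        # Example (a sketch): a request carrying an Authorization header that
        # is redirected from api.example.com to files.example.org has the
        # header stripped above, because the hostnames differ; netrc
        # credentials for the new host, if any, are then re-applied.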
def rebuild_proxies(self, prepared_request, proxies):
"""
This method re-evaluates the proxy configuration by considering the
environment variables. If we are redirected to a URL covered by
NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
proxy keys for this URL (in case they were stripped by a previous
redirect).
This method also replaces the Proxy-Authorization header where
necessary.
"""
headers = prepared_request.headers
url = prepared_request.url
scheme = urlparse(url).scheme
new_proxies = proxies.copy() if proxies is not None else {}
if self.trust_env and not should_bypass_proxies(url):
environ_proxies = get_environ_proxies(url)
proxy = environ_proxies.get(scheme)
if proxy:
new_proxies.setdefault(scheme, environ_proxies[scheme])
if 'Proxy-Authorization' in headers:
del headers['Proxy-Authorization']
try:
username, password = get_auth_from_url(new_proxies[scheme])
except KeyError:
username, password = None, None
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username, password)
return new_proxies
class Session(SessionRedirectMixin):
"""A Requests session.
Provides cookie persistence, connection-pooling, and configuration.
Basic Usage::
>>> import requests
>>> s = requests.Session()
>>> s.get('http://httpbin.org/get')
      <Response [200]>
"""
__attrs__ = [
'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
'cert', 'prefetch', 'adapters', 'stream', 'trust_env',
'max_redirects',
]
def __init__(self):
#: A case-insensitive dictionary of headers to be sent on each
#: :class:`Request <Request>` sent from this
#: :class:`Session <Session>`.
self.headers = default_headers()
#: Default Authentication tuple or object to attach to
#: :class:`Request <Request>`.
self.auth = None
#: Dictionary mapping protocol to the URL of the proxy (e.g.
#: {'http': 'foo.bar:3128'}) to be used on each
#: :class:`Request <Request>`.
self.proxies = {}
#: Event-handling hooks.
self.hooks = default_hooks()
#: Dictionary of querystring data to attach to each
#: :class:`Request <Request>`. The dictionary values may be lists for
#: representing multivalued query parameters.
self.params = {}
#: Stream response content default.
self.stream = False
#: SSL Verification default.
self.verify = True
#: SSL certificate default.
self.cert = None
#: Maximum number of redirects allowed. If the request exceeds this
#: limit, a :class:`TooManyRedirects` exception is raised.
self.max_redirects = DEFAULT_REDIRECT_LIMIT
#: Should we trust the environment?
self.trust_env = True
#: A CookieJar containing all currently outstanding cookies set on this
#: session. By default it is a
#: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
#: may be any other ``cookielib.CookieJar`` compatible object.
self.cookies = cookiejar_from_dict({})
# Default connection adapters.
self.adapters = OrderedDict()
self.mount('https://', HTTPAdapter())
self.mount('http://', HTTPAdapter())
# Only store 1000 redirects to prevent using infinite memory
self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = merge_cookies(
merge_cookies(RequestsCookieJar(), self.cookies), cookies)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
json=request.json,
headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
return p
def request(self, method, url,
params=None,
data=None,
headers=None,
cookies=None,
files=None,
auth=None,
timeout=None,
allow_redirects=True,
proxies=None,
hooks=None,
stream=None,
verify=None,
cert=None,
json=None):
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary or bytes to send in the body of the
:class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a (`connect timeout, read
timeout <user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of
the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) if ``True``, the SSL cert will be verified.
A CA_BUNDLE path can also be provided.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
"""
method = to_native_string(method)
# Create the Request.
req = Request(
method = method.upper(),
url = url,
headers = headers,
files = files,
data = data or {},
json = json,
params = params or {},
auth = auth,
cookies = cookies,
hooks = hooks,
)
prep = self.prepare_request(req)
proxies = proxies or {}
settings = self.merge_environment_settings(
prep.url, proxies, stream, verify, cert
)
# Send the request.
send_kwargs = {
'timeout': timeout,
'allow_redirects': allow_redirects,
}
send_kwargs.update(settings)
resp = self.send(prep, **send_kwargs)
return resp
def get(self, url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return self.request('GET', url, **kwargs)
def options(self, url, **kwargs):
"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return self.request('OPTIONS', url, **kwargs)
def head(self, url, **kwargs):
"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', False)
return self.request('HEAD', url, **kwargs)
def post(self, url, data=None, json=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('POST', url, data=data, json=json, **kwargs)
def put(self, url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('PATCH', url, data=data, **kwargs)
def delete(self, url, **kwargs):
"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('DELETE', url, **kwargs)
def send(self, request, **kwargs):
"""Send a given PreparedRequest."""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
kwargs.setdefault('proxies', self.proxies)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if not isinstance(request, PreparedRequest):
raise ValueError('You can only send PreparedRequests.')
checked_urls = set()
while request.url in self.redirect_cache:
checked_urls.add(request.url)
new_url = self.redirect_cache.get(request.url)
if new_url in checked_urls:
break
request.url = new_url
# Set up variables needed for resolve_redirects and dispatching of hooks
allow_redirects = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
timeout = kwargs.get('timeout')
verify = kwargs.get('verify')
cert = kwargs.get('cert')
proxies = kwargs.get('proxies')
hooks = request.hooks
# Get the appropriate adapter to use
adapter = self.get_adapter(url=request.url)
# Start time (approximately) of the request
start = datetime.utcnow()
# Send the request
r = adapter.send(request, **kwargs)
# Total elapsed time of the request (approximately)
r.elapsed = datetime.utcnow() - start
# Response manipulation hooks
r = dispatch_hook('response', hooks, r, **kwargs)
# Persist cookies
if r.history:
# If the hooks create history then we want those cookies too
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, request, r.raw)
# Redirect resolving generator.
gen = self.resolve_redirects(r, request,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies)
# Resolve redirects if allowed.
history = [resp for resp in gen] if allow_redirects else []
# Shuffle things around if there's history.
if history:
# Insert the first (original) request at the start
history.insert(0, r)
# Get the last request made
r = history.pop()
r.history = history
if not stream:
r.content
return r
def merge_environment_settings(self, url, proxies, stream, verify, cert):
"""Check the environment and merge it with some settings."""
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
env_proxies = get_environ_proxies(url) or {}
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
# Look for requests environment configuration and be compatible
# with cURL.
if verify is True or verify is None:
verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
os.environ.get('CURL_CA_BUNDLE'))
# Merge all the kwargs.
proxies = merge_setting(proxies, self.proxies)
stream = merge_setting(stream, self.stream)
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
return {'verify': verify, 'proxies': proxies, 'stream': stream,
'cert': cert}
def get_adapter(self, url):
"""Returns the appropriate connnection adapter for the given URL."""
for (prefix, adapter) in self.adapters.items():
if url.lower().startswith(prefix):
return adapter
# Nothing matches :-/
raise InvalidSchema("No connection adapters were found for '%s'" % url)
def close(self):
"""Closes all adapters and as such the session"""
for v in self.adapters.values():
v.close()
def mount(self, prefix, adapter):
"""Registers a connection adapter to a prefix.
Adapters are sorted in descending order by key length."""
self.adapters[prefix] = adapter
keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
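        # Example (a sketch; MyAdapter is a hypothetical HTTPAdapter
        # subclass): after s.mount('http://example.com/', MyAdapter()),
        # get_adapter() sees the longer prefix first, so URLs under
        # http://example.com/ use MyAdapter while other http:// URLs fall
        # back to the default HTTPAdapter mounted in __init__.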
def __getstate__(self):
state = dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
state['redirect_cache'] = dict(self.redirect_cache)
return state
def __setstate__(self, state):
redirect_cache = state.pop('redirect_cache', {})
for attr, value in state.items():
setattr(self, attr, value)
self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
for redirect, to in redirect_cache.items():
self.redirect_cache[redirect] = to
def session():
"""Returns a :class:`Session` for context-management."""
return Session()
|
was4444/chromium.src
|
refs/heads/nw15
|
content/test/gpu/gpu_tests/webgl_conformance_expectations.py
|
1
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from gpu_tests.gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class WebGLConformanceExpectations(GpuTestExpectations):
def __init__(self, conformance_path):
self.conformance_path = conformance_path
super(WebGLConformanceExpectations, self).__init__()
def Fail(self, pattern, condition=None, bug=None):
self.CheckPatternIsValid(pattern)
GpuTestExpectations.Fail(self, pattern, condition, bug)
def Skip(self, pattern, condition=None, bug=None):
self.CheckPatternIsValid(pattern)
GpuTestExpectations.Skip(self, pattern, condition, bug)
def CheckPatternIsValid(self, pattern):
# Look for basic wildcards.
    if '*' not in pattern:
full_path = os.path.normpath(os.path.join(self.conformance_path, pattern))
if not os.path.exists(full_path):
raise Exception('The WebGL conformance test path specified in ' +
'expectation does not exist: ' + full_path)
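  # Example (a sketch, with a hypothetical pattern and bug number):
  # self.Fail('conformance/foo/*', ['win', 'intel'], bug=123) would mark
  # every matching test as an expected failure on Windows Intel
  # configurations; non-wildcard patterns are checked above against the
  # on-disk conformance tree.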
def SetExpectations(self):
# Fails on all platforms
self.Fail('deqp/data/gles2/shaders/functions.html',
bug=478572)
self.Fail('deqp/data/gles2/shaders/scoping.html',
bug=478572)
self.Fail('conformance/extensions/ext-sRGB.html',
bug=540900)
# Remove after we roll in https://github.com/KhronosGroup/WebGL/pull/1520.
self.Fail('conformance/extensions/oes-vertex-array-object.html',
bug=295792)
# We need to add WebGL 1 check in command buffer that format/type from
# TexSubImage2D have to match the current texture's.
self.Fail('conformance/textures/misc/tex-sub-image-2d-bad-args.html',
bug=570453)
# Fails on multiple platforms
# OpenGL / NVIDIA failures
self.Fail('conformance/attribs/gl-disabled-vertex-attrib.html',
['win', 'linux', 'nvidia', 'opengl'], bug=1007) # angle bug ID
# Win failures
# Note that the following two tests pass with OpenGL.
self.Fail('conformance/glsl/bugs/' +
'pow-of-small-constant-in-user-defined-function.html',
['win'], bug=485641)
self.Fail('conformance/glsl/bugs/sampler-struct-function-arg.html',
['win'], bug=485642)
# Note that the following test seems to pass, but it may still be flaky.
self.Fail('conformance/glsl/constructors/' +
'glsl-construct-vec-mat-index.html',
['win'], bug=525188)
self.Flaky('deqp/data/gles2/shaders/constants.html', ['win'], bug=594922)
# Win7 / Intel failures
self.Fail('conformance/textures/misc/' +
'copy-tex-image-and-sub-image-2d.html',
['win7', 'intel'])
# Win / AMD flakiness seen on new tryservers.
# It's unfortunate that this suppression needs to be so broad, but
# basically any test that uses readPixels is potentially flaky, and
# it's infeasible to suppress individual failures one by one.
self.Flaky('conformance/*', ['win', ('amd', 0x6779)], bug=491419)
# Win / AMD D3D9 failures
self.Fail('conformance/extensions/angle-instanced-arrays.html',
['win', 'amd', 'd3d9'], bug=475095)
self.Fail('conformance/rendering/more-than-65536-indices.html',
['win', 'amd', 'd3d9'], bug=475095)
# Win / D3D9 failures
# Skipping these two tests because they're causing assertion failures.
self.Skip('conformance/extensions/oes-texture-float-with-canvas.html',
['win', 'd3d9'], bug=896) # angle bug ID
self.Skip('conformance/extensions/oes-texture-half-float-with-canvas.html',
['win', 'd3d9'], bug=896) # angle bug ID
self.Fail('conformance/glsl/bugs/floor-div-cos-should-not-truncate.html',
['win', 'd3d9'], bug=1179) # angle bug ID
# The functions test have been persistently flaky on D3D9
self.Flaky('conformance/glsl/functions/*',
['win', 'd3d9'], bug=415609)
# WIN / D3D9 / Intel failures
self.Fail('conformance/ogles/GL/cos/cos_001_to_006.html',
['win', 'intel', 'd3d9'], bug=540538)
# WIN / OpenGL / NVIDIA failures
# Mark ANGLE's OpenGL as flaky on Windows Nvidia
self.Flaky('conformance/*', ['win', 'nvidia', 'opengl'], bug=582083)
# Win / OpenGL / AMD failures
self.Skip('conformance/glsl/misc/shader-struct-scope.html',
['win', 'amd', 'opengl'], bug=1007) # angle bug ID
self.Skip('conformance/glsl/misc/shaders-with-invariance.html',
['win', 'amd', 'opengl'], bug=1007) # angle bug ID
self.Fail('conformance/glsl/misc/struct-nesting-of-variable-names.html',
['win', 'amd', 'opengl'], bug=1007) # angle bug ID
self.Fail('deqp/data/gles2/shaders/preprocessor.html',
['win', 'amd', 'opengl'], bug=478572)
# Win / OpenGL / Intel failures
self.Fail('conformance/extensions/webgl-draw-buffers.html',
['win', 'intel', 'opengl'], bug=1007) # angle bug ID
self.Fail('conformance/glsl/functions/glsl-function-normalize.html',
['win', 'intel', 'opengl'], bug=1007) # angle bug ID
self.Fail('conformance/glsl/misc/shader-struct-scope.html',
['win', 'intel', 'opengl'], bug=1007) # angle bug ID
self.Fail('conformance/uniforms/uniform-default-values.html',
['win', 'intel', 'opengl'], bug=1007) # angle bug ID
# Mac failures
self.Fail('conformance/glsl/misc/shaders-with-invariance.html',
['mac'], bug=421710)
self.Fail('deqp/data/gles2/shaders/preprocessor.html',
['mac'], bug=478572)
# Mac Retina NVIDIA failures
self.Fail(
'conformance/glsl/bugs/array-of-struct-with-int-first-position.html',
['mac', ('nvidia', 0xfd5), ('nvidia', 0xfe9)], bug=368912)
self.Fail('conformance/textures/image_bitmap_from_image/*',
['mac', ('nvidia', 0xfd5), ('nvidia', 0xfe9)], bug=589930)
self.Fail('conformance/extensions/webgl-draw-buffers.html',
['mavericks', ('nvidia', 0xfe9)], bug=586536)
# Mac Retina AMD failures
self.Fail('conformance/textures/image_bitmap_from_image/*',
['mac', ('amd', 0x6821)], bug=589930)
# Mac AMD failures
self.Fail('conformance/textures/image_bitmap_from_image/' +
'tex-2d-rgb-rgb-unsigned_byte.html',
['mac', ('amd', 0x679e)], bug=589930)
self.Fail('conformance/textures/image_bitmap_from_image/' +
'tex-2d-rgba-rgba-unsigned_byte.html',
['mac', ('amd', 0x679e)], bug=589930)
# Mac Intel failures
self.Fail('conformance/textures/image_bitmap_from_image/' +
'tex-2d-rgb-rgb-unsigned_byte.html',
['mac', 'intel'], bug=589930)
self.Fail('conformance/textures/image_bitmap_from_image/' +
'tex-2d-rgba-rgba-unsigned_byte.html',
['mac', 'intel'], bug=589930)
# Linux failures
# NVIDIA
self.Fail('conformance/extensions/angle-instanced-arrays.html',
['linux', 'nvidia'], bug=544989) # Too flaky to retry
self.Flaky('conformance/extensions/oes-element-index-uint.html',
['linux', 'nvidia'], bug=524144)
self.Flaky('conformance/textures/image/' +
'tex-2d-rgb-rgb-unsigned_byte.html',
['linux', 'nvidia'], bug=596622)
# AMD
self.Flaky('conformance/more/functions/uniformi.html',
['linux', 'amd'], bug=550989)
self.Fail('deqp/data/gles2/shaders/preprocessor.html',
['linux', 'amd'], bug=478572)
# AMD Radeon 6450
self.Fail('conformance/extensions/angle-instanced-arrays.html',
['linux', ('amd', 0x6779)], bug=479260)
self.Flaky('conformance/extensions/ext-texture-filter-anisotropic.html',
['linux', ('amd', 0x6779)], bug=436212)
self.Flaky('conformance/glsl/misc/shader-struct-scope.html',
['linux', ('amd', 0x6779)], bug=436212)
self.Flaky('conformance/glsl/misc/struct-nesting-of-variable-names.html',
['linux', ('amd', 0x6779)], bug=436212)
self.Flaky('conformance/rendering/point-size.html',
['linux', ('amd', 0x6779)], bug=436212)
self.Flaky('conformance/textures/misc/texture-sub-image-cube-maps.html',
['linux', ('amd', 0x6779)], bug=436212)
self.Flaky('conformance/more/functions/uniformf.html',
['linux', ('amd', 0x6779)], bug=436212)
self.Fail('conformance/glsl/misc/shaders-with-invariance.html',
['linux', ('amd', 0x6779)], bug=479952)
self.Flaky('conformance/textures/misc/texture-mips.html',
['linux', ('amd', 0x6779)], bug=479981)
self.Flaky('conformance/textures/misc/texture-size-cube-maps.html',
['linux', ('amd', 0x6779)], bug=479983)
self.Flaky('conformance/uniforms/uniform-default-values.html',
['linux', ('amd', 0x6779)], bug=482013)
self.Flaky('conformance/glsl/samplers/glsl-function-texture2dlod.html',
['linux', ('amd', 0x6779)], bug=436212)
self.Flaky('conformance/glsl/samplers/glsl-function-texture2dprojlod.html',
['linux', ('amd', 0x6779)], bug=436212)
# Intel
self.Skip('conformance/glsl/bugs/temp-expressions-should-not-crash.html',
['linux', 'intel'], bug=540543) # GPU timeout
self.Fail('conformance/glsl/bugs/qualcomm-loop-with-continue-crash.html',
['linux', 'intel'], bug=540543) # ANGLE bug 1277
self.Fail('conformance/glsl/misc/empty_main.vert.html',
['linux', 'intel'], bug=540543) # ANGLE bug 1277
self.Fail('conformance/glsl/misc/gl_position_unset.vert.html',
['linux', 'intel'], bug=540543) # ANGLE bug 1277
self.Fail('conformance/glsl/misc/shaders-with-invariance.html',
['linux', 'intel'], bug=540543) # ANGLE bug 1276
self.Fail('conformance/glsl/misc/shaders-with-varyings.html',
['linux', 'intel'], bug=540543)
self.Fail('conformance/extensions/ext-disjoint-timer-query.html',
['linux', 'intel', 'opengl'], bug=1312) # ANGLE bug id
self.Fail('deqp/data/gles2/shaders/linkage.html',
['linux', 'intel'], bug=540543)
self.Fail('deqp/data/gles2/shaders/preprocessor.html',
['linux', 'intel'], bug=1312) # ANGLE bug id. See also 598910
self.Fail('conformance/glsl/bugs/sampler-array-using-loop-index.html',
['linux', 'intel', 'opengl'], bug=598924)
# Android failures
self.Fail('deqp/data/gles2/shaders/constants.html',
['android'], bug=478572)
self.Fail('deqp/data/gles2/shaders/conversions.html',
['android'], bug=478572)
self.Fail('deqp/data/gles2/shaders/declarations.html',
['android'], bug=478572)
self.Fail('deqp/data/gles2/shaders/linkage.html',
['android'], bug=478572)
self.Fail('conformance/textures/image/tex-2d-rgb-rgb-unsigned_byte.html',
['android'], bug=586183)
self.Fail('conformance/textures/misc/texture-npot-video.html',
['android', 'android-content-shell'], bug=601110)
self.Fail('conformance/textures/video/*',
['android'], bug=601110)
# The following tests timed out on android, so skip them for now.
self.Skip('conformance/textures/image_bitmap_from_video/*',
['android'], bug=585108)
# The following WebView crashes are causing problems with further
# tests in the suite, so skip them for now.
self.Skip('conformance/textures/video/' +
'tex-2d-rgb-rgb-unsigned_byte.html',
['android', 'android-webview-shell'], bug=352645)
self.Skip('conformance/textures/video/' +
'tex-2d-rgb-rgb-unsigned_short_5_6_5.html',
['android', 'android-webview-shell'], bug=352645)
self.Skip('conformance/textures/video/' +
'tex-2d-rgba-rgba-unsigned_byte.html',
['android', 'android-webview-shell'], bug=352645)
self.Skip('conformance/textures/video/' +
'tex-2d-rgba-rgba-unsigned_short_4_4_4_4.html',
['android', 'android-webview-shell'], bug=352645)
self.Skip('conformance/textures/video/' +
'tex-2d-rgba-rgba-unsigned_short_5_5_5_1.html',
['android', 'android-webview-shell'], bug=352645)
self.Skip('conformance/textures/misc/texture-npot-video.html',
['android', 'android-webview-shell'], bug=352645)
# These are failing on the Nexus 5 and 6
self.Fail('conformance/extensions/oes-texture-float-with-canvas.html',
['android', 'qualcomm'], bug=499555)
# This crashes in Android WebView on the Nexus 6, preventing the
# suite from running further. Rather than add multiple
# suppressions, skip it until it's passing at least in content
# shell.
self.Skip('conformance/extensions/oes-texture-float-with-video.html',
['android', 'qualcomm'], bug=499555)
# Nexus 5 failures
self.Fail('conformance/glsl/bugs/struct-constructor-highp-bug.html',
['android', ('qualcomm', 'Adreno (TM) 330')], bug=559342)
self.Fail('conformance/glsl/bugs/qualcomm-loop-with-continue-crash.html',
['android', ('qualcomm', 'Adreno (TM) 330')], bug=527761)
self.Fail('conformance/glsl/bugs/sketchfab-lighting-shader-crash.html',
['android', ('qualcomm', 'Adreno (TM) 330')], bug=551937)
# Nexus 6 failures only
self.Fail('conformance/context/' +
'context-attributes-alpha-depth-stencil-antialias.html',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=499555)
self.Fail('conformance/context/premultiplyalpha-test.html',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=499555)
self.Fail('conformance/extensions/oes-texture-float-with-image-data.html',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=499555)
self.Fail('conformance/extensions/oes-texture-float-with-image.html',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=499555)
self.Fail('conformance/textures/image_bitmap_from_blob/*',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=585108)
self.Fail('conformance/textures/image_bitmap_from_canvas/*',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=585108)
self.Fail('conformance/textures/image_bitmap_from_image/*',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=585108)
self.Fail('conformance/textures/image_bitmap_from_image_data/*',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=585108)
self.Fail('conformance/textures/image_bitmap_from_image_bitmap/*',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=598262)
self.Fail('conformance/textures/video/' +
'tex-2d-rgb-rgb-unsigned_byte.html',
['android', 'android-content-shell',
('qualcomm', 'Adreno (TM) 420')], bug=499555)
self.Fail('conformance/textures/video/' +
'tex-2d-rgba-rgba-unsigned_byte.html',
['android', 'android-content-shell',
('qualcomm', 'Adreno (TM) 420')], bug=499555)
self.Fail('conformance/textures/video/' +
'tex-2d-rgb-rgb-unsigned_short_5_6_5.html',
['android', 'android-content-shell',
('qualcomm', 'Adreno (TM) 420')], bug=499555)
self.Fail('conformance/textures/video/' +
'tex-2d-rgba-rgba-unsigned_short_4_4_4_4.html',
['android', 'android-content-shell',
('qualcomm', 'Adreno (TM) 420')], bug=499555)
self.Fail('conformance/textures/video/' +
'tex-2d-rgba-rgba-unsigned_short_5_5_5_1.html',
['android', 'android-content-shell',
('qualcomm', 'Adreno (TM) 420')], bug=499555)
# bindBufferBadArgs is causing the GPU thread to crash, taking
# down the WebView shell, causing the next test to fail and
# subsequent tests to be aborted.
self.Skip('conformance/more/functions/bindBufferBadArgs.html',
['android', 'android-webview-shell',
('qualcomm', 'Adreno (TM) 420')], bug=499874)
self.Fail('conformance/rendering/gl-scissor-test.html',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=499555)
self.Fail('conformance/textures/misc/' +
'copy-tex-image-and-sub-image-2d.html',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=499555)
self.Fail('conformance/textures/misc/' +
'tex-image-and-sub-image-2d-with-array-buffer-view.html',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=499555)
self.Fail('conformance/textures/canvas/*',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=499555)
self.Fail('conformance/textures/image_data/*',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=499555)
self.Fail('conformance/textures/image/*',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=499555)
self.Fail('conformance/textures/webgl_canvas/*',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=499555)
# Nexus 9 failures
self.Skip('conformance/extensions/oes-texture-float-with-video.html',
['android', 'nvidia'], bug=499555) # flaky
# The following test is very slow and therefore times out on Android bot.
self.Skip('conformance/rendering/multisample-corruption.html',
['android'])
# ChromeOS: affecting all devices.
self.Fail('conformance/extensions/webgl-depth-texture.html',
['chromeos'], bug=382651)
# ChromeOS: all Intel except for pinetrail (stumpy, parrot, peppy,...)
# We will just include pinetrail here for now as we don't want to list
# every single Intel device ID.
self.Fail('conformance/glsl/misc/empty_main.vert.html',
['chromeos', 'intel'], bug=375556)
self.Fail('conformance/glsl/misc/gl_position_unset.vert.html',
['chromeos', 'intel'], bug=375556)
self.Fail('conformance/glsl/misc/shaders-with-varyings.html',
['chromeos', 'intel'], bug=375556)
self.Fail('conformance/renderbuffers/framebuffer-object-attachment.html',
['chromeos', 'intel'], bug=375556)
self.Fail('conformance/textures/misc/texture-size-limit.html',
['chromeos', 'intel'], bug=385361)
# ChromeOS: pinetrail (alex, mario, zgb).
self.Fail('conformance/attribs/gl-vertex-attrib-render.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/glsl/functions/glsl-function-atan-xy.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/glsl/functions/glsl-function-cos.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/glsl/functions/glsl-function-sin.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/glsl/variables/gl-frontfacing.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/limits/gl-max-texture-dimensions.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/ogles/GL/acos/acos_001_to_006.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/ogles/GL/asin/asin_001_to_006.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/ogles/GL/atan/atan_001_to_008.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/ogles/GL/build/build_009_to_016.html',
['chromeos', ('intel', 0xa011)], bug=378938)
self.Fail('conformance/ogles/GL/control_flow/control_flow_001_to_008.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/ogles/GL/cos/cos_001_to_006.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/ogles/GL/discard/discard_001_to_002.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/ogles/GL/functions/functions_001_to_008.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/ogles/GL/functions/functions_065_to_072.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/ogles/GL/functions/functions_081_to_088.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/ogles/GL/functions/functions_097_to_104.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/ogles/GL/functions/functions_105_to_112.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/ogles/GL/functions/functions_113_to_120.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/ogles/GL/functions/functions_121_to_126.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail(
'conformance/ogles/GL/gl_FrontFacing/gl_FrontFacing_001_to_001.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/ogles/GL/log/log_001_to_008.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/ogles/GL/log2/log2_001_to_008.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/ogles/GL/normalize/normalize_001_to_006.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/ogles/GL/sin/sin_001_to_006.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/rendering/point-size.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/rendering/polygon-offset.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/textures/misc/texture-mips.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/textures/misc/texture-npot.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/textures/misc/texture-npot-video.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/textures/misc/texture-size.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Fail('conformance/uniforms/gl-uniform-arrays.html',
['chromeos', ('intel', 0xa011)], bug=375554)
self.Skip('conformance/uniforms/uniform-default-values.html',
['chromeos', ('intel', 0xa011)], bug=375554)
|
richardtran415/pymatgen
|
refs/heads/master
|
pymatgen/core/tests/test_trajectory.py
|
4
|
import os
import numpy as np
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.trajectory import Trajectory
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.io.vasp.outputs import Xdatcar
from pymatgen.util.testing import PymatgenTest
class TrajectoryTest(PymatgenTest):
def setUp(self):
xdatcar = Xdatcar(os.path.join(PymatgenTest.TEST_FILES_DIR, "Traj_XDATCAR"))
self.traj = Trajectory.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Traj_XDATCAR"))
self.structures = xdatcar.structures
def _check_traj_equality(self, traj_1, traj_2):
if np.sum(np.square(np.subtract(traj_1.lattice, traj_2.lattice))) > 0.0001:
return False
if traj_1.species != traj_2.species:
return False
        return all(i == j for i, j in zip(traj_1, traj_2))
def test_single_index_slice(self):
self.assertTrue(all([self.traj[i] == self.structures[i] for i in range(0, len(self.structures), 19)]))
def test_slice(self):
sliced_traj = self.traj[2:99:3]
sliced_traj_from_structs = Trajectory.from_structures(self.structures[2:99:3])
if len(sliced_traj) == len(sliced_traj_from_structs):
self.assertTrue(all([sliced_traj[i] == sliced_traj_from_structs[i] for i in range(len(sliced_traj))]))
else:
            self.fail("sliced trajectory length mismatch")
sliced_traj = self.traj[:-4:2]
sliced_traj_from_structs = Trajectory.from_structures(self.structures[:-4:2])
if len(sliced_traj) == len(sliced_traj_from_structs):
self.assertTrue(all([sliced_traj[i] == sliced_traj_from_structs[i] for i in range(len(sliced_traj))]))
else:
            self.fail("sliced trajectory length mismatch")
def test_list_slice(self):
sliced_traj = self.traj[[10, 30, 70]]
sliced_traj_from_structs = Trajectory.from_structures([self.structures[i] for i in [10, 30, 70]])
if len(sliced_traj) == len(sliced_traj_from_structs):
self.assertTrue(all([sliced_traj[i] == sliced_traj_from_structs[i] for i in range(len(sliced_traj))]))
else:
            self.fail("list-sliced trajectory length mismatch")
def test_conversion(self):
# Convert to displacements and back. Check structures
self.traj.to_displacements()
self.traj.to_positions()
self.assertTrue(all([struct == self.structures[i] for i, struct in enumerate(self.traj)]))
def test_copy(self):
traj_copy = self.traj.copy()
self.assertTrue(all([i == j for i, j in zip(self.traj, traj_copy)]))
def test_site_properties(self):
lattice = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
species = ["Si", "Si"]
frac_coords = [
[[0, 0, 0], [0.5, 0.5, 0.5]],
[[0.1, 0.1, 0.1], [0.6, 0.6, 0.6]],
[[0.2, 0.2, 0.2], [0.7, 0.7, 0.7]],
]
site_properties = [
{
"selective_dynamics": [[True, True, True], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[False, False, False], [False, False, False]],
"magmom": [6, 6],
},
{
"selective_dynamics": [[True, True, True], [False, False, False]],
"magmom": [5, 5],
},
]
traj = Trajectory(lattice, species, frac_coords, site_properties=site_properties)
# compare the overall site properties list
self.assertEqual(traj.site_properties, site_properties)
        # compare the site properties after slicing
self.assertEqual(traj[0].site_properties, site_properties[0])
self.assertEqual(traj[1:].site_properties, site_properties[1:])
def test_frame_properties(self):
lattice = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
species = ["Si", "Si"]
frac_coords = [
[[0, 0, 0], [0.5, 0.5, 0.5]],
[[0.1, 0.1, 0.1], [0.6, 0.6, 0.6]],
[[0.2, 0.2, 0.2], [0.7, 0.7, 0.7]],
]
site_properties = [
{
"selective_dynamics": [[True, True, True], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[False, False, False], [False, False, False]],
"magmom": [6, 6],
},
{
"selective_dynamics": [[True, True, True], [False, False, False]],
"magmom": [5, 5],
},
]
frame_properties = {"energy_per_atom": [-3.0001, -3.0971, -3.0465]}
traj = Trajectory(
lattice,
species,
frac_coords,
site_properties=site_properties,
frame_properties=frame_properties,
)
# compare the overall site properties list
self.assertEqual(traj.frame_properties, frame_properties)
# compare the site properties after slicing
expected_output = {"energy_per_atom": [-3.0971, -3.0465]}
self.assertEqual(traj[1:].frame_properties, expected_output)
def test_extend(self):
traj = self.traj.copy()
# Case of compatible trajectories
compatible_traj = Trajectory.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Traj_Combine_Test_XDATCAR_1"))
traj.extend(compatible_traj)
full_traj = Trajectory.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Traj_Combine_Test_XDATCAR_Full"))
        compatible_success = self._check_traj_equality(traj, full_traj)
# Case of incompatible trajectories
traj = self.traj.copy()
incompatible_traj = Trajectory.from_file(
os.path.join(PymatgenTest.TEST_FILES_DIR, "Traj_Combine_Test_XDATCAR_2")
)
incompatible_test_success = False
try:
traj.extend(incompatible_traj)
except Exception:
incompatible_test_success = True
self.assertTrue(compatible_success and incompatible_test_success)
def test_extend_no_site_props(self):
lattice = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
species = ["Si", "Si"]
frac_coords = [
[[0, 0, 0], [0.5, 0.5, 0.5]],
[[0.1, 0.1, 0.1], [0.6, 0.6, 0.6]],
[[0.2, 0.2, 0.2], [0.7, 0.7, 0.7]],
]
# Trajectory with no site_properties
traj_1 = Trajectory(lattice, species, frac_coords)
traj_2 = Trajectory(lattice, species, frac_coords)
# Test combining two trajectories with no site properties
traj_combined = traj_1.copy()
traj_combined.extend(traj_2)
self.assertEqual(traj_combined.site_properties, None)
def test_extend_equivalent_site_props(self):
lattice = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
species = ["Si", "Si"]
frac_coords = [
[[0, 0, 0], [0.5, 0.5, 0.5]],
[[0.1, 0.1, 0.1], [0.6, 0.6, 0.6]],
[[0.2, 0.2, 0.2], [0.7, 0.7, 0.7]],
]
# Trajectories with constant site properties
site_properties_1 = [
{
"selective_dynamics": [[True, True, True], [False, False, False]],
"magmom": [5, 5],
}
]
traj_1 = Trajectory(lattice, species, frac_coords, site_properties=site_properties_1)
site_properties_2 = [
{
"selective_dynamics": [[True, True, True], [False, False, False]],
"magmom": [5, 5],
}
]
traj_2 = Trajectory(lattice, species, frac_coords, site_properties=site_properties_2)
# Test combining two trajectories with similar site_properties
traj_combined = traj_1.copy()
traj_combined.extend(traj_2)
self.assertEqual(traj_combined.site_properties, site_properties_1)
def test_extend_inequivalent_site_props(self):
lattice = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
species = ["Si", "Si"]
frac_coords = [
[[0, 0, 0], [0.5, 0.5, 0.5]],
[[0.1, 0.1, 0.1], [0.6, 0.6, 0.6]],
[[0.2, 0.2, 0.2], [0.7, 0.7, 0.7]],
]
# Trajectories with constant site properties
site_properties_1 = [
{
"selective_dynamics": [[False, False, False], [False, False, False]],
"magmom": [5, 5],
}
]
traj_1 = Trajectory(lattice, species, frac_coords, site_properties=site_properties_1)
site_properties_2 = [
{
"selective_dynamics": [[True, True, True], [False, False, False]],
"magmom": [5, 5],
}
]
traj_2 = Trajectory(lattice, species, frac_coords, site_properties=site_properties_2)
# Test combining two trajectories with similar site_properties
traj_combined = traj_1.copy()
traj_combined.extend(traj_2)
expected_site_props = [
{
"selective_dynamics": [[False, False, False], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[False, False, False], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[False, False, False], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, True, True], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, True, True], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, True, True], [False, False, False]],
"magmom": [5, 5],
},
]
self.assertEqual(traj_combined.site_properties, expected_site_props)
# Trajectory with const site_properties and trajectory with changing site properties
site_properties_1 = [
{
"selective_dynamics": [[True, False, False], [False, False, False]],
"magmom": [5, 5],
}
]
traj_1 = Trajectory(lattice, species, frac_coords, site_properties=site_properties_1)
site_properties_2 = [
{
"selective_dynamics": [[False, True, True], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, False, True], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, True, False], [False, False, False]],
"magmom": [5, 5],
},
]
traj_2 = Trajectory(lattice, species, frac_coords, site_properties=site_properties_2)
# Test combining two trajectories with similar site_properties
traj_combined = traj_1.copy()
traj_combined.extend(traj_2)
expected_site_props = [
{
"selective_dynamics": [[True, False, False], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, False, False], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, False, False], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[False, True, True], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, False, True], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, True, False], [False, False, False]],
"magmom": [5, 5],
},
]
self.assertEqual(traj_combined.site_properties, expected_site_props)
# The other way around
traj_combined = traj_2.copy()
traj_combined.extend(traj_1)
expected_site_props = [
{
"selective_dynamics": [[False, True, True], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, False, True], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, True, False], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, False, False], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, False, False], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, False, False], [False, False, False]],
"magmom": [5, 5],
},
]
self.assertEqual(traj_combined.site_properties, expected_site_props)
# Trajectory with no and trajectory with changing site properties
site_properties_1 = None
traj_1 = Trajectory(lattice, species, frac_coords, site_properties=site_properties_1)
site_properties_2 = [
{
"selective_dynamics": [[False, True, True], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, False, True], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, True, False], [False, False, False]],
"magmom": [5, 5],
},
]
traj_2 = Trajectory(lattice, species, frac_coords, site_properties=site_properties_2)
# Test combining two trajectories with similar site_properties
traj_combined = traj_1.copy()
traj_combined.extend(traj_2)
expected_site_props = [
None,
None,
None,
{
"selective_dynamics": [[False, True, True], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, False, True], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, True, False], [False, False, False]],
"magmom": [5, 5],
},
]
self.assertEqual(traj_combined.site_properties, expected_site_props)
# The other way around
traj_combined = traj_2.copy()
traj_combined.extend(traj_1)
expected_site_props = [
{
"selective_dynamics": [[False, True, True], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, False, True], [False, False, False]],
"magmom": [5, 5],
},
{
"selective_dynamics": [[True, True, False], [False, False, False]],
"magmom": [5, 5],
},
None,
None,
None,
]
self.assertEqual(traj_combined.site_properties, expected_site_props)
def test_extend_no_frame_props(self):
lattice = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
species = ["Si", "Si"]
frac_coords = [
[[0, 0, 0], [0.5, 0.5, 0.5]],
[[0.1, 0.1, 0.1], [0.6, 0.6, 0.6]],
[[0.2, 0.2, 0.2], [0.7, 0.7, 0.7]],
]
# Trajectory with no site_properties
traj_1 = Trajectory(lattice, species, frac_coords)
traj_2 = Trajectory(lattice, species, frac_coords)
# Test combining two trajectories with no site properties
traj_combined = traj_1.copy()
traj_combined.extend(traj_2)
self.assertEqual(traj_combined.frame_properties, None)
def test_extend_frame_props(self):
lattice = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
species = ["Si", "Si"]
frac_coords = [
[[0, 0, 0], [0.5, 0.5, 0.5]],
[[0.1, 0.1, 0.1], [0.6, 0.6, 0.6]],
[[0.2, 0.2, 0.2], [0.7, 0.7, 0.7]],
]
# Trajectories with constant site properties
frame_properties_1 = {"energy": [-3, -3.9, -4.1]}
traj_1 = Trajectory(lattice, species, frac_coords, frame_properties=frame_properties_1)
frame_properties_2 = {"energy": [-4.2, -4.25, -4.3]}
traj_2 = Trajectory(lattice, species, frac_coords, frame_properties=frame_properties_2)
# Test combining two trajectories with similar site_properties
traj_combined = traj_1.copy()
traj_combined.extend(traj_2)
expected_frame_properties = {"energy": [-3, -3.9, -4.1, -4.2, -4.25, -4.3]}
self.assertEqual(traj_combined.frame_properties, expected_frame_properties)
        # Mismatched frame properties
frame_properties_3 = {"energy": [-4.2, -4.25, -4.3], "pressure": [2, 2.5, 2.5]}
traj_3 = Trajectory(lattice, species, frac_coords, frame_properties=frame_properties_3)
traj_combined = traj_1.copy()
traj_combined.extend(traj_3)
expected_frame_properties = {
"energy": [-3, -3.9, -4.1, -4.2, -4.25, -4.3],
"pressure": [None, None, None, 2, 2.5, 2.5],
}
self.assertEqual(traj_combined.frame_properties, expected_frame_properties)
def test_length(self):
        self.assertEqual(len(self.traj), len(self.structures))
def test_displacements(self):
poscar = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR"))
structures = [poscar.structure]
displacements = np.zeros((11, *np.shape(structures[-1].frac_coords)))
for i in range(10):
displacement = np.random.random_sample(np.shape(structures[-1].frac_coords)) / 20
new_coords = displacement + structures[-1].frac_coords
structures.append(Structure(structures[-1].lattice, structures[-1].species, new_coords))
displacements[i + 1, :, :] = displacement
traj = Trajectory.from_structures(structures, constant_lattice=True)
traj.to_displacements()
self.assertTrue(np.allclose(traj.frac_coords, displacements))
def test_variable_lattice(self):
structure = self.structures[0]
# Generate structures with different lattices
structures = []
for i in range(10):
new_lattice = np.dot(structure.lattice.matrix, np.diag(1 + np.random.random_sample(3) / 20))
temp_struct = structure.copy()
temp_struct.lattice = Lattice(new_lattice)
structures.append(temp_struct)
traj = Trajectory.from_structures(structures, constant_lattice=False)
# Check if lattices were properly stored
self.assertTrue(
all(np.allclose(struct.lattice.matrix, structures[i].lattice.matrix) for i, struct in enumerate(traj))
)
def test_to_from_dict(self):
d = self.traj.as_dict()
traj = Trajectory.from_dict(d)
self.assertEqual(type(traj), Trajectory)
def test_xdatcar_write(self):
self.traj.write_Xdatcar(filename="traj_test_XDATCAR")
# Load trajectory from written xdatcar and compare to original
written_traj = Trajectory.from_file("traj_test_XDATCAR")
self._check_traj_equality(self.traj, written_traj)
os.remove("traj_test_XDATCAR")
if __name__ == "__main__":
import unittest
unittest.main()
|
aidanlister/django
|
refs/heads/master
|
django/conf/locale/ml/__init__.py
|
12133432
| |
zigdon/evelink
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
guidosarducci/plugin.video.youtube
|
refs/heads/master
|
resources/lib/kodion/simple_requests/__init__.py
|
35
|
__author__ = 'bromix'
__all__ = ['get', 'post', 'put', 'delete', 'head', 'codes']
from .constants import codes
from .api import get, post, put, delete, head
|
luhn/AutobahnPython
|
refs/heads/master
|
examples/twisted/wamp/basic/rpc/slowsquare/frontend.py
|
3
|
###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import time
from twisted.internet import reactor
from twisted.internet.defer import DeferredList
from autobahn.twisted.wamp import ApplicationSession
class Component(ApplicationSession):
"""
An application component using the time service.
"""
def onJoin(self, details):
print("session attached")
def got(res, started, msg):
duration = 1000. * (time.clock() - started)
print("{}: {} in {}".format(msg, res, duration))
t1 = time.clock()
d1 = self.call('com.math.slowsquare', 3)
d1.addCallback(got, t1, "Slow Square")
t2 = time.clock()
d2 = self.call('com.math.square', 3)
d2.addCallback(got, t2, "Quick Square")
def done(_):
print("All finished.")
self.leave()
        DeferredList([d1, d2]).addBoth(done)
def onDisconnect(self):
print("disconnected")
reactor.stop()
if __name__ == '__main__':
from autobahn.twisted.wamp import ApplicationRunner
runner = ApplicationRunner("ws://127.0.0.1:8080/ws", "realm1")
runner.run(Component)
|
enoposix/mpv
|
refs/heads/master
|
waftools/dependencies.py
|
13
|
from waflib.Errors import ConfigurationError, WafError
from waflib.Configure import conf
from waflib.Build import BuildContext
from waflib.Logs import pprint
import inflector
class DependencyError(Exception):
pass
class Dependency(object):
def __init__(self, ctx, known_deps, satisfied_deps, dependency):
self.ctx = ctx
self.known_deps = known_deps
self.satisfied_deps = satisfied_deps
self.identifier, self.desc = dependency['name'], dependency['desc']
self.attributes = self.__parse_attributes__(dependency)
known_deps.add(self.identifier)
for dep_key in ['deps', 'deps_any', 'deps_neg']:
if dep_key in self.attributes:
deps = self.attributes[dep_key]
self.ctx.ensure_dependency_is_known(*deps)
def __parse_attributes__(self, dependency):
if 'os_specific_checks' in dependency:
all_chks = dependency['os_specific_checks']
chks = [check for check in all_chks if check in self.satisfied_deps]
if any(chks):
return all_chks[chks[0]]
return dependency
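    # Note (a reading aid, not original documentation): 'os_specific_checks'
    # maps already-satisfied dependency identifiers (typically 'os-*' targets)
    # to replacement attribute dicts; the first matching key wins and its
    # dict replaces the dependency's default attributes wholesale.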
def check(self):
self.ctx.start_msg('Checking for {0}'.format(self.desc))
try:
self.check_group_disabled()
self.check_disabled()
self.check_any_dependencies()
self.check_dependencies()
self.check_negative_dependencies()
except DependencyError:
# No check was run, since the prerequisites of the dependency are
# not satisfied. Make sure the define is 'undefined' so that we
# get a `#define YYY 0` in `config.h`.
self.ctx.undefine(inflector.define_key(self.identifier))
self.fatal_if_needed()
return
self.check_autodetect_func()
def check_group_disabled(self):
if 'groups' in self.attributes:
groups = self.attributes['groups']
            disabled = (self.enabled_option(g) is False for g in groups)
if any(disabled):
self.skip()
raise DependencyError
def check_disabled(self):
        if self.enabled_option() is False:
            self.skip()
            raise DependencyError
        if self.enabled_option() is True:
self.attributes['req'] = True
self.attributes['fmsg'] = "You manually enabled the feature '{0}', but \
the autodetection check failed.".format(self.identifier)
def check_any_dependencies(self):
if 'deps_any' in self.attributes:
deps = set(self.attributes['deps_any'])
if len(deps & self.satisfied_deps) == 0:
self.skip("not found any of {0}".format(", ".join(deps)))
raise DependencyError
def check_dependencies(self):
if 'deps' in self.attributes:
deps = set(self.attributes['deps'])
if not deps <= self.satisfied_deps:
missing_deps = deps - self.satisfied_deps
self.skip("{0} not found".format(", ".join(missing_deps)))
raise DependencyError
def check_negative_dependencies(self):
if 'deps_neg' in self.attributes:
deps = set(self.attributes['deps_neg'])
conflicting_deps = deps & self.satisfied_deps
if len(conflicting_deps) > 0:
self.skip("{0} found".format(", ".join(conflicting_deps)), 'CYAN')
raise DependencyError
def check_autodetect_func(self):
if self.attributes['func'](self.ctx, self.identifier):
self.success(self.identifier)
else:
self.fail()
self.ctx.undefine(inflector.define_key(self.identifier))
self.fatal_if_needed()
def enabled_option(self, identifier=None):
try:
return getattr(self.ctx.options, self.enabled_option_repr(identifier))
except AttributeError:
pass
return None
def enabled_option_repr(self, identifier):
return "enable_{0}".format(identifier or self.identifier)
def success(self, depname):
self.ctx.mark_satisfied(depname)
self.ctx.end_msg(self.__message__('yes'))
def fail(self, reason='no'):
self.ctx.end_msg(self.__message__(reason), 'RED')
def fatal_if_needed(self):
        if self.enabled_option() is False:
return
if self.attributes.get('req', False):
raise ConfigurationError(self.attributes['fmsg'])
def skip(self, reason='disabled', color='YELLOW'):
self.ctx.end_msg(self.__message__(reason), color)
def __message__(self, message):
optional_message = self.ctx.deps_msg.get(self.identifier)
if optional_message:
return "{0} ({1})".format(message, optional_message)
else:
return message
def configure(ctx):
def __detect_target_os_dependency__(ctx):
target = "os-{0}".format(ctx.env.DEST_OS)
ctx.start_msg('Detected target OS:')
ctx.end_msg(target)
ctx.known_deps.add(target)
ctx.satisfied_deps.add(target)
ctx.deps_msg = {}
ctx.known_deps = set()
ctx.satisfied_deps = set()
__detect_target_os_dependency__(ctx)
@conf
def ensure_dependency_is_known(ctx, *depnames):
deps = set([d for d in depnames if not d.startswith('os-')])
if not deps <= ctx.known_deps:
raise ConfigurationError(
"error in dependencies definition: some dependencies in"
" {0} are unknown.".format(deps))
@conf
def mark_satisfied(ctx, dependency_identifier):
ctx.satisfied_deps.add(dependency_identifier)
@conf
def add_optional_message(ctx, dependency_identifier, message):
ctx.deps_msg[dependency_identifier] = message
@conf
def parse_dependencies(ctx, dependencies):
def __check_dependency__(ctx, dependency):
Dependency(ctx,
ctx.known_deps,
ctx.satisfied_deps,
dependency).check()
[__check_dependency__(ctx, dependency) for dependency in dependencies]
@conf
def dependency_satisfied(ctx, dependency_identifier):
ctx.ensure_dependency_is_known(dependency_identifier)
return dependency_identifier in ctx.satisfied_deps
@conf
def store_dependencies_lists(ctx):
ctx.env.known_deps = list(ctx.known_deps)
ctx.env.satisfied_deps = list(ctx.satisfied_deps)
@conf
def unpack_dependencies_lists(ctx):
ctx.known_deps = set(ctx.env.known_deps)
ctx.satisfied_deps = set(ctx.env.satisfied_deps)
def filtered_sources(ctx, sources):
def __source_file__(source):
if isinstance(source, tuple):
return source[0]
else:
return source
def __check_filter__(dependency):
if dependency.find('!') == 0:
dependency = dependency.lstrip('!')
ctx.ensure_dependency_is_known(dependency)
return dependency not in ctx.satisfied_deps
else:
ctx.ensure_dependency_is_known(dependency)
return dependency in ctx.satisfied_deps
def __unpack_and_check_filter__(source):
try:
_, dependency = source
return __check_filter__(dependency)
except ValueError:
return True
return [__source_file__(source) for source in sources \
if __unpack_and_check_filter__(source)]
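# A hedged usage sketch (file names are illustrative, not from mpv's wscript):
# with 'os-win32' in ctx.satisfied_deps,
#   ctx.filtered_sources(['core.c', ('win32.c', 'os-win32'), ('posix.c', '!os-win32')])
# returns ['core.c', 'win32.c']: plain entries always pass, ('file', 'dep')
# entries require the dependency to be satisfied, and a leading '!' inverts
# the condition.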
def env_fetch(tx):
def fn(ctx):
deps = ctx.env.satisfied_deps
lists = [ctx.env[tx(dep)] for dep in deps if (tx(dep) in ctx.env)]
return [item for sublist in lists for item in sublist]
return fn
def dependencies_use(ctx):
return [inflector.storage_key(dep) for dep in ctx.env.satisfied_deps]
BuildContext.filtered_sources = filtered_sources
BuildContext.dependencies_use = dependencies_use
BuildContext.dependencies_includes = env_fetch(lambda x: "INCLUDES_{0}".format(x))
BuildContext.dependency_satisfied = dependency_satisfied
|
meghana1995/sympy
|
refs/heads/master
|
sympy/liealgebras/root_system.py
|
4
|
# -*- coding: utf-8 -*-
from .cartan_type import CartanType
from sympy.core import Basic
from sympy.core.compatibility import range
class RootSystem(Basic):
"""
Every simple Lie algebra has a unique root system.
To find the root system, we first consider the Cartan subalgebra of g,
which is the maximal abelian subalgebra, and consider the adjoint
action of g on this subalgebra. There is a root system associated
    with this action. Now, a root system over a vector space V is a finite
    set of vectors Φ (called roots), which satisfy:
1. The roots span V
2. The only scalar multiples of x in Φ are x and -x
3. For every x in Φ, the set Φ is closed under reflection
through the hyperplane perpendicular to x.
4. If x and y are roots in Φ, then the projection of y onto
the line through x is a half-integral multiple of x.
Now, there is a subset of Φ, which we will call Δ, such that:
1. Δ is a basis of V
2. Each root x in Φ can be written x = Σ k_y y for y in Δ
The elements of Δ are called the simple roots.
Therefore, we see that the simple roots span the root space of a given
simple Lie algebra.
References: https://en.wikipedia.org/wiki/Root_system
Lie Algebras and Representation Theory - Humphreys
"""
def __new__(cls, cartantype):
"""
Creates a new RootSystem object. This method assigns an attribute
called cartan_type to each instance of a RootSystem object. When
an instance of RootSystem is called, it needs an argument, which
should be an instance of a simple Lie algebra. We then take the
CartanType of this argument and set it as the cartan_type attribute
of the RootSystem instance.
"""
obj = Basic.__new__(cls, cartantype)
obj.cartan_type = CartanType(cartantype)
return obj
def simple_roots(self):
"""
This method generates and returns the simple roots of the Lie
algebra. The rank of the Lie algebra determines the number of
simple roots that it has. This method obtains the rank of the
Lie algebra, and then uses the simple_root method from the Lie
algebra classes to generate all the simple roots.
        Example
        =======
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> roots = c.simple_roots()
>>> roots
{1: [1, -1, 0, 0], 2: [0, 1, -1, 0], 3: [0, 0, 1, -1]}
"""
n = self.cartan_type.rank()
roots = {}
for i in range(1, n+1):
root = self.cartan_type.simple_root(i)
roots[i] = root
return roots
def all_roots(self):
"""
This method generates all the roots of a given root
system, and stores them in a dictionary where the
keys are integer numbers. It generates the roots
by getting the dictionary of all positive roots from
        the base classes, and then taking each root, and
multiplying it by -1 and adding it to the dictionary.
In this way all the negative roots are generated.
"""
alpha = self.cartan_type.positive_roots()
keys = list(alpha.keys())
k = max(keys)
for val in keys:
k += 1
root = alpha[val]
newroot = [-x for x in root]
alpha[k] = newroot
return alpha
def root_space(self):
"""
The root space is the vector space spanned by the
simple roots, i.e. it is a vector space with a
distinguished basis, the simple roots. This method
returns a string that represents the root space as
the span of the simple roots, alpha[1],...., alpha[n].
Example
=======
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> c.root_space()
'alpha[1] + alpha[2] + alpha[3]'
"""
n = self.cartan_type.rank()
rs = " + ".join("alpha["+str(i) +"]" for i in range(1, n+1))
return rs
def add_simple_roots(self, root1, root2):
"""
This is a method for adding two simple roots together.
The function takes as input two integers, root1 and root2.
It then uses these integers as keys in the dictionary of
simple roots, and gets the corresponding simple roots, and
then adds them together.
Example
=======
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> newroot = c.add_simple_roots(1, 2)
>>> newroot
[1, 0, -1, 0]
"""
alpha = self.simple_roots()
if root1 > len(alpha) or root2 > len(alpha):
raise ValueError("You've used a root that doesn't exist!")
a1 = alpha[root1]
a2 = alpha[root2]
newroot = []
length = len(a1)
for i in range(length):
newroot.append(a1[i] + a2[i])
return newroot
def add_as_roots(self, root1, root2):
"""
This is a method that takes two roots and adds them together
if and only if their sum is also a root. It takes as input
two vectors which should be roots. It then computes their sum
and checks if it is in the list of all possible roots. If it
is, it returns the sum. Otherwise it returns a string saying
that the sum is not a root.
Example
=======
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> c.add_as_roots([1, 0, -1, 0], [0, 0, 1, -1])
[1, 0, 0, -1]
>>> c.add_as_roots([1, -1, 0, 0], [0, 0, -1, 1])
'The sum of these two roots is not a root'
"""
alpha = self.all_roots()
newroot = []
for entry in range(len(root1)):
newroot.append(root1[entry] + root2[entry])
if newroot in alpha.values():
return newroot
else:
return "The sum of these two roots is not a root"
def cartan_matrix(self):
"""
Return the Cartan matrix of Lie algebra associated
with this root system.
Example
=======
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0],
[-1, 2, -1],
[ 0, -1, 2]])
"""
return self.cartan_type.cartan_matrix()
def dynkin_diagram(self):
"""
Return the Dynkin diagram of the Lie algebra
associated with this root system.
Example
=======
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> print(c.dynkin_diagram())
0---0---0
1 2 3
"""
return self.cartan_type.dynkin_diagram()
|
benschneider/dephasing_ringdown_sim
|
refs/heads/master
|
fit_to_data.py
|
1
|
"""
Created on Tue May 6 17:54:15 2014
@author: benschneider
structure of this code:
1st.
load simulated data with the fixed Qs and different Qrs.
(simulated data set needs to have the same time/point ratio as the measured data)
load measured data with known Qs and unknown Qr
2nd.
for each (Qr) in simulated data and the measured data list:
normalize (amplitude from 0 to 1) for measured and simulated data.
3rd.
find the position in time where the amplitude is 0.5
define this position to be at t = 0
4th.
compare (or fit), simulated with measured line for each Qr
5th.
show/print result
"""
import parsers as parser
import matplotlib.pyplot as pl
import numpy as np
#import simulation_functions as sim
def norm_line(linecut):
'''set the number range from 0 to 1'''
tmp = linecut - linecut.min()
linecut = tmp/tmp.max()
return linecut
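# A minimal sketch of norm_line's behaviour (input values are made up):
#   norm_line(np.array([2., 4., 6.])) -> array([0. , 0.5, 1. ])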
def find_index(linecut,target):
'''go through each element and
see if it is closer to the target element.
if so, store position,
otherwise continue searching until all elements have been checked.
'''
i = 0
    tmp1 = 1
    index = 0
for num in linecut:
value = abs(target-num)
if value < tmp1:
tmp1 = value
index = i
i+=1
return index#,tmp1
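# For example, find_index(np.array([0.9, 0.6, 0.2]), 0.5) returns 1,
# since 0.6 is the element closest to the target 0.5.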
def find_idx2(data, target, pre_target=0.5, pre_range=60):
'''specifically for this type of ringdown data'''
#pre normalize data
data = norm_line(data)
#find pre position
pre_pos0 = find_index(data, pre_target)
#select target area
pre_0 = (pre_pos0 - pre_range)
    if pre_0 < 0:
        print 'pre_0 is less than 0, decrease pre_range value'
pre_1 = (pre_pos0 + pre_range)
data1 = data[pre_0:pre_1]
#find target in target area
pre_pos1 = find_index(data1, target)
    pos = pre_0 + pre_pos1
return pos
def crop_at_target(data1d, pos, fit_left, fit_right):
p0 = pos - fit_left
p1 = pos + fit_right
if p0 < 0:
print 'p0 is less than 0, decrease left range'
data1d2 = data1d[p0:p1]
return data1d2
#all manual changes are done in parameters.py
execfile('parameters.py') #this file contains all the parameters described above
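# parameters.py is expected to define the filename_* variables and t_cut,
# which are used below but never defined in this file.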
#----- Load files (given by parameters.py)----
meas_raw = parser.loaddat(filename_2)
sim_raw, sim_raw_head = parser.loadmtx(filename_1)
#to be adjusted to ensure the pre alignment works well
fit_adj = 0.5 #
fit_adjf = 0.1 #0.0
pre_range = 120 #points
fit_left = 70#20#38 #points
fit_right = 600#300 #points
Qr_0 = eval(sim_raw_head[9])
Qr_1 = eval(sim_raw_head[10])
Qr_p = sim_raw.shape[0]
Qr_array = np.linspace(Qr_0, Qr_1, Qr_p)
#----- Measured trace ---- (no loop required)
meas_data = np.array(meas_raw[1])
meas_data = norm_line(meas_data) #normalize
meas_time = np.array(meas_raw[0]) #time in sec
#find zero position and adjust time
meas_pos = find_idx2(meas_data, fit_adjf, fit_adj, pre_range)
meas_time_off = meas_time[meas_pos]
meas_time = (meas_time - meas_time_off)
#crop data and time arrays
meas_data = crop_at_target(meas_data, meas_pos, fit_left, fit_right)
meas_time = crop_at_target(meas_time, meas_pos, fit_left, fit_right)
#normalize data for fitting
meas_data = norm_line(meas_data)
pl.figure(1)
pl.plot(meas_time,-meas_data+1) #plot trace to be fitted to
#----- Simulated trace ---- (loop is required)
sim_res_index = int(round(sim_raw.shape[1]/2)) #selected freq index number to use
#produce time axis for simulated data set
sim_t_0 = eval(sim_raw_head[3]) #start in usec
sim_t_1 = eval(sim_raw_head[4])
sim_time2 = np.linspace(sim_t_0, sim_t_1, sim_raw.shape[2])
#create empty Xi square matrix
Ki2 = np.zeros(sim_raw.shape[0])
sim_store = np.zeros([sim_raw.shape[0],(fit_left+fit_right)])
for Qr_index in range(0,sim_raw.shape[0]):
#Qr_index = 0 #for debugging
sim_data = sim_raw[Qr_index][sim_res_index] #select Qr trace
sim_data = sim_data[t_cut:] #crop away edge
sim_time = sim_time2[t_cut:] #crop away edge
#find zero position and adjust time
sim_pos = find_idx2(sim_data, fit_adjf, fit_adj, pre_range)
sim_time_off = sim_time[sim_pos]
sim_time = (sim_time - sim_time_off)
#crop data and time arrays
sim_data = crop_at_target(sim_data, sim_pos, fit_left, fit_right)
sim_time = crop_at_target(sim_time, sim_pos, fit_left, fit_right)
    #post normalize data
sim_data = norm_line(sim_data)
sim_store[Qr_index] = sim_data
#calculate difference
    Ki2_tmp = (meas_data - sim_data)**2
Ki2[Qr_index] = Ki2_tmp.sum()
#plot results
pl.figure(1)
pl.plot(sim_time,-sim_data+1)
#plot Chi-squared as a func of Qr
pl.figure(2)
pl.plot(Qr_array, Ki2)
#plot best fit and measured data
pl.figure(3)
#pl.plot(meas_time,-meas_data+1)
pl.plot(-meas_data+1)
bestfit_idx = Ki2.argmin()
bestfit = -sim_store[bestfit_idx]+1
nodephasing = -sim_store[0]+1
#pl.plot(sim_time,bestfit)
pl.plot(bestfit)
#plot the difference squared (meas-bestfit)
pl.figure(4)
difference = ((-meas_data+1)-bestfit)
pl.plot(sim_time,difference**2)
print (difference**2).sum()
#save for gnuplot
#gnu_data1 = np.array(([meas_time,(-meas_data+1),bestfit, nodephasing]))
gnu_data1 = np.array(([meas_time,(-meas_data+1),bestfit])) # time, meas, fit
gnu_data2 = np.array(([Qr_array, Ki2]))
parser.savedat(filename_4, gnu_data1, delimiter = '\t')
parser.savedat(filename_5, gnu_data2, delimiter = '\t')
|
double-y/django
|
refs/heads/master
|
tests/forms_tests/tests/tests.py
|
61
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import models
from django.forms import (
CharField, FileField, Form, ModelChoiceField, ModelForm,
)
from django.forms.models import ModelFormMetaclass
from django.test import SimpleTestCase, TestCase
from django.utils import six
from ..models import (
BoundaryModel, ChoiceFieldModel, ChoiceModel, ChoiceOptionModel, Defaults,
FileModel, Group, OptionalMultiChoiceModel,
)
class ChoiceFieldForm(ModelForm):
class Meta:
model = ChoiceFieldModel
fields = '__all__'
class OptionalMultiChoiceModelForm(ModelForm):
class Meta:
model = OptionalMultiChoiceModel
fields = '__all__'
class ChoiceFieldExclusionForm(ModelForm):
multi_choice = CharField(max_length=50)
class Meta:
exclude = ['multi_choice']
model = ChoiceFieldModel
class EmptyCharLabelChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ['name', 'choice']
class EmptyIntegerLabelChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ['name', 'choice_integer']
class EmptyCharLabelNoneChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ['name', 'choice_string_w_none']
class FileForm(Form):
file1 = FileField()
class TestTicket12510(TestCase):
''' It is not necessary to generate choices for ModelChoiceField (regression test for #12510). '''
def setUp(self):
self.groups = [Group.objects.create(name=name) for name in 'abc']
def test_choices_not_fetched_when_not_rendering(self):
# only one query is required to pull the model from DB
with self.assertNumQueries(1):
field = ModelChoiceField(Group.objects.order_by('-name'))
self.assertEqual('a', field.clean(self.groups[0].pk).name)
class TestTicket14567(TestCase):
"""
Check that the return values of ModelMultipleChoiceFields are QuerySets
"""
def test_empty_queryset_return(self):
"If a model's ManyToManyField has blank=True and is saved with no data, a queryset is returned."
option = ChoiceOptionModel.objects.create(name='default')
form = OptionalMultiChoiceModelForm({'multi_choice_optional': '', 'multi_choice': [option.pk]})
self.assertTrue(form.is_valid())
# Check that the empty value is a QuerySet
self.assertIsInstance(form.cleaned_data['multi_choice_optional'], models.query.QuerySet)
# While we're at it, test whether a QuerySet is returned if there *is* a value.
self.assertIsInstance(form.cleaned_data['multi_choice'], models.query.QuerySet)
class ModelFormCallableModelDefault(TestCase):
def test_no_empty_option(self):
"If a model's ForeignKey has blank=False and a default, no empty option is created (Refs #10792)."
option = ChoiceOptionModel.objects.create(name='default')
choices = list(ChoiceFieldForm().fields['choice'].choices)
self.assertEqual(len(choices), 1)
self.assertEqual(choices[0], (option.pk, six.text_type(option)))
def test_callable_initial_value(self):
"The initial value for a callable default returning a queryset is the pk (refs #13769)"
ChoiceOptionModel.objects.create(id=1, name='default')
ChoiceOptionModel.objects.create(id=2, name='option 2')
ChoiceOptionModel.objects.create(id=3, name='option 3')
self.assertHTMLEqual(ChoiceFieldForm().as_p(), """<p><label for="id_choice">Choice:</label> <select name="choice" id="id_choice">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice" value="1" id="initial-id_choice" /></p>
<p><label for="id_choice_int">Choice int:</label> <select name="choice_int" id="id_choice_int">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice_int" value="1" id="initial-id_choice_int" /></p>
<p><label for="id_multi_choice">Multi choice:</label> <select multiple="multiple" name="multi_choice" id="id_multi_choice">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice" value="1" id="initial-id_multi_choice_0" /></p>
<p><label for="id_multi_choice_int">Multi choice int:</label> <select multiple="multiple" name="multi_choice_int" id="id_multi_choice_int">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice_int" value="1" id="initial-id_multi_choice_int_0" /></p>""")
def test_initial_instance_value(self):
"Initial instances for model fields may also be instances (refs #7287)"
ChoiceOptionModel.objects.create(id=1, name='default')
obj2 = ChoiceOptionModel.objects.create(id=2, name='option 2')
obj3 = ChoiceOptionModel.objects.create(id=3, name='option 3')
self.assertHTMLEqual(ChoiceFieldForm(initial={
'choice': obj2,
'choice_int': obj2,
'multi_choice': [obj2, obj3],
'multi_choice_int': ChoiceOptionModel.objects.exclude(name="default"),
}).as_p(), """<p><label for="id_choice">Choice:</label> <select name="choice" id="id_choice">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice" value="2" id="initial-id_choice" /></p>
<p><label for="id_choice_int">Choice int:</label> <select name="choice_int" id="id_choice_int">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice_int" value="2" id="initial-id_choice_int" /></p>
<p><label for="id_multi_choice">Multi choice:</label> <select multiple="multiple" name="multi_choice" id="id_multi_choice">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3" selected="selected">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice" value="2" id="initial-id_multi_choice_0" />
<input type="hidden" name="initial-multi_choice" value="3" id="initial-id_multi_choice_1" /></p>
<p><label for="id_multi_choice_int">Multi choice int:</label> <select multiple="multiple" name="multi_choice_int" id="id_multi_choice_int">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3" selected="selected">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice_int" value="2" id="initial-id_multi_choice_int_0" />
<input type="hidden" name="initial-multi_choice_int" value="3" id="initial-id_multi_choice_int_1" /></p>""")
class FormsModelTestCase(TestCase):
def test_unicode_filename(self):
# FileModel with unicode filename and data #########################
f = FileForm(data={}, files={'file1': SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8'))}, auto_id=False)
self.assertTrue(f.is_valid())
self.assertIn('file1', f.cleaned_data)
m = FileModel.objects.create(file=f.cleaned_data['file1'])
self.assertEqual(m.file.name, 'tests/\u6211\u96bb\u6c23\u588a\u8239\u88dd\u6eff\u6652\u9c54.txt')
m.delete()
def test_boundary_conditions(self):
        # Boundary conditions on a PositiveIntegerField #########################
class BoundaryForm(ModelForm):
class Meta:
model = BoundaryModel
fields = '__all__'
f = BoundaryForm({'positive_integer': 100})
self.assertTrue(f.is_valid())
f = BoundaryForm({'positive_integer': 0})
self.assertTrue(f.is_valid())
f = BoundaryForm({'positive_integer': -100})
self.assertFalse(f.is_valid())
def test_formfield_initial(self):
# Formfield initial values ########
# If the model has default values for some fields, they are used as the formfield
# initial values.
class DefaultsForm(ModelForm):
class Meta:
model = Defaults
fields = '__all__'
self.assertEqual(DefaultsForm().fields['name'].initial, 'class default value')
self.assertEqual(DefaultsForm().fields['def_date'].initial, datetime.date(1980, 1, 1))
self.assertEqual(DefaultsForm().fields['value'].initial, 42)
r1 = DefaultsForm()['callable_default'].as_widget()
r2 = DefaultsForm()['callable_default'].as_widget()
self.assertNotEqual(r1, r2)
# In a ModelForm that is passed an instance, the initial values come from the
# instance's values, not the model's defaults.
foo_instance = Defaults(name='instance value', def_date=datetime.date(1969, 4, 4), value=12)
instance_form = DefaultsForm(instance=foo_instance)
self.assertEqual(instance_form.initial['name'], 'instance value')
self.assertEqual(instance_form.initial['def_date'], datetime.date(1969, 4, 4))
self.assertEqual(instance_form.initial['value'], 12)
from django.forms import CharField
class ExcludingForm(ModelForm):
name = CharField(max_length=255)
class Meta:
model = Defaults
exclude = ['name', 'callable_default']
f = ExcludingForm({'name': 'Hello', 'value': 99, 'def_date': datetime.date(1999, 3, 2)})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], 'Hello')
obj = f.save()
self.assertEqual(obj.name, 'class default value')
self.assertEqual(obj.value, 99)
self.assertEqual(obj.def_date, datetime.date(1999, 3, 2))
class RelatedModelFormTests(SimpleTestCase):
def test_invalid_loading_order(self):
"""
Test for issue 10405
"""
class A(models.Model):
ref = models.ForeignKey("B")
class Meta:
model = A
fields = '__all__'
self.assertRaises(ValueError, ModelFormMetaclass, str('Form'), (ModelForm,), {'Meta': Meta})
class B(models.Model):
pass
def test_valid_loading_order(self):
"""
Test for issue 10405
"""
class C(models.Model):
ref = models.ForeignKey("D")
class D(models.Model):
pass
class Meta:
model = C
fields = '__all__'
self.assertTrue(issubclass(ModelFormMetaclass(str('Form'), (ModelForm,), {'Meta': Meta}), ModelForm))
class ManyToManyExclusionTestCase(TestCase):
def test_m2m_field_exclusion(self):
# Issue 12337. save_instance should honor the passed-in exclude keyword.
opt1 = ChoiceOptionModel.objects.create(id=1, name='default')
opt2 = ChoiceOptionModel.objects.create(id=2, name='option 2')
opt3 = ChoiceOptionModel.objects.create(id=3, name='option 3')
initial = {
'choice': opt1,
'choice_int': opt1,
}
data = {
'choice': opt2.pk,
'choice_int': opt2.pk,
'multi_choice': 'string data!',
'multi_choice_int': [opt1.pk],
}
instance = ChoiceFieldModel.objects.create(**initial)
instance.multi_choice = instance.multi_choice_int = [opt2, opt3]
form = ChoiceFieldExclusionForm(data=data, instance=instance)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['multi_choice'], data['multi_choice'])
form.save()
self.assertEqual(form.instance.choice.pk, data['choice'])
self.assertEqual(form.instance.choice_int.pk, data['choice_int'])
self.assertEqual(list(form.instance.multi_choice.all()), [opt2, opt3])
self.assertEqual([obj.pk for obj in form.instance.multi_choice_int.all()], data['multi_choice_int'])
class EmptyLabelTestCase(TestCase):
def test_empty_field_char(self):
f = EmptyCharLabelChoiceForm()
self.assertHTMLEqual(f.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" /></p>
<p><label for="id_choice">Choice:</label> <select id="id_choice" name="choice">
<option value="" selected="selected">No Preference</option>
<option value="f">Foo</option>
<option value="b">Bar</option>
</select></p>""")
def test_empty_field_char_none(self):
f = EmptyCharLabelNoneChoiceForm()
self.assertHTMLEqual(f.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" /></p>
<p><label for="id_choice_string_w_none">Choice string w none:</label> <select id="id_choice_string_w_none" name="choice_string_w_none">
<option value="" selected="selected">No Preference</option>
<option value="f">Foo</option>
<option value="b">Bar</option>
</select></p>""")
def test_save_empty_label_forms(self):
# Test that saving a form with a blank choice results in the expected
# value being stored in the database.
tests = [
(EmptyCharLabelNoneChoiceForm, 'choice_string_w_none', None),
(EmptyIntegerLabelChoiceForm, 'choice_integer', None),
(EmptyCharLabelChoiceForm, 'choice', ''),
]
for form, key, expected in tests:
f = form({'name': 'some-key', key: ''})
self.assertTrue(f.is_valid())
m = f.save()
self.assertEqual(expected, getattr(m, key))
self.assertEqual('No Preference',
getattr(m, 'get_{}_display'.format(key))())
def test_empty_field_integer(self):
f = EmptyIntegerLabelChoiceForm()
self.assertHTMLEqual(f.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" /></p>
<p><label for="id_choice_integer">Choice integer:</label> <select id="id_choice_integer" name="choice_integer">
<option value="" selected="selected">No Preference</option>
<option value="1">Foo</option>
<option value="2">Bar</option>
</select></p>""")
def test_get_display_value_on_none(self):
m = ChoiceModel.objects.create(name='test', choice='', choice_integer=None)
self.assertIsNone(m.choice_integer)
self.assertEqual('No Preference', m.get_choice_integer_display())
def test_html_rendering_of_prepopulated_models(self):
none_model = ChoiceModel(name='none-test', choice_integer=None)
f = EmptyIntegerLabelChoiceForm(instance=none_model)
self.assertHTMLEqual(f.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" value="none-test"/></p>
<p><label for="id_choice_integer">Choice integer:</label> <select id="id_choice_integer" name="choice_integer">
<option value="" selected="selected">No Preference</option>
<option value="1">Foo</option>
<option value="2">Bar</option>
</select></p>""")
foo_model = ChoiceModel(name='foo-test', choice_integer=1)
f = EmptyIntegerLabelChoiceForm(instance=foo_model)
self.assertHTMLEqual(f.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" value="foo-test"/></p>
<p><label for="id_choice_integer">Choice integer:</label> <select id="id_choice_integer" name="choice_integer">
<option value="">No Preference</option>
<option value="1" selected="selected">Foo</option>
<option value="2">Bar</option>
</select></p>""")
|
xtmhm2000/scrapy-0.22
|
refs/heads/master
|
scrapy/contrib/memdebug.py
|
11
|
"""
MemoryDebugger extension
See documentation in docs/topics/extensions.rst
"""
import gc
from scrapy import signals
from scrapy.exceptions import NotConfigured
from scrapy.utils.trackref import live_refs
class MemoryDebugger(object):
def __init__(self, stats):
self.stats = stats
@classmethod
def from_crawler(cls, crawler):
if not crawler.settings.getbool('MEMDEBUG_ENABLED'):
raise NotConfigured
o = cls(crawler.stats)
crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)
return o
def spider_closed(self, spider, reason):
gc.collect()
self.stats.set_value('memdebug/gc_garbage_count', len(gc.garbage), spider=spider)
for cls, wdict in live_refs.iteritems():
if not wdict:
continue
self.stats.set_value('memdebug/live_refs/%s' % cls.__name__, len(wdict), spider=spider)
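# A minimal usage note: the extension is inert unless the MEMDEBUG_ENABLED
# setting is true, e.g. in a project's settings.py:
#   MEMDEBUG_ENABLED = True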
|
rtucker-mozilla/mozpackager
|
refs/heads/master
|
vendor-local/lib/python/amqp/basic_message.py
|
12
|
"""Messages for AMQP"""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
from __future__ import absolute_import
from .serialization import GenericContent
__all__ = ['Message']
class Message(GenericContent):
"""A Message for use with the Channnel.basic_* methods."""
#: Instances of this class have these attributes, which
#: are passed back and forth as message properties between
#: client and server
PROPERTIES = [
('content_type', 'shortstr'),
('content_encoding', 'shortstr'),
('application_headers', 'table'),
('delivery_mode', 'octet'),
('priority', 'octet'),
('correlation_id', 'shortstr'),
('reply_to', 'shortstr'),
('expiration', 'shortstr'),
('message_id', 'shortstr'),
('timestamp', 'timestamp'),
('type', 'shortstr'),
('user_id', 'shortstr'),
('app_id', 'shortstr'),
('cluster_id', 'shortstr')
]
def __init__(self, body='', children=None, **properties):
"""Expected arg types
body: string
children: (not supported)
Keyword properties may include:
content_type: shortstr
MIME content type
content_encoding: shortstr
MIME content encoding
application_headers: table
Message header field table, a dict with string keys,
and string | int | Decimal | datetime | dict values.
delivery_mode: octet
Non-persistent (1) or persistent (2)
priority: octet
The message priority, 0 to 9
correlation_id: shortstr
The application correlation identifier
reply_to: shortstr
The destination to reply to
expiration: shortstr
Message expiration specification
message_id: shortstr
The application message identifier
timestamp: datetime.datetime
The message timestamp
type: shortstr
The message type name
user_id: shortstr
The creating user id
app_id: shortstr
The creating application id
cluster_id: shortstr
Intra-cluster routing identifier
Unicode bodies are encoded according to the 'content_encoding'
argument. If that's None, it's set to 'UTF-8' automatically.
example::
msg = Message('hello world',
content_type='text/plain',
application_headers={'foo': 7})
"""
super(Message, self).__init__(**properties)
self.body = body
def __eq__(self, other):
"""Check if the properties and bodies of this Message and another
Message are the same.
Received messages may contain a 'delivery_info' attribute,
which isn't compared.
"""
try:
return (super(Message, self).__eq__(other) and
self.body == other.body)
except AttributeError:
return NotImplemented
|
mirokuratczyk/psiphon-tunnel-core
|
refs/heads/master
|
vendor/github.com/redjack/marionette/third_party/re2/re2/testing/unicode_test.py
|
325
|
#!/usr/bin/python2.4
#
# Copyright 2008 The RE2 Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Unittest for the util/regexp/re2/unicode.py module."""
import os
import StringIO
from google3.pyglib import flags
from google3.testing.pybase import googletest
from google3.util.regexp.re2 import unicode
_UNICODE_DIR = os.path.join(flags.FLAGS.test_srcdir, "google3", "third_party",
"unicode", "ucd-5.1.0")
class ConvertTest(googletest.TestCase):
"""Test the conversion functions."""
def testUInt(self):
self.assertEquals(0x0000, unicode._UInt("0000"))
self.assertEquals(0x263A, unicode._UInt("263A"))
self.assertEquals(0x10FFFF, unicode._UInt("10FFFF"))
self.assertRaises(unicode.InputError, unicode._UInt, "263")
self.assertRaises(unicode.InputError, unicode._UInt, "263AAAA")
self.assertRaises(unicode.InputError, unicode._UInt, "110000")
def testURange(self):
self.assertEquals([1, 2, 3], unicode._URange("0001..0003"))
self.assertEquals([1], unicode._URange("0001"))
self.assertRaises(unicode.InputError, unicode._URange, "0001..0003..0005")
self.assertRaises(unicode.InputError, unicode._URange, "0003..0001")
self.assertRaises(unicode.InputError, unicode._URange, "0001..0001")
def testUStr(self):
self.assertEquals("0x263A", unicode._UStr(0x263a))
self.assertEquals("0x10FFFF", unicode._UStr(0x10FFFF))
self.assertRaises(unicode.InputError, unicode._UStr, 0x110000)
self.assertRaises(unicode.InputError, unicode._UStr, -1)
_UNICODE_TABLE = """# Commented line, should be ignored.
# The next line is blank and should be ignored.

0041;Capital A;Line 1
0061..007A;Lowercase;Line 2
1F00;<Greek, First>;Ignored
1FFE;<Greek, Last>;Line 3
10FFFF;Runemax;Line 4
0000;Zero;Line 5
"""
_BAD_TABLE1 = """
111111;Not a code point;
"""
_BAD_TABLE2 = """
0000;<Zero, First>;Missing <Zero, Last>
"""
_BAD_TABLE3 = """
0010..0001;Bad range;
"""
class AbortError(Exception):
"""Function should not have been called."""
def Abort():
raise AbortError("Abort")
def StringTable(s, n, f):
unicode.ReadUnicodeTable(StringIO.StringIO(s), n, f)
class ReadUnicodeTableTest(googletest.TestCase):
"""Test the ReadUnicodeTable function."""
def testSimpleTable(self):
ncall = [0] # can't assign to ordinary int in DoLine
def DoLine(codes, fields):
self.assertEquals(3, len(fields))
ncall[0] += 1
self.assertEquals("Line %d" % (ncall[0],), fields[2])
if ncall[0] == 1:
self.assertEquals([0x0041], codes)
self.assertEquals("0041", fields[0])
self.assertEquals("Capital A", fields[1])
elif ncall[0] == 2:
self.assertEquals(range(0x0061, 0x007A + 1), codes)
self.assertEquals("0061..007A", fields[0])
self.assertEquals("Lowercase", fields[1])
elif ncall[0] == 3:
self.assertEquals(range(0x1F00, 0x1FFE + 1), codes)
self.assertEquals("1F00..1FFE", fields[0])
self.assertEquals("Greek", fields[1])
elif ncall[0] == 4:
self.assertEquals([0x10FFFF], codes)
self.assertEquals("10FFFF", fields[0])
self.assertEquals("Runemax", fields[1])
elif ncall[0] == 5:
self.assertEquals([0x0000], codes)
self.assertEquals("0000", fields[0])
self.assertEquals("Zero", fields[1])
StringTable(_UNICODE_TABLE, 3, DoLine)
self.assertEquals(5, ncall[0])
def testErrorTables(self):
self.assertRaises(unicode.InputError, StringTable, _UNICODE_TABLE, 4, Abort)
self.assertRaises(unicode.InputError, StringTable, _UNICODE_TABLE, 2, Abort)
self.assertRaises(unicode.InputError, StringTable, _BAD_TABLE1, 3, Abort)
self.assertRaises(unicode.InputError, StringTable, _BAD_TABLE2, 3, Abort)
self.assertRaises(unicode.InputError, StringTable, _BAD_TABLE3, 3, Abort)
class ParseContinueTest(googletest.TestCase):
"""Test the ParseContinue function."""
def testParseContinue(self):
self.assertEquals(("Private Use", "First"),
unicode._ParseContinue("<Private Use, First>"))
self.assertEquals(("Private Use", "Last"),
unicode._ParseContinue("<Private Use, Last>"))
self.assertEquals(("<Private Use, Blah>", None),
unicode._ParseContinue("<Private Use, Blah>"))
class CaseGroupsTest(googletest.TestCase):
"""Test the CaseGroups function (and the CaseFoldingReader)."""
def FindGroup(self, c):
if type(c) == str:
c = ord(c)
for g in self.groups:
if c in g:
return g
return None
def testCaseGroups(self):
self.groups = unicode.CaseGroups(unicode_dir=_UNICODE_DIR)
self.assertEquals([ord("A"), ord("a")], self.FindGroup("a"))
self.assertEquals(None, self.FindGroup("0"))
class ScriptsTest(googletest.TestCase):
"""Test the Scripts function (and the ScriptsReader)."""
def FindScript(self, c):
if type(c) == str:
c = ord(c)
for script, codes in self.scripts.items():
for code in codes:
if c == code:
return script
return None
def testScripts(self):
self.scripts = unicode.Scripts(unicode_dir=_UNICODE_DIR)
self.assertEquals("Latin", self.FindScript("a"))
self.assertEquals("Common", self.FindScript("0"))
self.assertEquals(None, self.FindScript(0xFFFE))
class CategoriesTest(googletest.TestCase):
"""Test the Categories function (and the UnicodeDataReader)."""
def FindCategory(self, c):
if type(c) == str:
c = ord(c)
short = None
for category, codes in self.categories.items():
for code in codes:
if code == c:
# prefer category Nd over N
if len(category) > 1:
return category
if short == None:
short = category
return short
def testCategories(self):
self.categories = unicode.Categories(unicode_dir=_UNICODE_DIR)
self.assertEquals("Ll", self.FindCategory("a"))
self.assertEquals("Nd", self.FindCategory("0"))
self.assertEquals("Lo", self.FindCategory(0xAD00)) # in First, Last range
self.assertEquals(None, self.FindCategory(0xFFFE))
self.assertEquals("Lo", self.FindCategory(0x8B5A))
self.assertEquals("Lo", self.FindCategory(0x6C38))
self.assertEquals("Lo", self.FindCategory(0x92D2))
self.assertTrue(ord("a") in self.categories["L"])
self.assertTrue(ord("0") in self.categories["N"])
self.assertTrue(0x8B5A in self.categories["L"])
self.assertTrue(0x6C38 in self.categories["L"])
self.assertTrue(0x92D2 in self.categories["L"])
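# --- Editor's note: a hedged illustration, not part of the upstream suite. ---
# The tests above exercise unicode.ReadUnicodeTable's callback protocol: the
# reader splits each semicolon-delimited line into exactly `nfields` fields,
# expands the code-point range in field 0 (collapsing <Foo, First>/<Foo, Last>
# pairs), and invokes the callback once per logical entry. Driving it by hand:
def _example_read_table():
  entries = []
  def collect(codes, fields):
    entries.append((codes, fields))
  unicode.ReadUnicodeTable(StringIO.StringIO("0041;Capital A;x\n"), 3, collect)
  return entries  # [([0x41], ["0041", "Capital A", "x"])]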
def main():
googletest.main()
if __name__ == "__main__":
main()
|
michaelmior/cassandra
|
refs/heads/cassandra-2.2
|
pylib/cqlshlib/test/run_cqlsh.py
|
84
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: this testing tool is *nix specific
import os
import re
import pty
import fcntl
import contextlib
import subprocess
import signal
import math
from time import time
from . import basecase
DEFAULT_CQLSH_PROMPT = '\ncqlsh(:\S+)?> '
DEFAULT_CQLSH_TERM = 'xterm'
cqlshlog = basecase.cqlshlog
def set_controlling_pty(master, slave):
os.setsid()
os.close(master)
for i in range(3):
os.dup2(slave, i)
if slave > 2:
os.close(slave)
os.close(os.open(os.ttyname(1), os.O_RDWR))
def set_nonblocking(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
@contextlib.contextmanager
def raising_signal(signum, exc):
"""
    Within the wrapped context, the given signal will interrupt system
    calls and will raise the given exception class. The preexisting signal
handling will be reinstated on context exit.
"""
def raiser(signum, frames):
raise exc()
oldhandlr = signal.signal(signum, raiser)
try:
yield
finally:
signal.signal(signum, oldhandlr)
class TimeoutError(Exception):
pass
@contextlib.contextmanager
def timing_out_itimer(seconds):
if seconds is None:
yield
return
with raising_signal(signal.SIGALRM, TimeoutError):
oldval, oldint = signal.getitimer(signal.ITIMER_REAL)
if oldval != 0.0:
raise RuntimeError("ITIMER_REAL already in use")
signal.setitimer(signal.ITIMER_REAL, seconds)
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def timing_out_alarm(seconds):
if seconds is None:
yield
return
with raising_signal(signal.SIGALRM, TimeoutError):
oldval = signal.alarm(int(math.ceil(seconds)))
if oldval != 0:
signal.alarm(oldval)
raise RuntimeError("SIGALRM already in use")
try:
yield
finally:
signal.alarm(0)
# setitimer is new in 2.6, but it's still worth supporting, for potentially
# faster tests because of sub-second resolution on timeouts.
if hasattr(signal, 'setitimer'):
timing_out = timing_out_itimer
else:
timing_out = timing_out_alarm
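# --- Editor's note: a hedged usage sketch, not part of the upstream module. ---
# Whichever implementation is selected, `timing_out(seconds)` is a context
# manager that raises TimeoutError if the wrapped block (typically a blocking
# read) runs longer than `seconds`; None disables the timeout entirely.
def _example_timing_out(fd):
    try:
        with timing_out(2.5):
            return os.read(fd, 4096)  # interrupted by SIGALRM after 2.5s
    except TimeoutError:
        return ''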
def noop(*a):
pass
class ProcRunner:
def __init__(self, path, tty=True, env=None, args=()):
self.exe_path = path
self.args = args
self.tty = bool(tty)
if env is None:
env = {}
self.env = env
self.readbuf = ''
self.start_proc()
def start_proc(self):
preexec = noop
stdin = stdout = stderr = None
if self.tty:
masterfd, slavefd = pty.openpty()
preexec = lambda: set_controlling_pty(masterfd, slavefd)
else:
stdin = stdout = subprocess.PIPE
stderr = subprocess.STDOUT
cqlshlog.info("Spawning %r subprocess with args: %r and env: %r"
% (self.exe_path, self.args, self.env))
self.proc = subprocess.Popen((self.exe_path,) + tuple(self.args),
env=self.env, preexec_fn=preexec,
stdin=stdin, stdout=stdout, stderr=stderr,
close_fds=False)
if self.tty:
os.close(slavefd)
self.childpty = masterfd
self.send = self.send_tty
self.read = self.read_tty
else:
self.send = self.send_pipe
self.read = self.read_pipe
def close(self):
cqlshlog.info("Closing %r subprocess." % (self.exe_path,))
if self.tty:
os.close(self.childpty)
else:
self.proc.stdin.close()
cqlshlog.debug("Waiting for exit")
return self.proc.wait()
def send_tty(self, data):
os.write(self.childpty, data)
def send_pipe(self, data):
self.proc.stdin.write(data)
def read_tty(self, blksize):
return os.read(self.childpty, blksize)
def read_pipe(self, blksize):
return self.proc.stdout.read(blksize)
def read_until(self, until, blksize=4096, timeout=None, flags=0):
if not isinstance(until, re._pattern_type):
until = re.compile(until, flags)
got = self.readbuf
self.readbuf = ''
with timing_out(timeout):
while True:
val = self.read(blksize)
cqlshlog.debug("read %r from subproc" % (val,))
if val == '':
raise EOFError("'until' pattern %r not found" % (until.pattern,))
got += val
m = until.search(got)
if m is not None:
self.readbuf = got[m.end():]
got = got[:m.end()]
return got
def read_lines(self, numlines, blksize=4096, timeout=None):
lines = []
with timing_out(timeout):
for n in range(numlines):
lines.append(self.read_until('\n', blksize=blksize))
return lines
def read_up_to_timeout(self, timeout, blksize=4096):
got = self.readbuf
self.readbuf = ''
curtime = time()
stoptime = curtime + timeout
while curtime < stoptime:
try:
with timing_out(stoptime - curtime):
stuff = self.read(blksize)
except TimeoutError:
break
cqlshlog.debug("read %r from subproc" % (stuff,))
if stuff == '':
break
got += stuff
curtime = time()
return got
class CqlshRunner(ProcRunner):
def __init__(self, path=None, host=None, port=None, keyspace=None, cqlver=None,
args=(), prompt=DEFAULT_CQLSH_PROMPT, env=None, **kwargs):
if path is None:
path = basecase.path_to_cqlsh
if host is None:
host = basecase.TEST_HOST
if port is None:
port = basecase.TEST_PORT
if env is None:
env = {}
        env.setdefault('TERM', DEFAULT_CQLSH_TERM)
env.setdefault('CQLSH_NO_BUNDLED', os.environ.get('CQLSH_NO_BUNDLED', ''))
env.setdefault('PYTHONPATH', os.environ.get('PYTHONPATH', ''))
args = tuple(args) + (host, str(port))
if cqlver is not None:
args += ('--cqlversion', str(cqlver))
if keyspace is not None:
args += ('--keyspace', keyspace)
self.keyspace = keyspace
ProcRunner.__init__(self, path, args=args, env=env, **kwargs)
self.prompt = prompt
if self.prompt is None:
self.output_header = ''
else:
self.output_header = self.read_to_next_prompt()
def read_to_next_prompt(self):
return self.read_until(self.prompt, timeout=4.0)
def read_up_to_timeout(self, timeout, blksize=4096):
output = ProcRunner.read_up_to_timeout(self, timeout, blksize=blksize)
# readline trying to be friendly- remove these artifacts
output = output.replace(' \r', '')
output = output.replace('\r', '')
return output
def cmd_and_response(self, cmd):
self.send(cmd + '\n')
output = self.read_to_next_prompt()
# readline trying to be friendly- remove these artifacts
output = output.replace(' \r', '')
output = output.replace('\r', '')
output = output.replace(' \b', '')
if self.tty:
echo, output = output.split('\n', 1)
assert echo == cmd, "unexpected echo %r instead of %r" % (echo, cmd)
try:
output, promptline = output.rsplit('\n', 1)
except ValueError:
promptline = output
output = ''
assert re.match(self.prompt, '\n' + promptline), \
'last line of output %r does not match %r?' % (promptline, self.prompt)
return output + '\n'
def run_cqlsh(**kwargs):
return contextlib.closing(CqlshRunner(**kwargs))
def call_cqlsh(**kwargs):
kwargs.setdefault('prompt', None)
proginput = kwargs.pop('input', '')
kwargs['tty'] = False
c = CqlshRunner(**kwargs)
output, _ = c.proc.communicate(proginput)
result = c.close()
return output, result
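# --- Editor's note: a hedged usage sketch, not part of the upstream module. ---
# call_cqlsh suits one-shot, non-interactive invocations; run_cqlsh drives an
# interactive session over a pty. Host and port default to the basecase test
# settings, so both can be called with no arguments:
def _example_usage():
    output, exitcode = call_cqlsh(input='DESCRIBE KEYSPACES;\n')
    with run_cqlsh() as c:
        banner = c.output_header               # text printed before the first prompt
        result = c.cmd_and_response('SHOW VERSION;')
    return output, exitcode, banner, result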
|
leoliujie/odoo
|
refs/heads/8.0
|
addons/website_mail_group/models/mail_group.py
|
321
|
# -*- coding: utf-8 -*-
from openerp.osv import osv
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
from openerp.addons.website.models.website import slug
class MailGroup(osv.Model):
_inherit = 'mail.group'
def message_get_email_values(self, cr, uid, id, notif_mail=None, context=None):
res = super(MailGroup, self).message_get_email_values(cr, uid, id, notif_mail=notif_mail, context=context)
group = self.browse(cr, uid, id, context=context)
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
headers = {}
if res.get('headers'):
try:
headers = eval(res['headers'])
except Exception:
pass
headers.update({
'List-Archive': '<%s/groups/%s>' % (base_url, slug(group)),
'List-Subscribe': '<%s/groups>' % (base_url),
'List-Unsubscribe': '<%s/groups?unsubscribe>' % (base_url,),
})
res['headers'] = repr(headers)
return res
class MailMail(osv.Model):
_inherit = 'mail.mail'
def send_get_mail_body(self, cr, uid, mail, partner=None, context=None):
""" Short-circuit parent method for mail groups, replace the default
footer with one appropriate for mailing-lists."""
if mail.model == 'mail.group' and mail.res_id:
# no super() call on purpose, no private links that could be quoted!
group = self.pool['mail.group'].browse(cr, uid, mail.res_id, context=context)
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
vals = {
'maillist': _('Mailing-List'),
'post_to': _('Post to'),
'unsub': _('Unsubscribe'),
'mailto': 'mailto:%s@%s' % (group.alias_name, group.alias_domain),
'group_url': '%s/groups/%s' % (base_url, slug(group)),
'unsub_url': '%s/groups?unsubscribe' % (base_url,),
}
footer = """_______________________________________________
%(maillist)s: %(group_url)s
%(post_to)s: %(mailto)s
%(unsub)s: %(unsub_url)s
""" % vals
body = tools.append_content_to_html(mail.body, footer, container_tag='div')
return body
else:
return super(MailMail, self).send_get_mail_body(cr, uid, mail,
partner=partner,
context=context)
|
rishirajsinghjhelumi/Entity-Mining
|
refs/heads/master
|
lastfm/mixin/_propertyadder.py
|
2
|
#!/usr/bin/env python
__author__ = "Abhinav Sarkar <abhinav@abhinavsarkar.net>"
__version__ = "0.2"
__license__ = "GNU Lesser General Public License"
__package__ = "lastfm.mixin"
def property_adder(cls):
for p in cls.Meta.properties:
if not hasattr(cls, p):
def wrapper():
q = p
@property
def get(self):
try:
return getattr(self, "_{0}".format(q))
except AttributeError:
return None
return get
setattr(cls, p, wrapper())
if hasattr(cls.Meta, 'fillable_properties'):
for p in cls.Meta.fillable_properties:
if not hasattr(cls, p):
def wrapper():
q = p
@property
def get(self):
fill = False
try:
attrval = getattr(self, "_{0}".format(q))
if attrval is None:
fill = True
else:
return attrval
except AttributeError:
fill = True
if fill:
self._fill_info()
return getattr(self, "_{0}".format(q))
return get
setattr(cls, p, wrapper())
return cls
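# --- Editor's note: a hedged illustration, not part of the upstream module. ---
# property_adder is a class decorator: for every name in Meta.properties it
# adds a read-only property backed by an "_<name>" attribute (None if unset);
# names in Meta.fillable_properties additionally call self._fill_info() on
# first access to populate the backing attribute lazily. A hypothetical use:
@property_adder
class _ExampleArtist(object):
    class Meta(object):
        properties = ("name",)
        fillable_properties = ("bio",)
    def __init__(self, name):
        self._name = name
    def _fill_info(self):
        self._bio = "fetched lazily"
# _ExampleArtist("cher").name -> "cher"; .bio triggers _fill_info() once.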
|
djbaldey/django
|
refs/heads/master
|
tests/view_tests/regression_21530_urls.py
|
487
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^index/$', views.index_page, name='index'),
]
|
zooba/PTVS
|
refs/heads/master
|
Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32com/makegw/__init__.py
|
46
|
# Indicates a Python package.
|
lbjay/cds-invenio
|
refs/heads/master
|
modules/elmsubmit/lib/elmsubmit_field_validation.py
|
4
|
# -*- coding: utf-8 -*-
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
import re
def author(value):
"""
The author list must be in the following format:
Put one author per line, and a comma ',' (with no preceding
space) between the name and the firstname initial letters.
The name is going first, followed by the firstname initial
letters. Precede each initial by a single space. Place only a
single space between surnames.
Example: Put
Le Meur, J Y
Baron, T
for
Le Meur Jean-Yves & Baron Thomas.
"""
    # Strip each line of leading/trailing whitespace and remove blank lines.
value = '\n'.join(filter(lambda line: line != '', map(lambda line: line.strip(), value.splitlines())))
# txt = txt.replace("\r\n", "\n") # Change to unix newline conventions.
# Allow names like:
# 'MacDonald Schlüter Wolsey-Smith, P J'
hyphenated_word = r'\w+(-\w+)*'
author_surname = r'%s( %s)*' % (hyphenated_word, hyphenated_word)
comma_space = r', '
initials = r'\w( \w)*'
author_re = author_surname + comma_space + initials
# Allow multiline list with no trailing spaces, and only single
# (optional) terminating newline:
author_list = r'(?u)^%s(\n%s)*?$' % (author_re, author_re)
if re.compile(author_list).search(value):
return (author.__doc__, value, True)
else:
return (author.__doc__, value, False)
def date(value):
"""
The date field must be in dd/mm/yyyy format.
    e.g. 01/03/2010
"""
value = value.strip()
day = '(3[01]|[12][0-9]|0[1-9])'
month = '(1[012]|0[1-9])'
year = '(\d\d\d\d)'
date_re = r'^%s/%s/%s(?!\n)$' % (day, month, year)
if re.compile(date_re).search(value):
return (date.__doc__, value, True)
else:
return (date.__doc__, value, False)
def files(value):
    # Strip each line of leading/trailing whitespace and remove blank lines.
# Lowercase each filename.
value = '\n'.join(filter(lambda line: line != '', map(lambda line: line.strip().lower(), value.splitlines())))
return (files.__doc__, value, True)
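# --- Editor's note: a hedged illustration, not part of the upstream module. ---
# Each validator returns a (help_text, normalized_value, ok) triple, so the
# caller can re-display the formatting rules next to the offending value:
def _example_validation():
    ok_author = author("Le Meur, J Y\nBaron, T")   # third element is True
    bad_author = author("J Y Le Meur")             # no comma, so False
    ok_date = date(" 01/03/2010 ")                 # stripped, then True
    return ok_author[2], bad_author[2], ok_date[2]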
|
Taiiwo/gittorrent
|
refs/heads/master
|
test.py
|
1
|
import util
util.get_infohash("9c14ecc29ea476350cd58668dab080fa20b48b1e", "master")
|
DarioGT/OMS-PluginXML
|
refs/heads/master
|
org.modelsphere.sms/lib/jython-2.2.1/Lib/email/base64MIME.py
|
1
|
# Copyright (C) 2002 Python Software Foundation
# Author: che@debian.org (Ben Gertzfield)
"""Base64 content transfer encoding per RFCs 2045-2047.
This module handles the content transfer encoding method defined in RFC 2045
to encode arbitrary 8-bit data as four 7-bit characters per three 8-bit
bytes, the encoding known as Base64.
It is used in the MIME standards for email to attach images, audio, and text
using some 8-bit character sets to messages.
This module provides an interface to encode and decode both headers and bodies
with Base64 encoding.
RFC 2045 defines a method for including character set information in an
`encoded-word' in a header. This method is commonly used for 8-bit real names
in To:, From:, Cc:, etc. fields, as well as Subject: lines.
This module does not do the line wrapping or end-of-line character conversion
necessary for proper internationalized headers; it only does dumb encoding and
decoding. To deal with the various line wrapping issues, use the email.Header
module.
"""
import re
from binascii import b2a_base64, a2b_base64
from email.Utils import fix_eols
try:
from email._compat22 import _floordiv
except SyntaxError:
# Python 2.1 spells integer division differently
from email._compat21 import _floordiv
CRLF = '\r\n'
NL = '\n'
EMPTYSTRING = ''
# See also Charset.py
MISC_LEN = 7
try:
True, False
except NameError:
True = 1
False = 0
# Helpers
def base64_len(s):
"""Return the length of s when it is encoded with base64."""
groups_of_3, leftover = divmod(len(s), 3)
# 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
# Thanks, Tim!
n = groups_of_3 * 4
if leftover:
n += 4
return n
def header_encode(header, charset='iso-8859-1', keep_eols=False,
maxlinelen=76, eol=NL):
"""Encode a single header line with Base64 encoding in a given charset.
Defined in RFC 2045, this Base64 encoding is identical to normal Base64
encoding, except that each line must be intelligently wrapped (respecting
the Base64 encoding), and subsequent lines must start with a space.
charset names the character set to use to encode the header. It defaults
to iso-8859-1.
End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted
to the canonical email line separator \\r\\n unless the keep_eols
parameter is True (the default is False).
Each line of the header will be terminated in the value of eol, which
defaults to "\\n". Set this to "\\r\\n" if you are using the result of
this function directly in email.
The resulting string will be in the form:
"=?charset?b?WW/5ciBtYXp66XLrIHf8eiBhIGhhbXBzdGHuciBBIFlv+XIgbWF6euly?=\\n
=?charset?b?6yB3/HogYSBoYW1wc3Rh7nIgQkMgWW/5ciBtYXp66XLrIHf8eiBhIGhh?="
with each line wrapped at, at most, maxlinelen characters (defaults to 76
characters).
"""
# Return empty headers unchanged
if not header:
return header
if not keep_eols:
header = fix_eols(header)
# Base64 encode each line, in encoded chunks no greater than maxlinelen in
# length, after the RFC chrome is added in.
base64ed = []
max_encoded = maxlinelen - len(charset) - MISC_LEN
max_unencoded = _floordiv(max_encoded * 3, 4)
for i in range(0, len(header), max_unencoded):
base64ed.append(b2a_base64(header[i:i+max_unencoded]))
# Now add the RFC chrome to each encoded chunk
lines = []
for line in base64ed:
# Ignore the last character of each line if it is a newline
if line.endswith(NL):
line = line[:-1]
# Add the chrome
lines.append('=?%s?b?%s?=' % (charset, line))
# Glue the lines together and return it. BAW: should we be able to
# specify the leading whitespace in the joiner?
joiner = eol + ' '
return joiner.join(lines)
def encode(s, binary=True, maxlinelen=76, eol=NL):
"""Encode a string with base64.
Each line will be wrapped at, at most, maxlinelen characters (defaults to
76 characters).
If binary is False, end-of-line characters will be converted to the
canonical email end-of-line sequence \\r\\n. Otherwise they will be left
verbatim (this is the default).
Each line of encoded text will end with eol, which defaults to "\\n". Set
this to "\r\n" if you will be using the result of this function directly
in an email.
"""
if not s:
return s
if not binary:
s = fix_eols(s)
encvec = []
max_unencoded = _floordiv(maxlinelen * 3, 4)
for i in range(0, len(s), max_unencoded):
# BAW: should encode() inherit b2a_base64()'s dubious behavior in
# adding a newline to the encoded string?
enc = b2a_base64(s[i:i + max_unencoded])
if enc.endswith(NL) and eol <> NL:
enc = enc[:-1] + eol
encvec.append(enc)
return EMPTYSTRING.join(encvec)
# For convenience and backwards compatibility w/ standard base64 module
body_encode = encode
encodestring = encode
def decode(s, convert_eols=None):
"""Decode a raw base64 string.
If convert_eols is set to a string value, all canonical email linefeeds,
e.g. "\\r\\n", in the decoded text will be converted to the value of
convert_eols. os.linesep is a good choice for convert_eols if you are
decoding a text attachment.
This function does not parse a full MIME header value encoded with
base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high
level email.Header class for that functionality.
"""
if not s:
return s
dec = a2b_base64(s)
if convert_eols:
return dec.replace(CRLF, convert_eols)
return dec
# For convenience and backwards compatibility w/ standard base64 module
body_decode = decode
decodestring = decode
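# --- Editor's note: a hedged illustration, not part of the upstream module. ---
# header_encode produces an RFC 2047 encoded-word; encode/decode handle
# message bodies. A minimal round trip:
def _example_round_trip():
    hdr = header_encode('Hello W\xf6rld', charset='iso-8859-1')
    # hdr is now '=?iso-8859-1?b?SGVsbG8gV/ZybGQ=?='
    body = encode('some payload\n', binary=False)  # CRLF-normalized, wrapped
    return hdr, decode(body, convert_eols='\n')    # ..., 'some payload\n'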
|
google/llvm-propeller
|
refs/heads/bb-clusters
|
lldb/test/API/get_darwin_real_python.py
|
8
|
# On macOS, system python binaries like /usr/bin/python and $(xcrun -f python3)
# are shims. They do some light validation work and then spawn the "real" python
# binary. Find the "real" python by asking dyld -- sys.executable reports the
# wrong thing more often than not. This is also useful when we're running under
# a Homebrew python3 binary, which also appears to be some kind of shim.
def getDarwinRealPythonExecutable():
import ctypes
dyld = ctypes.cdll.LoadLibrary('/usr/lib/system/libdyld.dylib')
namelen = ctypes.c_ulong(1024)
name = ctypes.create_string_buffer(b'\000', namelen.value)
dyld._NSGetExecutablePath(ctypes.byref(name), ctypes.byref(namelen))
return name.value.decode('utf-8').strip()
print(getDarwinRealPythonExecutable())
|
pdellaert/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/azure/azure_rm_gallery_info.py
|
11
|
#!/usr/bin/python
#
# Copyright (c) 2019 Liu Qingyi, (@smile37773)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_gallery_info
version_added: '2.9'
short_description: Get Azure Shared Image Gallery info.
description:
- Get info of Azure Shared Image Gallery.
options:
resource_group:
description:
- The name of the resource group.
type: str
name:
description:
- Resource name
type: str
extends_documentation_fragment:
- azure
author:
- Liu Qingyi (@smile37773)
'''
EXAMPLES = '''
- name: List galleries in a subscription.
azure_rm_gallery_info:
- name: List galleries in a resource group.
azure_rm_gallery_info:
resource_group: myResourceGroup
- name: Get a gallery.
azure_rm_gallery_info:
resource_group: myResourceGroup
name: myGallery
'''
RETURN = '''
galleries:
description: >-
A list of dict results where the key is the name of the gallery and the
values are the info for that gallery.
returned: always
type: complex
contains:
id:
description:
- Resource Id
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGallery"
name:
description:
- Resource name
returned: always
type: str
sample: "myGallery"
location:
description:
- Resource location
returned: always
type: str
sample: "eastus"
tags:
description:
- Resource tags
returned: always
type: dict
sample: { "tag": "value" }
description:
description:
- This is the gallery description.
type: str
sample: "This is the gallery description."
provisioning_state:
description:
- The current state of the gallery.
type: str
sample: "Succeeded"
'''
import time
import json
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
except Exception:
# handled in azure_rm_common
pass
class AzureRMGalleriesInfo(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str'
),
name=dict(
type='str'
)
)
self.resource_group = None
self.name = None
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.url = None
self.status_code = [200]
self.query_parameters = {}
self.query_parameters['api-version'] = '2019-03-01'
self.header_parameters = {}
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
self.mgmt_client = None
super(AzureRMGalleriesInfo, self).__init__(self.module_arg_spec, supports_tags=False)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
base_url=self._cloud_environment.endpoints.resource_manager)
if (self.resource_group is not None and self.name is not None):
# self.results['galleries'] = self.format_item(self.get())
self.results['galleries'] = self.get()
elif (self.resource_group is not None):
# self.results['galleries'] = self.format_item(self.listbyresourcegroup())
self.results['galleries'] = self.listbyresourcegroup()
else:
# self.results['galleries'] = [self.format_item(self.list())]
self.results['galleries'] = self.list()
return self.results
def get(self):
response = None
results = {}
# prepare url
self.url = ('/subscriptions' +
'/{{ subscription_id }}' +
'/resourceGroups' +
'/{{ resource_group }}' +
'/providers' +
'/Microsoft.Compute' +
'/galleries' +
'/{{ gallery_name }}')
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
self.url = self.url.replace('{{ gallery_name }}', self.name)
try:
response = self.mgmt_client.query(self.url,
'GET',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
results = json.loads(response.text)
# self.log('Response : {0}'.format(response))
except CloudError as e:
            self.log('Could not get gallery info: {0}'.format(str(e)))
return self.format_item(results)
def listbyresourcegroup(self):
response = None
results = {}
# prepare url
self.url = ('/subscriptions' +
'/{{ subscription_id }}' +
'/resourceGroups' +
'/{{ resource_group }}' +
'/providers' +
'/Microsoft.Compute' +
'/galleries')
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
try:
response = self.mgmt_client.query(self.url,
'GET',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
results = json.loads(response.text)
# self.log('Response : {0}'.format(response))
except CloudError as e:
            self.log('Could not get gallery info: {0}'.format(str(e)))
return [self.format_item(x) for x in results['value']] if results['value'] else []
def list(self):
response = None
results = {}
# prepare url
self.url = ('/subscriptions' +
'/{{ subscription_id }}' +
'/providers' +
'/Microsoft.Compute' +
'/galleries')
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
try:
response = self.mgmt_client.query(self.url,
'GET',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
results = json.loads(response.text)
# self.log('Response : {0}'.format(response))
except CloudError as e:
            self.log('Could not get gallery info: {0}'.format(str(e)))
return [self.format_item(x) for x in results['value']] if results['value'] else []
def format_item(self, item):
d = {
'id': item['id'],
'name': item['name'],
'location': item['location'],
'tags': item.get('tags'),
'description': item['properties']['description'],
'provisioning_state': item['properties']['provisioningState']
}
return d
def main():
AzureRMGalleriesInfo()
if __name__ == '__main__':
main()
|
CalvinHsu1223/LinuxCNC-EtherCAT-HAL-Driver
|
refs/heads/master
|
lib/python/gladevcp/hal_meter.py
|
39
|
# vim: sts=4 sw=4 et
# GladeVcp Widgets
#
# Copyright (c) 2010 Pavel Shramov <shramov@mexmat.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import gtk
import gobject
import cairo
import math
import gtk.glade
from hal_widgets import _HalWidgetBase, hal, hal_pin_changed_signal
MAX_INT = 0x7fffffff
def gdk_color_tuple(c):
if not c:
return 0, 0, 0
return c.red_float, c.green_float, c.blue_float
class HAL_Meter(gtk.DrawingArea, _HalWidgetBase):
__gtype_name__ = 'HAL_Meter'
__gsignals__ = dict([hal_pin_changed_signal])
__gproperties__ = {
'invert' : ( gobject.TYPE_BOOLEAN, 'Inverted', 'Invert min-max direction',
False, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'min' : ( gobject.TYPE_FLOAT, 'Min', 'Minimum value',
-MAX_INT, MAX_INT, 0, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'max' : ( gobject.TYPE_FLOAT, 'Max', 'Maximum value',
-MAX_INT, MAX_INT, 100, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'value' : ( gobject.TYPE_FLOAT, 'Value', 'Current meter value (for glade testing)',
-MAX_INT, MAX_INT, 0, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'majorscale' : ( gobject.TYPE_FLOAT, 'Major scale', 'Major ticks',
-MAX_INT, MAX_INT, 10, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'minorscale' : ( gobject.TYPE_FLOAT, 'Minor scale', 'Minor ticks',
-MAX_INT, MAX_INT, 2, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'z0_color' : ( gtk.gdk.Color.__gtype__, 'Zone 0 color', "Set color for first zone",
gobject.PARAM_READWRITE),
'z1_color' : ( gtk.gdk.Color.__gtype__, 'Zone 1 color', "Set color for second zone",
gobject.PARAM_READWRITE),
'z2_color' : ( gtk.gdk.Color.__gtype__, 'Zone 2 color', "Set color for third zone",
gobject.PARAM_READWRITE),
'z0_border' : ( gobject.TYPE_FLOAT, 'Zone 0 up limit', 'Up limit of zone 0',
-MAX_INT, MAX_INT, MAX_INT, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'z1_border' : ( gobject.TYPE_FLOAT, 'Zone 1 up limit', 'Up limit of zone 1',
-MAX_INT, MAX_INT, MAX_INT, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'bg_color' : ( gtk.gdk.Color.__gtype__, 'Background', "Choose background color",
gobject.PARAM_READWRITE),
'force_size' : ( gobject.TYPE_INT, 'Forced size', 'Force meter size not dependent on widget size. -1 to disable',
-1, MAX_INT, -1, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'text_template' : ( gobject.TYPE_STRING, 'Text template',
'Text template to display. Python formatting may be used for one variable',
"%s", gobject.PARAM_READWRITE|gobject.PARAM_CONSTRUCT),
'label' : ( gobject.TYPE_STRING, 'Meter label', 'Label to display',
"", gobject.PARAM_READWRITE|gobject.PARAM_CONSTRUCT),
'sublabel' : ( gobject.TYPE_STRING, 'Meter sub label', 'Sub text to display',
"", gobject.PARAM_READWRITE|gobject.PARAM_CONSTRUCT),
}
__gproperties = __gproperties__
def __init__(self):
super(HAL_Meter, self).__init__()
self.bg_color = gtk.gdk.Color('white')
self.z0_color = gtk.gdk.Color('green')
self.z1_color = gtk.gdk.Color('yellow')
self.z2_color = gtk.gdk.Color('red')
self.force_radius = None
self.connect("expose-event", self.expose)
def _hal_init(self):
_HalWidgetBase._hal_init(self)
self.hal_pin = self.hal.newpin(self.hal_name, hal.HAL_FLOAT, hal.HAL_IN)
self.hal_pin.connect('value-changed', lambda p: self.set_value(p.value))
self.hal_pin.connect('value-changed', lambda s: self.emit('hal-pin-changed', s))
def expose(self, widget, event):
if self.flags() & gtk.PARENT_SENSITIVE:
alpha = 1
else:
alpha = 0.3
w = self.allocation.width
h = self.allocation.height
r = min(w, h) / 2
fr = self.force_size
if fr > 0: r = min(fr, r)
if r < 20:
r = 40
self.set_size_request(2 * r, 2 * r)
cr = widget.window.cairo_create()
def set_color(c):
return cr.set_source_rgba(c.red_float, c.green_float, c.blue_float, alpha)
cr.set_line_width(2)
set_color(gtk.gdk.Color('black'))
#print w, h, aw, ah, fw, fh
cr.translate(w / 2, h / 2)
cr.arc(0, 0, r, 0, 2*math.pi)
cr.clip_preserve()
cr.stroke()
r -= 1
cr.set_line_width(1)
set_color(self.bg_color)
cr.arc(0, 0, r, 0, 2*math.pi)
cr.stroke_preserve()
cr.fill()
a_delta = math.pi / 6
a_start = 0.5 * math.pi + a_delta
a_size = 2 * math.pi - 2 * a_delta
def angle(v):
size = self.max - self.min
v = max(self.min, v)
v = min(self.max, v)
return a_start + a_size * (v - self.min) / size
set_color(self.z2_color)
self.draw_zone(cr, r, angle(self.z1_border), angle(self.max))
set_color(self.z1_color)
self.draw_zone(cr, r, angle(self.z0_border), angle(self.z1_border))
set_color(self.z0_color)
self.draw_zone(cr, r, angle(self.min), angle(self.z0_border))
set_color(gtk.gdk.Color('black'))
cr.set_font_size(r/10)
v = self.min
while v <= self.max:
if int(v) - v == 0: v = int(v)
self.draw_tick(cr, r, 0.15 * r, angle(v), text=str(v))
v += self.majorscale
v = self.min
while v <= self.max:
self.draw_tick(cr, r, 0.05 * r, angle(v))
v += self.minorscale
self.text_at(cr, self.sublabel, 0, r/5)
cr.set_font_size(r/5)
self.text_at(cr, self.label, 0, -r/5)
set_color(gtk.gdk.Color('red'))
self.draw_arrow(cr, r, angle(self.value))
set_color(gtk.gdk.Color('black'))
self.text_at(cr, self.text_template % self.value, 0, 0.8 * r)
return True
def draw_zone(self, cr, r, start, stop):
cr.arc(0, 0, r, start, stop)
cr.line_to(0.9 * r * math.cos(stop), 0.9 * r * math.sin(stop))
cr.arc_negative(0, 0, 0.9 * r, stop, start)
cr.line_to(r * math.cos(start), r * math.sin(start))
cr.stroke_preserve()
cr.fill()
def draw_tick(self, cr, r, sz, a, text=None):
cr.move_to((r - sz) * math.cos(a), (r - sz) * math.sin(a))
cr.line_to(r * math.cos(a), r * math.sin(a))
cr.stroke()
if not text:
return
self.text_at(cr, text, 0.75 * r * math.cos(a), 0.75 * r * math.sin(a))
def text_at(self, cr, text, x, y, xalign='center', yalign='center'):
xbearing, ybearing, width, height, xadvance, yadvance = cr.text_extents(text)
#print xbearing, ybearing, width, height, xadvance, yadvance
if xalign == 'center':
x = x - width/2
elif xalign == 'right':
x = x - width
if yalign == 'center':
y = y + height/2
elif yalign == 'top':
y = y + height
cr.move_to(x, y)
cr.show_text(text)
def draw_arrow(self, cr, r, a):
cr.rotate(a)
cr.move_to(0, 0)
cr.line_to(-r/10, r/20)
cr.line_to(0.8 * r, 0)
cr.line_to(-r/10, -r/20)
cr.line_to(0, 0)
cr.stroke_preserve()
cr.fill()
cr.rotate(-a)
def set_value(self, v):
self.value = v
self.queue_draw()
def do_get_property(self, property):
name = property.name.replace('-', '_')
if name in self.__gproperties.keys():
return getattr(self, name)
else:
raise AttributeError('unknown property %s' % property.name)
def do_set_property(self, property, value):
name = property.name.replace('-', '_')
if name == 'text_template':
try:
v = value % 0.0
except Exception, e:
print "Invalid format string '%s': %s" % (value, e)
return False
if name in ['bg_color', 'z0_color', 'z1_color', 'z2_color']:
if not value:
return False
if name in self.__gproperties.keys():
setattr(self, name, value)
self.queue_draw()
else:
raise AttributeError('unknown property %s' % property.name)
        if name == 'force_size':
#print "Forcing size request %s" % name
self.set_size_request(self.force_size, self.force_size)
self.queue_draw()
return True
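# --- Editor's note: a hedged illustration, not part of the upstream widget. ---
# expose() maps the clamped value onto a 300-degree arc: the dial leaves a
# 30-degree gap on either side of straight down, so a value is a linear map
# from [min, max] onto [pi/2 + pi/6, pi/2 + pi/6 + 5*pi/3]. Standalone:
def _example_angle(v, vmin=0.0, vmax=100.0):
    a_delta = math.pi / 6
    a_start = 0.5 * math.pi + a_delta
    a_size = 2 * math.pi - 2 * a_delta      # 5*pi/3, i.e. 300 degrees
    v = min(vmax, max(vmin, v))             # clamp into [vmin, vmax]
    return a_start + a_size * (v - vmin) / (vmax - vmin)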
|
mkhutornenko/incubator-aurora
|
refs/heads/master
|
src/main/python/apache/aurora/client/cli/bridge.py
|
1
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
class CommandProcessor(object):
"""A wrapper for anything which can receive a set of command-line parameters and execute
something using them.
This is built assuming that the first command-line parameter is the name of
a command to be executed. For example, if this was being used to build a command-line
tool named "tool", then a typical invocation from the command-line would look like
"tool cmd arg1 arg2". "cmd" would be the name of the command to execute, and
"arg1" and "arg2" would be the parameters to that command.
"""
@property
def name(self):
"""Get the name of this command processor"""
def execute(self, args):
"""Execute the command-line tool wrapped by this processor.
:param args: a list of the parameters used to invoke the command. Typically,
this will be sys.argv.
"""
pass
def get_commands(self):
"""Get a list of the commands that this processor can handle."""
pass
def show_help(self):
self.execute(["", "help"])
class Bridge(object):
"""Given multiple command line programs, each represented by a "CommandProcessor" object,
refer command invocations to the command line that knows how to process them.
"""
def __init__(self, command_processors, default=None):
"""
:param command_processors: a list of command-processors.
:param default: the default command processor. any command which is not
reported by "get_commands" as part of any of the registered processors
will be passed to the default.
"""
self.command_processors = command_processors
self.default = default
def show_help(self, args):
"""Dispatch a help request to the appropriate sub-command"""
if len(args) == 2: # command was just "help":
print("This is a merged command line, consisting of %s" %
[cp.name for cp in self.command_processors])
for cp in self.command_processors:
print("========== help for %s ==========" % cp.name)
cp.show_help()
return 0
elif len(args) >= 3:
discriminator = args[2]
for cp in self.command_processors:
if discriminator in cp.get_commands():
return cp.execute(args)
if self.default is not None:
return self.default.execute(args)
def execute(self, args):
"""Dispatch a command line to the appropriate CommandProcessor"""
if len(args) == 1:
args.append('help')
for cl in self.command_processors:
if args[1] == 'help' or args[1] == '--help':
self.show_help(args)
return 0
if args[1] in cl.get_commands():
return cl.execute(args)
if self.default is not None:
return self.default.execute(args)
else:
print('Unknown command: %s' % args[1])
sys.exit(1)
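# --- Editor's note: a hedged usage sketch, not part of the upstream module. ---
# Bridge dispatches on argv[1]: commands claimed by a processor's
# get_commands() go to that processor, anything else to the default. A toy
# processor wiring two tools together might look like:
class _EchoProcessor(CommandProcessor):
    @property
    def name(self):
        return 'echo'
    def get_commands(self):
        return ['echo']
    def execute(self, args):
        print(' '.join(args[2:]))
        return 0
# Bridge([_EchoProcessor()], default=_EchoProcessor()).execute(['tool', 'echo', 'hi'])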
|
kmoocdev2/edx-platform
|
refs/heads/real_2019
|
cms/djangoapps/api/urls.py
|
20
|
from django.conf.urls import include, url
urlpatterns = [
url(r'^v1/', include('cms.djangoapps.api.v1.urls', namespace='v1')),
]
|
ecoal95/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/service-workers/service-worker/resources/trickle.py
|
31
|
import time
def main(request, response):
delay = float(request.GET.first("ms", 500)) / 1E3
count = int(request.GET.first("count", 50))
# Read request body
request.body
time.sleep(delay)
response.headers.set("Content-type", "text/plain")
response.write_status_headers()
    time.sleep(delay)
for i in xrange(count):
response.writer.write_content("TEST_TRICKLE\n")
time.sleep(delay)
|
theicfire/djangofun
|
refs/heads/master
|
django/http/multipartparser.py
|
87
|
"""
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
import cgi
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_unicode
from django.utils.text import unescape_entities
from django.core.files.uploadhandler import StopUpload, SkipFile, StopFutureHandlers
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
class MultiPartParser(object):
"""
    An RFC 2388 multipart/form-data parser.
    ``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handler:
An UploadHandler instance that performs operations on the uploaded
data.
:encoding:
The encoding with which to treat the incoming data.
"""
#
        # Content-Type should contain multipart and the boundary information.
#
content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type)
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
#
# Content-Length should contain the length of the body we are about
# to receive.
#
try:
content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH',0)))
except (ValueError, TypeError):
# For now set it to 0; we'll try again later on down.
content_length = 0
if content_length <= 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2**31-4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Returns a tuple containing the POST and FILES dictionary, respectively.
"""
# We have to import QueryDict down here to avoid a circular import.
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
limited_input_data = LimitBytes(self._input_data, self._content_length)
# See if the handler will want to take care of the parsing.
# This allows overriding everything if somebody wants it.
for handler in handlers:
result = handler.handle_raw_input(limited_input_data,
self._meta,
self._content_length,
self._boundary,
encoding)
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict('', mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(limited_input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
field_name = force_unicode(field_name, encoding, errors='replace')
if item_type == FIELD:
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read()
try:
data = str(raw_data).decode('base64')
except:
data = raw_data
else:
data = field_stream.read()
self._post.appendlist(field_name,
force_unicode(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if not file_name:
continue
file_name = force_unicode(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(unescape_entities(file_name))
content_type = meta_data.get('content-type', ('',))[0].strip()
content_type_extra = meta_data.get('content-type', (0,{}))[1]
if content_type_extra is None:
content_type_extra = {}
try:
charset = content_type_extra.get('charset', None)
except:
charset = None
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(field_name, file_name,
content_type, content_length,
charset, content_type_extra.copy())
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding
try:
chunk = str(chunk).decode('base64')
except Exception, e:
# Since this is only a chunk, any error is an unfixable error.
raise MultiPartParserError("Could not decode base64 data: %r" % e)
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk,
counters[i])
counters[i] += chunk_length
if chunk is None:
# If the chunk received by the handler is None, then don't continue.
break
except SkipFile, e:
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
# If this is neither a FIELD or a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload, e:
if not e.connection_reset:
exhaust(limited_input_data)
else:
# Make sure that the request data is all fed
exhaust(limited_input_data)
# Signal that the upload has completed.
for handler in handlers:
retval = handler.upload_complete()
if retval:
break
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signalling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(force_unicode(old_field_name,
self._encoding,
errors='replace'),
file_obj)
break
def IE_sanitize(self, filename):
"""Cleanup filename from Internet Explorer full paths."""
return filename and filename[filename.rfind("\\")+1:].strip()
class LazyStream(object):
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
A producer is an iterable that returns a string each time it
is called.
"""
self._producer = producer
self._empty = False
self._leftover = ''
self.length = length
self.position = 0
self._remaining = length
self._unget_history = []
def tell(self):
return self.position
def read(self, size=None):
def parts():
remaining = (size is not None and [size] or [self._remaining])[0]
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield ''.join(self)
return
# otherwise do some bookkeeping to return exactly enough
# of the stream and stashing any extra content we get from
# the producer
while remaining != 0:
assert remaining > 0, 'remaining bytes to read should never go negative'
chunk = self.next()
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
out = ''.join(parts())
return out
def next(self):
"""
Used when the exact number of bytes to read is unimportant.
        This procedure just returns whatever chunk is conveniently returned
        from the iterator. Useful to avoid unnecessary bookkeeping if
performance is an issue.
"""
if self._leftover:
output = self._leftover
self._leftover = ''
else:
output = self._producer.next()
self._unget_history = []
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replaces the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Places bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
if not bytes:
return
self._update_unget_history(len(bytes))
self.position -= len(bytes)
self._leftover = ''.join([bytes, self._leftover])
def _update_unget_history(self, num_bytes):
"""
Updates the unget history as a sanity check to see if we've pushed
back the same number of bytes in one chunk. If we keep ungetting the
        same number of bytes many times (here, 50), we're most likely in an
infinite loop of some sort. This is usually caused by a
maliciously-malformed MIME request.
"""
self._unget_history = [num_bytes] + self._unget_history[:49]
number_equal = len([current_number for current_number in self._unget_history
if current_number == num_bytes])
if number_equal > 40:
raise SuspiciousOperation(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
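# --- Editor's note: a hedged illustration, not part of Django. ---
# LazyStream turns any iterator of byte strings into a stream with read() and
# unget(): read(n) pulls exactly n bytes, stashing any surplus the producer
# yielded, and unget() pushes bytes back so the next read sees them again.
def _example_lazystream():
    stream = LazyStream(iter(['abc', 'def']))
    first = stream.read(2)        # 'ab' -- the surplus 'c' is kept as leftover
    stream.unget('ab')            # rewind; stream.tell() is back to 0
    return first, stream.read(4)  # ('ab', 'abcd')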
class ChunkIter(object):
"""
An iterable that will yield chunks of data. Given a file-like object as the
constructor, this object will yield chunks of read operations from that
object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def next(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
class LimitBytes(object):
""" Limit bytes for a file object. """
def __init__(self, fileobject, length):
self._file = fileobject
self.remaining = length
def read(self, num_bytes=None):
"""
Read data from the underlying file.
If you ask for too much or there isn't anything left,
this will raise an InputStreamExhausted error.
"""
if self.remaining <= 0:
raise InputStreamExhausted()
if num_bytes is None:
num_bytes = self.remaining
else:
num_bytes = min(num_bytes, self.remaining)
self.remaining -= num_bytes
return self._file.read(num_bytes)
class InterBoundaryIter(object):
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def next(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter(object):
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
The future calls to .next() after locating the boundary will raise a
StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
# Try to use mx fast string search if available. Otherwise
# use Python find. Wrap the latter for consistency.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
try:
from mx.TextTools import FS
self._fs = FS(boundary).find
except ImportError:
self._fs = lambda data: data.find(boundary)
def __iter__(self):
return self
def next(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = ''.join(chunks)
boundary = self._find_boundary(chunk, len(chunk) < self._rollback)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
            # make sure we don't treat a partial boundary (and
# its separators) as data
if not chunk[:-rollback]:# and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
def _find_boundary(self, data, eof = False):
"""
Finds a multipart boundary in data.
        Should no boundary exist in the data, None is returned instead. Otherwise
a tuple containing the indices of the following are returned:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = self._fs(data)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
if data[max(0,end-1)] == '\n':
end -= 1
if data[max(0,end-1)] == '\r':
end -= 1
return end, next
def exhaust(stream_or_iterable):
"""
Completely exhausts an iterator or stream.
Raise a MultiPartParserError if the argument is not a stream or an iterable.
"""
iterator = None
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
if iterator is None:
raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')
for __ in iterator:
pass
def parse_boundary_stream(stream, max_header_size):
"""
Parses one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
# 'find' returns the top of these four bytes, so we'll
# need to munch them later to prevent them from polluting
# the payload.
header_end = chunk.find('\r\n\r\n')
def _parse_header(line):
main_value_pair, params = parse_header(line)
try:
name, value = main_value_pair.split(':', 1)
except:
raise ValueError("Invalid header: %r" % line)
return name, (value, params)
if header_end == -1:
# we find no header, so we just mark this fact and pass on
# the stream verbatim
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4:])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split('\r\n'):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
name, (value, params) = _parse_header(line)
except:
continue
if name == 'content-disposition':
TYPE = FIELD
if params.get('filename'):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
class Parser(object):
def __init__(self, stream, boundary):
self._stream = stream
self._separator = '--' + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
""" Parse the header into a key-value. """
plist = _parse_header_params(';' + line)
key = plist.pop(0).lower()
pdict = {}
for p in plist:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
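# Illustrative values (added): parse_header splits a raw header value into
# its main value and a dict of parameters, e.g.
#
#   parse_header('form-data; name="file"; filename="a.txt"')
#   # -> ('form-data', {'name': 'file', 'filename': 'a.txt'})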
def _parse_header_params(s):
plist = []
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and s.count('"', 0, end) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
|
leibowitz/perfmonitor
|
refs/heads/master
|
bin/tasks.py
|
1
|
from celery import Celery
from subprocess import check_output, CalledProcessError
import os
import json
from pymongo import MongoClient
NETSNIFF_UTIL = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'tools', 'netsniff.js')
celery = Celery('tasks')
celery.config_from_object('celeryconfig')
@celery.task
def processtest(content):
try:
harcontent = check_output(['phantomjs', NETSNIFF_UTIL, content['url'], content['agent']])
except CalledProcessError:
print ' [x] Sub-process failed'
return False
try:
jscontent = json.loads(harcontent)
except ValueError:
print ' [x] Unable to parse JSON output'
return False
jscontent['site'] = content['site']
jscontent['agent'] = content['agent']
dbcon = MongoClient()
try:
dbcon.perfmonitor.har.insert(jscontent)
print ' [x] HAR response saved'
content['nb'] -= 1
if content['nb'] > 0:
print ' [x] More tests to do, sending back msg to queue'
processtest.delay(content)
return True
except Exception:
print ' [x] Unable to save HAR response, sending back'
return False
@celery.task
def processcron(minutes):
print 'Running cron of tasks for every %d minutes' % (minutes)
dbcon = MongoClient()
rows = dbcon.perfmonitor.sites.aggregate([
{
'$match': {'interval': minutes}
},
{'$unwind': "$urls"}
])
if not rows['result']:
print 'No tasks found to run every %d minutes' % (minutes)
return False
for row in rows['result']:
msg = {
'url': str(row['urls']),
'site': str(row['site']),
'account': 'me',
'type': 'har',
'nb': int(row['nb']),
'agent': str(row['agent'])
}
processtest.delay(msg)
print 'Done running tasks for every %d minutes' % (minutes)
return True
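# Illustrative usage (added; the field values are hypothetical): with a
# Celery worker running against the broker named in celeryconfig, tests
# are queued roughly like this:
#
#   processtest.delay({'url': 'http://example.com/', 'site': 'example',
#                      'agent': 'desktop', 'nb': 3})
#   processcron.delay(15)  # queue all sites configured for a 15-minute interval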
|
fluxw42/youtube-dl
|
refs/heads/master
|
youtube_dl/postprocessor/ffmpeg.py
|
6
|
from __future__ import unicode_literals
import io
import os
import subprocess
import time
from .common import AudioConversionError, PostProcessor
from ..compat import (
compat_subprocess_get_DEVNULL,
)
from ..utils import (
encodeArgument,
encodeFilename,
get_exe_version,
is_outdated_version,
PostProcessingError,
prepend_extension,
shell_quote,
subtitles_filename,
dfxp2srt,
ISO639Utils,
)
EXT_TO_OUT_FORMATS = {
'aac': 'adts',
'flac': 'flac',
'm4a': 'ipod',
'mka': 'matroska',
'mkv': 'matroska',
'mpg': 'mpeg',
'ogv': 'ogg',
'ts': 'mpegts',
'wma': 'asf',
'wmv': 'asf',
}
ACODECS = {
'mp3': 'libmp3lame',
'aac': 'aac',
'flac': 'flac',
'm4a': 'aac',
'opus': 'opus',
'vorbis': 'libvorbis',
'wav': None,
}
class FFmpegPostProcessorError(PostProcessingError):
pass
class FFmpegPostProcessor(PostProcessor):
def __init__(self, downloader=None):
PostProcessor.__init__(self, downloader)
self._determine_executables()
def check_version(self):
if not self.available:
raise FFmpegPostProcessorError('ffmpeg or avconv not found. Please install one.')
required_version = '10-0' if self.basename == 'avconv' else '1.0'
if is_outdated_version(
self._versions[self.basename], required_version):
warning = 'Your copy of %s is outdated, update %s to version %s or newer if you encounter any errors.' % (
self.basename, self.basename, required_version)
if self._downloader:
self._downloader.report_warning(warning)
@staticmethod
def get_versions(downloader=None):
return FFmpegPostProcessor(downloader)._versions
def _determine_executables(self):
programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
prefer_ffmpeg = False
self.basename = None
self.probe_basename = None
self._paths = None
self._versions = None
if self._downloader:
prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', False)
location = self._downloader.params.get('ffmpeg_location')
if location is not None:
if not os.path.exists(location):
self._downloader.report_warning(
'ffmpeg-location %s does not exist! '
'Continuing without avconv/ffmpeg.' % (location))
self._versions = {}
return
elif not os.path.isdir(location):
basename = os.path.splitext(os.path.basename(location))[0]
if basename not in programs:
self._downloader.report_warning(
'Cannot identify executable %s, its basename should be one of %s. '
'Continuing without avconv/ffmpeg.' %
(location, ', '.join(programs)))
self._versions = {}
return None
location = os.path.dirname(os.path.abspath(location))
if basename in ('ffmpeg', 'ffprobe'):
prefer_ffmpeg = True
self._paths = dict(
(p, os.path.join(location, p)) for p in programs)
self._versions = dict(
(p, get_exe_version(self._paths[p], args=['-version']))
for p in programs)
if self._versions is None:
self._versions = dict(
(p, get_exe_version(p, args=['-version'])) for p in programs)
self._paths = dict((p, p) for p in programs)
if prefer_ffmpeg:
prefs = ('ffmpeg', 'avconv')
else:
prefs = ('avconv', 'ffmpeg')
for p in prefs:
if self._versions[p]:
self.basename = p
break
if prefer_ffmpeg:
prefs = ('ffprobe', 'avprobe')
else:
prefs = ('avprobe', 'ffprobe')
for p in prefs:
if self._versions[p]:
self.probe_basename = p
break
@property
def available(self):
return self.basename is not None
@property
def executable(self):
return self._paths[self.basename]
@property
def probe_available(self):
return self.probe_basename is not None
@property
def probe_executable(self):
return self._paths[self.probe_basename]
def get_audio_codec(self, path):
if not self.probe_available:
raise PostProcessingError('ffprobe or avprobe not found. Please install one.')
try:
cmd = [
encodeFilename(self.probe_executable, True),
encodeArgument('-show_streams'),
encodeFilename(self._ffmpeg_filename_argument(path), True)]
if self._downloader.params.get('verbose', False):
self._downloader.to_screen('[debug] %s command line: %s' % (self.basename, shell_quote(cmd)))
handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE, stdin=subprocess.PIPE)
output = handle.communicate()[0]
if handle.wait() != 0:
return None
except (IOError, OSError):
return None
audio_codec = None
for line in output.decode('ascii', 'ignore').split('\n'):
if line.startswith('codec_name='):
audio_codec = line.split('=')[1].strip()
elif line.strip() == 'codec_type=audio' and audio_codec is not None:
return audio_codec
return None
def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
self.check_version()
oldest_mtime = min(
os.stat(encodeFilename(path)).st_mtime for path in input_paths)
opts += self._configuration_args()
files_cmd = []
for path in input_paths:
files_cmd.extend([
encodeArgument('-i'),
encodeFilename(self._ffmpeg_filename_argument(path), True)
])
cmd = ([encodeFilename(self.executable, True), encodeArgument('-y')] +
files_cmd +
[encodeArgument(o) for o in opts] +
[encodeFilename(self._ffmpeg_filename_argument(out_path), True)])
if self._downloader.params.get('verbose', False):
self._downloader.to_screen('[debug] ffmpeg command line: %s' % shell_quote(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
stderr = stderr.decode('utf-8', 'replace')
msg = stderr.strip().split('\n')[-1]
raise FFmpegPostProcessorError(msg)
self.try_utime(out_path, oldest_mtime, oldest_mtime)
def run_ffmpeg(self, path, out_path, opts):
self.run_ffmpeg_multiple_files([path], out_path, opts)
def _ffmpeg_filename_argument(self, fn):
# Always use 'file:' because the filename may contain ':' (ffmpeg
# interprets that as a protocol) or can start with '-' (-- is broken in
# ffmpeg, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details)
# Also leave '-' intact in order not to break streaming to stdout.
return 'file:' + fn if fn != '-' else fn
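# Illustrative values (added): _ffmpeg_filename_argument('out:1.mp4')
# returns 'file:out:1.mp4' so ffmpeg does not mistake the ':' for a
# protocol prefix, while '-' (stdout) is passed through unchanged.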
class FFmpegExtractAudioPP(FFmpegPostProcessor):
def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
FFmpegPostProcessor.__init__(self, downloader)
if preferredcodec is None:
preferredcodec = 'best'
self._preferredcodec = preferredcodec
self._preferredquality = preferredquality
self._nopostoverwrites = nopostoverwrites
def run_ffmpeg(self, path, out_path, codec, more_opts):
if codec is None:
acodec_opts = []
else:
acodec_opts = ['-acodec', codec]
opts = ['-vn'] + acodec_opts + more_opts
try:
FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
except FFmpegPostProcessorError as err:
raise AudioConversionError(err.msg)
def run(self, information):
path = information['filepath']
filecodec = self.get_audio_codec(path)
if filecodec is None:
raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe')
more_opts = []
if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']:
# Lossless, but in another container
acodec = 'copy'
extension = 'm4a'
more_opts = ['-bsf:a', 'aac_adtstoasc']
elif filecodec in ['aac', 'flac', 'mp3', 'vorbis', 'opus']:
# Lossless if possible
acodec = 'copy'
extension = filecodec
if filecodec == 'aac':
more_opts = ['-f', 'adts']
if filecodec == 'vorbis':
extension = 'ogg'
else:
# MP3 otherwise.
acodec = 'libmp3lame'
extension = 'mp3'
more_opts = []
if self._preferredquality is not None:
if int(self._preferredquality) < 10:
more_opts += ['-q:a', self._preferredquality]
else:
more_opts += ['-b:a', self._preferredquality + 'k']
else:
# We convert the audio (lossy if codec is lossy)
acodec = ACODECS[self._preferredcodec]
extension = self._preferredcodec
more_opts = []
if self._preferredquality is not None:
# The opus codec doesn't support the -aq option
if int(self._preferredquality) < 10 and extension != 'opus':
more_opts += ['-q:a', self._preferredquality]
else:
more_opts += ['-b:a', self._preferredquality + 'k']
if self._preferredcodec == 'aac':
more_opts += ['-f', 'adts']
if self._preferredcodec == 'm4a':
more_opts += ['-bsf:a', 'aac_adtstoasc']
if self._preferredcodec == 'vorbis':
extension = 'ogg'
if self._preferredcodec == 'wav':
extension = 'wav'
more_opts += ['-f', 'wav']
prefix, sep, ext = path.rpartition('.') # not os.path.splitext, since the latter does not work on unicode in all setups
new_path = prefix + sep + extension
information['filepath'] = new_path
information['ext'] = extension
# If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
if (new_path == path or
(self._nopostoverwrites and os.path.exists(encodeFilename(new_path)))):
self._downloader.to_screen('[ffmpeg] Post-process file %s exists, skipping' % new_path)
return [], information
try:
self._downloader.to_screen('[ffmpeg] Destination: ' + new_path)
self.run_ffmpeg(path, new_path, acodec, more_opts)
except AudioConversionError as e:
raise PostProcessingError(
'audio conversion failed: ' + e.msg)
except Exception:
raise PostProcessingError('error running ' + self.basename)
# Try to update the date time for extracted audio file.
if information.get('filetime') is not None:
self.try_utime(
new_path, time.time(), information['filetime'],
errnote='Cannot update utime of audio file')
return [path], information
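# Illustrative sketch (added; 'song.webm' is a hypothetical input): with
# preferredcodec='best', an opus stream in song.webm is copied losslessly
# to song.opus, whereas preferredcodec='mp3' re-encodes it via libmp3lame
# into song.mp3.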
class FFmpegVideoConvertorPP(FFmpegPostProcessor):
def __init__(self, downloader=None, preferedformat=None):
super(FFmpegVideoConvertorPP, self).__init__(downloader)
self._preferedformat = preferedformat
def run(self, information):
path = information['filepath']
if information['ext'] == self._preferedformat:
self._downloader.to_screen('[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
return [], information
options = []
if self._preferedformat == 'avi':
options.extend(['-c:v', 'libxvid', '-vtag', 'XVID'])
prefix, sep, ext = path.rpartition('.')
outpath = prefix + sep + self._preferedformat
self._downloader.to_screen('[' + 'ffmpeg' + '] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
self.run_ffmpeg(path, outpath, options)
information['filepath'] = outpath
information['format'] = self._preferedformat
information['ext'] = self._preferedformat
return [path], information
class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
def run(self, information):
if information['ext'] not in ('mp4', 'webm', 'mkv'):
self._downloader.to_screen('[ffmpeg] Subtitles can only be embedded in mp4, webm or mkv files')
return [], information
subtitles = information.get('requested_subtitles')
if not subtitles:
self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to embed')
return [], information
filename = information['filepath']
ext = information['ext']
sub_langs = []
sub_filenames = []
webm_vtt_warn = False
for lang, sub_info in subtitles.items():
sub_ext = sub_info['ext']
if ext != 'webm' or (ext == 'webm' and sub_ext == 'vtt'):
sub_langs.append(lang)
sub_filenames.append(subtitles_filename(filename, lang, sub_ext))
else:
if not webm_vtt_warn and ext == 'webm' and sub_ext != 'vtt':
webm_vtt_warn = True
self._downloader.to_screen('[ffmpeg] Only WebVTT subtitles can be embedded in webm files')
if not sub_langs:
return [], information
input_files = [filename] + sub_filenames
opts = [
'-map', '0',
'-c', 'copy',
# Don't copy the existing subtitles, we may be running the
# postprocessor a second time
'-map', '-0:s',
]
if information['ext'] == 'mp4':
opts += ['-c:s', 'mov_text']
for (i, lang) in enumerate(sub_langs):
opts.extend(['-map', '%d:0' % (i + 1)])
lang_code = ISO639Utils.short2long(lang)
if lang_code is not None:
opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
temp_filename = prepend_extension(filename, 'temp')
self._downloader.to_screen('[ffmpeg] Embedding subtitles in \'%s\'' % filename)
self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return sub_filenames, information
class FFmpegMetadataPP(FFmpegPostProcessor):
def run(self, info):
metadata = {}
def add(meta_list, info_list=None):
if not info_list:
info_list = meta_list
if not isinstance(meta_list, (list, tuple)):
meta_list = (meta_list,)
if not isinstance(info_list, (list, tuple)):
info_list = (info_list,)
for info_f in info_list:
if info.get(info_f) is not None:
for meta_f in meta_list:
metadata[meta_f] = info[info_f]
break
add('title', ('track', 'title'))
add('date', 'upload_date')
add(('description', 'comment'), 'description')
add('purl', 'webpage_url')
add('track', 'track_number')
add('artist', ('artist', 'creator', 'uploader', 'uploader_id'))
add('genre')
add('album')
add('album_artist')
add('disc', 'disc_number')
if not metadata:
self._downloader.to_screen('[ffmpeg] There isn\'t any metadata to add')
return [], info
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
if info['ext'] == 'm4a':
options = ['-vn', '-acodec', 'copy']
else:
options = ['-c', 'copy']
for (name, value) in metadata.items():
options.extend(['-metadata', '%s=%s' % (name, value)])
self._downloader.to_screen('[ffmpeg] Adding metadata to \'%s\'' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegMergerPP(FFmpegPostProcessor):
def run(self, info):
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
args = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0']
self._downloader.to_screen('[ffmpeg] Merging formats into "%s"' % filename)
self.run_ffmpeg_multiple_files(info['__files_to_merge'], temp_filename, args)
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return info['__files_to_merge'], info
def can_merge(self):
# TODO: figure out merge-capable ffmpeg version
if self.basename != 'avconv':
return True
required_version = '10-0'
if is_outdated_version(
self._versions[self.basename], required_version):
warning = ('Your copy of %s is outdated and unable to properly mux separate video and audio files, '
'youtube-dl will download single file media. '
'Update %s to version %s or newer to fix this.') % (
self.basename, self.basename, required_version)
if self._downloader:
self._downloader.report_warning(warning)
return False
return True
class FFmpegFixupStretchedPP(FFmpegPostProcessor):
def run(self, info):
stretched_ratio = info.get('stretched_ratio')
if stretched_ratio is None or stretched_ratio == 1:
return [], info
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
options = ['-c', 'copy', '-aspect', '%f' % stretched_ratio]
self._downloader.to_screen('[ffmpeg] Fixing aspect ratio in "%s"' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegFixupM4aPP(FFmpegPostProcessor):
def run(self, info):
if info.get('container') != 'm4a_dash':
return [], info
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
options = ['-c', 'copy', '-f', 'mp4']
self._downloader.to_screen('[ffmpeg] Correcting container in "%s"' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegFixupM3u8PP(FFmpegPostProcessor):
def run(self, info):
filename = info['filepath']
if self.get_audio_codec(filename) == 'aac':
temp_filename = prepend_extension(filename, 'temp')
options = ['-c', 'copy', '-f', 'mp4', '-bsf:a', 'aac_adtstoasc']
self._downloader.to_screen('[ffmpeg] Fixing malformed aac bitstream in "%s"' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):
def __init__(self, downloader=None, format=None):
super(FFmpegSubtitlesConvertorPP, self).__init__(downloader)
self.format = format
def run(self, info):
subs = info.get('requested_subtitles')
filename = info['filepath']
new_ext = self.format
new_format = new_ext
if new_format == 'vtt':
new_format = 'webvtt'
if subs is None:
self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to convert')
return [], info
self._downloader.to_screen('[ffmpeg] Converting subtitles')
sub_filenames = []
for lang, sub in subs.items():
ext = sub['ext']
if ext == new_ext:
self._downloader.to_screen(
'[ffmpeg] Subtitle file for %s is already in the requested format' % new_ext)
continue
old_file = subtitles_filename(filename, lang, ext)
sub_filenames.append(old_file)
new_file = subtitles_filename(filename, lang, new_ext)
if ext in ('dfxp', 'ttml', 'tt'):
self._downloader.report_warning(
'You have requested to convert dfxp (TTML) subtitles into another format, '
'which results in style information loss')
dfxp_file = old_file
srt_file = subtitles_filename(filename, lang, 'srt')
with io.open(dfxp_file, 'rt', encoding='utf-8') as f:
srt_data = dfxp2srt(f.read())
with io.open(srt_file, 'wt', encoding='utf-8') as f:
f.write(srt_data)
old_file = srt_file
subs[lang] = {
'ext': 'srt',
'data': srt_data
}
if new_ext == 'srt':
continue
else:
sub_filenames.append(srt_file)
self.run_ffmpeg(old_file, new_file, ['-f', new_format])
with io.open(new_file, 'rt', encoding='utf-8') as f:
subs[lang] = {
'ext': new_ext,
'data': f.read(),
}
return sub_filenames, info
|
nmrao/robotframework
|
refs/heads/master
|
src/robot/utils/frange.py
|
18
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .robottypes import is_integer, is_string
def frange(*args):
"""Like ``range()`` but accepts float arguments."""
if all(is_integer(arg) for arg in args):
return range(*args)
start, stop, step = _get_start_stop_step(args)
digits = max(_digits(start), _digits(stop), _digits(step))
factor = pow(10, digits)
return [x/float(factor) for x in range(int(round(start*factor)),
int(round(stop*factor)),
int(round(step*factor)))]
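# Illustrative values (added): scaling start/stop/step to integers before
# calling range() sidesteps float accumulation errors, e.g.
#
#   frange(3)            # all-int args fall through to range(); [0, 1, 2] on Python 2
#   frange(0, 1, 0.25)   # -> [0.0, 0.25, 0.5, 0.75]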
def _get_start_stop_step(args):
if len(args) == 1:
return 0, args[0], 1
if len(args) == 2:
return args[0], args[1], 1
if len(args) == 3:
return args
raise TypeError('frange expected 1-3 arguments, got %d.' % len(args))
def _digits(number):
if not is_string(number):
number = repr(number)
if 'e' in number:
return _digits_with_exponent(number)
if '.' in number:
return _digits_with_fractional(number)
return 0
def _digits_with_exponent(number):
mantissa, exponent = number.split('e')
mantissa_digits = _digits(mantissa)
exponent_digits = int(exponent) * -1
return max(mantissa_digits + exponent_digits, 0)
def _digits_with_fractional(number):
fractional = number.split('.')[1]
if fractional == '0':
return 0
return len(fractional)
|
stableShip/python_learn
|
refs/heads/master
|
fileTest.py
|
1
|
# coding=utf-8
# Approach one: read a file
a_file = open("./fileTest.py")
text = a_file.read()
# Explicitly call close
a_file.close()
print("File type: %s, first 15 characters: %s" % (type(text), text[0:15]))
# Approach two: read with a context manager
with open('./fileTest.py', 'r') as f:
print("First line: %s" % f.readline())
# Writing a file
with open('./test.txt', 'w') as f:
f.write('Hello, world!')
|
shurihell/testasia
|
refs/heads/test1
|
common/djangoapps/auth_exchange/tests/test_forms.py
|
113
|
# pylint: disable=no-member
"""
Tests for OAuth token exchange forms
"""
import unittest
from django.conf import settings
from django.contrib.sessions.middleware import SessionMiddleware
from django.test import TestCase
from django.test.client import RequestFactory
import httpretty
from provider import scope
import social.apps.django_app.utils as social_utils
from auth_exchange.forms import AccessTokenExchangeForm
from auth_exchange.tests.utils import AccessTokenExchangeTestMixin
from third_party_auth.tests.utils import ThirdPartyOAuthTestMixinFacebook, ThirdPartyOAuthTestMixinGoogle
class AccessTokenExchangeFormTest(AccessTokenExchangeTestMixin):
"""
Mixin that defines test cases for AccessTokenExchangeForm
"""
def setUp(self):
super(AccessTokenExchangeFormTest, self).setUp()
self.request = RequestFactory().post("dummy_url")
redirect_uri = 'dummy_redirect_url'
SessionMiddleware().process_request(self.request)
self.request.social_strategy = social_utils.load_strategy(self.request)
# pylint: disable=no-member
self.request.backend = social_utils.load_backend(self.request.social_strategy, self.BACKEND, redirect_uri)
def _assert_error(self, data, expected_error, expected_error_description):
form = AccessTokenExchangeForm(request=self.request, data=data)
self.assertEqual(
form.errors,
{"error": expected_error, "error_description": expected_error_description}
)
self.assertNotIn("partial_pipeline", self.request.session)
def _assert_success(self, data, expected_scopes):
form = AccessTokenExchangeForm(request=self.request, data=data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data["user"], self.user)
self.assertEqual(form.cleaned_data["client"], self.oauth_client)
self.assertEqual(scope.to_names(form.cleaned_data["scope"]), expected_scopes)
# This is necessary because cms does not implement third party auth
@unittest.skipUnless(settings.FEATURES.get("ENABLE_THIRD_PARTY_AUTH"), "third party auth not enabled")
@httpretty.activate
class AccessTokenExchangeFormTestFacebook(
AccessTokenExchangeFormTest,
ThirdPartyOAuthTestMixinFacebook,
TestCase
):
"""
Tests for AccessTokenExchangeForm used with Facebook
"""
pass
# This is necessary because cms does not implement third party auth
@unittest.skipUnless(settings.FEATURES.get("ENABLE_THIRD_PARTY_AUTH"), "third party auth not enabled")
@httpretty.activate
class AccessTokenExchangeFormTestGoogle(
AccessTokenExchangeFormTest,
ThirdPartyOAuthTestMixinGoogle,
TestCase
):
"""
Tests for AccessTokenExchangeForm used with Google
"""
pass
|
scragg0x/FFXI-Scraper
|
refs/heads/master
|
ffxiscraper/scrapemark.py
|
1
|
import re
import unicodedata
import urllib, urllib2
import urlparse
import cgi
import cookielib
from htmlentitydefs import name2codepoint
verbose = False
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.8.1.3) Gecko/20070309 Firefox/2.0.0.3'
# todo: throw invalid arguments error if neither html nor url are given
# todo: better support for comment stuff (js in javascript)?
def scrape(pattern, html=None, url=None, get=None, post=None, headers=None, cookie_jar=None):
"""
*Pattern* is either a string or a :class:`ScrapeMarkPattern` object and is applied
to *html*. If *html* is not present, *url* is used to fetch the html, along with
the optional parameters *get*, *post*, *header*, and *cookie_jar*. If specified,
*get*, *post*, and *header* must be dictionary-like objects. If specified,
*cookie_jar* must be an instance of :class:`cookielib.CookieJar`.
If a match is found, this function returns a dictionary, list, string, int, float or
bool, depending upon *pattern*. See the notes on :ref:`PatternSyntax` for more
information. If no match is found, ``None`` is returned.
To effectively simulate a browser request when fetching the html at *url*, if
``headers['User-Agent']`` is not specified, :data:`scrapemark.user_agent` is used
instead. Also, if *cookie_jar* is not specified, an empty :class:`cookielib.CookieJar`
is instantiated and used in the http transaction.
"""
if type(pattern) == str:
pattern = compile(pattern)
return pattern.scrape(html, url, get, post, headers, cookie_jar)
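# Illustrative sketch (added, mirroring scrapemark's documented usage):
#
#   scrape('<title>{{title}}</title>',
#          html='<html><title>Hello</title></html>')
#   # -> {'title': 'Hello'}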
def compile(pattern):
"""
Compiles a pattern into a :class:`ScrapeMarkPattern` object.
Using this object is optimal if you want to apply a single pattern multiple
times.
"""
return _Pattern(_compile(pattern, True))
def fetch_html(url, get=None, post=None, headers=None, cookie_jar=None):
"""
Fetches and returns the html at the given *url*, optionally using *get*, *post*,
*header*, and *cookie_jar*. No scraping occurs. This function is used internally
by :func:`scrapemark.scrape`. For the behavior of ``headers['User-Agent']`` and *cookie_jar*, read
the :func:`scrapemark.scrape` documentation.
"""
if get:
if type(get) == str:
get = cgi.parse_qs(get)
l = list(urlparse.urlparse(url))
g = cgi.parse_qs(l[4])
g.update(get)
l[4] = urllib.urlencode(g)
url = urlparse.urlunparse(l)
if post and type(post) != str:
post = urllib.urlencode(post)
if cookie_jar == None:
cookie_jar = cookielib.CookieJar()
if not headers:
headers = {'User-Agent': user_agent}
else:
if 'User-Agent' not in headers:
headers['User-Agent'] = user_agent
if verbose:
print 'fetching', url, '...'
request = urllib2.Request(url, post, headers)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie_jar))
res = opener.open(request).read()
if verbose:
print 'DONE fetching.'
return res
# INTERNALS
# ----------------------------------------------------------------------
class _Pattern:
def __init__(self, nodes):
self._nodes = nodes
def scrape(self, html=None, url=None, get=None, post=None, headers=None, cookie_jar=None):
if cookie_jar == None:
cookie_jar = cookielib.CookieJar()
if html == None:
html = fetch_html(url, get, post, headers, cookie_jar)
captures = {}
if _match(self._nodes, _remove_comments(html), 0, captures, url, cookie_jar) == -1:
return None
if len(captures) == 1 and '' in captures:
return captures['']
return captures
# node types # information in tuple
_TEXT = 1 # (_TEXT, regex)
_TAG = 2 # (_TAG, open_regex, close_regex, skip, attributes, children) attributes {name: (regex, [[special_nodes]]) ...}
_CAPTURE = 3 # (_CAPTURE, name_parts, filters)
_SCAN = 4 # (_SCAN, children)
_GOTO = 5 # (_GOTO, filters, children)
_space_re = re.compile(r'\s+')
_tag_re = re.compile(r'<[^>]*>')
_tag_skip_re = re.compile(r'\((.*)\)$')
_attr_re = re.compile(r'([\w-]+)(?:\s*=\s*(?:(["\'])(.*?)\2|(\S+)))?', re.S)
_attr_start_re = re.compile(r'([\w-]+)(?:\s*=\s*)?')
_comment_re = re.compile(r'<!--.*?-->', re.S)
_script_re = re.compile(r'<script[^>]*>.*?</script>', re.S | re.I)
_entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8});")
_closure_start_re = re.compile(r'<|\{[\{\*\@\#]')
_capture_list_re = re.compile(r'\[(\w*)\]')
# functions for compiling a pattern into nodes
# --------------------------------------------------------------
def _compile(s, re_compile):
slen = len(s)
i = 0
nodes = []
stack = []
while i < slen:
m = _closure_start_re.search(s, i)
if not m:
break
closure_name = m.group(0)
# text since last closure
text = s[i:m.start()].strip()
if text:
nodes.append((_TEXT, _make_text_re(text, re_compile)))
i = m.end()
# an HTML tag
if closure_name == '<':
inner, i = _next_closure(s, i, '<', '>')
inner = inner.strip()
if inner:
# end tag
if inner[0] == '/':
if stack:
nodes = stack.pop()
# standalone tag
elif inner[-1] == '/':
l = inner[:-1].split(None, 1)
name = l[0].strip()
name, skip = _tag_skip(name)
attrs = {} if len(l) == 1 else _compile_attrs(l[1], re_compile)
nodes.append((_TAG, _make_start_tag_re(name, re_compile), _make_end_tag_re(name, re_compile), skip, attrs, []))
# start tag
else:
l = inner.split(None, 1)
name = l[0].strip()
name, skip = _tag_skip(name)
attrs = {} if len(l) == 1 else _compile_attrs(l[1], re_compile)
new_nodes = []
nodes.append((_TAG, _make_start_tag_re(name, re_compile), _make_end_tag_re(name, re_compile), skip, attrs, new_nodes))
stack.append(nodes)
nodes = new_nodes
# special brackets
else:
special_type = closure_name[1]
# capture
if special_type == '{':
inner, i = _next_closure(s, i, '{{', '}}')
nodes.append(_compile_capture(inner))
# scan
elif special_type == '*':
inner, i = _next_closure(s, i, '{*', '*}')
nodes.append((_SCAN, _compile(inner, re_compile)))
# goto
elif special_type == '@':
inner, i = _next_closure(s, i, '{@', '@}')
if inner:
filters = []
if inner[0] == '|':
filters, inner = (inner.split(None, 1) + [''])[:2]
filters = filters.split('|')[1:]
nodes.append((_GOTO, filters, _compile(inner, True)))
# comment
elif special_type == '#':
i = s.find('#}')
if i == -1:
break
i += 2
# ending text
text = s[i:].strip()
if text:
nodes.append((_TEXT, _make_text_re(text, re_compile)))
stack.append(nodes)
return stack[0]
def _compile_capture(s): # returns the tuple with _CAPTURE
filters = s.strip().split('|')
name = filters.pop(0)
name_parts = []
for part in name.split('.'):
m = _capture_list_re.match(part)
if m:
name_parts.append((m.group(1),))
else:
name_parts.append(part)
return (_CAPTURE, name_parts, filters)
def _compile_attrs(s, re_compile):
attrs = {}
i = 0
slen = len(s)
while i < slen:
m = _attr_start_re.search(s, i)
if not m:
break
name = m.group(1).lower()
i = m.end()
if i >= slen:
break
quote = s[i]
# no quotes, value ends at next whitespace
if quote != '"' and quote != "'":
m = _space_re.search(s, i)
if m:
val = s[i:m.start()]
i = m.end()
else:
val = s[i:]
i = slen
# quotes
else:
i += 1
start = i
# find the ending quote, skipping over { }
while i < slen:
quote_i = s.find(quote, i)
bracket_i = s.find('{', i)
if quote_i == -1:
i = slen
break
elif bracket_i == -1 or quote_i < bracket_i:
i = quote_i
break
else:
_, i = _next_closure(s, bracket_i + 1, '{', '}')
val = s[start:i]
val = val.strip()
regex = ''
special_nodes = []
if val: # if there is no value, empty regex string won't be compiled
nodes = _compile(val, False)
prev_special = False
# concatenate regexes
for node in nodes:
if node[0] == _TEXT:
regex += node[1]
prev_special = False
elif node[0] != _TAG:
if prev_special:
special_nodes[-1].append(node)
else:
regex += '(.*)'
special_nodes.append([node])
prev_special = True
if regex != '(.*)':
regex = '(?:^|\s)' + regex + '(?:\s|$)' # match must be flush with whitespace or start/end
if re_compile:
regex = re.compile(regex, re.I)
attrs[name] = (regex, special_nodes)
return attrs
def _tag_skip(name):
match = _tag_skip_re.search(name)
if match:
try:
val = match.group(1)
return name[:match.start()], -1 if val == 'last' else int(val)
except ValueError:
return name[:match.start()], 0
return name, 0
def _make_start_tag_re(name, re_compile):
regex = r'<\s*' + re.escape(name) + r'(?:\s+([^>]*?)|\s*)(/)?>'
if re_compile:
regex = re.compile(regex, re.I)
return regex
def _make_end_tag_re(name, re_compile):
regex = r'</\s*' + re.escape(name) + r'\s*>'
if re_compile:
regex = re.compile(regex, re.I)
return regex
def _make_text_re(text, re_compile):
regex = r'\s+'.join([re.escape(w) for w in text.split()])
if re_compile:
regex = re.compile(regex, re.I)
return regex
# functions for running pattern nodes on html
# ---------------------------------------------------------------
def _match(nodes, html, i, captures, base_url, cookie_jar): # returns substring index after match, -1 if no match
anchor_i = i
special = []
for node in nodes:
# match text node
if node[0] == _TEXT:
m = node[1].search(html, i)
if not m:
return -1
# run previous special nodes
if not _run_special_nodes(special, html[anchor_i:m.start()], captures, base_url, cookie_jar):
return -1
special = []
i = anchor_i = m.end()
# match html tag
elif node[0] == _TAG:
if node[3] < 0:
# backwards from last tag
starts = []
while True:
m = node[1].search(html, i)
if not m:
break
starts.append(m.start())
i = m.end()
if not m.group(2): # not standalone
body, i = _next_tag(html, i, node[1], node[2])
i = starts[max(node[3], -len(starts))] # todo::::::::::::::::should throw -1 if not enough
else:
# skip forward
for _ in range(node[3]):
m = node[1].search(html, i)
if not m:
return -1
i = m.end()
if not m.group(2): # not standalone
body, i = _next_tag(html, i, node[1], node[2])
while True:
# cycle through tags until all attributes match
while True:
nested_captures = {}
m = node[1].search(html, i)
if not m:
return -1
i = m.end()
attrs = _parse_attrs(m.group(1) or '')
attrs_matched = _match_attrs(node[4], attrs, nested_captures, base_url, cookie_jar)
if attrs_matched == -1:
return -1
if attrs_matched:
break
if m.group(2): # standalone tag
_merge_captures(captures, nested_captures)
break
else: # make sure children match
body, i = _next_tag(html, i, node[1], node[2])
if _match(node[5], body, 0, nested_captures, base_url, cookie_jar) != -1:
_merge_captures(captures, nested_captures)
break
# run previous special nodes
if not _run_special_nodes(special, html[anchor_i:m.start()], captures, base_url, cookie_jar):
return -1
special = []
anchor_i = i
else:
special.append(node)
if not _run_special_nodes(special, html[i:], captures, base_url, cookie_jar):
return -1
return i
def _match_attrs(attr_nodes, attrs, captures, base_url, cookie_jar): # returns True/False, -1 if failed _run_special_node
for name, attr_node in attr_nodes.items():
if name not in attrs:
return False
if attr_node[0]: # if attr_node[0] is empty string, done matching
m = attr_node[0].match(attrs[name])
if not m:
return False
# run regex captures over parallel list of special nodes
for i, special_nodes in enumerate(attr_node[1]):
for n in special_nodes:
if not _run_special_node(n, m.group(i + 1), captures, base_url, cookie_jar):
return -1
return True
def _run_special_nodes(nodes, s, captures, base_url, cookie_jar): # returns True/False
for node in nodes:
if not _run_special_node(node, s, captures, base_url, cookie_jar):
return False
return True
def _run_special_node(node, s, captures, base_url, cookie_jar): # returns True/False
if node[0] == _CAPTURE:
s = _apply_filters(s, node[2], base_url)
_set_capture(captures, node[1], s)
elif node[0] == _SCAN:
i = 0
while True:
nested_captures = {}
i = _match(node[1], s, i, nested_captures, base_url, cookie_jar)
if i == -1:
break
else:
_merge_captures(captures, nested_captures)
# scan always ends with an unsuccessful match, so fill in captures that weren't set
_fill_captures(node[1], captures)
elif node[0] == _GOTO:
s = s.strip()
if not s:
return False
new_url = _apply_filters(s, node[1] + ['abs'], base_url)
new_html = fetch_html(new_url, cookie_jar=cookie_jar)
if _match(node[2], new_html, 0, captures, new_url, cookie_jar) == -1:
return False
return True
def _set_capture(captures, name_parts, val, list_append=True):
obj = captures
last = len(name_parts) - 1
for i, part in enumerate(name_parts):
if i == last:
new_obj = val
else:
new_obj = {}
if type(part) == tuple:
if part[0] not in obj:
if list_append:
obj[part[0]] = [new_obj]
else:
obj[part[0]] = []
break
else:
if type(obj[part[0]]) != list:
break
if i == last or len(obj[part[0]]) == 0 or name_parts[i + 1] in obj[part[0]][-1]:
if list_append:
obj[part[0]].append(new_obj)
else:
break
else:
new_obj = obj[part[0]][-1]
else:
if part not in obj:
obj[part] = new_obj
else:
new_obj = obj[part]
obj = new_obj
def _merge_captures(master, slave):
for name, val in slave.items():
if name not in master:
master[name] = val
else:
if type(val) == dict and type(master[name]) == dict:
_merge_captures(master[name], val)
elif type(val) == list and type(master[name]) == list:
for e in val:
if type(e) == dict:
for n, v in e.items():
if len(master[name]) == 0 or type(master[name][-1]) != dict or n in master[name][-1]:
master[name].append({n: v})
else:
master[name][-1][n] = v
else:
master[name].append(e)
def _fill_captures(nodes, captures):
for node in nodes:
if node[0] == _TAG:
_fill_captures(node[5], captures)
for attr in node[4].values():
for special_nodes in attr[1]:
_fill_captures(special_nodes, captures)
elif node[0] == _CAPTURE:
_set_capture(captures, node[1], _apply_filters(None, node[2], None), False)
elif node[0] == _SCAN:
_fill_captures(node[1], captures)
elif node[0] == _GOTO:
_fill_captures(node[2], captures)
def _apply_filters(s, filters, base_url):
if 'html' not in filters and issubclass(type(s), basestring):
s = _remove_html(s)
for f in filters:
if f == 'unescape':
if issubclass(type(s), basestring):
s = s.decode('string_escape')
elif f == 'abs':
if issubclass(type(s), basestring):
s = urlparse.urljoin(base_url, s)
elif f == 'int':
try:
s = int(s)
except:
s = 0
elif f == 'float':
try:
s = float(s)
except:
s = 0.0
elif f == 'bool':
s = bool(s)
return s
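# Illustrative values (added): filters post-process captured strings, e.g.
#
#   _apply_filters(' 42 ', ['int'], None)             # -> 42
#   _apply_filters('/a', ['abs'], 'http://x.com/b/')  # -> 'http://x.com/a'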
# html/text utilities
# ---------------------------------------------------------------
def _remove_comments(s):
return _comment_re.sub('', s)
def _remove_html(s):
s = _comment_re.sub('', s)
s = _script_re.sub('', s)
s = _tag_re.sub('', s)
s = _space_re.sub(' ', s)
s = _decode_entities(s)
s = s.strip()
return s
def _decode_entities(s):
if type(s) is not unicode:
s = unicode(s, 'utf-8', 'ignore')
s = unicodedata.normalize('NFKD', s)
return _entity_re.sub(_substitute_entity, s)
def _substitute_entity(m):
ent = m.group(2)
if m.group(1) == "#":
return unichr(int(ent))
else:
cp = name2codepoint.get(ent)
if cp:
return unichr(cp)
else:
return m.group()
def _parse_attrs(s):
attrs = {}
for m in _attr_re.finditer(s):
# attrs[m.group(1)] = m.group(3) or m.group(4)
value = m.group(3)
if value is None:
value = m.group(4)
attrs[m.group(1)] = value
return attrs
def _next_tag(s, i, tag_open_re, tag_close_re, depth=1): # returns (tag body, substring index after tag)
slen = len(s)
start = i
while i < slen:
tag_open = tag_open_re.search(s, i)
tag_close = tag_close_re.search(s, i)
if not tag_close:
i = len(s)
break
elif not tag_open or tag_close.start() < tag_open.start():
i = tag_close.end()
depth -= 1
if depth == 0:
return s[start:tag_close.start()], i
else:
if not (tag_open and tag_open.group(2)): # not a standalone tag
depth += 1
i = tag_open.end()
return s[start:i], i
def _next_closure(s, i, left_str, right_str, depth=1): # returns (closure body, substring index after closure)
slen = len(s)
start = i
while i < slen:
left = s.find(left_str, i)
right = s.find(right_str, i)
if right == -1:
i = len(s)
break
elif left == -1 or right < left:
i = right + len(right_str)
depth -= 1
if depth == 0:
return s[start:right], i
else:
depth += 1
i = left + len(left_str)
return s[start:i], i
|
IndonesiaX/edx-platform
|
refs/heads/master
|
common/djangoapps/status/models.py
|
103
|
"""
Store status messages in the database.
"""
from django.db import models
from django.contrib import admin
from django.core.cache import cache
from xmodule_django.models import CourseKeyField
from config_models.models import ConfigurationModel
from config_models.admin import ConfigurationModelAdmin
class GlobalStatusMessage(ConfigurationModel):
"""
Model that represents the current status message.
"""
message = models.TextField(blank=True, null=True)
def full_message(self, course_key):
""" Returns the full status message, including any course-specific status messages. """
cache_key = "status_message.{course_id}".format(course_id=unicode(course_key))
if cache.get(cache_key):
return cache.get(cache_key)
msg = self.message
if course_key:
try:
course_message = self.coursemessage_set.get(course_key=course_key)
# Don't add the message if course_message is blank.
if course_message:
msg = u"{} <br /> {}".format(msg, course_message.message)
except CourseMessage.DoesNotExist:
# We don't have a course-specific message, so pass.
pass
cache.set(cache_key, msg)
return msg
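# Illustrative note (added): repeated calls for the same course hit the
# "status_message.<course_id>" cache key, so an edited message only
# appears once that cache entry expires or is invalidated.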
def __unicode__(self):
return "{} - {} - {}".format(self.change_date, self.enabled, self.message)
class CourseMessage(models.Model):
"""
Model that allows the user to specify messages for individual courses.
This is not a ConfigurationModel because it's not designed to support multiple configurations at once,
which would be problematic if separate courses need separate error messages.
"""
global_message = models.ForeignKey(GlobalStatusMessage)
course_key = CourseKeyField(max_length=255, blank=True, db_index=True)
message = models.TextField(blank=True, null=True)
def __unicode__(self):
return unicode(self.course_key)
admin.site.register(GlobalStatusMessage, ConfigurationModelAdmin)
admin.site.register(CourseMessage)
|
kennedyshead/home-assistant
|
refs/heads/dev
|
homeassistant/components/hive/binary_sensor.py
|
5
|
"""Support for the Hive binary sensors."""
from datetime import timedelta
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_OPENING,
DEVICE_CLASS_SMOKE,
DEVICE_CLASS_SOUND,
BinarySensorEntity,
)
from . import HiveEntity
from .const import ATTR_MODE, DOMAIN
DEVICETYPE = {
"contactsensor": DEVICE_CLASS_OPENING,
"motionsensor": DEVICE_CLASS_MOTION,
"Connectivity": DEVICE_CLASS_CONNECTIVITY,
"SMOKE_CO": DEVICE_CLASS_SMOKE,
"DOG_BARK": DEVICE_CLASS_SOUND,
"GLASS_BREAK": DEVICE_CLASS_SOUND,
}
PARALLEL_UPDATES = 0
SCAN_INTERVAL = timedelta(seconds=15)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Hive thermostat based on a config entry."""
hive = hass.data[DOMAIN][entry.entry_id]
devices = hive.session.deviceList.get("binary_sensor")
entities = []
if devices:
for dev in devices:
entities.append(HiveBinarySensorEntity(hive, dev))
async_add_entities(entities, True)
class HiveBinarySensorEntity(HiveEntity, BinarySensorEntity):
"""Representation of a Hive binary sensor."""
@property
def unique_id(self):
"""Return unique ID of entity."""
return self._unique_id
@property
def device_info(self):
"""Return device information."""
return {
"identifiers": {(DOMAIN, self.device["device_id"])},
"name": self.device["device_name"],
"model": self.device["deviceData"]["model"],
"manufacturer": self.device["deviceData"]["manufacturer"],
"sw_version": self.device["deviceData"]["version"],
"via_device": (DOMAIN, self.device["parentDevice"]),
}
@property
def device_class(self):
"""Return the class of this sensor."""
return DEVICETYPE.get(self.device["hiveType"])
@property
def name(self):
"""Return the name of the binary sensor."""
return self.device["haName"]
@property
def available(self):
"""Return if the device is available."""
if self.device["hiveType"] != "Connectivity":
return self.device["deviceData"]["online"]
return True
@property
def extra_state_attributes(self):
"""Show Device Attributes."""
return {
ATTR_MODE: self.attributes.get(ATTR_MODE),
}
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self.device["status"]["state"]
async def async_update(self):
"""Update all Node data from Hive."""
await self.hive.session.updateData(self.device)
self.device = await self.hive.sensor.getSensor(self.device)
self.attributes = self.device.get("attributes", {})
|
louiscarrese/lc-formations
|
refs/heads/master
|
node_modules/laravel-elixir/node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/tools/graphviz.py
|
2679
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
target, _, suffix = target.partition('#')
filename, _, target = target.partition(':')
return filename, target, suffix
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
show targets in |targets| and their depedendents."""
file = open('dump.json')
edges = json.load(file)
file.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print 'digraph D {'
print ' fontsize=8' # Used by subgraphs.
print ' node [fontsize=8]'
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name)
else:
# Group multiple nodes together in a subgraph.
print ' subgraph "cluster_%s" {' % filename
print ' label = "%s"' % filename
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [label="%s"]' % (target, target_name)
print ' }'
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print ' "%s" -> "%s"' % (src, dst)
print '}'
def main():
if len(sys.argv) < 2:
print >>sys.stderr, __doc__
print >>sys.stderr
print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
return 1
edges = LoadEdges('dump.json', sys.argv[1:])
WriteGraph(edges)
return 0
if __name__ == '__main__':
sys.exit(main())
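# Illustrative usage (added; the target name is hypothetical): after
# generating dump.json with gyp's dump-dependency-json generator, render
# a dependency graph with something like:
#
#   python graphviz.py out.gyp:my_target | dot -Tpng -o deps.png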
|
UPDDI/mps-database-server
|
refs/heads/master
|
drugtrials/migrations/0010.py
|
1
|
# Generated by Django 2.1.11 on 2019-08-12 19:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('drugtrials', '0009'),
]
operations = [
migrations.AlterField(
model_name='drugtrial',
name='locked',
field=models.BooleanField(default=False, help_text='Check the box and save to block automatic migration to *Public Access*, 1-year after sign off. Uncheck and save to enable automatic migration to *Public Access*, 1-year after sign off. While this is checked, automatic approvals for Stakeholders are also prevented.', verbose_name='Keep Private Indefinitely (Locked)'),
),
migrations.AlterField(
model_name='finding',
name='locked',
field=models.BooleanField(default=False, help_text='Check the box and save to block automatic migration to *Public Access*, 1-year after sign off. Uncheck and save to enable automatic migration to *Public Access*, 1-year after sign off. While this is checked, automatic approvals for Stakeholders are also prevented.', verbose_name='Keep Private Indefinitely (Locked)'),
),
migrations.AlterField(
model_name='findingtype',
name='locked',
field=models.BooleanField(default=False, help_text='Check the box and save to block automatic migration to *Public Access*, 1-year after sign off. Uncheck and save to enable automatic migration to *Public Access*, 1-year after sign off. While this is checked, automatic approvals for Stakeholders are also prevented.', verbose_name='Keep Private Indefinitely (Locked)'),
),
migrations.AlterField(
model_name='openfdacompound',
name='locked',
field=models.BooleanField(default=False, help_text='Check the box and save to block automatic migration to *Public Access*, 1-year after sign off. Uncheck and save to enable automatic migration to *Public Access*, 1-year after sign off. While this is checked, automatic approvals for Stakeholders are also prevented.', verbose_name='Keep Private Indefinitely (Locked)'),
),
migrations.AlterField(
model_name='resultdescriptor',
name='locked',
field=models.BooleanField(default=False, help_text='Check the box and save to block automatic migration to *Public Access*, 1-year after sign off. Uncheck and save to enable automatic migration to *Public Access*, 1-year after sign off. While this is checked, automatic approvals for Stakeholders are also prevented.', verbose_name='Keep Private Indefinitely (Locked)'),
),
migrations.AlterField(
model_name='species',
name='locked',
field=models.BooleanField(default=False, help_text='Check the box and save to block automatic migration to *Public Access*, 1-year after sign off. Uncheck and save to enable automatic migration to *Public Access*, 1-year after sign off. While this is checked, automatic approvals for Stakeholders are also prevented.', verbose_name='Keep Private Indefinitely (Locked)'),
),
migrations.AlterField(
model_name='test',
name='locked',
field=models.BooleanField(default=False, help_text='Check the box and save to block automatic migration to *Public Access*, 1-year after sign off. Uncheck and save to enable automatic migration to *Public Access*, 1-year after sign off. While this is checked, automatic approvals for Stakeholders are also prevented.', verbose_name='Keep Private Indefinitely (Locked)'),
),
migrations.AlterField(
model_name='testtype',
name='locked',
field=models.BooleanField(default=False, help_text='Check the box and save to block automatic migration to *Public Access*, 1-year after sign off. Uncheck and save to enable automatic migration to *Public Access*, 1-year after sign off. While this is checked, automatic approvals for Stakeholders are also prevented.', verbose_name='Keep Private Indefinitely (Locked)'),
),
migrations.AlterField(
model_name='trialsource',
name='locked',
field=models.BooleanField(default=False, help_text='Check the box and save to block automatic migration to *Public Access*, 1-year after sign off. Uncheck and save to enable automatic migration to *Public Access*, 1-year after sign off. While this is checked, automatic approvals for Stakeholders are also prevented.', verbose_name='Keep Private Indefinitely (Locked)'),
),
]
|
nemesisdesign/django
|
refs/heads/master
|
tests/view_tests/generic_urls.py
|
28
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.views.generic import RedirectView
from . import views
from .models import Article, DateArticle
date_based_info_dict = {
'queryset': Article.objects.all(),
'date_field': 'date_created',
'month_format': '%m',
}
object_list_dict = {
'queryset': Article.objects.all(),
'paginate_by': 2,
}
object_list_no_paginate_by = {
'queryset': Article.objects.all(),
}
numeric_days_info_dict = dict(date_based_info_dict, day_format='%d')
date_based_datefield_info_dict = dict(date_based_info_dict, queryset=DateArticle.objects.all())
urlpatterns = [
url(r'^accounts/login/$', auth_views.LoginView.as_view(template_name='login.html')),
url(r'^accounts/logout/$', auth_views.LogoutView.as_view()),
# Special URLs for particular regression cases.
url('^中文/target/$', views.index_page),
]
# redirects, both temporary and permanent, with non-ASCII targets
urlpatterns += [
url('^nonascii_redirect/$', RedirectView.as_view(
url='/中文/target/', permanent=False)),
url('^permanent_nonascii_redirect/$', RedirectView.as_view(
url='/中文/target/', permanent=True)),
]
# json response
urlpatterns += [
url(r'^json/response/$', views.json_response_view),
]
|
jroyal/plexpy
|
refs/heads/master
|
lib/mutagen/oggspeex.py
|
31
|
# -*- coding: utf-8 -*-
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Read and write Ogg Speex comments.
This module handles Speex files wrapped in an Ogg bitstream. The
first Speex stream found is used.
Read more about Ogg Speex at http://www.speex.org/. This module is
based on the specification at http://www.speex.org/manual2/node7.html
and clarifications after personal communication with Jean-Marc,
http://lists.xiph.org/pipermail/speex-dev/2006-July/004676.html.
"""
__all__ = ["OggSpeex", "Open", "delete"]
from mutagen import StreamInfo
from mutagen._vorbis import VCommentDict
from mutagen.ogg import OggPage, OggFileType, error as OggError
from mutagen._util import cdata
class error(OggError):
pass
class OggSpeexHeaderError(error):
pass
class OggSpeexInfo(StreamInfo):
"""Ogg Speex stream information.
Attributes:
* bitrate - nominal bitrate in bits per second
* channels - number of channels
* length - file length in seconds, as a float
The reference encoder does not set the bitrate; in this case,
the bitrate will be 0.
"""
length = 0
def __init__(self, fileobj):
page = OggPage(fileobj)
while not page.packets[0].startswith(b"Speex "):
page = OggPage(fileobj)
if not page.first:
raise OggSpeexHeaderError(
"page has ID header, but doesn't start a stream")
self.sample_rate = cdata.uint_le(page.packets[0][36:40])
self.channels = cdata.uint_le(page.packets[0][48:52])
self.bitrate = max(0, cdata.int_le(page.packets[0][52:56]))
self.serial = page.serial
def _post_tags(self, fileobj):
page = OggPage.find_last(fileobj, self.serial)
self.length = page.position / float(self.sample_rate)
def pprint(self):
return u"Ogg Speex, %.2f seconds" % self.length
class OggSpeexVComment(VCommentDict):
"""Speex comments embedded in an Ogg bitstream."""
def __init__(self, fileobj, info):
pages = []
complete = False
while not complete:
page = OggPage(fileobj)
if page.serial == info.serial:
pages.append(page)
complete = page.complete or (len(page.packets) > 1)
data = OggPage.to_packets(pages)[0] + b"\x01"
super(OggSpeexVComment, self).__init__(data, framing=False)
def _inject(self, fileobj):
"""Write tag data into the Speex comment packet/page."""
fileobj.seek(0)
# Find the first header page, with the stream info.
# Use it to get the serial number.
page = OggPage(fileobj)
while not page.packets[0].startswith(b"Speex "):
page = OggPage(fileobj)
# Look for the next page with that serial number, it'll start
# the comment packet.
serial = page.serial
page = OggPage(fileobj)
while page.serial != serial:
page = OggPage(fileobj)
# Then find all the pages with the comment packet.
old_pages = [page]
while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1):
page = OggPage(fileobj)
if page.serial == old_pages[0].serial:
old_pages.append(page)
packets = OggPage.to_packets(old_pages, strict=False)
# Set the new comment packet.
packets[0] = self.write(framing=False)
new_pages = OggPage.from_packets(packets, old_pages[0].sequence)
OggPage.replace(fileobj, old_pages, new_pages)
class OggSpeex(OggFileType):
"""An Ogg Speex file."""
_Info = OggSpeexInfo
_Tags = OggSpeexVComment
_Error = OggSpeexHeaderError
_mimes = ["audio/x-speex"]
@staticmethod
def score(filename, fileobj, header):
return (header.startswith(b"OggS") * (b"Speex " in header))
Open = OggSpeex
def delete(filename):
"""Remove tags from a file."""
OggSpeex(filename).delete()
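# Illustrative sketch (added; "example.spx" is a hypothetical path):
#
#   audio = OggSpeex("example.spx")
#   audio["title"] = [u"Demo"]       # tags behave like a Vorbis comment dict
#   audio.save()
#   print(audio.info.pprint())       # e.g. u"Ogg Speex, 12.34 seconds"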
|
IllusionRom-deprecated/android_platform_external_chromium_org
|
refs/heads/illusion-4.4
|
native_client_sdk/src/build_tools/sdk_tools/third_party/fancy_urllib/__init__.py
|
155
|
#!/usr/bin/env python
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software
# Foundation; All Rights Reserved
"""A HTTPSConnection/Handler with additional proxy and cert validation features.
In particular, monkey patches in Python r74203 to provide support for CONNECT
proxies and adds SSL cert validation if the ssl module is present.
"""
__author__ = "{frew,nick.johnson}@google.com (Fred Wulff and Nick Johnson)"
import base64
import httplib
import logging
import re
import socket
import urllib2
from urllib import splittype
from urllib import splituser
from urllib import splitpasswd
class InvalidCertificateException(httplib.HTTPException):
"""Raised when a certificate is provided with an invalid hostname."""
def __init__(self, host, cert, reason):
"""Constructor.
Args:
host: The hostname the connection was made to.
      cert: The SSL certificate (as a dictionary) the host returned.
      reason: A human-readable explanation of why the certificate is invalid.
"""
httplib.HTTPException.__init__(self)
self.host = host
self.cert = cert
self.reason = reason
def __str__(self):
return ('Host %s returned an invalid certificate (%s): %s\n'
'To learn more, see '
'http://code.google.com/appengine/kb/general.html#rpcssl' %
(self.host, self.reason, self.cert))
def can_validate_certs():
"""Return True if we have the SSL package and can validate certificates."""
try:
import ssl
return True
except ImportError:
return False
def _create_fancy_connection(tunnel_host=None, key_file=None,
cert_file=None, ca_certs=None):
# This abomination brought to you by the fact that
# the HTTPHandler creates the connection instance in the middle
# of do_open so we need to add the tunnel host to the class.
class PresetProxyHTTPSConnection(httplib.HTTPSConnection):
"""An HTTPS connection that uses a proxy defined by the enclosing scope."""
def __init__(self, *args, **kwargs):
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
self._tunnel_host = tunnel_host
if tunnel_host:
logging.debug("Creating preset proxy https conn: %s", tunnel_host)
self.key_file = key_file
self.cert_file = cert_file
self.ca_certs = ca_certs
try:
import ssl
if self.ca_certs:
self.cert_reqs = ssl.CERT_REQUIRED
else:
self.cert_reqs = ssl.CERT_NONE
except ImportError:
pass
def _tunnel(self):
self._set_hostport(self._tunnel_host, None)
logging.info("Connecting through tunnel to: %s:%d",
self.host, self.port)
self.send("CONNECT %s:%d HTTP/1.0\r\n\r\n" % (self.host, self.port))
response = self.response_class(self.sock, strict=self.strict,
method=self._method)
(_, code, message) = response._read_status()
if code != 200:
self.close()
raise socket.error, "Tunnel connection failed: %d %s" % (
code, message.strip())
while True:
line = response.fp.readline()
if line == "\r\n":
break
def _get_valid_hosts_for_cert(self, cert):
"""Returns a list of valid host globs for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
Returns:
list: A list of valid host globs.
"""
if 'subjectAltName' in cert:
return [x[1] for x in cert['subjectAltName'] if x[0].lower() == 'dns']
else:
# Return a list of commonName fields
return [x[0][1] for x in cert['subject']
if x[0][0].lower() == 'commonname']
def _validate_certificate_hostname(self, cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = self._get_valid_hosts_for_cert(cert)
for host in hosts:
# Convert the glob-style hostname expression (eg, '*.google.com') into a
# valid regular expression.
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
def connect(self):
# TODO(frew): When we drop support for <2.6 (in the far distant future),
# change this to socket.create_connection.
self.sock = _create_connection((self.host, self.port))
if self._tunnel_host:
self._tunnel()
# ssl and FakeSocket got deprecated. Try for the new hotness of wrap_ssl,
# with fallback.
try:
import ssl
self.sock = ssl.wrap_socket(self.sock,
keyfile=self.key_file,
certfile=self.cert_file,
ca_certs=self.ca_certs,
cert_reqs=self.cert_reqs)
if self.cert_reqs & ssl.CERT_REQUIRED:
cert = self.sock.getpeercert()
          # maxsplit=0 would perform no split at all; use 1 to strip any port
          hostname = self.host.split(':', 1)[0]
if not self._validate_certificate_hostname(cert, hostname):
raise InvalidCertificateException(hostname, cert,
'hostname mismatch')
except ImportError:
ssl = socket.ssl(self.sock,
keyfile=self.key_file,
certfile=self.cert_file)
self.sock = httplib.FakeSocket(self.sock, ssl)
return PresetProxyHTTPSConnection
# Here to end of _create_connection copied wholesale from Python 2.6's socket.py
_GLOBAL_DEFAULT_TIMEOUT = object()
def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used.
"""
msg = "getaddrinfo returns an empty list"
host, port = address
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
sock.connect(sa)
return sock
except socket.error, msg:
if sock is not None:
sock.close()
raise socket.error, msg
class FancyRequest(urllib2.Request):
"""A request that allows the use of a CONNECT proxy."""
def __init__(self, *args, **kwargs):
urllib2.Request.__init__(self, *args, **kwargs)
self._tunnel_host = None
self._key_file = None
self._cert_file = None
self._ca_certs = None
def set_proxy(self, host, type):
saved_type = None
if self.get_type() == "https" and not self._tunnel_host:
self._tunnel_host = self.get_host()
saved_type = self.get_type()
urllib2.Request.set_proxy(self, host, type)
if saved_type:
# Don't set self.type, we want to preserve the
# type for tunneling.
self.type = saved_type
def set_ssl_info(self, key_file=None, cert_file=None, ca_certs=None):
self._key_file = key_file
self._cert_file = cert_file
self._ca_certs = ca_certs
class FancyProxyHandler(urllib2.ProxyHandler):
"""A ProxyHandler that works with CONNECT-enabled proxies."""
# Taken verbatim from /usr/lib/python2.5/urllib2.py
def _parse_proxy(self, proxy):
"""Return (scheme, user, password, host/port) given a URL or an authority.
If a URL is supplied, it must have an authority (host:port) component.
According to RFC 3986, having an authority component means the URL must
have two slashes after the scheme:
>>> _parse_proxy('file:/ftp.example.com/')
Traceback (most recent call last):
ValueError: proxy URL with no authority: 'file:/ftp.example.com/'
The first three items of the returned tuple may be None.
Examples of authority parsing:
>>> _parse_proxy('proxy.example.com')
(None, None, None, 'proxy.example.com')
>>> _parse_proxy('proxy.example.com:3128')
(None, None, None, 'proxy.example.com:3128')
The authority component may optionally include userinfo (assumed to be
username:password):
>>> _parse_proxy('joe:password@proxy.example.com')
(None, 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('joe:password@proxy.example.com:3128')
(None, 'joe', 'password', 'proxy.example.com:3128')
Same examples, but with URLs instead:
>>> _parse_proxy('http://proxy.example.com/')
('http', None, None, 'proxy.example.com')
>>> _parse_proxy('http://proxy.example.com:3128/')
('http', None, None, 'proxy.example.com:3128')
>>> _parse_proxy('http://joe:password@proxy.example.com/')
('http', 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('http://joe:password@proxy.example.com:3128')
('http', 'joe', 'password', 'proxy.example.com:3128')
Everything after the authority is ignored:
>>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
('ftp', 'joe', 'password', 'proxy.example.com')
Test for no trailing '/' case:
>>> _parse_proxy('http://joe:password@proxy.example.com')
('http', 'joe', 'password', 'proxy.example.com')
"""
scheme, r_scheme = splittype(proxy)
if not r_scheme.startswith("/"):
# authority
scheme = None
authority = proxy
else:
# URL
if not r_scheme.startswith("//"):
raise ValueError("proxy URL with no authority: %r" % proxy)
      # We have an authority, so for RFC 3986-compliant URLs (by ss 3.2.
# and 3.3.), path is empty or starts with '/'
end = r_scheme.find("/", 2)
if end == -1:
end = None
authority = r_scheme[2:end]
userinfo, hostport = splituser(authority)
if userinfo is not None:
user, password = splitpasswd(userinfo)
else:
user = password = None
return scheme, user, password, hostport
def proxy_open(self, req, proxy, type):
# This block is copied wholesale from Python2.6 urllib2.
# It is idempotent, so the superclass method call executes as normal
# if invoked.
orig_type = req.get_type()
proxy_type, user, password, hostport = self._parse_proxy(proxy)
if proxy_type is None:
proxy_type = orig_type
if user and password:
user_pass = "%s:%s" % (urllib2.unquote(user), urllib2.unquote(password))
creds = base64.b64encode(user_pass).strip()
# Later calls overwrite earlier calls for the same header
req.add_header("Proxy-authorization", "Basic " + creds)
hostport = urllib2.unquote(hostport)
req.set_proxy(hostport, proxy_type)
# This condition is the change
if orig_type == "https":
return None
return urllib2.ProxyHandler.proxy_open(self, req, proxy, type)
class FancyHTTPSHandler(urllib2.HTTPSHandler):
"""An HTTPSHandler that works with CONNECT-enabled proxies."""
def do_open(self, http_class, req):
# Intentionally very specific so as to opt for false negatives
# rather than false positives.
try:
return urllib2.HTTPSHandler.do_open(
self,
_create_fancy_connection(req._tunnel_host,
req._key_file,
req._cert_file,
req._ca_certs),
req)
except urllib2.URLError, url_error:
try:
import ssl
if (type(url_error.reason) == ssl.SSLError and
url_error.reason.args[0] == 1):
# Display the reason to the user. Need to use args for python2.5
# compat.
raise InvalidCertificateException(req.host, '',
url_error.reason.args[1])
except ImportError:
pass
raise url_error
# We have to implement this so that we persist the tunneling behavior
# through redirects.
class FancyRedirectHandler(urllib2.HTTPRedirectHandler):
"""A redirect handler that persists CONNECT-enabled proxy information."""
def redirect_request(self, req, *args, **kwargs):
new_req = urllib2.HTTPRedirectHandler.redirect_request(
self, req, *args, **kwargs)
# Same thing as in our set_proxy implementation, but in this case
# we"ve only got a Request to work with, so it was this or copy
# everything over piecemeal.
#
# Note that we do not persist tunneling behavior from an http request
# to an https request, because an http request does not set _tunnel_host.
#
# Also note that in Python < 2.6, you will get an error in
# FancyHTTPSHandler.do_open() on an https urllib2.Request that uses an http
# proxy, since the proxy type will be set to http instead of https.
# (FancyRequest, and urllib2.Request in Python >= 2.6 set the proxy type to
# https.) Such an urllib2.Request could result from this redirect
    # if you are redirecting from an http request (since an http request
# does not have _tunnel_host set, and thus you will not set the proxy
# in the code below), and if you have defined a proxy for https in, say,
# FancyProxyHandler, and that proxy has type http.
if hasattr(req, "_tunnel_host") and isinstance(new_req, urllib2.Request):
if new_req.get_type() == "https":
if req._tunnel_host:
# req is proxied, so copy the proxy info.
new_req._tunnel_host = new_req.get_host()
new_req.set_proxy(req.host, "https")
else:
# req is not proxied, so just make sure _tunnel_host is defined.
new_req._tunnel_host = None
new_req.type = "https"
if hasattr(req, "_key_file") and isinstance(new_req, urllib2.Request):
# Copy the auxiliary data in case this or any further redirect is https
new_req._key_file = req._key_file
new_req._cert_file = req._cert_file
new_req._ca_certs = req._ca_certs
return new_req
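# --- Illustrative usage sketch (editor addition, not part of fancy_urllib) ---
# Wires the handlers together the way a typical caller would. The proxy URL,
# target URL and CA-bundle path below are placeholder assumptions.
if __name__ == "__main__":
  opener = urllib2.build_opener(
      FancyProxyHandler({"https": "http://proxy.example.com:3128"}),
      FancyHTTPSHandler(),
      FancyRedirectHandler())
  request = FancyRequest("https://www.example.com/")
  request.set_ssl_info(ca_certs="/path/to/cacerts.txt")  # placeholder path
  response = opener.open(request)
  print(response.getcode())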
|
yjmade/odoo
|
refs/heads/8.0
|
addons/account/wizard/account_journal_select.py
|
385
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class account_journal_select(osv.osv_memory):
"""
Account Journal Select
"""
_name = "account.journal.select"
_description = "Account Journal Select"
def action_open_window(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
if context is None:
context = {}
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_move_line_select')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id])[0]
cr.execute('select journal_id, period_id from account_journal_period where id=%s', (context['active_id'],))
res = cr.fetchone()
if res:
journal_id, period_id = res
result['domain'] = str([('journal_id', '=', journal_id), ('period_id', '=', period_id)])
result['context'] = str({'journal_id': journal_id, 'period_id': period_id})
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
solome/jyp
|
refs/heads/master
|
misc/virtenv/lib/python2.7/site-packages/pip/_vendor/requests/structures.py
|
279
|
# -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import os
import collections
from itertools import islice
class IteratorProxy(object):
"""docstring for IteratorProxy"""
def __init__(self, i):
self.i = i
# self.i = chain.from_iterable(i)
def __iter__(self):
return self.i
def __len__(self):
if hasattr(self.i, '__len__'):
return len(self.i)
if hasattr(self.i, 'len'):
return self.i.len
if hasattr(self.i, 'fileno'):
return os.fstat(self.i.fileno()).st_size
def read(self, n):
return "".join(islice(self.i, None, n))
class CaseInsensitiveDict(collections.MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive:
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
class LookupDict(dict):
"""Dictionary lookup object."""
def __init__(self, name=None):
self.name = name
super(LookupDict, self).__init__()
def __repr__(self):
return '<lookup \'%s\'>' % (self.name)
def __getitem__(self, key):
# We allow fall-through here, so values default to None
return self.__dict__.get(key, None)
def get(self, key, default=None):
return self.__dict__.get(key, default)
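# --- Illustrative sketch (editor addition, not part of requests) ---
# LookupDict stores entries as instance attributes, so item access falls
# through to __dict__ and missing keys yield None instead of KeyError.
if __name__ == "__main__":
    codes = LookupDict(name="status_codes")
    codes.ok = 200                  # stored as an attribute, not a dict item
    print(codes["ok"])              # -> 200
    print(codes["missing"])         # -> None, no KeyError raised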
|
srimai/odoo
|
refs/heads/8.0
|
openerp/addons/base/tests/test_xmlrpc.py
|
200
|
# -*- coding: utf-8 -*-
import openerp.tests.common
class test_xmlrpc(openerp.tests.common.HttpCase):
at_install = False
post_install = True
def test_01_xmlrpc_login(self):
""" Try to login on the common service. """
db_name = openerp.tests.common.get_db_name()
uid = self.xmlrpc_common.login(db_name, 'admin', 'admin')
self.assertEqual(uid, 1)
def test_xmlrpc_ir_model_search(self):
""" Try a search on the object service. """
o = self.xmlrpc_object
db_name = openerp.tests.common.get_db_name()
ids = o.execute(db_name, 1, 'admin', 'ir.model', 'search', [])
self.assertIsInstance(ids, list)
ids = o.execute(db_name, 1, 'admin', 'ir.model', 'search', [], {})
self.assertIsInstance(ids, list)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
vnsofthe/odoo
|
refs/heads/8.0
|
openerp/tools/pdf_utils.py
|
456
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Copyright (c) 2003-2007 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:contact@logilab.fr
manipulate pdf and fdf files. pdftk recommended.
Notes regarding pdftk, pdf forms and fdf files (form definition file)
fields names can be extracted with:
pdftk orig.pdf generate_fdf output truc.fdf
to merge fdf and pdf:
pdftk orig.pdf fill_form test.fdf output result.pdf [flatten]
without flatten, one could further edit the resulting form.
with flatten, everything is turned into text.
"""
from __future__ import with_statement
import os
import tempfile
HEAD="""%FDF-1.2
%\xE2\xE3\xCF\xD3
1 0 obj
<<
/FDF
<<
/Fields [
"""
TAIL="""]
>>
>>
endobj
trailer
<<
/Root 1 0 R
>>
%%EOF
"""
def output_field(f):
    # FDF field names are encoded as UTF-16BE with a BOM, hence the manual
    # zero-byte padding of each character
    return "\xfe\xff" + "".join(["\x00" + c for c in f])
def extract_keys(lines):
keys = []
for line in lines:
if line.startswith('/V'):
pass #print 'value',line
elif line.startswith('/T'):
key = line[7:-2]
key = ''.join(key.split('\x00'))
keys.append( key )
return keys
def write_field(out, key, value):
out.write("<<\n")
if value:
out.write("/V (%s)\n" %value)
else:
out.write("/V /\n")
out.write("/T (%s)\n" % output_field(key) )
out.write(">> \n")
def write_fields(out, fields):
out.write(HEAD)
for key in fields:
value = fields[key]
write_field(out, key, value)
# write_field(out, key+"a", value) # pour copie-carbone sur autres pages
out.write(TAIL)
def extract_keys_from_pdf(filename):
# what about using 'pdftk filename dump_data_fields' and parsing the output ?
tmp_file = tempfile.mkstemp(".fdf")[1]
try:
os.system('pdftk %s generate_fdf output \"%s\"' % (filename, tmp_file))
with open(tmp_file, "r") as ofile:
lines = ofile.readlines()
finally:
try:
os.remove(tmp_file)
except Exception:
pass # nothing to do
return extract_keys(lines)
def fill_pdf(infile, outfile, fields):
tmp_file = tempfile.mkstemp(".fdf")[1]
try:
with open(tmp_file, "w") as ofile:
write_fields(ofile, fields)
os.system('pdftk %s fill_form \"%s\" output %s flatten' % (infile, tmp_file, outfile))
finally:
try:
os.remove(tmp_file)
except Exception:
pass # nothing to do
def testfill_pdf(infile, outfile):
keys = extract_keys_from_pdf(infile)
    # fill_pdf()/write_fields() expect a mapping of field name -> value,
    # so build a dict with every field left blank
    fields = dict((key, '') for key in keys)
fill_pdf(infile, outfile, fields)
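# --- Illustrative usage sketch (editor addition, not part of OpenERP) ---
# Requires the pdftk binary on the PATH; the file names and field values
# below are placeholder assumptions.
if __name__ == '__main__':
    print(extract_keys_from_pdf('form.pdf'))      # list the form's field names
    fill_pdf('form.pdf', 'result.pdf', {'name': 'Alice', 'date': '2013-01-01'})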
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
minhphung171093/GreenERP_V7
|
refs/heads/master
|
openerp/addons/base/ir/ir_needaction.py
|
455
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import osv
class ir_needaction_mixin(osv.AbstractModel):
"""Mixin class for objects using the need action feature.
Need action feature can be used by models that have to be able to
signal that an action is required on a particular record. If in
the business logic an action must be performed by somebody, for
instance validation by a manager, this mechanism allows to set a
list of users asked to perform an action.
Models using the 'need_action' feature should override the
``_needaction_domain_get`` method. This method returns a
domain to filter records requiring an action for a specific user.
This class also offers several global services:
- ``_needaction_count``: returns the number of actions uid has to perform
"""
_name = 'ir.needaction_mixin'
_needaction = True
#------------------------------------------------------
# Addons API
#------------------------------------------------------
def _needaction_domain_get(self, cr, uid, context=None):
""" Returns the domain to filter records that require an action
        :return: domain or False if no action
"""
return False
#------------------------------------------------------
# "Need action" API
#------------------------------------------------------
def _needaction_count(self, cr, uid, domain=None, context=None):
""" Get the number of actions uid has to perform. """
dom = self._needaction_domain_get(cr, uid, context=context)
if not dom:
return 0
res = self.search(cr, uid, (domain or []) + dom, limit=100, order='id DESC', context=context)
return len(res)
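# --- Illustrative sketch (editor addition, not part of OpenERP) ---
# A hypothetical model opting into the feature: inherit the mixin and return
# a domain from _needaction_domain_get. Model and field names are invented.
class example_task(osv.Model):
    _name = 'example.task'
    _inherit = ['ir.needaction_mixin']
    def _needaction_domain_get(self, cr, uid, context=None):
        # open records assigned to the current user require an action
        return [('user_id', '=', uid), ('state', '=', 'open')]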
|
adaur/SickRage
|
refs/heads/master
|
lib/unidecode/x00c.py
|
252
|
data = (
'[?]', # 0x00
'N', # 0x01
'N', # 0x02
'H', # 0x03
'[?]', # 0x04
'a', # 0x05
'aa', # 0x06
'i', # 0x07
'ii', # 0x08
'u', # 0x09
'uu', # 0x0a
'R', # 0x0b
'L', # 0x0c
'[?]', # 0x0d
'e', # 0x0e
'ee', # 0x0f
'ai', # 0x10
'[?]', # 0x11
'o', # 0x12
'oo', # 0x13
'au', # 0x14
'k', # 0x15
'kh', # 0x16
'g', # 0x17
'gh', # 0x18
'ng', # 0x19
'c', # 0x1a
'ch', # 0x1b
'j', # 0x1c
'jh', # 0x1d
'ny', # 0x1e
'tt', # 0x1f
'tth', # 0x20
'dd', # 0x21
'ddh', # 0x22
'nn', # 0x23
't', # 0x24
'th', # 0x25
'd', # 0x26
'dh', # 0x27
'n', # 0x28
'[?]', # 0x29
'p', # 0x2a
'ph', # 0x2b
'b', # 0x2c
'bh', # 0x2d
'm', # 0x2e
'y', # 0x2f
'r', # 0x30
'rr', # 0x31
'l', # 0x32
'll', # 0x33
'[?]', # 0x34
'v', # 0x35
'sh', # 0x36
'ss', # 0x37
's', # 0x38
'h', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'aa', # 0x3e
'i', # 0x3f
'ii', # 0x40
'u', # 0x41
'uu', # 0x42
'R', # 0x43
'RR', # 0x44
'[?]', # 0x45
'e', # 0x46
'ee', # 0x47
'ai', # 0x48
'[?]', # 0x49
'o', # 0x4a
'oo', # 0x4b
'au', # 0x4c
'', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'+', # 0x55
'+', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'RR', # 0x60
'LL', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'0', # 0x66
'1', # 0x67
'2', # 0x68
'3', # 0x69
'4', # 0x6a
'5', # 0x6b
'6', # 0x6c
'7', # 0x6d
'8', # 0x6e
'9', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'N', # 0x82
'H', # 0x83
'[?]', # 0x84
'a', # 0x85
'aa', # 0x86
'i', # 0x87
'ii', # 0x88
'u', # 0x89
'uu', # 0x8a
'R', # 0x8b
'L', # 0x8c
'[?]', # 0x8d
'e', # 0x8e
'ee', # 0x8f
'ai', # 0x90
'[?]', # 0x91
'o', # 0x92
'oo', # 0x93
'au', # 0x94
'k', # 0x95
'kh', # 0x96
'g', # 0x97
'gh', # 0x98
'ng', # 0x99
'c', # 0x9a
'ch', # 0x9b
'j', # 0x9c
'jh', # 0x9d
'ny', # 0x9e
'tt', # 0x9f
'tth', # 0xa0
'dd', # 0xa1
'ddh', # 0xa2
'nn', # 0xa3
't', # 0xa4
'th', # 0xa5
'd', # 0xa6
'dh', # 0xa7
'n', # 0xa8
'[?]', # 0xa9
'p', # 0xaa
'ph', # 0xab
'b', # 0xac
'bh', # 0xad
'm', # 0xae
'y', # 0xaf
'r', # 0xb0
'rr', # 0xb1
'l', # 0xb2
'll', # 0xb3
'[?]', # 0xb4
'v', # 0xb5
'sh', # 0xb6
'ss', # 0xb7
's', # 0xb8
'h', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'aa', # 0xbe
'i', # 0xbf
'ii', # 0xc0
'u', # 0xc1
'uu', # 0xc2
'R', # 0xc3
'RR', # 0xc4
'[?]', # 0xc5
'e', # 0xc6
'ee', # 0xc7
'ai', # 0xc8
'[?]', # 0xc9
'o', # 0xca
'oo', # 0xcb
'au', # 0xcc
'', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'+', # 0xd5
'+', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'lll', # 0xde
'[?]', # 0xdf
'RR', # 0xe0
'LL', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'0', # 0xe6
'1', # 0xe7
'2', # 0xe8
'3', # 0xe9
'4', # 0xea
'5', # 0xeb
'6', # 0xec
'7', # 0xed
'8', # 0xee
'9', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
|
WSDC-NITWarangal/django
|
refs/heads/master
|
django/db/migrations/operations/base.py
|
356
|
from __future__ import unicode_literals
from django.db import router
class Operation(object):
"""
Base class for migration operations.
It's responsible for both mutating the in-memory model state
(see db/migrations/state.py) to represent what it performs, as well
as actually performing it against a live database.
Note that some operations won't modify memory state at all (e.g. data
copying operations), and some will need their modifications to be
optionally specified by the user (e.g. custom Python code snippets)
Due to the way this class deals with deconstruction, it should be
considered immutable.
"""
# If this migration can be run in reverse.
# Some operations are impossible to reverse, like deleting data.
reversible = True
# Can this migration be represented as SQL? (things like RunPython cannot)
reduces_to_sql = True
# Should this operation be forced as atomic even on backends with no
# DDL transaction support (i.e., does it have no DDL, like RunPython)
atomic = False
serialization_expand_args = []
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
self = object.__new__(cls)
self._constructor_args = (args, kwargs)
return self
def deconstruct(self):
"""
Returns a 3-tuple of class import path (or just name if it lives
under django.db.migrations), positional arguments, and keyword
arguments.
"""
return (
self.__class__.__name__,
self._constructor_args[0],
self._constructor_args[1],
)
def state_forwards(self, app_label, state):
"""
Takes the state from the previous migration, and mutates it
so that it matches what this migration would perform.
"""
raise NotImplementedError('subclasses of Operation must provide a state_forwards() method')
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the normal
(forwards) direction.
"""
raise NotImplementedError('subclasses of Operation must provide a database_forwards() method')
def database_backwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the reverse
direction - e.g. if this were CreateModel, it would in fact
drop the model's table.
"""
raise NotImplementedError('subclasses of Operation must provide a database_backwards() method')
def describe(self):
"""
Outputs a brief summary of what the action does.
"""
return "%s: %s" % (self.__class__.__name__, self._constructor_args)
def references_model(self, name, app_label=None):
"""
Returns True if there is a chance this operation references the given
model name (as a string), with an optional app label for accuracy.
Used for optimization. If in doubt, return True;
returning a false positive will merely make the optimizer a little
less efficient, while returning a false negative may result in an
unusable optimized migration.
"""
return True
def references_field(self, model_name, name, app_label=None):
"""
Returns True if there is a chance this operation references the given
field name, with an optional app label for accuracy.
Used for optimization. If in doubt, return True.
"""
return self.references_model(model_name, app_label)
def allow_migrate_model(self, connection_alias, model):
"""
Returns if we're allowed to migrate the model.
This is a thin wrapper around router.allow_migrate_model() that
preemptively rejects any proxy, swapped out, or unmanaged model.
"""
if not model._meta.can_migrate(connection_alias):
return False
return router.allow_migrate_model(connection_alias, model)
def __repr__(self):
return "<%s %s%s>" % (
self.__class__.__name__,
", ".join(map(repr, self._constructor_args[0])),
",".join(" %s=%r" % x for x in self._constructor_args[1].items()),
)
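# --- Illustrative sketch (editor addition, not part of Django) ---
# A minimal concrete Operation: it runs caller-supplied raw SQL forwards and
# is declared irreversible. The SQL passed in is a placeholder assumption.
class ExampleRunSQL(Operation):
    reversible = False
    def __init__(self, sql):
        self.sql = sql
    def state_forwards(self, app_label, state):
        # Raw SQL does not change the in-memory model state.
        pass
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        schema_editor.execute(self.sql)
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        raise NotImplementedError("ExampleRunSQL cannot be reversed")
    def describe(self):
        return "Example raw SQL operation"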
|
Wizmann/codeforces-cdn
|
refs/heads/master
|
BAE/cf-cdn-refresh/qiniu/httplib_chunk.py
|
2
|
"""
Modified from standard httplib
1. HTTPConnection can send chunked data.
2. Remove httplib's automatic Content-Length insertion when data is a file-like object.
"""
# -*- coding: utf-8 -*-
import httplib
from httplib import _CS_REQ_STARTED, _CS_REQ_SENT, CannotSendHeader, NotConnected
import string
import os
from array import array
class HTTPConnection(httplib.HTTPConnection):
def send(self, data, is_chunked=False):
"""Send `data' to the server."""
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise NotConnected()
if self.debuglevel > 0:
print "send:", repr(data)
blocksize = 8192
if hasattr(data,'read') and not isinstance(data, array):
if self.debuglevel > 0: print "sendIng a read()able"
datablock = data.read(blocksize)
while datablock:
if self.debuglevel > 0:
print 'chunked:', is_chunked
if is_chunked:
                    if self.debuglevel > 0: print 'send: with chunked data'
lenstr = string.upper(hex(len(datablock))[2:])
self.sock.sendall('%s\r\n%s\r\n' % (lenstr, datablock))
else:
self.sock.sendall(datablock)
datablock = data.read(blocksize)
if is_chunked:
self.sock.sendall('0\r\n\r\n')
else:
self.sock.sendall(data)
def _set_content_length(self, body):
# Set the content-length based on the body.
thelen = None
try:
thelen = str(len(body))
except (TypeError, AttributeError), te:
# Don't send a length if this failed
if self.debuglevel > 0: print "Cannot stat!!"
if thelen is not None:
self.putheader('Content-Length', thelen)
return True
return False
def _send_request(self, method, url, body, headers):
# Honor explicitly requested Host: and Accept-Encoding: headers.
        # keep the header values (dict.fromkeys would discard them, making the
        # transfer-encoding check below always fail)
        header_names = dict((k.lower(), v) for k, v in headers.iteritems())
skips = {}
if 'host' in header_names:
skips['skip_host'] = 1
if 'accept-encoding' in header_names:
skips['skip_accept_encoding'] = 1
self.putrequest(method, url, **skips)
        is_chunked = False
        if body and header_names.get('transfer-encoding') == 'chunked':
            # the caller supplied the header explicitly; the loop below will
            # emit it, so just switch on chunked sending
            is_chunked = True
        elif body and ('content-length' not in header_names):
            is_chunked = not self._set_content_length(body)
            if is_chunked:
                self.putheader('Transfer-Encoding', 'chunked')
for hdr, value in headers.iteritems():
self.putheader(hdr, value)
self.endheaders(body, is_chunked=is_chunked)
def endheaders(self, message_body=None, is_chunked=False):
"""Indicate that the last header line has been sent to the server.
This method sends the request to the server. The optional
message_body argument can be used to pass a message body
associated with the request. The message body will be sent in
        the same packet as the message headers if it is a string; otherwise it is
sent as a separate packet.
"""
if self.__state == _CS_REQ_STARTED:
self.__state = _CS_REQ_SENT
else:
raise CannotSendHeader()
self._send_output(message_body, is_chunked=is_chunked)
def _send_output(self, message_body=None, is_chunked=False):
"""Send the currently buffered request and clear the buffer.
Appends an extra \\r\\n to the buffer.
A message_body may be specified, to be appended to the request.
"""
self._buffer.extend(("", ""))
msg = "\r\n".join(self._buffer)
del self._buffer[:]
# If msg and message_body are sent in a single send() call,
# it will avoid performance problems caused by the interaction
# between delayed ack and the Nagle algorithm.
if isinstance(message_body, str):
msg += message_body
message_body = None
self.send(msg)
if message_body is not None:
#message_body was not a string (i.e. it is a file) and
#we must run the risk of Nagle
self.send(message_body, is_chunked=is_chunked)
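# --- Illustrative usage sketch (editor addition) ---
# Streams a file-like body with chunked transfer encoding; the host, path
# and file name are placeholder assumptions.
if __name__ == '__main__':
    conn = HTTPConnection('upload.example.com')
    with open('payload.bin', 'rb') as body:
        conn.request('POST', '/upload', body,
                     {'Transfer-Encoding': 'chunked'})
    print(conn.getresponse().status)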
|
asfin/electrum
|
refs/heads/master
|
electrum/gui/kivy/uix/drawer.py
|
33
|
'''Drawer Widget to hold the main window and the menu/hidden section that
can be swiped in from the left. This Menu would be only hidden in phone mode
and visible in Tablet Mode.
This class is specifically inlined to save on start-up speed (minimize i/o).
'''
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import OptionProperty, NumericProperty, ObjectProperty
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.logger import Logger
import gc
# delayed imports
app = None
class Drawer(Factory.RelativeLayout):
'''Drawer Widget to hold the main window and the menu/hidden section that
can be swiped in from the left. This Menu would be only hidden in phone mode
and visible in Tablet Mode.
'''
state = OptionProperty('closed',
options=('closed', 'open', 'opening', 'closing'))
'''This indicates the current state the drawer is in.
    :attr:`state` is an `OptionProperty` and defaults to `closed`. Can be one of
`closed`, `open`, `opening`, `closing`.
'''
scroll_timeout = NumericProperty(200)
'''Timeout allowed to trigger the :data:`scroll_distance`,
in milliseconds. If the user has not moved :data:`scroll_distance`
within the timeout, the scrolling will be disabled and the touch event
will go to the children.
:data:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty`
and defaults to 200 (milliseconds)
'''
scroll_distance = NumericProperty('9dp')
'''Distance to move before scrolling the :class:`Drawer` in pixels.
As soon as the distance has been traveled, the :class:`Drawer` will
start to scroll, and no touch event will go to children.
It is advisable that you base this value on the dpi of your target
device's screen.
:data:`scroll_distance` is a :class:`~kivy.properties.NumericProperty`
    and defaults to 9dp.
'''
drag_area = NumericProperty('9dp')
    '''The width of the strip along the left edge that triggers the opening of
    the drawer.
    :attr:`drag_area` is a `NumericProperty` and defaults to `9dp`
'''
hidden_widget = ObjectProperty(None)
''' This is the widget that is hidden in phone mode on the left side of
drawer or displayed on the left of the overlay widget in tablet mode.
    :attr:`hidden_widget` is an `ObjectProperty` and defaults to None.
'''
overlay_widget = ObjectProperty(None)
    '''This is a pointer to the default widget that is overlayed either on top or
to the right of the hidden widget.
'''
def __init__(self, **kwargs):
super(Drawer, self).__init__(**kwargs)
        self._trigger_gc = Clock.create_trigger(self._re_enable_gc, .2)
def toggle_drawer(self):
if app.ui_mode[0] == 't':
return
Factory.Animation.cancel_all(self.overlay_widget)
anim = Factory.Animation(x=self.hidden_widget.width
if self.state in ('opening', 'closed') else 0,
d=.1, t='linear')
anim.bind(on_complete = self._complete_drawer_animation)
anim.start(self.overlay_widget)
def _re_enable_gc(self, dt):
global gc
gc.enable()
def on_touch_down(self, touch):
if self.disabled:
return
if not self.collide_point(*touch.pos):
return
touch.grab(self)
# disable gc for smooth interaction
# This is still not enough while wallet is synchronising
# look into pausing all background tasks while ui interaction like this
gc.disable()
global app
if not app:
app = App.get_running_app()
# skip on tablet mode
if app.ui_mode[0] == 't':
return super(Drawer, self).on_touch_down(touch)
state = self.state
touch.ud['send_touch_down'] = False
start = 0 #if state[0] == 'c' else self.hidden_widget.right
drag_area = self.drag_area\
if self.state[0] == 'c' else\
(self.overlay_widget.x)
if touch.x < start or touch.x > drag_area:
if self.state == 'open':
self.toggle_drawer()
return
return super(Drawer, self).on_touch_down(touch)
self._touch = touch
Clock.schedule_once(self._change_touch_mode,
self.scroll_timeout/1000.)
touch.ud['in_drag_area'] = True
touch.ud['send_touch_down'] = True
return
def on_touch_move(self, touch):
if not touch.grab_current is self:
return
self._touch = False
# skip on tablet mode
if app.ui_mode[0] == 't':
return super(Drawer, self).on_touch_move(touch)
if not touch.ud.get('in_drag_area', None):
return super(Drawer, self).on_touch_move(touch)
ov = self.overlay_widget
ov.x=min(self.hidden_widget.width,
max(ov.x + touch.dx*2, 0))
#_anim = Animation(x=x, duration=1/2, t='in_out_quart')
#_anim.cancel_all(ov)
#_anim.start(ov)
if abs(touch.x - touch.ox) < self.scroll_distance:
return
touch.ud['send_touch_down'] = False
Clock.unschedule(self._change_touch_mode)
self._touch = None
self.state = 'opening' if touch.dx > 0 else 'closing'
touch.ox = touch.x
return
def _change_touch_mode(self, *args):
if not self._touch:
return
touch = self._touch
touch.ungrab(self)
touch.ud['in_drag_area'] = False
touch.ud['send_touch_down'] = False
self._touch = None
super(Drawer, self).on_touch_down(touch)
return
def on_touch_up(self, touch):
if not touch.grab_current is self:
return
        self._trigger_gc()
touch.ungrab(self)
touch.grab_current = None
# skip on tablet mode
get = touch.ud.get
if app.ui_mode[0] == 't':
return super(Drawer, self).on_touch_up(touch)
self.old_x = [1, ] * 10
self.speed = sum((
(self.old_x[x + 1] - self.old_x[x]) for x in range(9))) / 9.
if get('send_touch_down', None):
# touch up called before moving
Clock.unschedule(self._change_touch_mode)
self._touch = None
Clock.schedule_once(
lambda dt: super(Drawer, self).on_touch_down(touch))
if get('in_drag_area', None):
if abs(touch.x - touch.ox) < self.scroll_distance:
anim_to = (0 if self.state[0] == 'c'
else self.hidden_widget.width)
Factory.Animation(x=anim_to, d=.1).start(self.overlay_widget)
return
touch.ud['in_drag_area'] = False
if not get('send_touch_down', None):
self.toggle_drawer()
Clock.schedule_once(lambda dt: super(Drawer, self).on_touch_up(touch))
def _complete_drawer_animation(self, *args):
self.state = 'open' if self.state in ('opening', 'closed') else 'closed'
def add_widget(self, widget, index=1):
if not widget:
return
iget = self.ids.get
if not iget('hidden_widget') or not iget('overlay_widget'):
super(Drawer, self).add_widget(widget)
return
if not self.hidden_widget:
self.hidden_widget = self.ids.hidden_widget
if not self.overlay_widget:
self.overlay_widget = self.ids.overlay_widget
if self.overlay_widget.children and self.hidden_widget.children:
Logger.debug('Drawer: Accepts only two widgets. discarding rest')
return
if not self.hidden_widget.children:
self.hidden_widget.add_widget(widget)
else:
self.overlay_widget.add_widget(widget)
widget.x = 0
def remove_widget(self, widget):
if self.overlay_widget.children[0] == widget:
self.overlay_widget.clear_widgets()
return
if widget == self.hidden_widget.children:
self.hidden_widget.clear_widgets()
return
def clear_widgets(self):
self.overlay_widget.clear_widgets()
self.hidden_widget.clear_widgets()
if __name__ == '__main__':
from kivy.app import runTouchApp
from kivy.lang import Builder
runTouchApp(Builder.load_string('''
Drawer:
Button:
    Button:
'''))
|
botify-labs/simpleflow
|
refs/heads/continue_as_new
|
simpleflow/swf/process/worker/__init__.py
|
1
|
from . import command # NOQA
|
alshedivat/tensorflow
|
refs/heads/master
|
tensorflow/contrib/lookup/__init__.py
|
78
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for lookup operations.
@@string_to_index
@@string_to_index_table_from_file
@@string_to_index_table_from_tensor
@@index_table_from_file
@@index_table_from_tensor
@@index_to_string
@@index_to_string_table_from_file
@@index_to_string_table_from_tensor
@@LookupInterface
@@InitializableLookupTableBase
@@IdTableWithHashBuckets
@@HashTable
@@MutableHashTable
@@MutableDenseHashTable
@@TableInitializerBase
@@KeyValueTensorInitializer
@@TextFileIndex
@@TextFileInitializer
@@TextFileIdTableInitializer
@@TextFileStringTableInitializer
@@HasherSpec
@@StrongHashSpec
@@FastHashSpec
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.lookup.lookup_ops import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
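# --- Illustrative usage sketch (editor addition, not part of TensorFlow) ---
# Builds a static string-to-id table from in-graph constants and looks up two
# keys; unknown keys map to the default_value.
if __name__ == "__main__":
    import tensorflow as tf
    table = HashTable(
        KeyValueTensorInitializer(["emerson", "lake", "palmer"], [0, 1, 2]),
        default_value=-1)
    ids = table.lookup(tf.constant(["lake", "unknown"]))
    with tf.Session() as sess:
        sess.run(tf.tables_initializer())
        print(sess.run(ids))  # -> [ 1 -1]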
|
tysonclugg/django
|
refs/heads/master
|
django/middleware/common.py
|
30
|
import re
import warnings
from urllib.parse import urlparse
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.mail import mail_managers
from django.http import HttpResponsePermanentRedirect
from django.urls import is_valid_path
from django.utils.cache import (
cc_delim_re, get_conditional_response, set_response_etag,
)
from django.utils.deprecation import MiddlewareMixin, RemovedInDjango21Warning
class CommonMiddleware(MiddlewareMixin):
"""
"Common" middleware for taking care of some basic operations:
- Forbid access to User-Agents in settings.DISALLOWED_USER_AGENTS
- URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
      appends missing slashes and/or prepends missing "www."s.
- If APPEND_SLASH is set and the initial URL doesn't end with a
slash, and it is not found in urlpatterns, form a new URL by
appending a slash at the end. If this new URL is found in
urlpatterns, return an HTTP redirect to this new URL; otherwise
process the initial URL as usual.
This behavior can be customized by subclassing CommonMiddleware and
overriding the response_redirect_class attribute.
- ETags: If the USE_ETAGS setting is set, ETags will be calculated from
the entire page content and Not Modified responses will be returned
appropriately. USE_ETAGS is deprecated in favor of
ConditionalGetMiddleware.
"""
response_redirect_class = HttpResponsePermanentRedirect
def process_request(self, request):
"""
Check for denied User-Agents and rewrite the URL based on
settings.APPEND_SLASH and settings.PREPEND_WWW
"""
# Check for denied User-Agents
if 'HTTP_USER_AGENT' in request.META:
for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
raise PermissionDenied('Forbidden user agent')
# Check for a redirect based on settings.PREPEND_WWW
host = request.get_host()
must_prepend = settings.PREPEND_WWW and host and not host.startswith('www.')
redirect_url = ('%s://www.%s' % (request.scheme, host)) if must_prepend else ''
# Check if a slash should be appended
if self.should_redirect_with_slash(request):
path = self.get_full_path_with_slash(request)
else:
path = request.get_full_path()
# Return a redirect if necessary
if redirect_url or path != request.get_full_path():
redirect_url += path
return self.response_redirect_class(redirect_url)
def should_redirect_with_slash(self, request):
"""
Return True if settings.APPEND_SLASH is True and appending a slash to
the request path turns an invalid path into a valid one.
"""
if settings.APPEND_SLASH and not request.path_info.endswith('/'):
urlconf = getattr(request, 'urlconf', None)
return (
not is_valid_path(request.path_info, urlconf) and
is_valid_path('%s/' % request.path_info, urlconf)
)
return False
def get_full_path_with_slash(self, request):
"""
Return the full path of the request with a trailing slash appended.
Raise a RuntimeError if settings.DEBUG is True and request.method is
POST, PUT, or PATCH.
"""
new_path = request.get_full_path(force_append_slash=True)
if settings.DEBUG and request.method in ('POST', 'PUT', 'PATCH'):
raise RuntimeError(
"You called this URL via %(method)s, but the URL doesn't end "
"in a slash and you have APPEND_SLASH set. Django can't "
"redirect to the slash URL while maintaining %(method)s data. "
"Change your form to point to %(url)s (note the trailing "
"slash), or set APPEND_SLASH=False in your Django settings." % {
'method': request.method,
'url': request.get_host() + new_path,
}
)
return new_path
def process_response(self, request, response):
"""
Calculate the ETag, if needed.
When the status code of the response is 404, it may redirect to a path
with an appended slash if should_redirect_with_slash() returns True.
"""
# If the given URL is "Not Found", then check if we should redirect to
# a path with a slash appended.
if response.status_code == 404:
if self.should_redirect_with_slash(request):
return self.response_redirect_class(self.get_full_path_with_slash(request))
if settings.USE_ETAGS and self.needs_etag(response):
warnings.warn(
"The USE_ETAGS setting is deprecated in favor of "
"ConditionalGetMiddleware which sets the ETag regardless of "
"the setting. CommonMiddleware won't do ETag processing in "
"Django 2.1.",
RemovedInDjango21Warning
)
if not response.has_header('ETag'):
set_response_etag(response)
if response.has_header('ETag'):
return get_conditional_response(
request,
etag=response['ETag'],
response=response,
)
# Add the Content-Length header to non-streaming responses if not
# already set.
if not response.streaming and not response.has_header('Content-Length'):
response['Content-Length'] = str(len(response.content))
return response
def needs_etag(self, response):
"""Return True if an ETag header should be added to response."""
cache_control_headers = cc_delim_re.split(response.get('Cache-Control', ''))
return all(header.lower() != 'no-store' for header in cache_control_headers)
class BrokenLinkEmailsMiddleware(MiddlewareMixin):
def process_response(self, request, response):
"""Send broken link emails for relevant 404 NOT FOUND responses."""
if response.status_code == 404 and not settings.DEBUG:
domain = request.get_host()
path = request.get_full_path()
referer = request.META.get('HTTP_REFERER', '')
if not self.is_ignorable_request(request, path, domain, referer):
ua = request.META.get('HTTP_USER_AGENT', '<none>')
ip = request.META.get('REMOTE_ADDR', '<none>')
mail_managers(
"Broken %slink on %s" % (
('INTERNAL ' if self.is_internal_request(domain, referer) else ''),
domain
),
"Referrer: %s\nRequested URL: %s\nUser agent: %s\n"
"IP address: %s\n" % (referer, path, ua, ip),
fail_silently=True)
return response
def is_internal_request(self, domain, referer):
"""
Return True if the referring URL is the same domain as the current
request.
"""
# Different subdomains are treated as different domains.
return bool(re.match("^https?://%s/" % re.escape(domain), referer))
def is_ignorable_request(self, request, uri, domain, referer):
"""
Return True if the given request *shouldn't* notify the site managers
according to project settings or in situations outlined by the inline
comments.
"""
# The referer is empty.
if not referer:
return True
# APPEND_SLASH is enabled and the referer is equal to the current URL
# without a trailing slash indicating an internal redirect.
if settings.APPEND_SLASH and uri.endswith('/') and referer == uri[:-1]:
return True
# A '?' in referer is identified as a search engine source.
if not self.is_internal_request(domain, referer) and '?' in referer:
return True
# The referer is equal to the current URL, ignoring the scheme (assumed
# to be a poorly implemented bot).
parsed_referer = urlparse(referer)
if parsed_referer.netloc in ['', domain] and parsed_referer.path == uri:
return True
return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
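# --- Illustrative sketch (editor addition, not part of Django) ---
# The CommonMiddleware docstring notes that the redirect class can be swapped
# by subclassing; this hypothetical variant issues temporary (302) redirects
# instead of permanent (301) ones.
from django.http import HttpResponseRedirect
class TemporaryRedirectCommonMiddleware(CommonMiddleware):
    response_redirect_class = HttpResponseRedirect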
|
marmyshev/item_title
|
refs/heads/master
|
openlp/plugins/images/lib/__init__.py
|
2
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
from .mediaitem import ImageMediaItem
from .imagetab import ImageTab
|
mheap/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/avi/avi_cloudproperties.py
|
41
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloudproperties
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of CloudProperties Avi RESTful Object
description:
- This module is used to configure CloudProperties object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
cc_props:
description:
- Cloudconnector properties.
cc_vtypes:
description:
- Cloud types supported by cloudconnector.
- Enum options - CLOUD_NONE, CLOUD_VCENTER, CLOUD_OPENSTACK, CLOUD_AWS, CLOUD_VCA, CLOUD_APIC, CLOUD_MESOS, CLOUD_LINUXSERVER, CLOUD_DOCKER_UCP,
- CLOUD_RANCHER, CLOUD_OSHIFT_K8S, CLOUD_AZURE.
hyp_props:
description:
- Hypervisor properties.
info:
description:
- Properties specific to a cloud type.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create CloudProperties object
avi_cloudproperties:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_cloudproperties
"""
RETURN = '''
obj:
description: CloudProperties (api/cloudproperties) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
cc_props=dict(type='dict',),
cc_vtypes=dict(type='list',),
hyp_props=dict(type='list',),
info=dict(type='list',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'cloudproperties',
set([]))
if __name__ == '__main__':
main()
|
JakeColtman/bartpy
|
refs/heads/master
|
bartpy/data.py
|
1
|
from operator import gt, le
from typing import Any, List, Optional, Union
import numpy as np
import pandas as pd
from bartpy.errors import NoSplittableVariableException
from bartpy.splitcondition import SplitCondition
def is_not_constant(series: np.ndarray) -> bool:
"""
Quickly identify whether a series contains more than 1 distinct value
Parameters
----------
series: np.ndarray
The series to assess
Returns
-------
bool
True if more than one distinct value found
"""
if len(series) <= 1:
return False
    # compare every element against the first; the previous version skipped
    # series[0] and indexed the raw memoryview via series.data
    first_value = series[0]
    for i in range(1, len(series)):
        if series[i] != first_value:
            return True
    return False
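# --- Illustrative check (editor addition, not part of bartpy) ---
# Demonstrates the constancy test on two small arrays.
if __name__ == "__main__":
    assert not is_not_constant(np.array([1.0, 1.0, 1.0]))
    assert is_not_constant(np.array([1.0, 2.0, 1.0]))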
def ensure_numpy_array(X: Union[np.ndarray, pd.DataFrame]) -> np.ndarray:
if isinstance(X, pd.DataFrame):
return X.values
else:
return X
def ensure_float_array(X: np.ndarray) -> np.ndarray:
return X.astype(float)
def format_covariate_matrix(X: Union[np.ndarray, pd.DataFrame]) -> np.ndarray:
X = ensure_numpy_array(X)
return ensure_float_array(X)
def make_bartpy_data(X: Union[np.ndarray, pd.DataFrame],
y: np.ndarray,
normalize: bool=True) -> 'Data':
X = format_covariate_matrix(X)
y = y.astype(float)
return Data(X, y, normalize=normalize)
class CovariateMatrix(object):
def __init__(self,
X: np.ndarray,
mask: np.ndarray,
n_obsv: int,
unique_columns: List[int],
splittable_variables: List[int]):
        if isinstance(X, pd.DataFrame):
            X = X.values
self._X = X
self._n_obsv = n_obsv
self._n_features = X.shape[1]
self._mask = mask
        # Cache initialization
if unique_columns is not None:
self._unique_columns = [x if x is True else None for x in unique_columns]
else:
self._unique_columns = [None for _ in range(self._n_features)]
if splittable_variables is not None:
self._splittable_variables = [x if x is False else None for x in splittable_variables]
else:
self._splittable_variables = [None for _ in range(self._n_features)]
self._max_values = [None] * self._n_features
self._X_column_cache = [None] * self._n_features
self._max_value_cache = [None] * self._n_features
self._X_cache = None
@property
def mask(self) -> np.ndarray:
return self._mask
@property
def values(self) -> np.ndarray:
return self._X
def get_column(self, i: int) -> np.ndarray:
if self._X_cache is None:
self._X_cache = self.values[~self.mask, :]
return self._X_cache[:, i]
def splittable_variables(self) -> List[int]:
"""
List of columns that can be split on, i.e. that have more than one unique value
Returns
-------
List[int]
List of column numbers that can be split on
"""
for i in range(0, self._n_features):
if self._splittable_variables[i] is None:
self._splittable_variables[i] = is_not_constant(self.get_column(i))
return [i for (i, x) in enumerate(self._splittable_variables) if x is True]
@property
def n_splittable_variables(self) -> int:
return len(self.splittable_variables())
def is_at_least_one_splittable_variable(self) -> bool:
if any(self._splittable_variables):
return True
else:
return len(self.splittable_variables()) > 0
    def random_splittable_variable(self) -> int:
        """
        Choose a variable at random from the set of splittable variables
        Returns
        -------
        int - the index of a column that can be split on
        """
if self.is_at_least_one_splittable_variable():
return np.random.choice(np.array(self.splittable_variables()), 1)[0]
else:
raise NoSplittableVariableException()
def is_column_unique(self, i: int) -> bool:
"""
        Identify whether a feature contains only unique values, i.e. it has no duplicated values
        Useful to provide a faster way to calculate the probability of a value being selected in a variable
        Returns
        -------
        bool
        """
if self._unique_columns[i] is None:
self._unique_columns[i] = len(np.unique(self.get_column(i))) == self._n_obsv
return self._unique_columns[i]
def max_value_of_column(self, i: int):
if self._max_value_cache[i] is None:
self._max_value_cache[i] = self.get_column(i).max()
return self._max_value_cache[i]
def random_splittable_value(self, variable: int) -> Any:
"""
        Return a random value of a variable
        Useful for choosing a value to split on
        Parameters
        ----------
        variable - int
            Index of the column to split on
        Returns
        -------
        Any
        Notes
        -----
        - Won't create degenerate splits: the maximum value is rejected, so every split has at least one row on both sides
"""
if variable not in self.splittable_variables():
raise NoSplittableVariableException()
max_value = self.max_value_of_column(variable)
candidate = np.random.choice(self.get_column(variable))
while candidate == max_value:
candidate = np.random.choice(self.get_column(variable))
return candidate
def proportion_of_value_in_variable(self, variable: int, value: float) -> float:
if self.is_column_unique(variable):
return 1. / self.n_obsv
else:
return float(np.mean(self.get_column(variable) == value))
def update_mask(self, other: SplitCondition) -> np.ndarray:
if other.operator == gt:
column_mask = self.values[:, other.splitting_variable] <= other.splitting_value
elif other.operator == le:
column_mask = self.values[:, other.splitting_variable] > other.splitting_value
else:
raise TypeError("Operator type not matched, only {} and {} supported".format(gt, le))
return self.mask | column_mask
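    # Mask semantics sketch: True marks rows *excluded* from this node, so the
    # returned mask keeps only rows satisfying the condition. Assuming
    # SplitCondition takes (variable, value, operator), a "keep X[:, j] > v"
    # split looks like:
    #   new_mask = matrix.update_mask(SplitCondition(j, v, gt))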
@property
def variables(self) -> List[int]:
return list(range(self._n_features))
@property
def n_obsv(self) -> int:
return self._n_obsv
class Target(object):
def __init__(self, y, mask, n_obsv, normalize, y_sum=None):
if normalize:
self.original_y_min, self.original_y_max = y.min(), y.max()
self._y = self.normalize_y(y)
else:
self._y = y
self._mask = mask
self._inverse_mask_int = (~self._mask).astype(int)
self._n_obsv = n_obsv
if y_sum is None:
self.y_sum_cache_up_to_date = False
self._summed_y = None
else:
self.y_sum_cache_up_to_date = True
self._summed_y = y_sum
@staticmethod
def normalize_y(y: np.ndarray) -> np.ndarray:
"""
Normalize y into the range (-0.5, 0.5)
Useful for allowing the leaf parameter prior to be 0, and to standardize the sigma prior
Parameters
----------
y - np.ndarray
Returns
-------
np.ndarray
        Examples
        --------
        >>> Target.normalize_y(np.array([1, 2, 3]))
        array([-0.5,  0. ,  0.5])
"""
y_min, y_max = np.min(y), np.max(y)
return -0.5 + ((y - y_min) / (y_max - y_min))
def unnormalize_y(self, y: np.ndarray) -> np.ndarray:
distance_from_min = y - (-0.5)
total_distance = (self.original_y_max - self.original_y_min)
return self.original_y_min + (distance_from_min * total_distance)
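    # Round-trip sketch (hypothetical values, assuming normalize=True and a
    # non-constant y): normalize_y maps y onto [-0.5, 0.5] and unnormalize_y
    # inverts it:
    #   t = Target(np.array([1., 2., 3.]), np.zeros(3, dtype=bool), 3, True)
    #   t.values          -> array([-0.5,  0. ,  0.5])
    #   t.unnormalized_y  -> array([1., 2., 3.])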
@property
def unnormalized_y(self) -> np.ndarray:
return self.unnormalize_y(self.values)
@property
def normalizing_scale(self) -> float:
return self.original_y_max - self.original_y_min
def summed_y(self) -> float:
if self.y_sum_cache_up_to_date:
return self._summed_y
else:
self._summed_y = np.sum(self._y * self._inverse_mask_int)
self.y_sum_cache_up_to_date = True
return self._summed_y
def update_y(self, y) -> None:
self._y = y
self.y_sum_cache_up_to_date = False
@property
def values(self):
return self._y
class Data(object):
"""
Encapsulates the data within a split of feature space.
Primarily used to cache computations on the data for better performance
Parameters
----------
X: np.ndarray
The subset of the covariate matrix that falls into the split
y: np.ndarray
The subset of the target array that falls into the split
normalize: bool
Whether to map the target into -0.5, 0.5
cache: bool
Whether to cache common values.
        You really only want to turn this off if you're not going to use the resulting object for anything (e.g. when testing)
"""
def __init__(self,
X: np.ndarray,
y: np.ndarray,
mask: Optional[np.ndarray]=None,
normalize: bool=False,
unique_columns: List[int]=None,
splittable_variables: Optional[List[Optional[bool]]]=None,
y_sum: float=None,
n_obsv: int=None):
if mask is None:
mask = np.zeros_like(y).astype(bool)
self._mask: np.ndarray = mask
if n_obsv is None:
n_obsv = (~self.mask).astype(int).sum()
self._n_obsv = n_obsv
self._X = CovariateMatrix(X, mask, n_obsv, unique_columns, splittable_variables)
self._y = Target(y, mask, n_obsv, normalize, y_sum)
@property
def y(self) -> Target:
return self._y
@property
def X(self) -> CovariateMatrix:
return self._X
@property
def mask(self) -> np.ndarray:
return self._mask
def update_y(self, y: np.ndarray) -> None:
self._y.update_y(y)
def __add__(self, other: SplitCondition) -> 'Data':
updated_mask = self.X.update_mask(other)
return Data(self.X.values,
self.y.values,
updated_mask,
normalize=False,
unique_columns=self._X._unique_columns,
splittable_variables=self._X._splittable_variables,
y_sum=other.carry_y_sum,
n_obsv=other.carry_n_obsv)
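# Usage sketch for splitting (SplitCondition signature assumed, as above):
#   left = data + SplitCondition(0, 0.5, le)  # child node with rows where X[:, 0] <= 0.5
# The child reuses parent caches: the carried y_sum and n_obsv from the
# condition, plus any uniqueness/splittability flags that survive subsetting.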
|