repo_name | ref | path | copies | content |
|---|---|---|---|---|
omprakasha/odoo | refs/heads/8.0 | addons/base_vat/base_vat.py | 238 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 OpenERP SA (<http://openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import string
import datetime
import re
_logger = logging.getLogger(__name__)
try:
import vatnumber
except ImportError:
_logger.warning("VAT validation partially unavailable because the `vatnumber` Python library cannot be found. "
"Install it to support more countries, for example with `easy_install vatnumber`.")
vatnumber = None
from openerp.osv import fields, osv
from openerp.tools.misc import ustr
from openerp.tools.translate import _
_ref_vat = {
'at': 'ATU12345675',
'be': 'BE0477472701',
'bg': 'BG1234567892',
'ch': 'CHE-123.456.788 TVA or CH TVA 123456', #Swiss by Yannick Vaucher @ Camptocamp
'cy': 'CY12345678F',
'cz': 'CZ12345679',
'de': 'DE123456788',
'dk': 'DK12345674',
'ee': 'EE123456780',
'el': 'EL12345670',
'es': 'ESA12345674',
'fi': 'FI12345671',
'fr': 'FR32123456789',
'gb': 'GB123456782',
'gr': 'GR12345670',
'hu': 'HU12345676',
'hr': 'HR01234567896', # Croatia, contributed by Milan Tribuson
'ie': 'IE1234567FA',
'it': 'IT12345670017',
'lt': 'LT123456715',
'lu': 'LU12345613',
'lv': 'LV41234567891',
'mt': 'MT12345634',
'mx': 'MXABC123456T1B',
'nl': 'NL123456782B90',
'no': 'NO123456785',
'pe': 'PER10254824220 or PED10254824220',
'pl': 'PL1234567883',
'pt': 'PT123456789',
'ro': 'RO1234567897',
'se': 'SE123456789701',
'si': 'SI12345679',
'sk': 'SK0012345675',
'tr': 'TR1234567890 (VERGINO) or TR12345678901 (TCKIMLIKNO)' # Levent Karakas @ Eska Yazilim A.S.
}
class res_partner(osv.osv):
_inherit = 'res.partner'
def _split_vat(self, vat):
vat_country, vat_number = vat[:2].lower(), vat[2:].replace(' ', '')
return vat_country, vat_number
def simple_vat_check(self, cr, uid, country_code, vat_number, context=None):
'''
Check the VAT number depending on the country.
http://sima-pc.com/nif.php
'''
if not ustr(country_code).encode('utf-8').isalpha():
return False
check_func_name = 'check_vat_' + country_code
check_func = getattr(self, check_func_name, None) or \
getattr(vatnumber, check_func_name, None)
if not check_func:
# No VAT validation available, default to check that the country code exists
if country_code.upper() == 'EU':
# Foreign companies that trade with non-enterprises in the EU
# may have a VATIN starting with "EU" instead of a country code.
return True
res_country = self.pool.get('res.country')
return bool(res_country.search(cr, uid, [('code', '=ilike', country_code)], context=context))
return check_func(vat_number)
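# Illustrative example of the dispatch above (assumed values, not upstream
# code): for a partner VAT of 'BE0477472701', _split_vat() returns
# ('be', '0477472701'), and simple_vat_check() looks up 'check_vat_be'
# first on this model (allowing overrides such as check_vat_ch below),
# then on the vatnumber library.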
def vies_vat_check(self, cr, uid, country_code, vat_number, context=None):
try:
# Validate against VAT Information Exchange System (VIES)
# see also http://ec.europa.eu/taxation_customs/vies/
return vatnumber.check_vies(country_code.upper()+vat_number)
except Exception:
# see http://ec.europa.eu/taxation_customs/vies/checkVatService.wsdl
# Fault code may contain INVALID_INPUT, SERVICE_UNAVAILABLE, MS_UNAVAILABLE,
# TIMEOUT or SERVER_BUSY. There is no way we can validate the input
# with VIES if any of these arise, including the first one (it means invalid
# country code or empty VAT number), so we fall back to the simple check.
return self.simple_vat_check(cr, uid, country_code, vat_number, context=context)
def button_check_vat(self, cr, uid, ids, context=None):
if not self.check_vat(cr, uid, ids, context=context):
msg = self._construct_constraint_msg(cr, uid, ids, context=context)
raise osv.except_osv(_('Error!'), msg)
return True
def check_vat(self, cr, uid, ids, context=None):
user_company = self.pool.get('res.users').browse(cr, uid, uid).company_id
if user_company.vat_check_vies:
# force full VIES online check
check_func = self.vies_vat_check
else:
# quick and partial off-line checksum validation
check_func = self.simple_vat_check
for partner in self.browse(cr, uid, ids, context=context):
if not partner.vat:
continue
vat_country, vat_number = self._split_vat(partner.vat)
if not check_func(cr, uid, vat_country, vat_number, context=context):
_logger.info("VAT number [%s] is not valid", vat_number)
return False
return True
def vat_change(self, cr, uid, ids, value, context=None):
return {'value': {'vat_subjected': bool(value)}}
def _commercial_fields(self, cr, uid, context=None):
return super(res_partner, self)._commercial_fields(cr, uid, context=context) + ['vat_subjected']
def _construct_constraint_msg(self, cr, uid, ids, context=None):
def default_vat_check(cn, vn):
# by default, a VAT number is assumed to have a valid format if its
# first two characters are ASCII letters (the country code); the
# number part itself is not checked here
return cn[0] in string.ascii_lowercase and cn[1] in string.ascii_lowercase
vat_country, vat_number = self._split_vat(self.browse(cr, uid, ids)[0].vat)
vat_no = "'CC##' (CC=Country Code, ##=VAT Number)"
error_partner = self.browse(cr, uid, ids, context=context)
if default_vat_check(vat_country, vat_number):
vat_no = _ref_vat[vat_country] if vat_country in _ref_vat else vat_no
if self.pool['res.users'].browse(cr, uid, uid).company_id.vat_check_vies:
return '\n' + _('The VAT number [%s] for partner [%s] either failed the VIES VAT validation check or did not respect the expected format %s.') % (error_partner[0].vat, error_partner[0].name, vat_no)
return '\n' + _('The VAT number [%s] for partner [%s] does not seem to be valid. \nNote: the expected format is %s') % (error_partner[0].vat, error_partner[0].name, vat_no)
_constraints = [(check_vat, _construct_constraint_msg, ["vat"])]
__check_vat_ch_re1 = re.compile(r'(MWST|TVA|IVA)[0-9]{6}$')
__check_vat_ch_re2 = re.compile(r'E([0-9]{9}|-[0-9]{3}\.[0-9]{3}\.[0-9]{3})(MWST|TVA|IVA)$')
def check_vat_ch(self, vat):
'''
Check Switzerland VAT number.
'''
# The Swiss VAT number format changes between 2011 and 2013
# http://www.estv.admin.ch/mwst/themen/00154/00589/01107/index.html?lang=fr
# The old format is "TVA 123456"; we assume the user prefixes the number with "CH"
# The new format is "CHE-999.999.99C TVA"
# Both the old and the new format are accepted until the end of 2013
# Accepted formats are: (spaces are ignored)
# CH TVA ######
# CH IVA ######
# CH MWST #######
#
# CHE#########MWST
# CHE#########TVA
# CHE#########IVA
# CHE-###.###.### MWST
# CHE-###.###.### TVA
# CHE-###.###.### IVA
#
if self.__check_vat_ch_re1.match(vat):
return True
match = self.__check_vat_ch_re2.match(vat)
if match:
# For new TVA numbers, do a mod11 check
num = filter(lambda s: s.isdigit(), match.group(1)) # get the digits only
factor = (5,4,3,2,7,6,5,4)
csum = sum([int(num[i]) * factor[i] for i in range(8)])
check = (11 - (csum % 11)) % 11
return check == int(num[8])
return False
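# Worked example for the mod11 check above (illustrative, based on the
# _ref_vat entry 'CHE-123.456.788 TVA'): the digits are 1 2 3 4 5 6 7 8
# with check digit 8, and
#   5*1 + 4*2 + 3*3 + 2*4 + 7*5 + 6*6 + 5*7 + 4*8 = 168
#   168 % 11 = 3, (11 - 3) % 11 = 8, which matches num[8].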
def _ie_check_char(self, vat):
vat = vat.zfill(8)
extra = 0
if vat[7] not in ' W':
if vat[7].isalpha():
extra = 9 * (ord(vat[7]) - 64)
else:
# invalid
return -1
checksum = extra + sum((8-i) * int(x) for i, x in enumerate(vat[:7]))
return 'WABCDEFGHIJKLMNOPQRSTUV'[checksum % 23]
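# Worked example (illustrative, based on the _ref_vat entry 'IE1234567FA'):
# _ie_check_char('1234567A') finds vat[7] == 'A', so
#   extra = 9 * (ord('A') - 64) = 9
#   checksum = 9 + 8*1 + 7*2 + 6*3 + 5*4 + 4*5 + 3*6 + 2*7 = 121
# and 121 % 23 = 6, so the expected check character is
# 'WABCDEFGHIJKLMNOPQRSTUV'[6] == 'F'.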
def check_vat_ie(self, vat):
""" Temporary Ireland VAT validation to support the new format
introduced in January 2013 in Ireland, until upstream is fixed.
TODO: remove when fixed upstream"""
if len(vat) not in (8, 9) or not vat[2:7].isdigit():
return False
if len(vat) == 8:
# Normalize pre-2013 numbers: final space or 'W' not significant
vat += ' '
if vat[:7].isdigit():
return vat[7] == self._ie_check_char(vat[:7] + vat[8])
elif vat[1] in (string.ascii_uppercase + '+*'):
# Deprecated format
# See http://www.revenue.ie/en/online/third-party-reporting/reporting-payment-details/faqs.html#section3
return vat[7] == self._ie_check_char(vat[2:7] + vat[0] + vat[8])
return False
# Mexican VAT verification, contributed by Vauxoo
# and Panos Christeas <p_christ@hol.gr>
__check_vat_mx_re = re.compile(r"(?P<primeras>[A-Za-z\xd1\xf1&]{3,4})" \
r"[ \-_]?" \
r"(?P<ano>[0-9]{2})(?P<mes>[01][0-9])(?P<dia>[0-3][0-9])" \
r"[ \-_]?" \
r"(?P<code>[A-Za-z0-9&\xd1\xf1]{3})$")
def check_vat_mx(self, vat):
''' Mexican VAT verification
Verificar RFC México
'''
# we convert to 8-bit encoding, to help the regex parse only bytes
vat = ustr(vat).encode('iso8859-1')
m = self.__check_vat_mx_re.match(vat)
if not m:
#No valid format
return False
try:
ano = int(m.group('ano'))
if ano > 30:
ano = 1900 + ano
else:
ano = 2000 + ano
datetime.date(ano, int(m.group('mes')), int(m.group('dia')))
except ValueError:
return False
#Valid format and valid date
return True
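# Illustrative example (hypothetical RFC, constructed to match the regex):
# 'ABC010101XY9' parses as primeras='ABC', ano='01', mes='01', dia='01',
# code='XY9'; since 1 <= 30 the year is read as 2001, and
# datetime.date(2001, 1, 1) is a valid date, so the check passes.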
# Norway VAT validation, contributed by Rolv Råen (adEgo) <rora@adego.no>
def check_vat_no(self, vat):
'''
Check Norway VAT number. See http://www.brreg.no/english/coordination/number.html
'''
if len(vat) != 9:
return False
try:
int(vat)
except ValueError:
return False
sum = (3 * int(vat[0])) + (2 * int(vat[1])) + \
(7 * int(vat[2])) + (6 * int(vat[3])) + \
(5 * int(vat[4])) + (4 * int(vat[5])) + \
(3 * int(vat[6])) + (2 * int(vat[7]))
check = 11 - (sum % 11)
if check == 11:
check = 0
if check == 10:
# 10 is not a valid check digit for an organization number
return False
return check == int(vat[8])
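# Worked example (illustrative, based on the _ref_vat entry 'NO123456785'):
#   3*1 + 2*2 + 7*3 + 6*4 + 5*5 + 4*6 + 3*7 + 2*8 = 138
#   138 % 11 = 6, 11 - 6 = 5, which matches the last digit.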
# Peruvian VAT validation, contributed by Vauxoo
def check_vat_pe(self, vat):
vat_type, vat = vat and len(vat) >= 2 and (vat[0], vat[1:]) or (False, False)
if vat_type and vat_type.upper() == 'D':
#DNI
return True
elif vat_type and vat_type.upper() == 'R':
#verify RUC
factor = '5432765432'
sum = 0
dig_check = False
if len(vat) != 11:
return False
try:
int(vat)
except ValueError:
return False
for f in range(0,10):
sum += int(factor[f]) * int(vat[f])
subtraction = 11 - (sum % 11)
if subtraction == 10:
dig_check = 0
elif subtraction == 11:
dig_check = 1
else:
dig_check = subtraction
return int(vat[10]) == dig_check
else:
return False
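# Worked example (illustrative, based on the _ref_vat entry 'PER10254824220'):
# the RUC digits are 1 0 2 5 4 8 2 4 2 2 with check digit 0; weighting
# them with factor '5432765432' gives
#   5*1 + 4*0 + 3*2 + 2*5 + 7*4 + 6*8 + 5*2 + 4*4 + 3*2 + 2*2 = 133
# 133 % 11 = 1, so subtraction = 10 and dig_check = 0, matching vat[10].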
# VAT validation in Turkey, contributed by Levent Karakas @ Eska Yazilim A.S.
def check_vat_tr(self, vat):
if not (10 <= len(vat) <= 11):
return False
try:
int(vat)
except ValueError:
return False
# check vat number (vergi no)
if len(vat) == 10:
sum = 0
check = 0
for f in range(0,9):
c1 = (int(vat[f]) + (9-f)) % 10
c2 = ( c1 * (2 ** (9-f)) ) % 9
if (c1 != 0) and (c2 == 0): c2 = 9
sum += c2
if sum % 10 == 0:
check = 0
else:
check = 10 - (sum % 10)
return int(vat[9]) == check
# check personal id (tc kimlik no)
if len(vat) == 11:
c1a = 0
c1b = 0
c2 = 0
for f in range(0,9,2):
c1a += int(vat[f])
for f in range(1,9,2):
c1b += int(vat[f])
c1 = ( (7 * c1a) - c1b) % 10
for f in range(0,10):
c2 += int(vat[f])
c2 = c2 % 10
return int(vat[9]) == c1 and int(vat[10]) == c2
return False
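# Worked example (illustrative, based on the _ref_vat entry 'TR1234567890'):
# every digit equals f + 1, so int(vat[f]) + (9 - f) = 10 and each c1 is 0;
# the checksum therefore collapses to check = 0, matching the final digit.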
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kosz85/django | refs/heads/master | django/contrib/sessions/backends/base.py | 9 | import base64
import logging
import string
from contextlib import suppress
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.sessions.exceptions import SuspiciousSession
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.crypto import (
constant_time_compare, get_random_string, salted_hmac,
)
from django.utils.encoding import force_bytes
from django.utils.module_loading import import_string
# session_key should not be case sensitive because some backends can store it
# on case insensitive file systems.
VALID_KEY_CHARS = string.ascii_lowercase + string.digits
class CreateError(Exception):
"""
Used internally as a consistent exception type to catch from save (see the
docstring for SessionBase.save() for details).
"""
pass
class UpdateError(Exception):
"""
Occurs if Django tries to update a session that was deleted.
"""
pass
class SessionBase:
"""
Base class for all Session classes.
"""
TEST_COOKIE_NAME = 'testcookie'
TEST_COOKIE_VALUE = 'worked'
__not_given = object()
def __init__(self, session_key=None):
self._session_key = session_key
self.accessed = False
self.modified = False
self.serializer = import_string(settings.SESSION_SERIALIZER)
def __contains__(self, key):
return key in self._session
def __getitem__(self, key):
return self._session[key]
def __setitem__(self, key, value):
self._session[key] = value
self.modified = True
def __delitem__(self, key):
del self._session[key]
self.modified = True
def get(self, key, default=None):
return self._session.get(key, default)
def pop(self, key, default=__not_given):
self.modified = self.modified or key in self._session
args = () if default is self.__not_given else (default,)
return self._session.pop(key, *args)
def setdefault(self, key, value):
if key in self._session:
return self._session[key]
else:
self.modified = True
self._session[key] = value
return value
def set_test_cookie(self):
self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE
def test_cookie_worked(self):
return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE
def delete_test_cookie(self):
del self[self.TEST_COOKIE_NAME]
def _hash(self, value):
key_salt = "django.contrib.sessions" + self.__class__.__name__
return salted_hmac(key_salt, value).hexdigest()
def encode(self, session_dict):
"Return the given session dictionary serialized and encoded as a string."
serialized = self.serializer().dumps(session_dict)
hash = self._hash(serialized)
return base64.b64encode(hash.encode() + b":" + serialized).decode('ascii')
def decode(self, session_data):
encoded_data = base64.b64decode(force_bytes(session_data))
try:
# could produce ValueError if there is no ':'
hash, serialized = encoded_data.split(b':', 1)
expected_hash = self._hash(serialized)
if not constant_time_compare(hash.decode(), expected_hash):
raise SuspiciousSession("Session data corrupted")
else:
return self.serializer().loads(serialized)
except Exception as e:
# ValueError, SuspiciousOperation, unpickling exceptions. If any of
# these happen, just return an empty dictionary (an empty session).
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' % e.__class__.__name__)
logger.warning(str(e))
return {}
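# Illustrative round trip (assumed session dict, not part of Django):
# encode({'foo': 'bar'}) base64-encodes b"<hmac-hexdigest>:<serialized>";
# decode() splits on the first b':', recomputes the salted HMAC over the
# serialized part and compares it in constant time before deserializing,
# returning {} on any failure.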
def update(self, dict_):
self._session.update(dict_)
self.modified = True
def has_key(self, key):
return key in self._session
def keys(self):
return self._session.keys()
def values(self):
return self._session.values()
def items(self):
return self._session.items()
def clear(self):
# To avoid unnecessary persistent storage accesses, we set up the
# internals directly (loading data wastes time, since we are going to
# set it to an empty dict anyway).
self._session_cache = {}
self.accessed = True
self.modified = True
def is_empty(self):
"Return True when there is no session_key and the session is empty."
try:
return not bool(self._session_key) and not self._session_cache
except AttributeError:
return True
def _get_new_session_key(self):
"Return session key that isn't being used."
while True:
session_key = get_random_string(32, VALID_KEY_CHARS)
if not self.exists(session_key):
break
return session_key
def _get_or_create_session_key(self):
if self._session_key is None:
self._session_key = self._get_new_session_key()
return self._session_key
def _validate_session_key(self, key):
"""
Key must be truthy and at least 8 characters long. 8 characters is an
arbitrary lower bound for some minimal key security.
"""
return key and len(key) >= 8
def _get_session_key(self):
return self.__session_key
def _set_session_key(self, value):
"""
Validate session key on assignment. Invalid values will be set to None.
"""
if self._validate_session_key(value):
self.__session_key = value
else:
self.__session_key = None
session_key = property(_get_session_key)
_session_key = property(_get_session_key, _set_session_key)
def _get_session(self, no_load=False):
"""
Lazily load session from storage (unless "no_load" is True, when only
an empty dict is stored) and store it in the current instance.
"""
self.accessed = True
try:
return self._session_cache
except AttributeError:
if self.session_key is None or no_load:
self._session_cache = {}
else:
self._session_cache = self.load()
return self._session_cache
_session = property(_get_session)
def get_expiry_age(self, **kwargs):
"""Get the number of seconds until the session expires.
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Distinguish between "expiry=None passed in kwargs" and "expiry not
# passed in kwargs", in order to guarantee that self.load() is not
# triggered when expiry is provided.
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if not expiry: # Checks both None and 0 cases
return settings.SESSION_COOKIE_AGE
if not isinstance(expiry, datetime):
return expiry
delta = expiry - modification
return delta.days * 86400 + delta.seconds
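# Illustrative example (assumed values): with an expiry set to a datetime
# 1 day and 2 hours after the last modification, the delta above yields
# 1 * 86400 + 7200 = 93600 seconds.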
def get_expiry_date(self, **kwargs):
"""Get session the expiry date (as a datetime object).
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Same comment as in get_expiry_age
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if isinstance(expiry, datetime):
return expiry
if not expiry: # Checks both None and 0 cases
expiry = settings.SESSION_COOKIE_AGE
return modification + timedelta(seconds=expiry)
def set_expiry(self, value):
"""
Set a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
"""
if value is None:
# Remove any custom expiration for this session.
with suppress(KeyError):
del self['_session_expiry']
return
if isinstance(value, timedelta):
value = timezone.now() + value
self['_session_expiry'] = value
def get_expire_at_browser_close(self):
"""
Return ``True`` if the session is set to expire when the browser
closes, and ``False`` if there's an expiry date. Use
``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
date/age, if there is one.
"""
if self.get('_session_expiry') is None:
return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
return self.get('_session_expiry') == 0
def flush(self):
"""
Remove the current session data from the database and regenerate the
key.
"""
self.clear()
self.delete()
self._session_key = None
def cycle_key(self):
"""
Create a new session key, while retaining the current session data.
"""
data = self._session
key = self.session_key
self.create()
self._session_cache = data
if key:
self.delete(key)
# Methods that child classes must implement.
def exists(self, session_key):
"""
Return True if the given session_key already exists.
"""
raise NotImplementedError('subclasses of SessionBase must provide an exists() method')
def create(self):
"""
Create a new session instance. Guaranteed to create a new object with
a unique key and will have saved the result once (with empty data)
before the method returns.
"""
raise NotImplementedError('subclasses of SessionBase must provide a create() method')
def save(self, must_create=False):
"""
Save the session data. If 'must_create' is True, create a new session
object (or raise CreateError). Otherwise, only update an existing
object and don't create one (raise UpdateError if needed).
"""
raise NotImplementedError('subclasses of SessionBase must provide a save() method')
def delete(self, session_key=None):
"""
Delete the session data under this key. If the key is None, use the
current session key value.
"""
raise NotImplementedError('subclasses of SessionBase must provide a delete() method')
def load(self):
"""
Load the session data and return a dictionary.
"""
raise NotImplementedError('subclasses of SessionBase must provide a load() method')
@classmethod
def clear_expired(cls):
"""
Remove expired sessions from the session store.
If this operation isn't possible on a given backend, it should raise
NotImplementedError. If it isn't necessary, because the backend has
a built-in expiration mechanism, it should be a no-op.
"""
raise NotImplementedError('This backend does not support clear_expired().')
|
rosudrag/Freemium-winner | refs/heads/master | VirtualEnvironment/Lib/site-packages/sqlalchemy/dialects/mssql/adodbapi.py | 80 | # mssql/adodbapi.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+adodbapi
:name: adodbapi
:dbapi: adodbapi
:connectstring: mssql+adodbapi://<username>:<password>@<dsnname>
:url: http://adodbapi.sourceforge.net/
.. note::
The adodbapi dialect is not implemented in SQLAlchemy versions 0.6
and above at this time.
"""
import datetime
from sqlalchemy import types as sqltypes, util
from sqlalchemy.dialects.mssql.base import MSDateTime, MSDialect
import sys
class MSDateTime_adodbapi(MSDateTime):
def result_processor(self, dialect, coltype):
def process(value):
# adodbapi will return datetimes with empty time
# values as datetime.date() objects.
# Promote them back to full datetime.datetime()
if type(value) is datetime.date:
return datetime.datetime(value.year, value.month, value.day)
return value
return process
class MSDialect_adodbapi(MSDialect):
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_unicode = sys.maxunicode == 65535
supports_unicode_statements = True
driver = 'adodbapi'
@classmethod
def import_dbapi(cls):
import adodbapi as module
return module
colspecs = util.update_copy(
MSDialect.colspecs,
{
sqltypes.DateTime: MSDateTime_adodbapi
}
)
def create_connect_args(self, url):
keys = url.query
connectors = ["Provider=SQLOLEDB"]
if 'port' in keys:
connectors.append("Data Source=%s, %s" %
(keys.get("host"), keys.get("port")))
else:
connectors.append("Data Source=%s" % keys.get("host"))
connectors.append("Initial Catalog=%s" % keys.get("database"))
user = keys.get("user")
if user:
connectors.append("User Id=%s" % user)
connectors.append("Password=%s" % keys.get("password", ""))
else:
connectors.append("Integrated Security=SSPI")
return [[";".join(connectors)], {}]
def is_disconnect(self, e, connection, cursor):
return isinstance(e, self.dbapi.adodbapi.DatabaseError) and \
"'connection failure'" in str(e)
dialect = MSDialect_adodbapi
|
Nic30/HWToolkit | refs/heads/master | hwt/serializer/vhdl/value.py | 1 | from hdlConvertorAst.hdlAst import HdlValueId, HdlValueInt, HdlOp,\
HdlOpType
from hdlConvertorAst.to.hdlUtils import bit_string
from hdlConvertorAst.translate.common.name_scope import LanguageKeyword
from hwt.hdl.operator import Operator
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.bitsVal import BitsVal
from hwt.hdl.types.defs import BOOL, BIT
from hwt.hdl.types.enumVal import HEnumVal
from hwt.hdl.types.sliceVal import SliceVal
from hwt.hdl.value import HValue
from hwt.serializer.generic.value import ToHdlAst_Value
from hwt.synthesizer.rtlLevel.mainBases import RtlSignalBase
class ToHdlAstVhdl2008_Value(ToHdlAst_Value):
TRUE = HdlValueId("TRUE", obj=LanguageKeyword())
FALSE = HdlValueId("FALSE", obj=LanguageKeyword())
#TO_UNSIGNED = HdlValueId("TO_UNSIGNED", obj=LanguageKeyword())
#TO_SIGNED = HdlValueId("TO_SIGNED", obj=LanguageKeyword())
def as_hdl_cond(self, c, forceBool):
assert isinstance(c, (RtlSignalBase, HValue)), c
if not forceBool or c._dtype == BOOL:
return self.as_hdl(c)
elif c._dtype == BIT:
return self.as_hdl(c._eq(1))
elif isinstance(c._dtype, Bits):
return self.as_hdl(c != 0)
else:
raise NotImplementedError()
def as_hdl_HEnumVal(self, val: HEnumVal):
name = self.name_scope.get_object_name(val)
return HdlValueId(name, obj=val)
def as_hdl_HArrayVal(self, val):
return [self.as_hdl_Value(v) for v in val]
def sensitivityListItem(self, item, anyIsEventDependnt):
if isinstance(item, Operator):
item = item.operands[0]
return self.as_hdl(item)
def as_hdl_BitString(self, v, width: int,
force_vector: bool, vld_mask: int, signed):
is_bit = not force_vector and width == 1
#if vld_mask != mask(width) or width >= 32 or is_bit:
v = bit_string(v, width, vld_mask)
if is_bit:
v.base = 256
return v
if signed is None:
return v
elif signed:
cast = self.SIGNED
else:
cast = self.UNSIGNED
return HdlOp(HdlOpType.APOSTROPHE, [cast, v])
#else:
# v = HdlValueInt(v, None, None)
#
# if signed is None:
# return v
# elif signed:
# cast_fn = self.TO_SIGNED
# else:
# cast_fn = self.TO_UNSIGNED
# return hdl_call(cast_fn, [v, HdlValueInt(width, None, None)])
def as_hdl_BoolVal(self, val: BitsVal):
if val.val:
return self.TRUE
else:
return self.FALSE
def as_hdl_BitsVal(self, val: BitsVal):
t = val._dtype
v = super(ToHdlAstVhdl2008_Value, self).as_hdl_BitsVal(val)
# handle '1' vs "1" difference (bit literal vs vector)
if not t.force_vector and t.bit_length() == 1 and t != BOOL:
if isinstance(v, HdlValueInt):
v.base = 256
else:
# otherwise the value must be a cast (a CALL operator)
assert isinstance(v, HdlOp) and v.fn == HdlOpType.CALL, v
_v = v.ops[1]
if isinstance(_v, HdlValueInt):
_v.base = 256
else:
raise NotImplementedError()
return v
def as_hdl_SliceVal(self, val: SliceVal):
upper = val.val.start
if int(val.val.step) == -1:
if isinstance(upper, HValue):
upper = HdlValueInt(int(upper) - 1, None, None)
else:
upper = HdlOp(HdlOpType.SUB, [self.as_hdl_Value(upper),
HdlValueInt(1, None, None)])
else:
raise NotImplementedError(val.val.step)
return HdlOp(HdlOpType.DOWNTO, [upper, self.as_hdl(val.val.stop)])
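# Illustrative example (assumed input): a slice value with start=8, stop=0
# and step=-1 is rendered as the VHDL range "7 DOWNTO 0"; the upper bound
# is start - 1 because the slice upper bound is exclusive while a VHDL
# DOWNTO range is inclusive.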
|
konieboy/Seng_403 | refs/heads/master | Gender Computer/reader.py | 2 | import csv
from utf8Recorder import UTF8Recoder
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", delimiter=';', **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, delimiter=delimiter, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self |
MilletPu/airvis | refs/heads/airvis_multidimensional | src/main/webapp/js/ol3/v3.1.1/closure-library/closure/bin/build/source_test.py | 153 | #!/usr/bin/env python
#
# Copyright 2010 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for source."""
__author__ = 'nnaze@google.com (Nathan Naze)'
import unittest
import source
class SourceTestCase(unittest.TestCase):
"""Unit test for source. Tests the parser on a known source input."""
def testSourceScan(self):
test_source = source.Source(_TEST_SOURCE)
self.assertEqual(set(['foo', 'foo.test']),
test_source.provides)
self.assertEqual(set(['goog.dom', 'goog.events.EventType']),
test_source.requires)
self.assertFalse(test_source.is_goog_module)
def testSourceScanBase(self):
test_source = source.Source(_TEST_BASE_SOURCE)
self.assertEqual(set(['goog']),
test_source.provides)
self.assertEqual(test_source.requires, set())
self.assertFalse(test_source.is_goog_module)
def testSourceScanBadBase(self):
def MakeSource():
source.Source(_TEST_BAD_BASE_SOURCE)
self.assertRaises(Exception, MakeSource)
def testSourceScanGoogModule(self):
test_source = source.Source(_TEST_MODULE_SOURCE)
self.assertEqual(set(['foo']),
test_source.provides)
self.assertEqual(set(['bar']),
test_source.requires)
self.assertTrue(test_source.is_goog_module)
def testStripComments(self):
self.assertEquals(
'\nvar foo = function() {}',
source.Source._StripComments((
'/* This is\n'
' a comment split\n'
' over multiple lines\n'
'*/\n'
'var foo = function() {}')))
def testGoogStatementsInComments(self):
test_source = source.Source(_TEST_COMMENT_SOURCE)
self.assertEqual(set(['foo']),
test_source.provides)
self.assertEqual(set(['goog.events.EventType']),
test_source.requires)
self.assertFalse(test_source.is_goog_module)
def testHasProvideGoog(self):
self.assertTrue(source.Source._HasProvideGoogFlag(_TEST_BASE_SOURCE))
self.assertTrue(source.Source._HasProvideGoogFlag(_TEST_BAD_BASE_SOURCE))
self.assertFalse(source.Source._HasProvideGoogFlag(_TEST_COMMENT_SOURCE))
_TEST_MODULE_SOURCE = """
goog.module('foo');
var b = goog.require('bar');
"""
_TEST_SOURCE = """// Fake copyright notice
/** Very important comment. */
goog.provide('foo');
goog.provide('foo.test');
goog.require('goog.dom');
goog.require('goog.events.EventType');
function foo() {
// Set bar to seventeen to increase performance.
this.bar = 17;
}
"""
_TEST_COMMENT_SOURCE = """// Fake copyright notice
goog.provide('foo');
/*
goog.provide('foo.test');
*/
/*
goog.require('goog.dom');
*/
// goog.require('goog.dom');
goog.require('goog.events.EventType');
function bar() {
this.baz = 55;
}
"""
_TEST_BASE_SOURCE = """
/**
* @fileoverview The base file.
* @provideGoog
*/
var goog = goog || {};
"""
_TEST_BAD_BASE_SOURCE = """
/**
* @fileoverview The base file.
* @provideGoog
*/
goog.provide('goog');
"""
if __name__ == '__main__':
unittest.main()
|
kevalds51/sympy | refs/heads/master | sympy/utilities/autowrap.py | 37 | """Module for compiling codegen output, and wrapping the binary for use in
Python.
.. note:: To use the autowrap module it must first be imported
>>> from sympy.utilities.autowrap import autowrap
This module provides a common interface for different external backends, such
as f2py, fwrap, Cython, SWIG(?) etc. (Currently only f2py and Cython are
implemented) The goal is to provide access to compiled binaries of acceptable
performance with a one-button user interface, i.e.
>>> from sympy.abc import x,y
>>> expr = ((x - y)**(25)).expand()
>>> binary_callable = autowrap(expr)
>>> binary_callable(1, 2)
-1.0
The callable returned from autowrap() is a binary python function, not a
SymPy object. If it is desired to use the compiled function in symbolic
expressions, it is better to use binary_function() which returns a SymPy
Function object. The binary callable is attached as the _imp_ attribute and
invoked when a numerical evaluation is requested with evalf(), or with
lambdify().
>>> from sympy.utilities.autowrap import binary_function
>>> f = binary_function('f', expr)
>>> 2*f(x, y) + y
y + 2*f(x, y)
>>> (2*f(x, y) + y).evalf(2, subs={x: 1, y:2})
0.e-110
The idea is that a SymPy user will primarily be interested in working with
mathematical expressions, and should not have to learn details about wrapping
tools in order to evaluate expressions numerically, even if they are
computationally expensive.
When is this useful?
1) For computations on large arrays, Python iterations may be too slow,
and depending on the mathematical expression, it may be difficult to
exploit the advanced index operations provided by NumPy.
2) For *really* long expressions that will be called repeatedly, the
compiled binary should be significantly faster than SymPy's .evalf()
3) If you are generating code with the codegen utility in order to use
it in another project, the automatic python wrappers let you test the
binaries immediately from within SymPy.
4) To create customized ufuncs for use with numpy arrays.
See *ufuncify*.
When is this module NOT the best approach?
1) If you are really concerned about speed or memory optimizations,
you will probably get better results by working directly with the
wrapper tools and the low level code. However, the files generated
by this utility may provide a useful starting point and reference
code. Temporary files will be left intact if you supply the keyword
tempdir="path/to/files/".
2) If the array computation can be handled easily by numpy, and you
don't need the binaries for another project.
"""
from __future__ import print_function, division
_doctest_depends_on = {'exe': ('f2py', 'gfortran', 'gcc'), 'modules': ('numpy',)}
import sys
import os
import shutil
import tempfile
from subprocess import STDOUT, CalledProcessError
from string import Template
from sympy.core.cache import cacheit
from sympy.core.compatibility import check_output, range
from sympy.core.function import Lambda
from sympy.core.relational import Eq
from sympy.core.symbol import Dummy, Symbol
from sympy.tensor.indexed import Idx, IndexedBase
from sympy.utilities.codegen import (make_routine, get_code_generator,
OutputArgument, InOutArgument, InputArgument,
CodeGenArgumentListError, Result, ResultBase, CCodeGen)
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.decorator import doctest_depends_on
class CodeWrapError(Exception):
pass
class CodeWrapper(object):
"""Base Class for code wrappers"""
_filename = "wrapped_code"
_module_basename = "wrapper_module"
_module_counter = 0
@property
def filename(self):
return "%s_%s" % (self._filename, CodeWrapper._module_counter)
@property
def module_name(self):
return "%s_%s" % (self._module_basename, CodeWrapper._module_counter)
def __init__(self, generator, filepath=None, flags=[], verbose=False):
"""
generator -- the code generator to use
"""
self.generator = generator
self.filepath = filepath
self.flags = flags
self.quiet = not verbose
@property
def include_header(self):
return bool(self.filepath)
@property
def include_empty(self):
return bool(self.filepath)
def _generate_code(self, main_routine, routines):
routines.append(main_routine)
self.generator.write(
routines, self.filename, True, self.include_header,
self.include_empty)
def wrap_code(self, routine, helpers=[]):
workdir = self.filepath or tempfile.mkdtemp("_sympy_compile")
if not os.access(workdir, os.F_OK):
os.mkdir(workdir)
oldwork = os.getcwd()
os.chdir(workdir)
try:
sys.path.append(workdir)
self._generate_code(routine, helpers)
self._prepare_files(routine)
self._process_files(routine)
mod = __import__(self.module_name)
finally:
sys.path.remove(workdir)
CodeWrapper._module_counter += 1
os.chdir(oldwork)
if not self.filepath:
try:
shutil.rmtree(workdir)
except OSError:
# Could be some issues on Windows
pass
return self._get_wrapped_function(mod, routine.name)
def _process_files(self, routine):
command = self.command
command.extend(self.flags)
try:
retoutput = check_output(command, stderr=STDOUT)
except CalledProcessError as e:
raise CodeWrapError(
"Error while executing command: %s. Command output is:\n%s" % (
" ".join(command), e.output.decode()))
if not self.quiet:
print(retoutput)
class DummyWrapper(CodeWrapper):
"""Class used for testing independent of backends """
template = """# dummy module for testing of SymPy
def %(name)s():
return "%(expr)s"
%(name)s.args = "%(args)s"
%(name)s.returns = "%(retvals)s"
"""
def _prepare_files(self, routine):
return
def _generate_code(self, routine, helpers):
with open('%s.py' % self.module_name, 'w') as f:
printed = ", ".join(
[str(res.expr) for res in routine.result_variables])
# convert OutputArguments to return value like f2py
args = filter(lambda x: not isinstance(
x, OutputArgument), routine.arguments)
retvals = []
for val in routine.result_variables:
if isinstance(val, Result):
retvals.append('nameless')
else:
retvals.append(val.result_var)
print(DummyWrapper.template % {
'name': routine.name,
'expr': printed,
'args': ", ".join([str(a.name) for a in args]),
'retvals': ", ".join([str(val) for val in retvals])
}, end="", file=f)
def _process_files(self, routine):
return
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
class CythonCodeWrapper(CodeWrapper):
"""Wrapper that uses Cython"""
setup_template = (
"from distutils.core import setup\n"
"from distutils.extension import Extension\n"
"from Cython.Distutils import build_ext\n"
"{np_import}"
"\n"
"setup(\n"
" cmdclass = {{'build_ext': build_ext}},\n"
" ext_modules = [Extension({ext_args},\n"
" extra_compile_args=['-std=c99'])],\n"
"{np_includes}"
" )")
pyx_imports = (
"import numpy as np\n"
"cimport numpy as np\n\n")
pyx_header = (
"cdef extern from '{header_file}.h':\n"
" {prototype}\n\n")
pyx_func = (
"def {name}_c({arg_string}):\n"
"\n"
"{declarations}"
"{body}")
def __init__(self, *args, **kwargs):
super(CythonCodeWrapper, self).__init__(*args, **kwargs)
self._need_numpy = False
@property
def command(self):
command = [sys.executable, "setup.py", "build_ext", "--inplace"]
return command
def _prepare_files(self, routine):
pyxfilename = self.module_name + '.pyx'
codefilename = "%s.%s" % (self.filename, self.generator.code_extension)
# pyx
with open(pyxfilename, 'w') as f:
self.dump_pyx([routine], f, self.filename)
# setup.py
ext_args = [repr(self.module_name), repr([pyxfilename, codefilename])]
if self._need_numpy:
np_import = 'import numpy as np\n'
np_includes = ' include_dirs = [np.get_include()],\n'
else:
np_import = ''
np_includes = ''
with open('setup.py', 'w') as f:
f.write(self.setup_template.format(ext_args=", ".join(ext_args),
np_import=np_import,
np_includes=np_includes))
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name + '_c')
def dump_pyx(self, routines, f, prefix):
"""Write a Cython file with python wrappers
This file contains all the definitions of the routines in c code and
refers to the header file.
Arguments
---------
routines
List of Routine instances
f
File-like object to write the file to
prefix
The filename prefix, used to refer to the proper header file.
Only the basename of the prefix is used.
"""
headers = []
functions = []
for routine in routines:
prototype = self.generator.get_prototype(routine)
# C Function Header Import
headers.append(self.pyx_header.format(header_file=prefix,
prototype=prototype))
# Partition the C function arguments into categories
py_rets, py_args, py_loc, py_inf = self._partition_args(routine.arguments)
# Function prototype
name = routine.name
arg_string = ", ".join(self._prototype_arg(arg) for arg in py_args)
# Local Declarations
local_decs = []
for arg, val in py_inf.items():
proto = self._prototype_arg(arg)
mat, ind = val
local_decs.append(" cdef {0} = {1}.shape[{2}]".format(proto, mat, ind))
local_decs.extend([" cdef {0}".format(self._declare_arg(a)) for a in py_loc])
declarations = "\n".join(local_decs)
if declarations:
declarations = declarations + "\n"
# Function Body
args_c = ", ".join([self._call_arg(a) for a in routine.arguments])
rets = ", ".join([str(r.name) for r in py_rets])
if routine.results:
body = ' return %s(%s)' % (routine.name, args_c)
if rets:
body = body + ', ' + rets
else:
body = ' %s(%s)\n' % (routine.name, args_c)
body = body + ' return ' + rets
functions.append(self.pyx_func.format(name=name, arg_string=arg_string,
declarations=declarations, body=body))
# Write text to file
if self._need_numpy:
# Only import numpy if required
f.write(self.pyx_imports)
f.write('\n'.join(headers))
f.write('\n'.join(functions))
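# Illustrative sketch of dump_pyx() output (hypothetical routine; the real
# output depends on the module counter and argument categories): for a C
# routine 'double autofunc(double x, double y)' the generated .pyx is
# roughly:
#
#   cdef extern from 'wrapped_code_0.h':
#       double autofunc(double x, double y)
#
#   def autofunc_c(double x, double y):
#       return autofunc(x, y)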
def _partition_args(self, args):
"""Group function arguments into categories."""
py_args = []
py_returns = []
py_locals = []
py_inferred = {}
for arg in args:
if isinstance(arg, OutputArgument):
py_returns.append(arg)
py_locals.append(arg)
elif isinstance(arg, InOutArgument):
py_returns.append(arg)
py_args.append(arg)
else:
py_args.append(arg)
# Find arguments that are array dimensions. These can be inferred
# locally in the Cython code.
if isinstance(arg, (InputArgument, InOutArgument)) and arg.dimensions:
dims = [d[1] + 1 for d in arg.dimensions]
sym_dims = [(i, d) for (i, d) in enumerate(dims) if isinstance(d, Symbol)]
for (i, d) in sym_dims:
py_inferred[d] = (arg.name, i)
for arg in args:
if arg.name in py_inferred:
py_inferred[arg] = py_inferred.pop(arg.name)
# Filter inferred arguments from py_args
py_args = [a for a in py_args if a not in py_inferred]
return py_returns, py_args, py_locals, py_inferred
def _prototype_arg(self, arg):
mat_dec = "np.ndarray[{mtype}, ndim={ndim}] {name}"
np_types = {'double': 'np.double_t',
'int': 'np.int_t'}
t = arg.get_datatype('c')
if arg.dimensions:
self._need_numpy = True
ndim = len(arg.dimensions)
mtype = np_types[t]
return mat_dec.format(mtype=mtype, ndim=ndim, name=arg.name)
else:
return "%s %s" % (t, str(arg.name))
def _declare_arg(self, arg):
proto = self._prototype_arg(arg)
if arg.dimensions:
shape = '(' + ','.join(str(i[1] + 1) for i in arg.dimensions) + ')'
return proto + " = np.empty({shape})".format(shape=shape)
else:
return proto + " = 0"
def _call_arg(self, arg):
if arg.dimensions:
t = arg.get_datatype('c')
return "<{0}*> {1}.data".format(t, arg.name)
elif isinstance(arg, ResultBase):
return "&{0}".format(arg.name)
else:
return str(arg.name)
class F2PyCodeWrapper(CodeWrapper):
"""Wrapper that uses f2py"""
@property
def command(self):
filename = self.filename + '.' + self.generator.code_extension
args = ['-c', '-m', self.module_name, filename]
command = [sys.executable, "-c", "import numpy.f2py as f2py2e;f2py2e.main()"]+args
return command
def _prepare_files(self, routine):
pass
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
def _get_code_wrapper_class(backend):
wrappers = {'F2PY': F2PyCodeWrapper, 'CYTHON': CythonCodeWrapper,
'DUMMY': DummyWrapper}
return wrappers[backend.upper()]
# Here we define a lookup of backends -> tuples of languages. For now, each
# tuple is of length 1, but if a backend supports more than one language,
# the most preferable language is listed first.
_lang_lookup = {'CYTHON': ('C',),
'F2PY': ('F95',),
'NUMPY': ('C',),
'DUMMY': ('F95',)} # Dummy here just for testing
def _infer_language(backend):
"""For a given backend, return the top choice of language"""
langs = _lang_lookup.get(backend.upper(), False)
if not langs:
raise ValueError("Unrecognized backend: " + backend)
return langs[0]
def _validate_backend_language(backend, language):
"""Throws error if backend and language are incompatible"""
langs = _lang_lookup.get(backend.upper(), False)
if not langs:
raise ValueError("Unrecognized backend: " + backend)
if language.upper() not in langs:
raise ValueError(("Backend {0} and language {1} are "
"incompatible").format(backend, language))
@cacheit
@doctest_depends_on(exe=('f2py', 'gfortran'), modules=('numpy',))
def autowrap(
expr, language=None, backend='f2py', tempdir=None, args=None, flags=None,
verbose=False, helpers=None):
"""Generates python callable binaries based on the math expression.
Parameters
----------
expr
The SymPy expression that should be wrapped as a binary routine.
language : string, optional
If supplied, (options: 'C' or 'F95'), specifies the language of the
generated code. If ``None`` [default], the language is inferred based
upon the specified backend.
backend : string, optional
Backend used to wrap the generated code. Either 'f2py' [default],
or 'cython'.
tempdir : string, optional
Path to directory for temporary files. If this argument is supplied,
the generated code and the wrapper input files are left intact in the
specified path.
args : iterable, optional
An iterable of symbols. Specifies the argument sequence for the function.
flags : iterable, optional
Additional option flags that will be passed to the backend.
verbose : bool, optional
If True, autowrap will not mute the command line backends. This can be
helpful for debugging.
helpers : iterable, optional
Used to define auxiliary expressions needed for the main expr. If the
main expression needs to call a specialized function it should be put
in the ``helpers`` iterable. Autowrap will then make sure that the
compiled main expression can link to the helper routine. Items should
be tuples with (<function_name>, <sympy_expression>, <arguments>). It
is mandatory to supply an argument sequence to helper routines.
>>> from sympy.abc import x, y, z
>>> from sympy.utilities.autowrap import autowrap
>>> expr = ((x - y + z)**(13)).expand()
>>> binary_func = autowrap(expr)
>>> binary_func(1, 4, 2)
-1.0
"""
if language:
_validate_backend_language(backend, language)
else:
language = _infer_language(backend)
helpers = helpers if helpers else ()
flags = flags if flags else ()
code_generator = get_code_generator(language, "autowrap")
CodeWrapperClass = _get_code_wrapper_class(backend)
code_wrapper = CodeWrapperClass(code_generator, tempdir, flags, verbose)
try:
routine = make_routine('autofunc', expr, args)
except CodeGenArgumentListError as e:
# if all missing arguments are for pure output, we simply attach them
# at the end and try again, because the wrappers will silently convert
# them to return values anyway.
new_args = []
for missing in e.missing_args:
if not isinstance(missing, OutputArgument):
raise
new_args.append(missing.name)
routine = make_routine('autofunc', expr, args + new_args)
helps = []
for name, expr, args in helpers:
helps.append(make_routine(name, expr, args))
return code_wrapper.wrap_code(routine, helpers=helps)
@doctest_depends_on(exe=('f2py', 'gfortran'), modules=('numpy',))
def binary_function(symfunc, expr, **kwargs):
"""Returns a sympy function with expr as binary implementation
This is a convenience function that automates the steps needed to
autowrap the SymPy expression and attaching it to a Function object
with implemented_function().
>>> from sympy.abc import x, y
>>> from sympy.utilities.autowrap import binary_function
>>> expr = ((x - y)**(25)).expand()
>>> f = binary_function('f', expr)
>>> type(f)
<class 'sympy.core.function.UndefinedFunction'>
>>> 2*f(x, y)
2*f(x, y)
>>> f(x, y).evalf(2, subs={x: 1, y: 2})
-1.0
"""
binary = autowrap(expr, **kwargs)
return implemented_function(symfunc, binary)
#################################################################
# UFUNCIFY #
#################################################################
_ufunc_top = Template("""\
#include "Python.h"
#include "math.h"
#include "numpy/ndarraytypes.h"
#include "numpy/ufuncobject.h"
#include "numpy/halffloat.h"
#include ${include_file}
static PyMethodDef ${module}Methods[] = {
{NULL, NULL, 0, NULL}
};""")
_ufunc_body = Template("""\
static void ${funcname}_ufunc(char **args, npy_intp *dimensions, npy_intp* steps, void* data)
{
npy_intp i;
npy_intp n = dimensions[0];
${declare_args}
${declare_steps}
for (i = 0; i < n; i++) {
*((double *)out1) = ${funcname}(${call_args});
${step_increments}
}
}
PyUFuncGenericFunction ${funcname}_funcs[1] = {&${funcname}_ufunc};
static char ${funcname}_types[${n_types}] = ${types}
static void *${funcname}_data[1] = {NULL};""")
_ufunc_bottom = Template("""\
#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"${module}",
NULL,
-1,
${module}Methods,
NULL,
NULL,
NULL,
NULL
};
PyMODINIT_FUNC PyInit_${module}(void)
{
PyObject *m, *d;
${function_creation}
m = PyModule_Create(&moduledef);
if (!m) {
return NULL;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
${ufunc_init}
return m;
}
#else
PyMODINIT_FUNC init${module}(void)
{
PyObject *m, *d;
${function_creation}
m = Py_InitModule("${module}", ${module}Methods);
if (m == NULL) {
return;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
${ufunc_init}
}
#endif\
""")
_ufunc_init_form = Template("""\
ufunc${ind} = PyUFunc_FromFuncAndData(${funcname}_funcs, ${funcname}_data, ${funcname}_types, 1, ${n_in}, ${n_out},
PyUFunc_None, "${module}", ${docstring}, 0);
PyDict_SetItemString(d, "${funcname}", ufunc${ind});
Py_DECREF(ufunc${ind});""")
_ufunc_setup = Template("""\
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('',
parent_package,
top_path)
config.add_extension('${module}', sources=['${module}.c', '${filename}.c'])
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(configuration=configuration)""")
class UfuncifyCodeWrapper(CodeWrapper):
"""Wrapper for Ufuncify"""
@property
def command(self):
command = [sys.executable, "setup.py", "build_ext", "--inplace"]
return command
def _prepare_files(self, routine):
# C
codefilename = self.module_name + '.c'
with open(codefilename, 'w') as f:
self.dump_c([routine], f, self.filename)
# setup.py
with open('setup.py', 'w') as f:
self.dump_setup(f)
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
def dump_setup(self, f):
setup = _ufunc_setup.substitute(module=self.module_name,
filename=self.filename)
f.write(setup)
def dump_c(self, routines, f, prefix):
"""Write a C file with python wrappers
This file contains all the definitions of the routines in c code.
Arguments
---------
routines
List of Routine instances
f
File-like object to write the file to
prefix
The filename prefix, used to name the imported module.
"""
functions = []
function_creation = []
ufunc_init = []
module = self.module_name
include_file = "\"{0}.h\"".format(prefix)
top = _ufunc_top.substitute(include_file=include_file, module=module)
for r_index, routine in enumerate(routines):
name = routine.name
# Partition the C function arguments into categories
py_in, py_out = self._partition_args(routine.arguments)
n_in = len(py_in)
n_out = 1
# Declare Args
form = "char *{0}{1} = args[{2}];"
arg_decs = [form.format('in', i, i) for i in range(n_in)]
arg_decs.append(form.format('out', 1, n_in))
declare_args = '\n '.join(arg_decs)
# Declare Steps
form = "npy_intp {0}{1}_step = steps[{2}];"
step_decs = [form.format('in', i, i) for i in range(n_in)]
step_decs.append(form.format('out', 1, n_in))
declare_steps = '\n '.join(step_decs)
# Call Args
form = "*(double *)in{0}"
call_args = ', '.join([form.format(a) for a in range(n_in)])
# Step Increments
form = "{0}{1} += {0}{1}_step;"
step_incs = [form.format('in', i) for i in range(n_in)]
step_incs.append(form.format('out', 1))
step_increments = '\n '.join(step_incs)
# Types
n_types = n_in + n_out
types = "{" + ', '.join(["NPY_DOUBLE"]*n_types) + "};"
# Docstring
docstring = '"Created in SymPy with Ufuncify"'
# Function Creation
function_creation.append("PyObject *ufunc{0};".format(r_index))
# Ufunc initialization
init_form = _ufunc_init_form.substitute(module=module,
funcname=name,
docstring=docstring,
n_in=n_in, n_out=n_out,
ind=r_index)
ufunc_init.append(init_form)
body = _ufunc_body.substitute(module=module, funcname=name,
declare_args=declare_args,
declare_steps=declare_steps,
call_args=call_args,
step_increments=step_increments,
n_types=n_types, types=types)
functions.append(body)
body = '\n\n'.join(functions)
ufunc_init = '\n '.join(ufunc_init)
function_creation = '\n '.join(function_creation)
bottom = _ufunc_bottom.substitute(module=module,
ufunc_init=ufunc_init,
function_creation=function_creation)
text = [top, body, bottom]
f.write('\n\n'.join(text))
def _partition_args(self, args):
"""Group function arguments into categories."""
py_in = []
py_out = []
for arg in args:
if isinstance(arg, OutputArgument):
if py_out:
msg = "Ufuncify doesn't support multiple OutputArguments"
raise ValueError(msg)
py_out.append(arg)
elif isinstance(arg, InOutArgument):
raise ValueError("Ufuncify doesn't support InOutArguments")
else:
py_in.append(arg)
return py_in, py_out
@cacheit
@doctest_depends_on(exe=('f2py', 'gfortran', 'gcc'), modules=('numpy',))
def ufuncify(args, expr, language=None, backend='numpy', tempdir=None,
flags=None, verbose=False, helpers=None):
"""Generates a binary function that supports broadcasting on numpy arrays.
Parameters
----------
args : iterable
Either a Symbol or an iterable of symbols. Specifies the argument
sequence for the function.
expr
A SymPy expression that defines the element wise operation.
language : string, optional
If supplied, (options: 'C' or 'F95'), specifies the language of the
generated code. If ``None`` [default], the language is inferred based
upon the specified backend.
backend : string, optional
Backend used to wrap the generated code. Either 'numpy' [default],
'cython', or 'f2py'.
tempdir : string, optional
Path to directory for temporary files. If this argument is supplied,
the generated code and the wrapper input files are left intact in the
specified path.
flags : iterable, optional
Additional option flags that will be passed to the backend
verbose : bool, optional
If True, autowrap will not mute the command line backends. This can be
helpful for debugging.
helpers : iterable, optional
Used to define auxiliary expressions needed for the main expr. If the
main expression needs to call a specialized function it should be put
in the ``helpers`` iterable. Autowrap will then make sure that the
compiled main expression can link to the helper routine. Items should
be tuples with (<function_name>, <sympy_expression>, <arguments>). It
is mandatory to supply an argument sequence to helper routines.
Note
----
The default backend ('numpy') will create actual instances of
``numpy.ufunc``. These support ndimensional broadcasting, and implicit type
conversion. Use of the other backends will result in a "ufunc-like"
function, which requires equal length 1-dimensional arrays for all
arguments, and will not perform any type conversions.
References
----------
[1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html
Examples
========
>>> from sympy.utilities.autowrap import ufuncify
>>> from sympy.abc import x, y
>>> import numpy as np
>>> f = ufuncify((x, y), y + x**2)
>>> type(f)
numpy.ufunc
>>> f([1, 2, 3], 2)
array([ 3., 6., 11.])
>>> f(np.arange(5), 3)
array([ 3., 4., 7., 12., 19.])
For the F2Py and Cython backends, inputs are required to be equal length
1-dimensional arrays. The F2Py backend will perform type conversion, but
the Cython backend will error if the inputs are not of the expected type.
>>> f_fortran = ufuncify((x, y), y + x**2, backend='F2Py')
>>> f_fortran(1, 2)
3
>>> f_fortran(np.array([1, 2, 3]), np.array([1.0, 2.0, 3.0]))
array([2., 6., 12.])
>>> f_cython = ufuncify((x, y), y + x**2, backend='Cython')
>>> f_cython(1, 2)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: Argument '_x' has incorrect type (expected numpy.ndarray, got int)
>>> f_cython(np.array([1.0]), np.array([2.0]))
array([ 3.])
"""
if isinstance(args, Symbol):
args = (args,)
else:
args = tuple(args)
if language:
_validate_backend_language(backend, language)
else:
language = _infer_language(backend)
helpers = helpers if helpers else ()
flags = flags if flags else ()
if backend.upper() == 'NUMPY':
routine = make_routine('autofunc', expr, args)
helps = []
for name, expr, args in helpers:
helps.append(make_routine(name, expr, args))
code_wrapper = UfuncifyCodeWrapper(CCodeGen("ufuncify"), tempdir,
flags, verbose)
return code_wrapper.wrap_code(routine, helpers=helps)
else:
# Dummies are used for all added expressions to prevent name clashes
# within the original expression.
y = IndexedBase(Dummy())
m = Dummy(integer=True)
i = Idx(Dummy(integer=True), m)
f = implemented_function(Dummy().name, Lambda(args, expr))
# For each of the args create an indexed version.
indexed_args = [IndexedBase(Dummy(str(a))) for a in args]
# Order the arguments (out, args, dim)
args = [y] + indexed_args + [m]
args_with_indices = [a[i] for a in indexed_args]
return autowrap(Eq(y[i], f(*args_with_indices)), language, backend,
tempdir, args, flags, verbose, helpers)
|
lanyuwen/openthread | refs/heads/master | tools/harness-automation/cases_R140/reed_5_5_5.py | 18 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class REED_5_5_5(HarnessCase):
role = HarnessCase.ROLE_REED
case = '5 5 5'
golden_devices_required = 16
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
|
warped-rudi/linux-sunxi | refs/heads/sunxi-3.4 | tools/perf/scripts/python/netdev-times.py | 11271 | # Display the processing flow of packets and the time spent at each stage.
# It helps us investigate the networking stack and network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
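# example usage (after recording the relevant tracepoints with perf record):
#   perf script -s netdev-times.py
#   perf script -s netdev-times.py tx dev=eth0    # hypothetical device name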
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # all tracepoint events related to this script are inserted here
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
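# when a list exceeds buffer_budget, the oldest entry (the one pushed
# earliest via insert(0, ...)) is popped and the matching of_count_*
# counter below is incremented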
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which have been freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval (msec) from src (nsec) to dst (nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
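# e.g. diff_msec(1000000, 3500000) == 2.5 (2,500,000 nsec is 2.5 msec)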
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
|
aktech/sympy | refs/heads/master | sympy/geometry/tests/test_plane.py | 22 | from __future__ import division
from sympy import Dummy, S, Symbol, pi, sqrt, asin
from sympy.geometry import Line, Point, Ray, Segment, Point3D, Line3D, Ray3D, Segment3D, Plane
from sympy.geometry.util import are_coplanar
from sympy.utilities.pytest import raises, slow
@slow
def test_plane():
x = Symbol('x', real=True)
y = Symbol('y', real=True)
z = Symbol('z', real=True)
p1 = Point3D(0, 0, 0)
p2 = Point3D(1, 1, 1)
p3 = Point3D(1, 2, 3)
p4 = Point3D(x, x, x)
p5 = Point3D(y, y, y)
pl3 = Plane(p1, p2, p3)
pl4 = Plane(p1, normal_vector=(1, 1, 1))
pl4b = Plane(p1, p2)
pl5 = Plane(p3, normal_vector=(1, 2, 3))
pl6 = Plane(Point3D(2, 3, 7), normal_vector=(2, 2, 2))
pl7 = Plane(Point3D(1, -5, -6), normal_vector=(1, -2, 1))
l1 = Line3D(Point3D(5, 0, 0), Point3D(1, -1, 1))
l2 = Line3D(Point3D(0, -2, 0), Point3D(3, 1, 1))
l3 = Line3D(Point3D(0, -1, 0), Point3D(5, -1, 9))
assert Plane(p1, p2, p3) != Plane(p1, p3, p2)
assert Plane(p1, p2, p3).is_coplanar(Plane(p1, p3, p2))
assert pl3 == Plane(Point3D(0, 0, 0), normal_vector=(1, -2, 1))
assert pl3 != pl4
assert pl4 == pl4b
assert pl5 == Plane(Point3D(1, 2, 3), normal_vector=(1, 2, 3))
assert pl5.equation(x, y, z) == x + 2*y + 3*z - 14
assert pl3.equation(x, y, z) == x - 2*y + z
assert pl3.p1 == p1
assert pl4.p1 == p1
assert pl5.p1 == p3
assert pl4.normal_vector == (1, 1, 1)
assert pl5.normal_vector == (1, 2, 3)
assert p1 in pl3
assert p1 in pl4
assert p3 in pl5
assert pl3.projection(Point(0, 0)) == p1
p = pl3.projection(Point3D(1, 1, 0))
assert p == Point3D(7/6, 2/3, 1/6)
assert p in pl3
l = pl3.projection_line(Line(Point(0, 0), Point(1, 1)))
assert l == Line3D(Point3D(0, 0, 0), Point3D(7/6, 2/3, 1/6))
assert l in pl3
    # get a segment that does not intersect the plane and that is
    # parallel to pl3's normal vector
t = Dummy()
r = pl3.random_point()
a = pl3.perpendicular_line(r).arbitrary_point(t)
s = Segment3D(a.subs(t, 1), a.subs(t, 2))
assert s.p1 not in pl3 and s.p2 not in pl3
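    # the segment lies on the line through r normal to pl3, so projecting it
    # onto the plane collapses it to the single point r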
assert pl3.projection_line(s).equals(r)
assert pl3.projection_line(Segment(Point(1, 0), Point(1, 1))) == \
Segment3D(Point3D(5/6, 1/3, -1/6), Point3D(7/6, 2/3, 1/6))
assert pl6.projection_line(Ray(Point(1, 0), Point(1, 1))) == \
Ray3D(Point3D(14/3, 11/3, 11/3), Point3D(13/3, 13/3, 10/3))
assert pl3.perpendicular_line(r.args) == pl3.perpendicular_line(r)
assert pl3.is_parallel(pl6) is False
assert pl4.is_parallel(pl6)
assert pl6.is_parallel(l1) is False
assert pl3.is_perpendicular(pl6)
assert pl4.is_perpendicular(pl7)
assert pl6.is_perpendicular(pl7)
assert pl6.is_perpendicular(l1) is False
assert pl7.distance(Point3D(1, 3, 5)) == 5*sqrt(6)/6
assert pl6.distance(Point3D(0, 0, 0)) == 4*sqrt(3)
assert pl6.distance(pl6.p1) == 0
assert pl7.distance(pl6) == 0
assert pl7.distance(l1) == 0
assert pl6.distance(Segment3D(Point3D(2, 3, 1), Point3D(1, 3, 4))) == 0
    assert pl6.distance(Plane(Point3D(5, 5, 5), normal_vector=(8, 8, 8))) == sqrt(3)
assert pl6.angle_between(pl3) == pi/2
assert pl6.angle_between(pl6) == 0
assert pl6.angle_between(pl4) == 0
assert pl7.angle_between(Line3D(Point3D(2, 3, 5), Point3D(2, 4, 6))) == \
-asin(sqrt(3)/6)
assert pl6.angle_between(Ray3D(Point3D(2, 4, 1), Point3D(6, 5, 3))) == \
asin(sqrt(7)/3)
assert pl7.angle_between(Segment3D(Point3D(5, 6, 1), Point3D(1, 2, 4))) == \
-asin(7*sqrt(246)/246)
assert are_coplanar(l1, l2, l3) is False
assert are_coplanar(l1) is False
assert are_coplanar(Point3D(2, 7, 2), Point3D(0, 0, 2),
Point3D(1, 1, 2), Point3D(1, 2, 2))
assert are_coplanar(Plane(p1, p2, p3), Plane(p1, p3, p2))
assert Plane.are_concurrent(pl3, pl4, pl5) is False
assert Plane.are_concurrent(pl6) is False
raises(ValueError, lambda: Plane.are_concurrent(Point3D(0, 0, 0)))
assert pl3.parallel_plane(Point3D(1, 2, 5)) == Plane(Point3D(1, 2, 5), \
normal_vector=(1, -2, 1))
# perpendicular_plane
p = Plane((0, 0, 0), (1, 0, 0))
# default
assert p.perpendicular_plane() == Plane(Point3D(0, 0, 0), (0, 1, 0))
# 1 pt
assert p.perpendicular_plane(Point3D(1, 0, 1)) == \
Plane(Point3D(1, 0, 1), (0, 1, 0))
# pts as tuples
assert p.perpendicular_plane((1, 0, 1), (1, 1, 1)) == \
Plane(Point3D(1, 0, 1), (0, 0, -1))
a, b = Point3D(0, 0, 0), Point3D(0, 1, 0)
Z = (0, 0, 1)
p = Plane(a, normal_vector=Z)
# case 4
assert p.perpendicular_plane(a, b) == Plane(a, (1, 0, 0))
n = Point3D(*Z)
# case 1
assert p.perpendicular_plane(a, n) == Plane(a, (-1, 0, 0))
# case 2
assert Plane(a, normal_vector=b.args).perpendicular_plane(a, a + b) == \
Plane(Point3D(0, 0, 0), (1, 0, 0))
# case 1&3
assert Plane(b, normal_vector=Z).perpendicular_plane(b, b + n) == \
Plane(Point3D(0, 1, 0), (-1, 0, 0))
# case 2&3
assert Plane(b, normal_vector=b.args).perpendicular_plane(n, n + b) == \
Plane(Point3D(0, 0, 1), (1, 0, 0))
assert pl6.intersection(pl6) == [pl6]
assert pl4.intersection(pl4.p1) == [pl4.p1]
assert pl3.intersection(pl6) == [
Line3D(Point3D(8, 4, 0), Point3D(2, 4, 6))]
assert pl3.intersection(Line3D(Point3D(1,2,4), Point3D(4,4,2))) == [
Point3D(2, 8/3, 10/3)]
assert pl3.intersection(Plane(Point3D(6, 0, 0), normal_vector=(2, -5, 3))
) == [Line3D(Point3D(-24, -12, 0), Point3D(-25, -13, -1))]
assert pl6.intersection(Ray3D(Point3D(2, 3, 1), Point3D(1, 3, 4))) == [
Point3D(-1, 3, 10)]
assert pl6.intersection(Segment3D(Point3D(2, 3, 1), Point3D(1, 3, 4))) == [
Point3D(-1, 3, 10)]
assert pl7.intersection(Line(Point(2, 3), Point(4, 2))) == [
Point3D(13/2, 3/4, 0)]
r = Ray(Point(2, 3), Point(4, 2))
assert Plane((1,2,0), normal_vector=(0,0,1)).intersection(r) == [
Ray3D(Point(2, 3), Point(4, 2))]
assert pl3.random_point() in pl3
# issue 8570
l2 = Line3D(Point3D(S(50000004459633)/5000000000000,
-S(891926590718643)/1000000000000000,
S(231800966893633)/100000000000000),
Point3D(S(50000004459633)/50000000000000,
-S(222981647679771)/250000000000000,
S(231800966893633)/100000000000000))
p2 = Plane(Point3D(S(402775636372767)/100000000000000,
-S(97224357654973)/100000000000000,
S(216793600814789)/100000000000000),
(-S('9.00000087501922'), -S('4.81170658872543e-13'),
S('0.0')))
assert str([i.n(2) for i in p2.intersection(l2)]) == \
'[Point3D(4.0, -0.89, 2.3)]'
|
boxycoin/boxycoin | refs/heads/master | contrib/wallettools/walletunlock.py | 2299 | from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:8332")
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60) |
EvanK/ansible | refs/heads/devel | lib/ansible/modules/network/fortios/fortios_log_webtrends_setting.py | 23 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the library uses Python logging; its output can be captured by enabling
# logging in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_webtrends_setting
short_description: Settings for WebTrends in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify log_webtrends feature and setting category.
      Examples include all parameters and values, which need to be adjusted to data sources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
log_webtrends_setting:
description:
- Settings for WebTrends.
default: null
suboptions:
server:
description:
- Address of the remote WebTrends server.
status:
description:
- Enable/disable logging to WebTrends.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Settings for WebTrends.
fortios_log_webtrends_setting:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_webtrends_setting:
server: "192.168.100.40"
status: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_log_webtrends_setting_data(json):
option_list = ['server', 'status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def flatten_multilists_attributes(data):
multilist_attrs = []
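    # For this module multilist_attrs is empty, so the loop below is a no-op.
    # Sibling fortios modules list nested key paths here (e.g. ['member'],
    # illustrative); each matching list value is flattened into a
    # space-separated string via the eval/exec on the built dictionary path.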
for attr in multilist_attrs:
try:
path = "data['" + "']['".join(elem for elem in attr) + "']"
current_val = eval(path)
flattened_val = ' '.join(elem for elem in current_val)
exec(path + '= flattened_val')
except BaseException:
pass
return data
def log_webtrends_setting(data, fos):
vdom = data['vdom']
log_webtrends_setting_data = data['log_webtrends_setting']
flattened_data = flatten_multilists_attributes(log_webtrends_setting_data)
filtered_data = filter_log_webtrends_setting_data(flattened_data)
return fos.set('log.webtrends',
'setting',
data=filtered_data,
vdom=vdom)
def fortios_log_webtrends(data, fos):
login(data)
if data['log_webtrends_setting']:
resp = log_webtrends_setting(data, fos)
fos.logout()
    return resp['status'] != "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"log_webtrends_setting": {
"required": False, "type": "dict",
"options": {
"server": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_log_webtrends(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
cfarquhar/rpc-openstack | refs/heads/master | scripts/alarmparser.py | 3 | # Rackspace Monitoring as a Service (MaaS) Alarm Language parser.
# Generated by the Waxeye Parser Generator - version 0.8.0
# www.waxeye.org
# Generated from the following grammar (retrieved from
# https://github.rackspace.com/CloudMonitoring/ele/blob/master/grammar/alarm.waxeye)
# AlarmDsl <- Ws *SetStatement Ws *IfStatement Ws *Return
#
#
# MetricName <- :'metric[' ( ( :'"' Name :'"' ) | ( :"'" Name :"'" ) ) :']'
#
# Param <- Ws (MetricName | Number) Ws
# FunctionName <- +([a-zA-Z_])
#
# Metric <= FunctionName :'(' ( Param | Param *( :',' Ws Param ) ) :')'
# | MetricName
#
#
# Name <- +([a-zA-Z_-]
# | ' '
# | [0-9]
# | '.'
# | '/')
#
#
# InExp <= Metric Ws Rhs
#
# Rhs <- NOp Ws NValue
# | TOp Ws TValue
# | CTOp Ws String
#
# CondExp <= Ws (:'(' Ws InExp Ws :')'
# | InExp ) Ws
#
# Conj <- "&&"
# | "||"
#
# IfStatement <- :'if' Ws
# :'(' Ws CondExp
# *(Conj Ws CondExp)
# :')' Ws Block Ws
#
# Block <- :'{' Ws Return Ws :'}'
#
# # Number comparators
# NValue <- Number
# | Metric
#
# # Text metrics, not the same comparators
# TValue <- String
# | Metric
#
# String <- ( :'"'
# *( :'\\' ( Unicode | Escaped )
# | !'\\' !'"' . )
# :'"' )
# |
# ( :"'"
# *( :'\\' ( Unicode | Escaped )
# | !'\\' !"'" . )
# :"'" )
#
#
# Unicode <- 'u' [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]
#
# Escaped <- ["'/\\bfnrt]
#
# Number <- ?'-'
# ('0' | [1-9] *[0-9])
# ?('.' +[0-9])
# ?([eE] ?[+-] +[0-9])
#
# WholeNumber <- [1-9] *[0-9]
#
# AlarmStatusArgs <- AlertStates ?( Ws :',' Ws AlertStateReason )
#
# DeprecatedAlarmStatus <- AlarmStatusArgs
#
# AlarmStatus <- :'new' Ws :'AlarmStatus(' Ws AlarmStatusArgs Ws :')' Ws :';'
#
# Return <- :'return ' Ws ( AlarmStatus | DeprecatedAlarmStatus ) Ws
#
# AlertStates <- 'CRITICAL'
# | 'OK'
# | 'WARNING'
#
# AlertStateReason <- String
#
# # Number operators that take metric or constant numbers on both sides
# NOp <- ('>='
# | '<='
# | '>'
# | '<'
# | '=='
# | '!=') Ws
#
# # Text operators that take metric or constant strings on both sides
# TOp <- ('==' | '!=') Ws
#
# # Text operators that only take constant strings on the rhs
# CTOp <- ('nregex' | 'regex') Ws
#
#
# SetStatement <- :':set' Ws InSetStatement Ws
#
#
# InSetStatement <= SetConsistencyLevel
# | SetConsecutiveCount
# | SetDimensionFilter
#
#
# ConsistencyLevel <- 'ONE'
# | 'QUORUM'
# | 'ALL'
#
#
# # Should these just be on the alarm itself?
# # Like check type filtering...
# SetConsistencyLevel <- :'consistencyLevel' Ws :'=' Ws ConsistencyLevel
#
# SetConsecutiveCount <- :'consecutiveCount' Ws :'=' Ws WholeNumber
#
# # We just parse out a string here, then validate it later so we can provide an
# # actually useful error message.
# SetDimensionFilter <- :'dimensionFilter' Ws :'=' Ws String
#
#
# SComment <: '#' *(!EndOfLine .) (EndOfLine | !.)
#
#
# MComment <: '/*' *(MComment | !'*/' . ) '*/'
#
#
# EndOfLine <: '\r\n' | '\n' | '\r'
#
# Wsp <: *[ \t]
#
# Ws <: *(EndOfLine | SComment | MComment | [ \t])
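# Usage sketch (the alarm expression below is illustrative; parse() is
# provided by the waxeye runtime's WaxeyeParser base class):
#   p = Parser()
#   ast = p.parse("if (metric['duration'] > 5) { return new AlarmStatus(CRITICAL); }")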
from waxeye import Edge, State, FA, WaxeyeParser
class Parser (WaxeyeParser):
start = 0
eof_check = True
automata = [FA("alarmDsl", [State([Edge(38, 1, False)], False),
State([Edge(28, 1, False),
Edge(38, 2, False)], False),
State([Edge(10, 2, False),
Edge(38, 3, False)], False),
State([Edge(22, 3, False)], True)], FA.LEFT),
FA("metricName", [State([Edge("m", 1, True)], False),
State([Edge("e", 2, True)], False),
State([Edge("t", 3, True)], False),
State([Edge("r", 4, True)], False),
State([Edge("i", 5, True)], False),
State([Edge("c", 6, True)], False),
State([Edge("[", 7, True)], False),
State([Edge("\"", 8, True),
Edge("\'", 12, True)], False),
State([Edge(5, 9, False)], False),
State([Edge("\"", 10, True)], False),
State([Edge("]", 11, True)], False),
State([], True),
State([Edge(5, 13, False)], False),
State([Edge("\'", 10, True)], False)], FA.LEFT),
FA("param", [State([Edge(38, 1, False)], False),
State([Edge(1, 2, False),
Edge(17, 2, False)], False),
State([Edge(38, 3, False)], False),
State([], True)], FA.LEFT),
FA("functionName", [State([Edge([(65, 90), "_", (97, 122)], 1, False)], False),
State([Edge([(65, 90), "_", (97, 122)], 1, False)], True)], FA.LEFT),
FA("metric", [State([Edge(3, 1, False),
Edge(1, 4, False)], False),
State([Edge("(", 2, True)], False),
State([Edge(2, 3, False)], False),
State([Edge(")", 4, True),
Edge(",", 5, True)], False),
State([], True),
State([Edge(38, 6, False)], False),
State([Edge(2, 7, False)], False),
State([Edge(",", 5, True),
Edge(")", 4, True)], False)], FA.PRUNE),
FA("name", [State([Edge(["-", (65, 90), "_", (97, 122)], 1, False),
Edge(" ", 1, False),
Edge([(48, 57)], 1, False),
Edge(".", 1, False),
Edge("/", 1, False)], False),
State([Edge(["-", (65, 90), "_", (97, 122)], 1, False),
Edge(" ", 1, False),
Edge([(48, 57)], 1, False),
Edge(".", 1, False),
Edge("/", 1, False)], True)], FA.LEFT),
FA("inExp", [State([Edge(4, 1, False)], False),
State([Edge(38, 2, False)], False),
State([Edge(7, 3, False)], False),
State([], True)], FA.PRUNE),
FA("rhs", [State([Edge(25, 1, False),
Edge(26, 4, False),
Edge(27, 6, False)], False),
State([Edge(38, 2, False)], False),
State([Edge(12, 3, False)], False),
State([], True),
State([Edge(38, 5, False)], False),
State([Edge(13, 3, False)], False),
State([Edge(38, 7, False)], False),
State([Edge(14, 3, False)], False)], FA.LEFT),
FA("condExp", [State([Edge(38, 1, False)], False),
State([Edge("(", 2, True),
Edge(6, 6, False)], False),
State([Edge(38, 3, False)], False),
State([Edge(6, 4, False)], False),
State([Edge(38, 5, False)], False),
State([Edge(")", 6, True)], False),
State([Edge(38, 7, False)], False),
State([], True)], FA.PRUNE),
FA("conj", [State([Edge("&", 1, False),
Edge("|", 3, False)], False),
State([Edge("&", 2, False)], False),
State([], True),
State([Edge("|", 2, False)], False)], FA.LEFT),
FA("ifStatement", [State([Edge("i", 1, True)], False),
State([Edge("f", 2, True)], False),
State([Edge(38, 3, False)], False),
State([Edge("(", 4, True)], False),
State([Edge(38, 5, False)], False),
State([Edge(8, 6, False)], False),
State([Edge(9, 7, False),
Edge(")", 9, True)], False),
State([Edge(38, 8, False)], False),
State([Edge(8, 6, False)], False),
State([Edge(38, 10, False)], False),
State([Edge(11, 11, False)], False),
State([Edge(38, 12, False)], False),
State([], True)], FA.LEFT),
FA("block", [State([Edge("{", 1, True)], False),
State([Edge(38, 2, False)], False),
State([Edge(22, 3, False)], False),
State([Edge(38, 4, False)], False),
State([Edge("}", 5, True)], False),
State([], True)], FA.LEFT),
FA("nValue", [State([Edge(17, 1, False),
Edge(4, 1, False)], False),
State([], True)], FA.LEFT),
FA("tValue", [State([Edge(14, 1, False),
Edge(4, 1, False)], False),
State([], True)], FA.LEFT),
FA("string", [State([Edge("\"", 1, True),
Edge("\'", 6, True)], False),
State([Edge("\\", 2, True),
Edge(40, 3, False),
Edge("\"", 5, True)], False),
State([Edge(15, 1, False),
Edge(16, 1, False)], False),
State([Edge(39, 4, False)], False),
State([Edge(-1, 1, False)], False),
State([], True),
State([Edge("\\", 7, True),
Edge(42, 8, False),
Edge("\'", 5, True)], False),
State([Edge(15, 6, False),
Edge(16, 6, False)], False),
State([Edge(41, 9, False)], False),
State([Edge(-1, 6, False)], False)], FA.LEFT),
FA("unicode", [State([Edge("u", 1, False)], False),
State([Edge([(48, 57), (65, 70), (97, 102)], 2, False)], False),
State([Edge([(48, 57), (65, 70), (97, 102)], 3, False)], False),
State([Edge([(48, 57), (65, 70), (97, 102)], 4, False)], False),
State([Edge([(48, 57), (65, 70), (97, 102)], 5, False)], False),
State([], True)], FA.LEFT),
FA("escaped", [State([Edge(["\"", "\'", "/", "\\", "b", "f", "n", "r", "t"], 1, False)], False),
State([], True)], FA.LEFT),
FA("number", [State([Edge("-", 1, False),
Edge("0", 2, False),
Edge([(49, 57)], 8, False)], False),
State([Edge("0", 2, False),
Edge([(49, 57)], 8, False)], False),
State([Edge(".", 3, False),
Edge(["E", "e"], 5, False)], True),
State([Edge([(48, 57)], 4, False)], False),
State([Edge([(48, 57)], 4, False),
Edge(["E", "e"], 5, False)], True),
State([Edge(["+", "-"], 6, False),
Edge([(48, 57)], 7, False)], False),
State([Edge([(48, 57)], 7, False)], False),
State([Edge([(48, 57)], 7, False)], True),
State([Edge([(48, 57)], 8, False),
Edge(".", 3, False),
Edge(["E", "e"], 5, False)], True)], FA.LEFT),
FA("wholeNumber", [State([Edge([(49, 57)], 1, False)], False),
State([Edge([(48, 57)], 1, False)], True)], FA.LEFT),
FA("alarmStatusArgs", [State([Edge(23, 1, False)], False),
State([Edge(38, 2, False)], True),
State([Edge(",", 3, True)], False),
State([Edge(38, 4, False)], False),
State([Edge(24, 5, False)], False),
State([], True)], FA.LEFT),
FA("deprecatedAlarmStatus", [State([Edge(19, 1, False)], False),
State([], True)], FA.LEFT),
FA("alarmStatus", [State([Edge("n", 1, True)], False),
State([Edge("e", 2, True)], False),
State([Edge("w", 3, True)], False),
State([Edge(38, 4, False)], False),
State([Edge("A", 5, True)], False),
State([Edge("l", 6, True)], False),
State([Edge("a", 7, True)], False),
State([Edge("r", 8, True)], False),
State([Edge("m", 9, True)], False),
State([Edge("S", 10, True)], False),
State([Edge("t", 11, True)], False),
State([Edge("a", 12, True)], False),
State([Edge("t", 13, True)], False),
State([Edge("u", 14, True)], False),
State([Edge("s", 15, True)], False),
State([Edge("(", 16, True)], False),
State([Edge(38, 17, False)], False),
State([Edge(19, 18, False)], False),
State([Edge(38, 19, False)], False),
State([Edge(")", 20, True)], False),
State([Edge(38, 21, False)], False),
State([Edge(";", 22, True)], False),
State([], True)], FA.LEFT),
FA("return", [State([Edge("r", 1, True)], False),
State([Edge("e", 2, True)], False),
State([Edge("t", 3, True)], False),
State([Edge("u", 4, True)], False),
State([Edge("r", 5, True)], False),
State([Edge("n", 6, True)], False),
State([Edge(" ", 7, True)], False),
State([Edge(38, 8, False)], False),
State([Edge(21, 9, False),
Edge(20, 9, False)], False),
State([Edge(38, 10, False)], False),
State([], True)], FA.LEFT),
FA("alertStates", [State([Edge("C", 1, False),
Edge("O", 9, False),
Edge("W", 10, False)], False),
State([Edge("R", 2, False)], False),
State([Edge("I", 3, False)], False),
State([Edge("T", 4, False)], False),
State([Edge("I", 5, False)], False),
State([Edge("C", 6, False)], False),
State([Edge("A", 7, False)], False),
State([Edge("L", 8, False)], False),
State([], True),
State([Edge("K", 8, False)], False),
State([Edge("A", 11, False)], False),
State([Edge("R", 12, False)], False),
State([Edge("N", 13, False)], False),
State([Edge("I", 14, False)], False),
State([Edge("N", 15, False)], False),
State([Edge("G", 8, False)], False)], FA.LEFT),
FA("alertStateReason", [State([Edge(14, 1, False)], False),
State([], True)], FA.LEFT),
FA("nOp", [State([Edge(">", 1, False),
Edge("<", 4, False),
Edge(">", 2, False),
Edge("<", 2, False),
Edge("=", 5, False),
Edge("!", 6, False)], False),
State([Edge("=", 2, False)], False),
State([Edge(38, 3, False)], False),
State([], True),
State([Edge("=", 2, False)], False),
State([Edge("=", 2, False)], False),
State([Edge("=", 2, False)], False)], FA.LEFT),
FA("tOp", [State([Edge("=", 1, False),
Edge("!", 4, False)], False),
State([Edge("=", 2, False)], False),
State([Edge(38, 3, False)], False),
State([], True),
State([Edge("=", 2, False)], False)], FA.LEFT),
FA("cTOp", [State([Edge("n", 1, False),
Edge("r", 8, False)], False),
State([Edge("r", 2, False)], False),
State([Edge("e", 3, False)], False),
State([Edge("g", 4, False)], False),
State([Edge("e", 5, False)], False),
State([Edge("x", 6, False)], False),
State([Edge(38, 7, False)], False),
State([], True),
State([Edge("e", 9, False)], False),
State([Edge("g", 10, False)], False),
State([Edge("e", 11, False)], False),
State([Edge("x", 6, False)], False)], FA.LEFT),
FA("setStatement", [State([Edge(":", 1, True)], False),
State([Edge("s", 2, True)], False),
State([Edge("e", 3, True)], False),
State([Edge("t", 4, True)], False),
State([Edge(38, 5, False)], False),
State([Edge(29, 6, False)], False),
State([Edge(38, 7, False)], False),
State([], True)], FA.LEFT),
FA("inSetStatement", [State([Edge(31, 1, False),
Edge(32, 1, False),
Edge(33, 1, False)], False),
State([], True)], FA.PRUNE),
FA("consistencyLevel", [State([Edge("O", 1, False),
Edge("Q", 4, False),
Edge("A", 9, False)], False),
State([Edge("N", 2, False)], False),
State([Edge("E", 3, False)], False),
State([], True),
State([Edge("U", 5, False)], False),
State([Edge("O", 6, False)], False),
State([Edge("R", 7, False)], False),
State([Edge("U", 8, False)], False),
State([Edge("M", 3, False)], False),
State([Edge("L", 10, False)], False),
State([Edge("L", 3, False)], False)], FA.LEFT),
FA("setConsistencyLevel", [State([Edge("c", 1, True)], False),
State([Edge("o", 2, True)], False),
State([Edge("n", 3, True)], False),
State([Edge("s", 4, True)], False),
State([Edge("i", 5, True)], False),
State([Edge("s", 6, True)], False),
State([Edge("t", 7, True)], False),
State([Edge("e", 8, True)], False),
State([Edge("n", 9, True)], False),
State([Edge("c", 10, True)], False),
State([Edge("y", 11, True)], False),
State([Edge("L", 12, True)], False),
State([Edge("e", 13, True)], False),
State([Edge("v", 14, True)], False),
State([Edge("e", 15, True)], False),
State([Edge("l", 16, True)], False),
State([Edge(38, 17, False)], False),
State([Edge("=", 18, True)], False),
State([Edge(38, 19, False)], False),
State([Edge(30, 20, False)], False),
State([], True)], FA.LEFT),
FA("setConsecutiveCount", [State([Edge("c", 1, True)], False),
State([Edge("o", 2, True)], False),
State([Edge("n", 3, True)], False),
State([Edge("s", 4, True)], False),
State([Edge("e", 5, True)], False),
State([Edge("c", 6, True)], False),
State([Edge("u", 7, True)], False),
State([Edge("t", 8, True)], False),
State([Edge("i", 9, True)], False),
State([Edge("v", 10, True)], False),
State([Edge("e", 11, True)], False),
State([Edge("C", 12, True)], False),
State([Edge("o", 13, True)], False),
State([Edge("u", 14, True)], False),
State([Edge("n", 15, True)], False),
State([Edge("t", 16, True)], False),
State([Edge(38, 17, False)], False),
State([Edge("=", 18, True)], False),
State([Edge(38, 19, False)], False),
State([Edge(18, 20, False)], False),
State([], True)], FA.LEFT),
FA("setDimensionFilter", [State([Edge("d", 1, True)], False),
State([Edge("i", 2, True)], False),
State([Edge("m", 3, True)], False),
State([Edge("e", 4, True)], False),
State([Edge("n", 5, True)], False),
State([Edge("s", 6, True)], False),
State([Edge("i", 7, True)], False),
State([Edge("o", 8, True)], False),
State([Edge("n", 9, True)], False),
State([Edge("F", 10, True)], False),
State([Edge("i", 11, True)], False),
State([Edge("l", 12, True)], False),
State([Edge("t", 13, True)], False),
State([Edge("e", 14, True)], False),
State([Edge("r", 15, True)], False),
State([Edge(38, 16, False)], False),
State([Edge("=", 17, True)], False),
State([Edge(38, 18, False)], False),
State([Edge(14, 19, False)], False),
State([], True)], FA.LEFT),
FA("sComment", [State([Edge("#", 1, False)], False),
State([Edge(44, 2, False),
Edge(36, 3, False),
Edge(43, 3, False)], False),
State([Edge(-1, 1, False)], False),
State([], True)], FA.VOID),
FA("mComment", [State([Edge("/", 1, False)], False),
State([Edge("*", 2, False)], False),
State([Edge(35, 2, False),
Edge(45, 3, False),
Edge("*", 4, False)], False),
State([Edge(-1, 2, False)], False),
State([Edge("/", 5, False)], False),
State([], True)], FA.VOID),
FA("endOfLine", [State([Edge("\r", 1, False),
Edge("\n", 2, False),
Edge("\r", 2, False)], False),
State([Edge("\n", 2, False)], False),
State([], True)], FA.VOID),
FA("wsp", [State([Edge(["\t", " "], 0, False)], True)], FA.VOID),
FA("ws", [State([Edge(36, 0, False),
Edge(34, 0, False),
Edge(35, 0, False),
Edge(["\t", " "], 0, False)], True)], FA.VOID),
FA("", [State([Edge("\"", 1, False)], False),
State([], True)], FA.NEG),
FA("", [State([Edge("\\", 1, False)], False),
State([], True)], FA.NEG),
FA("", [State([Edge("\'", 1, False)], False),
State([], True)], FA.NEG),
FA("", [State([Edge("\\", 1, False)], False),
State([], True)], FA.NEG),
FA("", [State([Edge(-1, 1, False)], False),
State([], True)], FA.NEG),
FA("", [State([Edge(36, 1, False)], False),
State([], True)], FA.NEG),
FA("", [State([Edge("*", 1, False)], False),
State([Edge("/", 2, False)], False),
State([], True)], FA.NEG)]
def __init__(self):
WaxeyeParser.__init__(self, Parser.start, Parser.eof_check, Parser.automata)
|
Mistobaan/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/gradient_correctness_test.py | 118 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.argmax_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class GradientCorrectnessTest(test.TestCase):
def testMultipleOutputChainedGradients(self):
with self.test_session() as sess:
x = constant_op.constant(1.0, dtype=dtypes.float32)
yexp = math_ops.exp(x)
yexplog = math_ops.log(yexp)
grads = gradients_impl.gradients([yexp, yexplog], [x])
grad_vals = sess.run(grads)
exp1_plus_one = (1.0 + np.exp(1.0)).astype(np.float32)
# [dexp(x)/dx + d(log(exp(x)))/dx] @ x=1 == exp(1) + 1
self.assertAllClose(grad_vals[0], exp1_plus_one)
if __name__ == '__main__':
test.main()
|
pschmitt/home-assistant | refs/heads/dev | tests/components/group/test_cover.py | 5 | """The tests for the group cover platform."""
from datetime import timedelta
import pytest
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
ATTR_CURRENT_TILT_POSITION,
ATTR_POSITION,
ATTR_TILT_POSITION,
DOMAIN,
)
from homeassistant.components.group.cover import DEFAULT_NAME
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_SUPPORTED_FEATURES,
CONF_ENTITIES,
SERVICE_CLOSE_COVER,
SERVICE_CLOSE_COVER_TILT,
SERVICE_OPEN_COVER,
SERVICE_OPEN_COVER_TILT,
SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION,
SERVICE_STOP_COVER,
SERVICE_STOP_COVER_TILT,
SERVICE_TOGGLE,
SERVICE_TOGGLE_COVER_TILT,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import assert_setup_component, async_fire_time_changed
COVER_GROUP = "cover.cover_group"
DEMO_COVER = "cover.kitchen_window"
DEMO_COVER_POS = "cover.hall_window"
DEMO_COVER_TILT = "cover.living_room_window"
DEMO_TILT = "cover.tilt_demo"
CONFIG_ALL = {
DOMAIN: [
{"platform": "demo"},
{
"platform": "group",
CONF_ENTITIES: [DEMO_COVER, DEMO_COVER_POS, DEMO_COVER_TILT, DEMO_TILT],
},
]
}
CONFIG_POS = {
DOMAIN: [
{"platform": "demo"},
{
"platform": "group",
CONF_ENTITIES: [DEMO_COVER_POS, DEMO_COVER_TILT, DEMO_TILT],
},
]
}
CONFIG_ATTRIBUTES = {
DOMAIN: {
"platform": "group",
CONF_ENTITIES: [DEMO_COVER, DEMO_COVER_POS, DEMO_COVER_TILT, DEMO_TILT],
}
}
@pytest.fixture
async def setup_comp(hass, config_count):
"""Set up group cover component."""
config, count = config_count
with assert_setup_component(count, DOMAIN):
await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
@pytest.mark.parametrize("config_count", [(CONFIG_ATTRIBUTES, 1)])
async def test_attributes(hass, setup_comp):
"""Test handling of state attributes."""
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_CLOSED
assert state.attributes[ATTR_FRIENDLY_NAME] == DEFAULT_NAME
assert state.attributes[ATTR_ENTITY_ID] == [
DEMO_COVER,
DEMO_COVER_POS,
DEMO_COVER_TILT,
DEMO_TILT,
]
assert ATTR_ASSUMED_STATE not in state.attributes
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
assert ATTR_CURRENT_POSITION not in state.attributes
assert ATTR_CURRENT_TILT_POSITION not in state.attributes
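    # Supported-feature bit values used in these tests (cover platform):
    # OPEN=1, CLOSE=2, SET_POSITION=4, STOP=8, OPEN_TILT=16, CLOSE_TILT=32,
    # STOP_TILT=64, SET_TILT_POSITION=128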
# Add Entity that supports open / close / stop
hass.states.async_set(DEMO_COVER, STATE_OPEN, {ATTR_SUPPORTED_FEATURES: 11})
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert ATTR_ASSUMED_STATE not in state.attributes
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 11
assert ATTR_CURRENT_POSITION not in state.attributes
assert ATTR_CURRENT_TILT_POSITION not in state.attributes
# Add Entity that supports set_cover_position
hass.states.async_set(
DEMO_COVER_POS,
STATE_OPEN,
{ATTR_SUPPORTED_FEATURES: 4, ATTR_CURRENT_POSITION: 70},
)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert ATTR_ASSUMED_STATE not in state.attributes
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 15
assert state.attributes[ATTR_CURRENT_POSITION] == 70
assert ATTR_CURRENT_TILT_POSITION not in state.attributes
# Add Entity that supports open tilt / close tilt / stop tilt
hass.states.async_set(DEMO_TILT, STATE_OPEN, {ATTR_SUPPORTED_FEATURES: 112})
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert ATTR_ASSUMED_STATE not in state.attributes
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 127
assert state.attributes[ATTR_CURRENT_POSITION] == 70
assert ATTR_CURRENT_TILT_POSITION not in state.attributes
# Add Entity that supports set_tilt_position
hass.states.async_set(
DEMO_COVER_TILT,
STATE_OPEN,
{ATTR_SUPPORTED_FEATURES: 128, ATTR_CURRENT_TILT_POSITION: 60},
)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert ATTR_ASSUMED_STATE not in state.attributes
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 255
assert state.attributes[ATTR_CURRENT_POSITION] == 70
assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 60
# ### Test assumed state ###
# ##########################
# For covers
hass.states.async_set(
DEMO_COVER, STATE_OPEN, {ATTR_SUPPORTED_FEATURES: 4, ATTR_CURRENT_POSITION: 100}
)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert state.attributes[ATTR_ASSUMED_STATE] is True
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 244
assert state.attributes[ATTR_CURRENT_POSITION] == 100
assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 60
hass.states.async_remove(DEMO_COVER)
hass.states.async_remove(DEMO_COVER_POS)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert ATTR_ASSUMED_STATE not in state.attributes
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 240
assert ATTR_CURRENT_POSITION not in state.attributes
assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 60
# For tilts
hass.states.async_set(
DEMO_TILT,
STATE_OPEN,
{ATTR_SUPPORTED_FEATURES: 128, ATTR_CURRENT_TILT_POSITION: 100},
)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert state.attributes[ATTR_ASSUMED_STATE] is True
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 128
assert ATTR_CURRENT_POSITION not in state.attributes
assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 100
hass.states.async_remove(DEMO_COVER_TILT)
hass.states.async_set(DEMO_TILT, STATE_CLOSED)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_CLOSED
assert ATTR_ASSUMED_STATE not in state.attributes
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
assert ATTR_CURRENT_POSITION not in state.attributes
assert ATTR_CURRENT_TILT_POSITION not in state.attributes
hass.states.async_set(DEMO_TILT, STATE_CLOSED, {ATTR_ASSUMED_STATE: True})
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.attributes[ATTR_ASSUMED_STATE] is True
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_open_covers(hass, setup_comp):
"""Test open cover function."""
await hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
)
for _ in range(10):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert state.attributes[ATTR_CURRENT_POSITION] == 100
assert hass.states.get(DEMO_COVER).state == STATE_OPEN
assert hass.states.get(DEMO_COVER_POS).attributes[ATTR_CURRENT_POSITION] == 100
assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_POSITION] == 100
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_close_covers(hass, setup_comp):
"""Test close cover function."""
await hass.services.async_call(
DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
)
for _ in range(10):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_CLOSED
assert state.attributes[ATTR_CURRENT_POSITION] == 0
assert hass.states.get(DEMO_COVER).state == STATE_CLOSED
assert hass.states.get(DEMO_COVER_POS).attributes[ATTR_CURRENT_POSITION] == 0
assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_POSITION] == 0
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_toggle_covers(hass, setup_comp):
"""Test toggle cover function."""
# Start covers in open state
await hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
)
for _ in range(10):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
# Toggle will close covers
await hass.services.async_call(
DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
)
for _ in range(10):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_CLOSED
assert state.attributes[ATTR_CURRENT_POSITION] == 0
assert hass.states.get(DEMO_COVER).state == STATE_CLOSED
assert hass.states.get(DEMO_COVER_POS).attributes[ATTR_CURRENT_POSITION] == 0
assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_POSITION] == 0
# Toggle again will open covers
await hass.services.async_call(
DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
)
for _ in range(10):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert state.attributes[ATTR_CURRENT_POSITION] == 100
assert hass.states.get(DEMO_COVER).state == STATE_OPEN
assert hass.states.get(DEMO_COVER_POS).attributes[ATTR_CURRENT_POSITION] == 100
assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_POSITION] == 100
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_stop_covers(hass, setup_comp):
"""Test stop cover function."""
await hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
)
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
await hass.services.async_call(
DOMAIN, SERVICE_STOP_COVER, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
)
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert state.attributes[ATTR_CURRENT_POSITION] == 100
assert hass.states.get(DEMO_COVER).state == STATE_OPEN
assert hass.states.get(DEMO_COVER_POS).attributes[ATTR_CURRENT_POSITION] == 20
assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_POSITION] == 80
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_set_cover_position(hass, setup_comp):
"""Test set cover position function."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_COVER_POSITION,
{ATTR_ENTITY_ID: COVER_GROUP, ATTR_POSITION: 50},
blocking=True,
)
for _ in range(4):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert state.attributes[ATTR_CURRENT_POSITION] == 50
assert hass.states.get(DEMO_COVER).state == STATE_CLOSED
assert hass.states.get(DEMO_COVER_POS).attributes[ATTR_CURRENT_POSITION] == 50
assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_POSITION] == 50
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_open_tilts(hass, setup_comp):
"""Test open tilt function."""
await hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER_TILT, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
)
for _ in range(5):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 100
assert (
hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_TILT_POSITION] == 100
)
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_close_tilts(hass, setup_comp):
"""Test close tilt function."""
await hass.services.async_call(
DOMAIN, SERVICE_CLOSE_COVER_TILT, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
)
for _ in range(5):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 0
assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_TILT_POSITION] == 0
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_toggle_tilts(hass, setup_comp):
"""Test toggle tilt function."""
# Start tilted open
await hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER_TILT, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
)
for _ in range(10):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 100
assert (
hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_TILT_POSITION] == 100
)
# Toggle will tilt closed
await hass.services.async_call(
DOMAIN, SERVICE_TOGGLE_COVER_TILT, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
)
for _ in range(10):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 0
assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_TILT_POSITION] == 0
# Toggle again will tilt open
await hass.services.async_call(
DOMAIN, SERVICE_TOGGLE_COVER_TILT, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
)
for _ in range(10):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 100
assert (
hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_TILT_POSITION] == 100
)
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_stop_tilts(hass, setup_comp):
"""Test stop tilts function."""
await hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER_TILT, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
)
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
await hass.services.async_call(
DOMAIN, SERVICE_STOP_COVER_TILT, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
)
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 60
assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_TILT_POSITION] == 60
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_set_tilt_positions(hass, setup_comp):
"""Test set tilt position function."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_COVER_TILT_POSITION,
{ATTR_ENTITY_ID: COVER_GROUP, ATTR_TILT_POSITION: 80},
blocking=True,
)
for _ in range(3):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(COVER_GROUP)
assert state.state == STATE_OPEN
assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 80
assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_TILT_POSITION] == 80
@pytest.mark.parametrize("config_count", [(CONFIG_POS, 2)])
async def test_is_opening_closing(hass, setup_comp):
"""Test is_opening property."""
await hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
)
assert hass.states.get(DEMO_COVER_POS).state == STATE_OPENING
assert hass.states.get(DEMO_COVER_TILT).state == STATE_OPENING
assert hass.states.get(COVER_GROUP).state == STATE_OPENING
for _ in range(10):
future = dt_util.utcnow() + timedelta(seconds=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
await hass.services.async_call(
DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
)
assert hass.states.get(DEMO_COVER_POS).state == STATE_CLOSING
assert hass.states.get(DEMO_COVER_TILT).state == STATE_CLOSING
assert hass.states.get(COVER_GROUP).state == STATE_CLOSING
hass.states.async_set(DEMO_COVER_POS, STATE_OPENING, {ATTR_SUPPORTED_FEATURES: 11})
await hass.async_block_till_done()
assert hass.states.get(DEMO_COVER_POS).state == STATE_OPENING
assert hass.states.get(COVER_GROUP).state == STATE_OPENING
hass.states.async_set(DEMO_COVER_POS, STATE_CLOSING, {ATTR_SUPPORTED_FEATURES: 11})
await hass.async_block_till_done()
assert hass.states.get(DEMO_COVER_POS).state == STATE_CLOSING
assert hass.states.get(COVER_GROUP).state == STATE_CLOSING
|
flavour/Turkey | refs/heads/master | modules/s3/s3parser.py | 16 | # -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
"""
This file parses messages using functions defined in the template's
parser.py
@copyright: 2012-15 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3Parsing",)
import sys
from gluon import current
# =============================================================================
class S3Parsing(object):
"""
Core Message Parsing Framework
- reusable functions
"""
# -------------------------------------------------------------------------
@staticmethod
def parser(function_name, message_id, **kwargs):
"""
1st Stage Parser
- called by msg.parse()
Sets the appropriate Authorisation level and then calls the
parser function from the template
"""
reply = None
s3db = current.s3db
# Retrieve Message
table = s3db.msg_message
message = current.db(table.message_id == message_id).select(limitby=(0, 1)
).first()
from_address = message.from_address
if "<" in from_address:
from_address = from_address.split("<")[1].split(">")[0]
email = S3Parsing.is_session_alive(from_address)
if email:
current.auth.s3_impersonate(email)
else:
(email, password) = S3Parsing.parse_login(message)
if email and password:
current.auth.login_bare(email, password)
expiration = current.session.auth["expiration"]
table = s3db.msg_session
table.insert(email = email,
expiration_time = expiration,
from_address = from_address)
reply = "Login succesful"
# The message may have multiple purposes
#return reply
# Load the Parser template for this deployment
template = current.deployment_settings.get_msg_parser()
module_name = "applications.%s.modules.templates.%s.parser" \
% (current.request.application, template)
__import__(module_name)
mymodule = sys.modules[module_name]
S3Parser = mymodule.S3Parser()
# Pass the message to the parser
try:
fn = getattr(S3Parser, function_name)
except:
current.log.error("Parser not found: %s" % function_name)
return None
reply = fn(message, **kwargs) or reply
if not reply:
return None
# Send Reply
current.msg.send(from_address, reply)
# -------------------------------------------------------------------------
@staticmethod
def parse_login(message):
"""
Authenticate a login request
"""
if not message:
return None, None
words = message.body.split(" ")
login = False
email = None
password = None
if "LOGIN" in [word.upper() for word in words]:
login = True
if len(words) == 2 and login:
password = words[1]
elif len(words) == 3 and login:
email = words[1]
password = words[2]
if login:
if password and not email:
email = message.from_address
return email, password
else:
return None, None
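# Illustrative sketch (not part of the upstream module; message bodies are
# hypothetical). parse_login() only reacts to a LOGIN keyword:
#   body "LOGIN secret"              -> (message.from_address, "secret")
#   body "LOGIN user@example.org secret" -> ("user@example.org", "secret")
# Any other body returns (None, None).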
# ---------------------------------------------------------------------
@staticmethod
def is_session_alive(from_address):
"""
Check whether there is an alive session from the same sender
"""
email = None
now = current.request.utcnow
stable = current.s3db.msg_session
query = (stable.is_expired == False) & \
(stable.from_address == from_address)
records = current.db(query).select(stable.id,
stable.created_datetime,
stable.expiration_time,
stable.email,
)
for record in records:
# Elapsed seconds since the session was created (the original computed
# created - now, a negative value, so sessions never expired)
elapsed = (now - record.created_datetime).total_seconds()
if elapsed < record.expiration_time:
email = record.email
break
else:
record.update_record(is_expired = True)
return email
# ---------------------------------------------------------------------
@staticmethod
def lookup_person(address):
"""
Lookup a Person from an Email Address
"""
s3db = current.s3db
if "<" in address:
address = address.split("<")[1].split(">")[0]
ptable = s3db.pr_person
ctable = s3db.pr_contact
query = (ctable.value == address) & \
(ctable.contact_method == "EMAIL") & \
(ctable.pe_id == ptable.pe_id) & \
(ptable.deleted == False) & \
(ctable.deleted == False)
possibles = current.db(query).select(ptable.id,
limitby=(0, 2))
if len(possibles) == 1:
return possibles.first().id
return None
# ---------------------------------------------------------------------
@staticmethod
def lookup_human_resource(address):
"""
Lookup a Human Resource from an Email Address
"""
db = current.db
s3db = current.s3db
if "<" in address:
address = address.split("<")[1].split(">")[0]
hrtable = s3db.hrm_human_resource
ptable = db.pr_person
ctable = s3db.pr_contact
query = (ctable.value == address) & \
(ctable.contact_method == "EMAIL") & \
(ctable.pe_id == ptable.pe_id) & \
(ptable.id == hrtable.person_id) & \
(ctable.deleted == False) & \
(ptable.deleted == False) & \
(hrtable.deleted == False)
possibles = db(query).select(hrtable.id,
limitby=(0, 2))
if len(possibles) == 1:
return possibles.first().id
return None
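# Illustrative sketch (hypothetical addresses, not from this module): both
# lookups accept a bare address or a "Name <address>" form, and return a
# record id only when exactly one match exists:
#   S3Parsing.lookup_person("Jane Doe <jane@example.org>")
#   S3Parsing.lookup_human_resource("jane@example.org")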
# END =========================================================================
|
Lorquas/subscription-manager | refs/heads/master | test/gui/test_facts_gui.py | 1 | from __future__ import print_function, division, absolute_import
from test.fixture import SubManFixture
from subscription_manager.injection import provide, IDENTITY
from test.stubs import StubUEP, StubFacts
from subscription_manager.gui import factsgui
from mock import NonCallableMock, patch
from nose.plugins.attrib import attr
@attr('gui')
class FactDialogTests(SubManFixture):
def setUp(self):
super(FactDialogTests, self).setUp()
expected_facts = {'fact1': 'one',
'fact2': 'two',
'system': 'Unknown',
'system.uuid': 'MOCKUUID'}
self.expected_facts = expected_facts
self.stub_facts = StubFacts(expected_facts)
def test_hides_environment_when_not_supported(self):
dialog = factsgui.SystemFactsDialog()
dialog.display_facts()
self.assertEqual(False, dialog.environment_title.get_property("visible"))
self.assertEqual(False, dialog.environment_label.get_property("visible"))
def test_shows_unknown_for_no_org(self):
dialog = factsgui.SystemFactsDialog()
dialog.display_facts()
# No owner id should show if we have no owner
self.assertEqual(False, dialog.owner_label.get_property("visible"))
self.assertEqual(False, dialog.owner_title.get_property("visible"))
@patch.object(StubUEP, 'getOwner')
def test_shows_org_id(self, mock_getOwner):
mock_getOwner.return_value = {'displayName': 'foo', 'key': 'bar'}
dialog = factsgui.SystemFactsDialog()
dialog.display_facts()
self.assertEqual(True, dialog.owner_label.get_property("visible"))
self.assertEqual(True, dialog.owner_title.get_property("visible"))
self.assertEqual('foo (bar)', dialog.owner_label.get_label())
@patch.object(StubUEP, 'supports_resource')
@patch.object(StubUEP, 'getConsumer')
def test_shows_environment_when_supported(self, mock_getConsumer, mock_supports_resource):
mock_supports_resource.return_value = True
mock_getConsumer.return_value = {'environment': {'name': 'foobar'}}
dialog = factsgui.SystemFactsDialog()
dialog.display_facts()
self.assertEqual(True, dialog.environment_title.get_property("visible"))
self.assertEqual(True, dialog.environment_label.get_property("visible"))
self.assertEqual("foobar", dialog.environment_label.get_text())
@patch.object(StubUEP, 'supports_resource')
@patch.object(StubUEP, 'getConsumer')
def test_shows_environment_when_empty(self, mock_getConsumer, mock_supports_resource):
mock_supports_resource.return_value = True
mock_getConsumer.return_value = {'environment': None}
dialog = factsgui.SystemFactsDialog()
dialog.display_facts()
self.assertEqual(True, dialog.environment_title.get_property("visible"))
self.assertEqual(True, dialog.environment_label.get_property("visible"))
self.assertEqual("None", dialog.environment_label.get_text())
def test_update_button_disabled(self):
# Need an unregistered consumer object:
id_mock = NonCallableMock()
id_mock.name = None
id_mock.uuid = None
def new_identity():
return id_mock
provide(IDENTITY, new_identity)
dialog = factsgui.SystemFactsDialog()
dialog.show()
enabled = dialog.update_button.get_property('sensitive')
self.assertFalse(enabled)
def test_update_button_enabled(self):
dialog = factsgui.SystemFactsDialog()
dialog.show()
enabled = dialog.update_button.get_property('sensitive')
self.assertTrue(enabled)
|
dudepare/django | refs/heads/master | django/test/runner.py | 89 | import collections
import ctypes
import itertools
import logging
import multiprocessing
import os
import pickle
import textwrap
import unittest
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS, connections
from django.test import SimpleTestCase, TestCase
from django.test.utils import setup_test_environment, teardown_test_environment
from django.utils.datastructures import OrderedSet
from django.utils.six import StringIO
try:
import tblib.pickling_support
except ImportError:
tblib = None
class DebugSQLTextTestResult(unittest.TextTestResult):
def __init__(self, stream, descriptions, verbosity):
self.logger = logging.getLogger('django.db.backends')
self.logger.setLevel(logging.DEBUG)
super(DebugSQLTextTestResult, self).__init__(stream, descriptions, verbosity)
def startTest(self, test):
self.debug_sql_stream = StringIO()
self.handler = logging.StreamHandler(self.debug_sql_stream)
self.logger.addHandler(self.handler)
super(DebugSQLTextTestResult, self).startTest(test)
def stopTest(self, test):
super(DebugSQLTextTestResult, self).stopTest(test)
self.logger.removeHandler(self.handler)
if self.showAll:
self.debug_sql_stream.seek(0)
self.stream.write(self.debug_sql_stream.read())
self.stream.writeln(self.separator2)
def addError(self, test, err):
super(DebugSQLTextTestResult, self).addError(test, err)
self.debug_sql_stream.seek(0)
self.errors[-1] = self.errors[-1] + (self.debug_sql_stream.read(),)
def addFailure(self, test, err):
super(DebugSQLTextTestResult, self).addFailure(test, err)
self.debug_sql_stream.seek(0)
self.failures[-1] = self.failures[-1] + (self.debug_sql_stream.read(),)
def printErrorList(self, flavour, errors):
for test, err, sql_debug in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % sql_debug)
class RemoteTestResult(object):
"""
Record information about which tests have succeeded and which have failed.
The sole purpose of this class is to record events in the child processes
so they can be replayed in the master process. As a consequence it doesn't
inherit unittest.TestResult and doesn't attempt to implement all its API.
The implementation matches the unpythonic coding style of unittest2.
"""
def __init__(self):
self.events = []
self.failfast = False
self.shouldStop = False
self.testsRun = 0
@property
def test_index(self):
return self.testsRun - 1
def check_pickleable(self, test, err):
# Ensure that sys.exc_info() tuples are picklable. This displays a
# clear multiprocessing.pool.RemoteTraceback generated in the child
# process instead of a multiprocessing.pool.MaybeEncodingError, making
# the root cause easier to figure out for users who aren't familiar
# with the multiprocessing module. Since we're in a forked process,
# our best chance to communicate with them is to print to stdout.
try:
pickle.dumps(err)
except Exception as exc:
original_exc_txt = repr(err[1])
original_exc_txt = textwrap.fill(original_exc_txt, 75, initial_indent=' ', subsequent_indent=' ')
pickle_exc_txt = repr(exc)
pickle_exc_txt = textwrap.fill(pickle_exc_txt, 75, initial_indent=' ', subsequent_indent=' ')
if tblib is None:
print("""
{} failed:
{}
Unfortunately, tracebacks cannot be pickled, making it impossible for the
parallel test runner to handle this exception cleanly.
In order to see the traceback, you should install tblib:
pip install tblib
""".format(test, original_exc_txt))
else:
print("""
{} failed:
{}
Unfortunately, the exception it raised cannot be pickled, making it impossible
for the parallel test runner to handle it cleanly.
Here's the error encountered while trying to pickle the exception:
{}
You should re-run this test without the --parallel option to reproduce the
failure and get a correct traceback.
""".format(test, original_exc_txt, pickle_exc_txt))
raise
def stop_if_failfast(self):
if self.failfast:
self.stop()
def stop(self):
self.shouldStop = True
def startTestRun(self):
self.events.append(('startTestRun',))
def stopTestRun(self):
self.events.append(('stopTestRun',))
def startTest(self, test):
self.testsRun += 1
self.events.append(('startTest', self.test_index))
def stopTest(self, test):
self.events.append(('stopTest', self.test_index))
def addError(self, test, err):
self.check_pickleable(test, err)
self.events.append(('addError', self.test_index, err))
self.stop_if_failfast()
def addFailure(self, test, err):
self.check_pickleable(test, err)
self.events.append(('addFailure', self.test_index, err))
self.stop_if_failfast()
def addSubTest(self, test, subtest, err):
raise NotImplementedError("subtests aren't supported at this time")
def addSuccess(self, test):
self.events.append(('addSuccess', self.test_index))
def addSkip(self, test, reason):
self.events.append(('addSkip', self.test_index, reason))
def addExpectedFailure(self, test, err):
# If tblib isn't installed, pickling the traceback will always fail.
# However we don't want tblib to be required for running the tests
# when they pass or fail as expected. Drop the traceback when an
# expected failure occurs.
if tblib is None:
err = err[0], err[1], None
self.check_pickleable(test, err)
self.events.append(('addExpectedFailure', self.test_index, err))
def addUnexpectedSuccess(self, test):
self.events.append(('addUnexpectedSuccess', self.test_index))
self.stop_if_failfast()
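# Illustrative sketch (hypothetical data, not part of Django): the events
# recorded above are plain tuples such as
#   [('startTest', 0), ('addSuccess', 0), ('stopTest', 0)]
# which ParallelTestSuite.run() later replays on the real result object via
# getattr(result, event_name)(test, *args).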
class RemoteTestRunner(object):
"""
Run tests and record everything but don't display anything.
The implementation matches the unpythonic coding style of unittest2.
"""
resultclass = RemoteTestResult
def __init__(self, failfast=False, resultclass=None):
self.failfast = failfast
if resultclass is not None:
self.resultclass = resultclass
def run(self, test):
result = self.resultclass()
unittest.registerResult(result)
result.failfast = self.failfast
test(result)
return result
def default_test_processes():
"""
Default number of test processes when using the --parallel option.
"""
# The current implementation of the parallel test runner requires
# multiprocessing to start subprocesses with fork().
# On Python 3.4+: if multiprocessing.get_start_method() != 'fork':
if not hasattr(os, 'fork'):
return 1
try:
return int(os.environ['DJANGO_TEST_PROCESSES'])
except KeyError:
return multiprocessing.cpu_count()
_worker_id = 0
def _init_worker(counter):
"""
Switch to databases dedicated to this worker.
This helper lives at module-level because of the multiprocessing module's
requirements.
"""
global _worker_id
with counter.get_lock():
counter.value += 1
_worker_id = counter.value
for alias in connections:
connection = connections[alias]
settings_dict = connection.creation.get_test_db_clone_settings(_worker_id)
# connection.settings_dict must be updated in place for changes to be
# reflected in django.db.connections. If the following line assigned
# connection.settings_dict = settings_dict, new threads would connect
# to the default database instead of the appropriate clone.
connection.settings_dict.update(settings_dict)
connection.close()
def _run_subsuite(args):
"""
Run a suite of tests with a RemoteTestRunner and return a RemoteTestResult.
This helper lives at module-level and its arguments are wrapped in a tuple
because of the multiprocessing module's requirements.
"""
subsuite_index, subsuite, failfast = args
runner = RemoteTestRunner(failfast=failfast)
result = runner.run(subsuite)
return subsuite_index, result.events
class ParallelTestSuite(unittest.TestSuite):
"""
Run a series of tests in parallel in several processes.
While the unittest module's documentation implies that orchestrating the
execution of tests is the responsibility of the test runner, in practice,
it appears that TestRunner classes are more concerned with formatting and
displaying test results.
Since there are fewer use cases for customizing TestSuite than TestRunner,
implementing parallelization at the level of the TestSuite improves
interoperability with existing custom test runners. A single instance of a
test runner can still collect results from all tests without being aware
that they have been run in parallel.
"""
# In case someone wants to modify these in a subclass.
init_worker = _init_worker
run_subsuite = _run_subsuite
def __init__(self, suite, processes, failfast=False):
self.subsuites = partition_suite_by_case(suite)
self.processes = processes
self.failfast = failfast
super(ParallelTestSuite, self).__init__()
def run(self, result):
"""
Distribute test cases across workers.
Return an identifier of each test case with its result in order to use
imap_unordered to show results as soon as they're available.
To minimize pickling errors when getting results from workers:
- pass back numeric indexes in self.subsuites instead of tests
- make tracebacks pickleable with tblib, if available
Even with tblib, errors may still occur for dynamically created
exception classes such as Model.DoesNotExist which cannot be unpickled.
"""
if tblib is not None:
tblib.pickling_support.install()
counter = multiprocessing.Value(ctypes.c_int, 0)
pool = multiprocessing.Pool(
processes=self.processes,
initializer=self.init_worker.__func__,
initargs=[counter])
args = [
(index, subsuite, self.failfast)
for index, subsuite in enumerate(self.subsuites)
]
test_results = pool.imap_unordered(self.run_subsuite.__func__, args)
while True:
if result.shouldStop:
pool.terminate()
break
try:
subsuite_index, events = test_results.next(timeout=0.1)
except multiprocessing.TimeoutError:
continue
except StopIteration:
pool.close()
break
tests = list(self.subsuites[subsuite_index])
for event in events:
event_name = event[0]
handler = getattr(result, event_name, None)
if handler is None:
continue
test = tests[event[1]]
args = event[2:]
handler(test, *args)
pool.join()
return result
class DiscoverRunner(object):
"""
A Django test runner that uses unittest2 test discovery.
"""
test_suite = unittest.TestSuite
parallel_test_suite = ParallelTestSuite
test_runner = unittest.TextTestRunner
test_loader = unittest.defaultTestLoader
reorder_by = (TestCase, SimpleTestCase)
def __init__(self, pattern=None, top_level=None, verbosity=1,
interactive=True, failfast=False, keepdb=False,
reverse=False, debug_sql=False, parallel=0,
**kwargs):
self.pattern = pattern
self.top_level = top_level
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
self.keepdb = keepdb
self.reverse = reverse
self.debug_sql = debug_sql
self.parallel = parallel
@classmethod
def add_arguments(cls, parser):
parser.add_argument('-t', '--top-level-directory',
action='store', dest='top_level', default=None,
help='Top level of project for unittest discovery.')
parser.add_argument('-p', '--pattern', action='store', dest='pattern',
default="test*.py",
help='The test matching pattern. Defaults to test*.py.')
parser.add_argument('-k', '--keepdb', action='store_true', dest='keepdb',
default=False,
help='Preserves the test DB between runs.')
parser.add_argument('-r', '--reverse', action='store_true', dest='reverse',
default=False,
help='Reverses test cases order.')
parser.add_argument('-d', '--debug-sql', action='store_true', dest='debug_sql',
default=False,
help='Prints logged SQL queries on failure.')
parser.add_argument(
'--parallel', dest='parallel', nargs='?', default=1, type=int,
const=default_test_processes(),
help='Run tests in parallel processes.')
def setup_test_environment(self, **kwargs):
setup_test_environment()
settings.DEBUG = False
unittest.installHandler()
def build_suite(self, test_labels=None, extra_tests=None, **kwargs):
suite = self.test_suite()
test_labels = test_labels or ['.']
extra_tests = extra_tests or []
discover_kwargs = {}
if self.pattern is not None:
discover_kwargs['pattern'] = self.pattern
if self.top_level is not None:
discover_kwargs['top_level_dir'] = self.top_level
for label in test_labels:
kwargs = discover_kwargs.copy()
tests = None
label_as_path = os.path.abspath(label)
# if a module, or "module.ClassName[.method_name]", just run those
if not os.path.exists(label_as_path):
tests = self.test_loader.loadTestsFromName(label)
elif os.path.isdir(label_as_path) and not self.top_level:
# Try to be a bit smarter than unittest about finding the
# default top-level for a given directory path, to avoid
# breaking relative imports. (Unittest's default is to set
# top-level equal to the path, which means relative imports
# will result in "Attempted relative import in non-package.").
# We'd be happy to skip this and require dotted module paths
# (which don't cause this problem) instead of file paths (which
# do), but in the case of a directory in the cwd, which would
# be equally valid if considered as a top-level module or as a
# directory path, unittest unfortunately prefers the latter.
top_level = label_as_path
while True:
init_py = os.path.join(top_level, '__init__.py')
if os.path.exists(init_py):
try_next = os.path.dirname(top_level)
if try_next == top_level:
# __init__.py all the way down? give up.
break
top_level = try_next
continue
break
kwargs['top_level_dir'] = top_level
if not (tests and tests.countTestCases()) and is_discoverable(label):
# Try discovery if path is a package or directory
tests = self.test_loader.discover(start_dir=label, **kwargs)
# Make unittest forget the top-level dir it calculated from this
# run, to support running tests from two different top-levels.
self.test_loader._top_level_dir = None
suite.addTests(tests)
for test in extra_tests:
suite.addTest(test)
suite = reorder_suite(suite, self.reorder_by, self.reverse)
if self.parallel > 1:
parallel_suite = self.parallel_test_suite(suite, self.parallel, self.failfast)
# Since tests are distributed across processes on a per-TestCase
# basis, there's no need for more processes than TestCases.
parallel_units = len(parallel_suite.subsuites)
if self.parallel > parallel_units:
self.parallel = parallel_units
# If there's only one TestCase, parallelization isn't needed.
if self.parallel > 1:
suite = parallel_suite
return suite
def setup_databases(self, **kwargs):
return setup_databases(
self.verbosity, self.interactive, self.keepdb, self.debug_sql,
self.parallel, **kwargs
)
def get_resultclass(self):
return DebugSQLTextTestResult if self.debug_sql else None
def run_suite(self, suite, **kwargs):
resultclass = self.get_resultclass()
return self.test_runner(
verbosity=self.verbosity,
failfast=self.failfast,
resultclass=resultclass,
).run(suite)
def teardown_databases(self, old_config, **kwargs):
"""
Destroys all the non-mirror databases.
"""
for connection, old_name, destroy in old_config:
if destroy:
if self.parallel > 1:
for index in range(self.parallel):
connection.creation.destroy_test_db(
number=index + 1,
verbosity=self.verbosity,
keepdb=self.keepdb,
)
connection.creation.destroy_test_db(old_name, self.verbosity, self.keepdb)
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return len(result.failures) + len(result.errors)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Test labels should be dotted Python paths to test modules, test
classes, or test methods.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
old_config = self.setup_databases()
result = self.run_suite(suite)
self.teardown_databases(old_config)
self.teardown_test_environment()
return self.suite_result(suite, result)
def is_discoverable(label):
"""
Check if a test label points to a python package or file directory.
Relative labels like "." and ".." are seen as directories.
"""
try:
mod = import_module(label)
except (ImportError, TypeError):
pass
else:
return hasattr(mod, '__path__')
return os.path.isdir(os.path.abspath(label))
def dependency_ordered(test_databases, dependencies):
"""
Reorder test_databases into an order that honors the dependencies
described in TEST[DEPENDENCIES].
"""
ordered_test_databases = []
resolved_databases = set()
# Maps db signature to dependencies of all its aliases
dependencies_map = {}
# sanity check - no DB can depend on its own alias
for sig, (_, aliases) in test_databases:
all_deps = set()
for alias in aliases:
all_deps.update(dependencies.get(alias, []))
if not all_deps.isdisjoint(aliases):
raise ImproperlyConfigured(
"Circular dependency: databases %r depend on each other, "
"but are aliases." % aliases)
dependencies_map[sig] = all_deps
while test_databases:
changed = False
deferred = []
# Try to find a DB that has all its dependencies met
for signature, (db_name, aliases) in test_databases:
if dependencies_map[signature].issubset(resolved_databases):
resolved_databases.update(aliases)
ordered_test_databases.append((signature, (db_name, aliases)))
changed = True
else:
deferred.append((signature, (db_name, aliases)))
if not changed:
raise ImproperlyConfigured(
"Circular dependency in TEST[DEPENDENCIES]")
test_databases = deferred
return ordered_test_databases
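# Illustrative sketch (hypothetical data, not from this module): given
# sig_a -> ('db_a', {'default'}) and sig_b -> ('db_b', {'other'}) with
# dependencies = {'other': ['default']}, dependency_ordered() yields sig_a
# before sig_b, since 'other' can only be set up once 'default' is resolved.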
def reorder_suite(suite, classes, reverse=False):
"""
Reorders a test suite by test type.
`classes` is a sequence of types
All tests of type classes[0] are placed first, then tests of type
classes[1], etc. Tests with no match in classes are placed last.
If `reverse` is True, tests within classes are sorted in opposite order,
but test classes are not reversed.
"""
class_count = len(classes)
suite_class = type(suite)
bins = [OrderedSet() for i in range(class_count + 1)]
partition_suite_by_type(suite, classes, bins, reverse=reverse)
reordered_suite = suite_class()
for i in range(class_count + 1):
reordered_suite.addTests(bins[i])
return reordered_suite
def partition_suite_by_type(suite, classes, bins, reverse=False):
"""
Partitions a test suite by test type. Also prevents duplicated tests.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
reverse changes the ordering of tests within bins
Tests of type classes[i] are added to bins[i],
tests with no match found in classes are placed in bins[-1]
"""
suite_class = type(suite)
if reverse:
suite = reversed(tuple(suite))
for test in suite:
if isinstance(test, suite_class):
partition_suite_by_type(test, classes, bins, reverse=reverse)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].add(test)
break
else:
bins[-1].add(test)
def partition_suite_by_case(suite):
"""
Partitions a test suite by test case, preserving the order of tests.
"""
groups = []
suite_class = type(suite)
for test_type, test_group in itertools.groupby(suite, type):
if issubclass(test_type, unittest.TestCase):
groups.append(suite_class(test_group))
else:
for item in test_group:
groups.extend(partition_suite_by_case(item))
return groups
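# Illustrative sketch (hypothetical test classes): a flat suite holding
#   [FooTests('test_a'), FooTests('test_b'), BarTests('test_c')]
# is split into one subsuite per consecutive run of the same TestCase type,
# here [suite(FooTests...), suite(BarTests...)], which is what
# ParallelTestSuite distributes across worker processes.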
def get_unique_databases():
"""
Figure out which databases actually need to be created.
Deduplicate entries in DATABASES that correspond to the same database or are
configured as test mirrors.
Returns an ordered mapping of signatures to (name, list of aliases)
where all aliases share the same underlying database.
"""
test_databases = {}
dependencies = {}
default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()
for alias in connections:
connection = connections[alias]
test_settings = connection.settings_dict['TEST']
if test_settings['MIRROR']:
target = test_settings['MIRROR']
signature = connections[target].creation.test_db_signature()
else:
signature = connection.creation.test_db_signature()
if 'DEPENDENCIES' in test_settings:
dependencies[alias] = test_settings['DEPENDENCIES']
elif alias != DEFAULT_DB_ALIAS and signature != default_sig:
dependencies[alias] = test_settings.get('DEPENDENCIES', [DEFAULT_DB_ALIAS])
# Store a tuple with DB parameters that uniquely identify it.
# If we have two aliases with the same values for that tuple,
# we only need to create the test database once.
item = test_databases.setdefault(
signature, (connection.settings_dict['NAME'], set()))
item[1].add(alias)
test_databases = dependency_ordered(test_databases.items(), dependencies)
test_databases = collections.OrderedDict(test_databases)
return test_databases
def setup_databases(verbosity, interactive, keepdb=False, debug_sql=False, parallel=0, **kwargs):
"""
Creates the test databases.
"""
test_databases = get_unique_databases()
old_names = []
for signature, (db_name, aliases) in test_databases.items():
first_alias = None
for alias in aliases:
connection = connections[alias]
old_names.append((connection, db_name, first_alias is None))
# Actually create the database for the first connection
if first_alias is None:
first_alias = alias
connection.creation.create_test_db(
verbosity=verbosity,
autoclobber=not interactive,
keepdb=keepdb,
serialize=connection.settings_dict.get("TEST", {}).get("SERIALIZE", True),
)
if parallel > 1:
for index in range(parallel):
connection.creation.clone_test_db(
number=index + 1,
verbosity=verbosity,
keepdb=keepdb,
)
# Configure all other connections as mirrors of the first one
else:
connections[alias].creation.set_as_test_mirror(
connections[first_alias].settings_dict)
if debug_sql:
for alias in connections:
connections[alias].force_debug_cursor = True
return old_names
|
Canpio/Paddle | refs/heads/develop | python/paddle/utils/make_model_diagram.py | 9 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generate dot diagram file for the given paddle model config
# The generated file can be viewed using Graphviz (http://graphviz.org)
import sys
import traceback
from paddle.trainer.config_parser import parse_config
def make_layer_label(layer_config):
label = '%s type=%s' % (layer_config.name, layer_config.type)
if layer_config.reversed:
label += ' <=='
label2 = ''
if layer_config.active_type:
label2 += 'act=%s ' % layer_config.active_type
if layer_config.bias_parameter_name:
label2 += 'bias=%s ' % layer_config.bias_parameter_name
if label2:
label += '\l' + label2
return label
def make_diagram(config_file, dot_file, config_arg_str):
config = parse_config(config_file, config_arg_str)
make_diagram_from_proto(config.model_config, dot_file)
def make_diagram_from_proto(model_config, dot_file):
# print >> sys.stderr, config
name2id = {}
f = open(dot_file, 'w')
submodel_layers = set()
def make_link(link):
return 'l%s -> l%s;' % (name2id[link.layer_name],
name2id[link.link_name])
def make_mem(mem):
s = ''
if mem.boot_layer_name:
s += 'l%s -> l%s;\n' % (name2id[mem.boot_layer_name],
name2id[mem.layer_name])
s += 'l%s -> l%s [style=dashed];' % (name2id[mem.layer_name],
name2id[mem.link_name])
return s
print >> f, 'digraph graphname {'
print >> f, 'node [width=0.375,height=0.25];'
for i in xrange(len(model_config.layers)):
l = model_config.layers[i]
name2id[l.name] = i
i = 0
for sub_model in model_config.sub_models:
if sub_model.name == 'root':
continue
print >> f, 'subgraph cluster_%s {' % i
print >> f, 'style=dashed;'
label = '%s ' % sub_model.name
if sub_model.reversed:
label += '<=='
print >> f, 'label = "%s";' % label
i += 1
submodel_layers.add(sub_model.name)
for layer_name in sub_model.layer_names:
submodel_layers.add(layer_name)
lid = name2id[layer_name]
layer_config = model_config.layers[lid]
label = make_layer_label(layer_config)
print >> f, 'l%s [label="%s", shape=box];' % (lid, label)
print >> f, '}'
for i in xrange(len(model_config.layers)):
l = model_config.layers[i]
if l.name not in submodel_layers:
label = make_layer_label(l)
print >> f, 'l%s [label="%s", shape=box];' % (i, label)
for sub_model in model_config.sub_models:
if sub_model.name == 'root':
continue
for link in sub_model.in_links:
print >> f, make_link(link)
for link in sub_model.out_links:
print >> f, make_link(link)
for mem in sub_model.memories:
print >> f, make_mem(mem)
for i in xrange(len(model_config.layers)):
for l in model_config.layers[i].inputs:
print >> f, 'l%s -> l%s [label="%s"];' % (
name2id[l.input_layer_name], i, l.input_parameter_name)
print >> f, '}'
f.close()
def usage():
print >> sys.stderr, ("Usage: python show_model_diagram.py" +
" CONFIG_FILE DOT_FILE [config_str]")
exit(1)
if __name__ == '__main__':
if len(sys.argv) < 3 or len(sys.argv) > 4:
usage()
config_file = sys.argv[1]
dot_file = sys.argv[2]
config_arg_str = sys.argv[3] if len(sys.argv) == 4 else ''
try:
make_diagram(config_file, dot_file, config_arg_str)
except:
traceback.print_exc()
raise
|
chouseknecht/ansible | refs/heads/devel | lib/ansible/modules/files/template.py | 9 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This is a virtual module that is entirely implemented as an action plugin and runs on the controller
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: template
version_added: historical
options:
follow:
description:
- Determine whether symbolic links should be followed.
- When set to C(yes) symbolic links will be followed, if they exist.
- When set to C(no) symbolic links will not be followed.
- Prior to Ansible 2.4, this was hardcoded as C(yes).
type: bool
default: no
version_added: '2.4'
notes:
- You can use the M(copy) module with the C(content:) option if you prefer the template inline,
as part of the playbook.
- For Windows you can use M(win_template) which uses '\\r\\n' as C(newline_sequence) by default.
seealso:
- module: copy
- module: win_copy
- module: win_template
author:
- Ansible Core Team
- Michael DeHaan
extends_documentation_fragment:
- backup
- files
- template_common
- validate
'''
EXAMPLES = r'''
- name: Template a file to /etc/files.conf
template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: '0644'
- name: Template a file, using symbolic modes (equivalent to 0644)
template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: u=rw,g=r,o=r
- name: Copy a version of named.conf that is dependent on the OS. setype obtained by doing ls -Z /etc/named.conf on original file
template:
src: named.conf_{{ ansible_os_family}}.j2
dest: /etc/named.conf
group: named
setype: named_conf_t
mode: 0640
- name: Create a DOS-style text file from a template
template:
src: config.ini.j2
dest: /share/windows/config.ini
newline_sequence: '\r\n'
- name: Copy a new sudoers file into place, after passing validation with visudo
template:
src: /mine/sudoers
dest: /etc/sudoers
validate: /usr/sbin/visudo -cf %s
- name: Update sshd configuration safely, avoid locking yourself out
template:
src: etc/ssh/sshd_config.j2
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: '0600'
validate: /usr/sbin/sshd -t -f %s
backup: yes
'''
|
westinedu/wrgroups | refs/heads/master | django/core/management/commands/cleanup.py | 350 | import datetime
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
help = "Can be run as a cronjob or directly to clean out old data from the database (only expired sessions at the moment)."
def handle_noargs(self, **options):
from django.db import transaction
from django.contrib.sessions.models import Session
Session.objects.filter(expire_date__lt=datetime.datetime.now()).delete()
transaction.commit_unless_managed()
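# Typical invocation (sketch): the command takes no arguments, so it can be
# scheduled directly, e.g. a daily crontab entry such as
#   0 4 * * * /path/to/manage.py cleanup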
|
daniponi/django | refs/heads/master | django/contrib/gis/db/backends/mysql/operations.py | 12 | from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.db.backends.mysql.operations import DatabaseOperations
from django.utils.functional import cached_property
class MySQLOperations(BaseSpatialOperations, DatabaseOperations):
mysql = True
name = 'mysql'
Adapter = WKTAdapter
@cached_property
def select(self):
if self.connection.mysql_version < (5, 6, 0):
return 'AsText(%s)'
return 'ST_AsText(%s)'
@cached_property
def from_wkb(self):
if self.connection.mysql_version < (5, 6, 0):
return 'GeomFromWKB'
return 'ST_GeomFromWKB'
@cached_property
def from_text(self):
if self.connection.mysql_version < (5, 6, 0):
return 'GeomFromText'
return 'ST_GeomFromText'
gis_operators = {
'bbcontains': SpatialOperator(func='MBRContains'), # For consistency w/PostGIS API
'bboverlaps': SpatialOperator(func='MBROverlaps'), # .. ..
'contained': SpatialOperator(func='MBRWithin'), # .. ..
'contains': SpatialOperator(func='MBRContains'),
'disjoint': SpatialOperator(func='MBRDisjoint'),
'equals': SpatialOperator(func='MBREqual'),
'exact': SpatialOperator(func='MBREqual'),
'intersects': SpatialOperator(func='MBRIntersects'),
'overlaps': SpatialOperator(func='MBROverlaps'),
'same_as': SpatialOperator(func='MBREqual'),
'touches': SpatialOperator(func='MBRTouches'),
'within': SpatialOperator(func='MBRWithin'),
}
@cached_property
def function_names(self):
return {
'Difference': 'ST_Difference',
'Distance': 'ST_Distance',
'Intersection': 'ST_Intersection',
'Length': 'GLength' if self.connection.mysql_version < (5, 6, 0) else 'ST_Length',
'SymDifference': 'ST_SymDifference',
'Union': 'ST_Union',
}
disallowed_aggregates = (
aggregates.Collect, aggregates.Extent, aggregates.Extent3D,
aggregates.MakeLine, aggregates.Union,
)
@cached_property
def unsupported_functions(self):
unsupported = {
'AsGeoJSON', 'AsGML', 'AsKML', 'AsSVG', 'BoundingCircle',
'ForceRHR', 'GeoHash', 'MemSize',
'Perimeter', 'PointOnSurface', 'Reverse', 'Scale', 'SnapToGrid',
'Transform', 'Translate',
}
if self.connection.mysql_version < (5, 6, 1):
unsupported.update({'Difference', 'Distance', 'Intersection', 'SymDifference', 'Union'})
return unsupported
def geo_db_type(self, f):
return f.geom_type
def get_geom_placeholder(self, f, value, compiler):
"""
The placeholder here has to include MySQL's WKT constructor. Because
MySQL does not support spatial transformations, there is no need to
modify the placeholder based on the contents of the given value.
"""
if hasattr(value, 'as_sql'):
placeholder, _ = compiler.compile(value)
else:
placeholder = '%s(%%s)' % self.from_text
return placeholder
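# Illustrative sketch (not from the upstream module): for a plain geometry
# value on MySQL >= 5.6 the placeholder resolves to "ST_GeomFromText(%s)"
# (and to "GeomFromText(%s)" on older servers), while expressions that
# provide as_sql() are compiled as-is.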
|
miconof/CouchPotatoServer | refs/heads/master | couchpotato/core/notifications/webhook.py | 34 | import traceback
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
log = CPLog(__name__)
autoload = 'Webhook'
class Webhook(Notification):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
post_data = {
'message': toUnicode(message)
}
if getIdentifier(data):
post_data.update({
'imdb_id': getIdentifier(data)
})
headers = {
'Content-type': 'application/x-www-form-urlencoded'
}
try:
self.urlopen(self.conf('url'), headers = headers, data = post_data, show_error = False)
return True
except:
log.error('Webhook notification failed: %s', traceback.format_exc())
return False
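# Illustrative sketch (hypothetical values): a successful notify() issues an
# urlencoded POST to the configured URL with a body such as
#   message=Downloaded+Movie&imdb_id=tt0133093
# where imdb_id is only present when the data carries an identifier.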
config = [{
'name': 'webhook',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'webhook',
'label': 'Webhook',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'url',
'description': 'The URL to send notification data to when an event happens.'
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
}
]
}
]
}]
|
ojengwa/odoo | refs/heads/8.0 | openerp/addons/base/tests/test_osv.py | 446 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP S.A. http://www.openerp.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import unittest
from openerp.osv.query import Query
class QueryTestCase(unittest.TestCase):
def test_basic_query(self):
query = Query()
query.tables.extend(['"product_product"', '"product_template"'])
query.where_clause.append("product_product.template_id = product_template.id")
query.add_join(("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False) # add normal join
query.add_join(("product_product", "res_user", "user_id", "id", "user_id"), implicit=False, outer=True) # outer join
self.assertEquals(query.get_sql()[0].strip(),
""""product_product" LEFT JOIN "res_user" as "product_product__user_id" ON ("product_product"."user_id" = "product_product__user_id"."id"),"product_template" JOIN "product_category" as "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") """.strip())
self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id""".strip())
def test_query_chained_explicit_joins(self):
query = Query()
query.tables.extend(['"product_product"', '"product_template"'])
query.where_clause.append("product_product.template_id = product_template.id")
query.add_join(("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False) # add normal join
query.add_join(("product_template__categ_id", "res_user", "user_id", "id", "user_id"), implicit=False, outer=True) # CHAINED outer join
self.assertEquals(query.get_sql()[0].strip(),
""""product_product","product_template" JOIN "product_category" as "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") LEFT JOIN "res_user" as "product_template__categ_id__user_id" ON ("product_template__categ_id"."user_id" = "product_template__categ_id__user_id"."id")""".strip())
self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id""".strip())
def test_mixed_query_chained_explicit_implicit_joins(self):
query = Query()
query.tables.extend(['"product_product"', '"product_template"'])
query.where_clause.append("product_product.template_id = product_template.id")
query.add_join(("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False) # add normal join
query.add_join(("product_template__categ_id", "res_user", "user_id", "id", "user_id"), implicit=False, outer=True) # CHAINED outer join
query.tables.append('"account.account"')
query.where_clause.append("product_category.expense_account_id = account_account.id") # additional implicit join
self.assertEquals(query.get_sql()[0].strip(),
""""product_product","product_template" JOIN "product_category" as "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") LEFT JOIN "res_user" as "product_template__categ_id__user_id" ON ("product_template__categ_id"."user_id" = "product_template__categ_id__user_id"."id"),"account.account" """.strip())
self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id AND product_category.expense_account_id = account_account.id""".strip())
def test_raise_missing_lhs(self):
query = Query()
query.tables.append('"product_product"')
self.assertRaises(AssertionError, query.add_join, ("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
cblecker/test-infra | refs/heads/master | config/jobs/kubernetes/kops/build_jobs.py | 1 | # Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import zlib
import yaml
import boto3 # pylint: disable=import-error
import jinja2 # pylint: disable=import-error
periodic_template = """
- name: {{job_name}}
cron: '{{cron}}'
labels:
preset-service-account: "true"
preset-aws-ssh: "true"
preset-aws-credential: "true"
decorate: true
decoration_config:
timeout: {{job_timeout}}
extra_refs:
- org: kubernetes
repo: kops
base_ref: master
workdir: true
path_alias: k8s.io/kops
spec:
containers:
- command:
- runner.sh
args:
- bash
- -c
- |
make test-e2e-install
kubetest2 kops \\
-v 2 \\
--up --down \\
--cloud-provider=aws \\
--create-args="{{create_args}}" \\
{%- if kops_feature_flags %}
--env=KOPS_FEATURE_FLAGS={{kops_feature_flags}} \\
{%- endif %}
--kops-version-marker={{kops_deploy_url}} \\
{%- if publish_version_marker %}
--publish-version-marker={{publish_version_marker}} \\
{%- endif %}
--kubernetes-version={{k8s_deploy_url}} \\
{%- if terraform_version %}
--terraform-version={{terraform_version}} \\
{%- endif %}
{%- if validation_wait %}
--validation-wait={{validation_wait}} \\
{%- endif %}
--test=kops \\
-- \\
--ginkgo-args="--debug" \\
--test-args="-test.timeout={{test_timeout}} -num-nodes=0" \\
{%- if test_package_bucket %}
--test-package-bucket={{test_package_bucket}} \\
{%- endif %}
{%- if test_package_dir %}
--test-package-dir={{test_package_dir}} \\
{%- endif %}
--test-package-marker={{marker}} \\
--parallel={{test_parallelism}} \\
{%- if focus_regex %}
--focus-regex="{{focus_regex}}" \\
{%- endif %}
--skip-regex="{{skip_regex}}"
env:
- name: KUBE_SSH_KEY_PATH
value: /etc/aws-ssh/aws-ssh-private
- name: KUBE_SSH_USER
value: {{kops_ssh_user}}
image: gcr.io/k8s-testimages/kubekins-e2e:v20210512-b8d1b30-master
imagePullPolicy: Always
resources:
limits:
memory: 3Gi
requests:
cpu: "2"
memory: 3Gi
"""
presubmit_template = """
- name: {{job_name}}
branches:
- master
{%- if run_if_changed %}
run_if_changed: '{{run_if_changed}}'
{%- endif %}
always_run: {{always_run}}
skip_report: {{skip_report}}
labels:
{%- if cloud == "aws" %}
preset-service-account: "true"
preset-aws-ssh: "true"
preset-aws-credential: "true"
preset-bazel-scratch-dir: "true"
preset-bazel-remote-cache-enabled: "true"
preset-dind-enabled: "true"
{%- else %}
preset-k8s-ssh: "true"
{%- endif %}
decorate: true
decoration_config:
timeout: {{job_timeout}}
path_alias: k8s.io/kops
spec:
{%- if cloud == "gce" %}
serviceAccountName: k8s-kops-test
{%- endif %}
containers:
- image: gcr.io/k8s-testimages/kubekins-e2e:v20210512-b8d1b30-master
imagePullPolicy: Always
command:
- runner.sh
args:
- bash
- -c
- |
make test-e2e-install
kubetest2 kops \\
-v 2 \\
--up --build --down \\
--cloud-provider={{cloud}} \\
--create-args="{{create_args}}" \\
{%- if kops_feature_flags %}
--env=KOPS_FEATURE_FLAGS={{kops_feature_flags}} \\
{%- endif %}
--kubernetes-version={{k8s_deploy_url}} \\
--kops-binary-path=/home/prow/go/src/k8s.io/kops/bazel-bin/cmd/kops/linux-amd64/kops \\
{%- if terraform_version %}
--terraform-version={{terraform_version}} \\
{%- endif %}
--test=kops \\
-- \\
--ginkgo-args="--debug" \\
--test-args="-test.timeout={{test_timeout}} -num-nodes=0" \\
{%- if test_package_bucket %}
--test-package-bucket={{test_package_bucket}} \\
{%- endif %}
{%- if test_package_dir %}
--test-package-dir={{test_package_dir}} \\
{%- endif %}
--test-package-marker={{marker}} \\
--parallel={{test_parallelism}} \\
{%- if focus_regex %}
--focus-regex="{{focus_regex}}" \\
{%- endif %}
--skip-regex="{{skip_regex}}"
securityContext:
privileged: true
env:
- name: KUBE_SSH_KEY_PATH
value: {{kops_ssh_key_path}}
- name: KUBE_SSH_USER
value: {{kops_ssh_user}}
- name: GOPATH
value: /home/prow/go
resources:
requests:
cpu: "2"
memory: "6Gi"
"""
# We support rapid focus on a few tests of high concern
# This should be used for temporary tests we are evaluating,
# and ideally linked to a bug, and removed once the bug is fixed
run_hourly = [
]
run_daily = [
'kops-grid-scenario-service-account-iam',
'kops-grid-scenario-arm64',
'kops-grid-scenario-aws-cloud-controller-manager',
'kops-grid-scenario-serial-test-for-timeout',
'kops-grid-scenario-terraform',
]
# These are job tab names of unsupported grid combinations
skip_jobs = [
]
def simple_hash(s):
    # & 0xffffffff masks crc32 to an unsigned value so python2 and python3 agree
return zlib.crc32(s.encode()) & 0xffffffff
def build_cron(key, runs_per_day):
runs_per_week = 0
minute = simple_hash("minutes:" + key) % 60
hour = simple_hash("hours:" + key) % 24
day_of_week = simple_hash("day_of_week:" + key) % 7
if runs_per_day > 0:
        hour_denominator = 24 // runs_per_day
hour_offset = simple_hash("hours:" + key) % hour_denominator
return "%d %d-23/%d * * *" % (minute, hour_offset, hour_denominator), (runs_per_day * 7)
# run Ubuntu 20.04 (Focal) jobs more frequently
if "u2004" in key:
runs_per_week += 7
return "%d %d * * *" % (minute, hour), runs_per_week
# run hotlist jobs more frequently
if key in run_hourly:
runs_per_week += 24 * 7
return "%d * * * *" % (minute), runs_per_week
if key in run_daily:
runs_per_week += 7
return "%d %d * * *" % (minute, hour), runs_per_week
runs_per_week += 1
return "%d %d * * %d" % (minute, hour, day_of_week), runs_per_week
def replace_or_remove_line(s, pattern, new_str):
keep = []
for line in s.split('\n'):
if pattern in line:
if new_str:
line = line.replace(pattern, new_str)
keep.append(line)
else:
keep.append(line)
return '\n'.join(keep)
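# Behaviour sketch (hypothetical input): with pattern "foo: OLD" and
# new_str "foo: NEW", replace_or_remove_line rewrites every matching line in
# place; with new_str=None (or "") the matching lines are dropped entirely,
# so one helper covers both substitute-a-line and strip-a-line edits.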
def should_skip_newer_k8s(k8s_version, kops_version):
if kops_version is None:
return False
if k8s_version is None:
return True
return float(k8s_version) > float(kops_version)
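# Example: should_skip_newer_k8s("1.22", "1.21") is True, so a kops 1.21 job
# never tests the newer k8s 1.22; with kops_version=None (latest kops),
# nothing is skipped.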
def k8s_version_info(k8s_version):
test_package_bucket = ''
test_package_dir = ''
if k8s_version == 'latest':
marker = 'latest.txt'
k8s_deploy_url = "https://storage.googleapis.com/kubernetes-release/release/latest.txt"
elif k8s_version == 'ci':
marker = 'latest.txt'
k8s_deploy_url = "https://storage.googleapis.com/kubernetes-release-dev/ci/latest.txt"
test_package_bucket = 'kubernetes-release-dev'
test_package_dir = 'ci'
elif k8s_version == 'stable':
marker = 'stable.txt'
k8s_deploy_url = "https://storage.googleapis.com/kubernetes-release/release/stable.txt"
elif k8s_version:
marker = f"stable-{k8s_version}.txt"
k8s_deploy_url = f"https://storage.googleapis.com/kubernetes-release/release/stable-{k8s_version}.txt" # pylint: disable=line-too-long
else:
raise Exception('missing required k8s_version')
return marker, k8s_deploy_url, test_package_bucket, test_package_dir
def create_args(kops_channel, networking, container_runtime, extra_flags, kops_image):
args = f"--channel={kops_channel} --networking=" + (networking or "kubenet")
if container_runtime:
args += f" --container-runtime={container_runtime}"
if kops_image:
image_overridden = False
if extra_flags:
for arg in extra_flags:
if "--image=" in arg:
image_overridden = True
args = args + " " + arg
if not image_overridden:
args = f"--image='{kops_image}' {args}"
return args.strip()
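# Example (hypothetical values): create_args("alpha", "calico", "containerd",
# None, "ami-ubuntu-focal") returns
#   "--image='ami-ubuntu-focal' --channel=alpha --networking=calico --container-runtime=containerd"
# The image flag is prepended only when extra_flags carries no --image=
# override of its own.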
def latest_aws_image(owner, name):
client = boto3.client('ec2', region_name='us-east-1')
response = client.describe_images(
Owners=[owner],
Filters=[
{
'Name': 'name',
'Values': [
name,
],
},
],
)
images = {}
for image in response['Images']:
images[image['CreationDate']] = image['ImageLocation']
return images[sorted(images, reverse=True)[0]]
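# Note: latest_aws_image calls the live EC2 DescribeImages API, so building
# the distro_images table below requires AWS credentials at generation time.
# The newest-image pick is simply a reverse sort on CreationDate, e.g.:
#   images = {'2021-01-01T00:00:00.000Z': 'old-ami', '2021-03-01T00:00:00.000Z': 'new-ami'}
#   images[sorted(images, reverse=True)[0]]  # -> 'new-ami'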
distro_images = {
'amzn2': latest_aws_image('137112412989', 'amzn2-ami-hvm-*-x86_64-gp2'),
'centos7': latest_aws_image('125523088429', 'CentOS 7.*x86_64'),
'centos8': latest_aws_image('125523088429', 'CentOS 8.*x86_64'),
'deb9': latest_aws_image('379101102735', 'debian-stretch-hvm-x86_64-gp2-*'),
'deb10': latest_aws_image('136693071363', 'debian-10-amd64-*'),
'flatcar': latest_aws_image('075585003325', 'Flatcar-stable-*-hvm'),
'rhel7': latest_aws_image('309956199498', 'RHEL-7.*_HVM_*-x86_64-0-Hourly2-GP2'),
'rhel8': latest_aws_image('309956199498', 'RHEL-8.*_HVM-*-x86_64-0-Hourly2-GP2'),
'u1804': latest_aws_image('099720109477', 'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*'), # pylint: disable=line-too-long
'u2004': latest_aws_image('099720109477', 'ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*'), # pylint: disable=line-too-long
'u2004arm64': latest_aws_image('099720109477', 'ubuntu/images/hvm-ssd/ubuntu-focal-20.04-arm64-server-*'), # pylint: disable=line-too-long
'u2010': latest_aws_image('099720109477', 'ubuntu/images/hvm-ssd/ubuntu-groovy-20.10-amd64-server-*'), # pylint: disable=line-too-long
}
distros_ssh_user = {
'amzn2': 'ec2-user',
'centos7': 'centos',
'centos8': 'centos',
'deb9': 'admin',
'deb10': 'admin',
'flatcar': 'core',
'rhel7': 'ec2-user',
'rhel8': 'ec2-user',
'u1804': 'ubuntu',
'u2004': 'ubuntu',
'u2004arm64': 'ubuntu',
'u2010': 'ubuntu',
}
##############
# Build Test #
##############
# Returns a string representing the periodic prow job and the number of job invocations per week
def build_test(cloud='aws',
distro='u2004',
networking=None,
container_runtime='docker',
k8s_version='latest',
kops_channel='alpha',
kops_version=None,
publish_version_marker=None,
name_override=None,
feature_flags=(),
extra_flags=None,
extra_dashboards=None,
terraform_version=None,
test_parallelism=25,
test_timeout_minutes=60,
skip_override=None,
focus_regex=None,
runs_per_day=0):
# pylint: disable=too-many-statements,too-many-branches,too-many-arguments
if kops_version is None:
# TODO: Move to kops-ci/markers/master/ once validated
kops_deploy_url = "https://storage.googleapis.com/kops-ci/bin/latest-ci-updown-green.txt"
elif kops_version.startswith("https://"):
kops_deploy_url = kops_version
kops_version = None
else:
kops_deploy_url = f"https://storage.googleapis.com/kops-ci/markers/release-{kops_version}/latest-ci-updown-green.txt" # pylint: disable=line-too-long
# https://github.com/cilium/cilium/blob/71cfb265d53b63a2be3806fb3fd4425fa36262ff/Documentation/install/system_requirements.rst#centos-foot
if networking == "cilium" and distro not in ["u2004", "u2004arm64", "deb10", "rhel8"]:
return None
if should_skip_newer_k8s(k8s_version, kops_version):
return None
kops_image = distro_images[distro]
kops_ssh_user = distros_ssh_user[distro]
validation_wait = '20m' if distro == 'flatcar' else None
marker, k8s_deploy_url, test_package_bucket, test_package_dir = k8s_version_info(k8s_version)
args = create_args(kops_channel, networking, container_runtime, extra_flags, kops_image)
skip_regex = r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler|Services.*functioning.*NodePort|Services.*rejected.*endpoints|Services.*affinity' # pylint: disable=line-too-long
if networking == "cilium":
# https://github.com/cilium/cilium/issues/10002
skip_regex += r'|TCP.CLOSE_WAIT'
# https://github.com/cilium/cilium/issues/15361
skip_regex += r'|external.IP.is.not.assigned.to.a.node'
if skip_override is not None:
skip_regex = skip_override
    # TODO(rifelpet): Remove once k8s tags have been created that include
# https://github.com/kubernetes/kubernetes/pull/101443
if cloud == 'aws' and k8s_version in ('latest', 'stable', '1.21', '1.22') and skip_regex:
skip_regex += r'|Invalid.AWS.KMS.key'
suffix = ""
if cloud and cloud != "aws":
suffix += "-" + cloud
if networking:
suffix += "-" + networking
if distro:
suffix += "-" + distro
if k8s_version:
suffix += "-k" + k8s_version.replace("1.", "")
if kops_version:
suffix += "-ko" + kops_version.replace("1.", "")
if container_runtime:
suffix += "-" + container_runtime
tab = name_override or (f"kops-grid{suffix}")
if tab in skip_jobs:
return None
job_name = f"e2e-{tab}"
cron, runs_per_week = build_cron(tab, runs_per_day)
tmpl = jinja2.Template(periodic_template)
job = tmpl.render(
job_name=job_name,
cron=cron,
kops_ssh_user=kops_ssh_user,
create_args=args,
k8s_deploy_url=k8s_deploy_url,
kops_deploy_url=kops_deploy_url,
test_parallelism=str(test_parallelism),
job_timeout=str(test_timeout_minutes + 30) + 'm',
test_timeout=str(test_timeout_minutes) + 'm',
marker=marker,
skip_regex=skip_regex,
kops_feature_flags=','.join(feature_flags),
terraform_version=terraform_version,
test_package_bucket=test_package_bucket,
test_package_dir=test_package_dir,
focus_regex=focus_regex,
publish_version_marker=publish_version_marker,
validation_wait=validation_wait,
)
spec = {
'cloud': cloud,
'networking': networking,
'distro': distro,
'k8s_version': k8s_version,
'kops_version': kops_version,
'container_runtime': container_runtime,
'kops_channel': kops_channel,
}
if feature_flags:
spec['feature_flags'] = ','.join(feature_flags)
if extra_flags:
spec['extra_flags'] = ' '.join(extra_flags)
jsonspec = json.dumps(spec, sort_keys=True)
dashboards = [
'sig-cluster-lifecycle-kops',
'google-aws',
'kops-kubetest2',
f"kops-distro-{distro}",
f"kops-k8s-{k8s_version or 'latest'}",
f"kops-{kops_version or 'latest'}",
]
if extra_dashboards:
dashboards.extend(extra_dashboards)
annotations = {
'testgrid-dashboards': ', '.join(sorted(dashboards)),
'testgrid-days-of-results': '90',
'testgrid-tab-name': tab,
}
for (k, v) in spec.items():
annotations[f"test.kops.k8s.io/{k}"] = v or ""
extra = yaml.dump({'annotations': annotations}, width=9999, default_flow_style=False)
output = f"\n# {jsonspec}\n{job.strip()}\n"
for line in extra.splitlines():
output += f" {line}\n"
return output, runs_per_week
# Returns a string representing a presubmit prow job YAML
def presubmit_test(cloud='aws',
distro='u2004',
networking=None,
container_runtime='docker',
k8s_version='latest',
kops_channel='alpha',
name=None,
tab_name=None,
feature_flags=(),
extra_flags=None,
extra_dashboards=None,
test_parallelism=25,
test_timeout_minutes=60,
skip_override=None,
focus_regex=None,
run_if_changed=None,
skip_report=False,
always_run=False):
# pylint: disable=too-many-statements,too-many-branches,too-many-arguments
if cloud == 'aws':
kops_image = distro_images[distro]
kops_ssh_user = distros_ssh_user[distro]
kops_ssh_key_path = '/etc/aws-ssh/aws-ssh-private'
        # TODO(rifelpet): Remove once k8s tags have been created that include
        # https://github.com/kubernetes/kubernetes/pull/101443
        if skip_override and k8s_version in ('ci', 'latest', 'stable', '1.21', '1.22'):
            skip_override += r'|Invalid.AWS.KMS.key'
elif cloud == 'gce':
kops_image = None
kops_ssh_user = 'prow'
kops_ssh_key_path = '/etc/ssh-key-secret/ssh-private'
marker, k8s_deploy_url, test_package_bucket, test_package_dir = k8s_version_info(k8s_version)
args = create_args(kops_channel, networking, container_runtime, extra_flags, kops_image)
tmpl = jinja2.Template(presubmit_template)
job = tmpl.render(
job_name=name,
cloud=cloud,
kops_ssh_key_path=kops_ssh_key_path,
kops_ssh_user=kops_ssh_user,
create_args=args,
k8s_deploy_url=k8s_deploy_url,
test_parallelism=str(test_parallelism),
job_timeout=str(test_timeout_minutes + 30) + 'm',
test_timeout=str(test_timeout_minutes) + 'm',
marker=marker,
skip_regex=skip_override,
kops_feature_flags=','.join(feature_flags),
test_package_bucket=test_package_bucket,
test_package_dir=test_package_dir,
focus_regex=focus_regex,
run_if_changed=run_if_changed,
skip_report='true' if skip_report else 'false',
always_run='true' if always_run else 'false',
)
spec = {
'cloud': cloud,
'networking': networking,
'distro': distro,
'k8s_version': k8s_version,
'container_runtime': container_runtime,
'kops_channel': kops_channel,
}
if feature_flags:
spec['feature_flags'] = ','.join(feature_flags)
if extra_flags:
spec['extra_flags'] = ' '.join(extra_flags)
jsonspec = json.dumps(spec, sort_keys=True)
dashboards = [
'presubmits-kops',
'kops-presubmits',
'sig-cluster-lifecycle-kops',
'kops-kubetest2',
f"kops-distro-{distro}",
f"kops-k8s-{k8s_version or 'latest'}",
]
if extra_dashboards:
dashboards.extend(extra_dashboards)
annotations = {
'testgrid-dashboards': ', '.join(sorted(dashboards)),
'testgrid-days-of-results': '90',
'testgrid-tab-name': tab_name,
}
for (k, v) in spec.items():
annotations[f"test.kops.k8s.io/{k}"] = v or ""
extra = yaml.dump({'annotations': annotations}, width=9999, default_flow_style=False)
output = f"\n# {jsonspec}{job}\n"
for line in extra.splitlines():
output += f" {line}\n"
return output
####################
# Grid Definitions #
####################
networking_options = [
None,
'calico',
'cilium',
'flannel',
'kopeio',
]
distro_options = [
'amzn2',
'deb9',
'deb10',
'flatcar',
'rhel7',
'rhel8',
'u1804',
'u2004',
]
k8s_versions = [
#"latest", # disabled until we're ready to test 1.22
"1.19",
"1.20",
"1.21",
]
kops_versions = [
None, # maps to latest
"1.20",
"1.21",
]
container_runtimes = [
"docker",
"containerd",
]
############################
# kops-periodics-grid.yaml #
############################
def generate_grid():
results = []
# pylint: disable=too-many-nested-blocks
for container_runtime in container_runtimes:
for networking in networking_options:
for distro in distro_options:
for k8s_version in k8s_versions:
for kops_version in kops_versions:
results.append(
build_test(cloud="aws",
distro=distro,
extra_dashboards=['kops-grid'],
k8s_version=k8s_version,
kops_version=kops_version,
networking=networking,
container_runtime=container_runtime)
)
return filter(None, results)
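# Sizing note: the nested loops above enumerate 2 runtimes x 5 networking
# options x 8 distros x 3 k8s versions x 3 kops versions = 720 candidate
# jobs; build_test returns None for unsupported combinations (the cilium
# distro restriction, kops older than k8s, skip_jobs entries), and
# filter(None, results) discards them.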
#############################
# kops-periodics-misc2.yaml #
#############################
def generate_misc():
results = [
# A one-off scenario testing arm64
build_test(name_override="kops-grid-scenario-arm64",
cloud="aws",
distro="u2004arm64",
extra_flags=["--zones=eu-central-1a",
"--node-size=m6g.large",
"--master-size=m6g.large"],
skip_override=r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler|Services.*functioning.*NodePort|Services.*rejected.*endpoints|Services.*affinity|Simple.pod.should.handle.in-cluster.config', # pylint: disable=line-too-long
extra_dashboards=['kops-misc']),
# A special test for JWKS
build_test(name_override="kops-grid-scenario-service-account-iam",
cloud="aws",
distro="u2004",
feature_flags=["UseServiceAccountIAM"],
extra_flags=['--api-loadbalancer-type=public'],
extra_dashboards=['kops-misc']),
# A special test for AWS Cloud-Controller-Manager
build_test(name_override="kops-grid-scenario-aws-cloud-controller-manager",
cloud="aws",
distro="u2004",
k8s_version="1.19",
feature_flags=["EnableExternalCloudController,SpecOverrideFlag"],
extra_flags=['--override=cluster.spec.cloudControllerManager.cloudProvider=aws',
'--override=cluster.spec.cloudConfig.awsEBSCSIDriver.enabled=true'],
extra_dashboards=['provider-aws-cloud-provider-aws', 'kops-misc']),
build_test(name_override="kops-grid-scenario-terraform",
container_runtime='containerd',
k8s_version="1.20",
terraform_version="0.14.6",
extra_dashboards=['kops-misc']),
build_test(name_override="kops-aws-misc-ha-euwest1",
k8s_version="stable",
networking="calico",
kops_channel="alpha",
runs_per_day=24,
extra_flags=["--master-count=3", "--zones=eu-west-1a,eu-west-1b,eu-west-1c"],
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-aws-misc-arm64-release",
k8s_version="latest",
container_runtime="containerd",
distro="u2004arm64",
networking="calico",
kops_channel="alpha",
runs_per_day=3,
extra_flags=["--zones=eu-central-1a",
"--node-size=m6g.large",
"--master-size=m6g.large"],
skip_override=r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler|Services.*functioning.*NodePort|Services.*rejected.*endpoints|Services.*affinity|Simple.pod.should.handle.in-cluster.config', # pylint: disable=line-too-long
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-aws-misc-arm64-ci",
k8s_version="ci",
container_runtime="containerd",
distro="u2004arm64",
networking="calico",
kops_channel="alpha",
runs_per_day=3,
extra_flags=["--zones=eu-central-1a",
"--node-size=m6g.large",
"--master-size=m6g.large"],
skip_override=r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler|Simple.pod.should.handle.in-cluster.config', # pylint: disable=line-too-long
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-aws-misc-arm64-conformance",
k8s_version="ci",
container_runtime="containerd",
distro="u2004arm64",
networking="calico",
kops_channel="alpha",
runs_per_day=3,
extra_flags=["--zones=eu-central-1a",
"--node-size=m6g.large",
"--master-size=m6g.large"],
skip_override=r'\[Slow\]|\[Serial\]|\[Flaky\]',
focus_regex=r'\[Conformance\]|\[NodeConformance\]',
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-aws-misc-amd64-conformance",
k8s_version="ci",
container_runtime="containerd",
distro='u2004',
kops_channel="alpha",
runs_per_day=3,
extra_flags=["--node-size=c5.large",
"--master-size=c5.large"],
skip_override=r'\[Slow\]|\[Serial\]|\[Flaky\]',
focus_regex=r'\[Conformance\]|\[NodeConformance\]',
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-aws-misc-updown",
k8s_version="stable",
container_runtime="containerd",
networking="calico",
distro='u2004',
kops_channel="alpha",
kops_version="https://storage.googleapis.com/kops-ci/bin/latest-ci.txt",
publish_version_marker="gs://kops-ci/bin/latest-ci-updown-green.txt",
runs_per_day=24,
extra_flags=["--node-size=c5.large",
"--master-size=c5.large"],
skip_override=r'',
focus_regex=r'\[k8s.io\]\sNetworking.*\[Conformance\]',
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-grid-scenario-cilium10-arm64",
cloud="aws",
networking="cilium",
distro="u2004arm64",
kops_channel="alpha",
runs_per_day=1,
extra_flags=["--zones=eu-central-1a",
"--node-size=m6g.large",
"--master-size=m6g.large",
"--override=cluster.spec.networking.cilium.version=v1.10.0-rc2"],
skip_override=r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler|Services.*functioning.*NodePort|Services.*rejected.*endpoints|Services.*affinity|TCP.CLOSE_WAIT|external.IP.is.not.assigned.to.a.node|Simple.pod.should.handle.in-cluster.config', # pylint: disable=line-too-long
extra_dashboards=['kops-misc']),
build_test(name_override="kops-grid-scenario-cilium10-amd64",
cloud="aws",
networking="cilium",
distro="u2004",
kops_channel="alpha",
runs_per_day=1,
extra_flags=["--zones=eu-central-1a",
"--override=cluster.spec.networking.cilium.version=v1.10.0-rc2"],
extra_dashboards=['kops-misc']),
]
return results
###############################
# kops-periodics-distros.yaml #
###############################
def generate_distros():
distros = ['debian9', 'debian10', 'ubuntu1804', 'ubuntu2004', 'centos7', 'centos8',
'amazonlinux2', 'rhel7', 'rhel8', 'flatcar']
results = []
for distro in distros:
distro_short = distro.replace('ubuntu', 'u').replace('debian', 'deb').replace('amazonlinux', 'amzn') # pylint: disable=line-too-long
results.append(
build_test(distro=distro_short,
networking='calico',
container_runtime='containerd',
k8s_version='stable',
kops_channel='alpha',
name_override=f"kops-aws-distro-image{distro}",
extra_dashboards=['kops-distros'],
runs_per_day=3,
skip_override=r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler' # pylint: disable=line-too-long
)
)
# pprint.pprint(results)
return results
#######################################
# kops-periodics-network-plugins.yaml #
#######################################
def generate_network_plugins():
plugins = ['amazon-vpc', 'calico', 'canal', 'cilium', 'flannel', 'kopeio', 'kuberouter', 'weave'] # pylint: disable=line-too-long
results = []
skip_base = r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler'# pylint: disable=line-too-long
for plugin in plugins:
networking_arg = plugin
skip_regex = skip_base
if plugin == 'amazon-vpc':
networking_arg = 'amazonvpc'
if plugin == 'cilium':
skip_regex += r'|should.set.TCP.CLOSE_WAIT'
else:
skip_regex += r'|Services.*functioning.*NodePort'
if plugin in ['calico', 'canal', 'weave', 'cilium']:
skip_regex += r'|Services.*rejected.*endpoints'
if plugin == 'kuberouter':
skip_regex += r'|load-balancer|hairpin|affinity\stimeout|service\.kubernetes\.io|CLOSE_WAIT' # pylint: disable=line-too-long
networking_arg = 'kube-router'
results.append(
build_test(
container_runtime='containerd',
k8s_version='stable',
kops_channel='alpha',
name_override=f"kops-aws-cni-{plugin}",
networking=networking_arg,
extra_flags=['--node-size=t3.large'],
extra_dashboards=['kops-network-plugins'],
runs_per_day=3,
skip_override=skip_regex
)
)
return results
################################
# kops-periodics-versions.yaml #
################################
def generate_versions():
skip_regex = r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler' # pylint: disable=line-too-long
results = [
build_test(
container_runtime='containerd',
k8s_version='ci',
kops_channel='alpha',
name_override='kops-aws-k8s-latest',
networking='calico',
extra_dashboards=['kops-versions'],
runs_per_day=24,
# This version marker is only used by the k/k presubmit job
publish_version_marker='gs://kops-ci/bin/latest-ci-green.txt',
skip_override=skip_regex
)
]
for version in ['1.21', '1.20', '1.19', '1.18', '1.17']:
distro = 'deb9' if version == '1.17' else 'u2004'
results.append(
build_test(
container_runtime='containerd',
distro=distro,
k8s_version=version,
kops_channel='alpha',
name_override=f"kops-aws-k8s-{version.replace('.', '-')}",
networking='calico',
extra_dashboards=['kops-versions'],
runs_per_day=8,
skip_override=skip_regex
)
)
return results
######################
# kops-pipeline.yaml #
######################
def generate_pipeline():
results = []
focus_regex = r'\[k8s.io\]\sNetworking.*\[Conformance\]'
for version in ['master', '1.21', '1.20', '1.19']:
branch = version if version == 'master' else f"release-{version}"
publish_version_marker = f"gs://kops-ci/markers/{branch}/latest-ci-updown-green.txt"
kops_version = f"https://storage.googleapis.com/k8s-staging-kops/kops/releases/markers/{branch}/latest-ci.txt" # pylint: disable=line-too-long
results.append(
build_test(
container_runtime='containerd',
k8s_version=version.replace('master', 'latest'),
kops_version=kops_version,
kops_channel='alpha',
name_override=f"kops-pipeline-updown-kops{version.replace('.', '')}",
networking='calico',
extra_dashboards=['kops-versions'],
runs_per_day=24,
skip_override=r'\[Slow\]|\[Serial\]',
focus_regex=focus_regex,
publish_version_marker=publish_version_marker,
)
)
return results
########################################
# kops-presubmits-network-plugins.yaml #
########################################
def generate_presubmits_network_plugins():
plugins = {
'amazonvpc': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.amazon-vpc-routed-eni\/|pkg\/model\/(firewall|components\/kubeproxy|iam\/iam_builder).go|nodeup\/pkg\/model\/(context|kubelet).go|upup\/pkg\/fi\/cloudup\/defaults.go)', # pylint: disable=line-too-long
'calico': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.projectcalico\.org\/|pkg\/model\/(firewall.go|pki.go|iam\/iam_builder.go)|nodeup\/pkg\/model\/networking\/calico.go)', # pylint: disable=line-too-long
'canal': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.projectcalico\.org\.canal\/|nodeup\/pkg\/model\/networking\/(flannel|canal).go)', # pylint: disable=line-too-long
'cilium': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.cilium\.io\/|pkg\/model\/(firewall|components\/cilium|iam\/iam_builder).go|nodeup\/pkg\/model\/(context|networking\/cilium).go|upup\/pkg\/fi\/cloudup\/template_functions.go)', # pylint: disable=line-too-long
'flannel': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.flannel\/|nodeup\/pkg\/model\/(sysctls|networking\/flannel).go|upup\/pkg\/fi\/cloudup\/template_functions.go)', # pylint: disable=line-too-long
'kuberouter': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.kuberouter\/|upup\/pkg\/fi\/cloudup\/template_functions.go)', # pylint: disable=line-too-long
'weave': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.weave\/|upup\/pkg\/fi\/cloudup\/template_functions.go)' # pylint: disable=line-too-long
}
results = []
skip_base = r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler' # pylint: disable=line-too-long
for plugin, run_if_changed in plugins.items():
networking_arg = plugin
skip_regex = skip_base
if plugin == 'cilium':
skip_regex += r'|should.set.TCP.CLOSE_WAIT'
else:
skip_regex += r'|Services.*functioning.*NodePort'
if plugin in ['calico', 'canal', 'weave', 'cilium']:
skip_regex += r'|Services.*rejected.*endpoints|external.IP.is.not.assigned.to.a.node|hostPort.but.different.hostIP|same.port.number.but.different.protocols' # pylint: disable=line-too-long
if plugin == 'kuberouter':
skip_regex += r'|load-balancer|hairpin|affinity\stimeout|service\.kubernetes\.io|CLOSE_WAIT' # pylint: disable=line-too-long
networking_arg = 'kube-router'
if plugin in ['canal', 'flannel']:
skip_regex += r'|up\sand\sdown|headless|service-proxy-name'
results.append(
presubmit_test(
container_runtime='containerd',
k8s_version='stable',
kops_channel='alpha',
name=f"pull-kops-e2e-cni-{plugin}",
tab_name=f"e2e-{plugin}",
networking=networking_arg,
extra_flags=['--node-size=t3.large'],
extra_dashboards=['kops-network-plugins'],
skip_override=skip_regex,
run_if_changed=run_if_changed,
skip_report=False,
always_run=False,
)
)
return results
############################
# kops-presubmits-e2e.yaml #
############################
def generate_presubmits_e2e():
skip_regex = r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler' # pylint: disable=line-too-long
return [
presubmit_test(
container_runtime='docker',
k8s_version='1.21',
kops_channel='alpha',
name='pull-kops-e2e-kubernetes-aws',
tab_name='e2e-docker',
always_run=True,
skip_override=skip_regex,
),
presubmit_test(
container_runtime='containerd',
k8s_version='1.21',
kops_channel='alpha',
name='pull-kops-e2e-k8s-containerd',
networking='calico',
tab_name='e2e-containerd',
always_run=True,
skip_override=skip_regex,
),
presubmit_test(
container_runtime='containerd',
k8s_version='1.21',
kops_channel='alpha',
name='pull-kops-e2e-k8s-containerd-ha',
networking='calico',
extra_flags=["--master-count=3", "--zones=eu-central-1a,eu-central-1b,eu-central-1c"],
tab_name='e2e-containerd-ha',
always_run=False,
skip_override=skip_regex+'|Multi-AZ',
),
presubmit_test(
distro="u2010",
networking='calico',
container_runtime='crio',
k8s_version='1.21',
kops_channel='alpha',
name='pull-kops-e2e-k8s-crio',
tab_name='e2e-crio',
always_run=False,
skip_override=skip_regex,
),
presubmit_test(
cloud='gce',
container_runtime='containerd',
k8s_version='1.21',
kops_channel='alpha',
name='pull-kops-e2e-k8s-gce',
networking='cilium',
tab_name='e2e-gce',
always_run=False,
skip_override=r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Firewall|Dashboard|RuntimeClass|RuntimeHandler|kube-dns|run.a.Pod.requesting.a.RuntimeClass|should.set.TCP.CLOSE_WAIT|Services.*rejected.*endpoints', # pylint: disable=line-too-long
feature_flags=['GoogleCloudBucketACL'],
),
]
########################
# YAML File Generation #
########################
periodics_files = {
'kops-periodics-distros.yaml': generate_distros,
'kops-periodics-grid.yaml': generate_grid,
'kops-periodics-misc2.yaml': generate_misc,
'kops-periodics-network-plugins.yaml': generate_network_plugins,
'kops-periodics-versions.yaml': generate_versions,
'kops-periodics-pipeline.yaml': generate_pipeline,
}
presubmits_files = {
'kops-presubmits-network-plugins.yaml': generate_presubmits_network_plugins,
'kops-presubmits-e2e.yaml': generate_presubmits_e2e,
}
def main():
for filename, generate_func in periodics_files.items():
print(f"Generating {filename}")
output = []
runs_per_week = 0
job_count = 0
for res in generate_func():
output.append(res[0])
runs_per_week += res[1]
job_count += 1
output.insert(0, "# Test jobs generated by build_jobs.py (do not manually edit)\n")
output.insert(1, f"# {job_count} jobs, total of {runs_per_week} runs per week\n")
output.insert(2, "periodics:\n")
with open(filename, 'w') as fd:
fd.write(''.join(output))
for filename, generate_func in presubmits_files.items():
print(f"Generating {filename}")
output = []
job_count = 0
for res in generate_func():
output.append(res)
job_count += 1
output.insert(0, "# Test jobs generated by build_jobs.py (do not manually edit)\n")
output.insert(1, f"# {job_count} jobs\n")
output.insert(2, "presubmits:\n")
output.insert(3, " kubernetes/kops:\n")
with open(filename, 'w') as fd:
fd.write(''.join(output))
if __name__ == "__main__":
main()
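# Running this generator directly rewrites all six periodic YAML files and
# both presubmit YAML files in place, each prefixed with the do-not-edit
# warning and the job-count header assembled in main().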
|
chrisndodge/edx-platform | refs/heads/master | cms/djangoapps/contentstore/views/session_kv_store.py | 214 | """
An :class:`~xblock.runtime.KeyValueStore` that stores data in the django session
"""
from __future__ import absolute_import
from xblock.runtime import KeyValueStore
def stringify(key):
return repr(tuple(key))
class SessionKeyValueStore(KeyValueStore):
def __init__(self, request):
self._session = request.session
def get(self, key):
return self._session[stringify(key)]
def set(self, key, value):
self._session[stringify(key)] = value
def delete(self, key):
del self._session[stringify(key)]
def has(self, key):
return stringify(key) in self._session
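# Minimal usage sketch (assumes `request` is a Django request and `key` is an
# xblock KeyValueStore key, i.e. a namedtuple so tuple(key) is well defined):
#
#   kvs = SessionKeyValueStore(request)
#   kvs.set(key, 'draft')    # stored in the session under repr(tuple(key))
#   assert kvs.has(key)
#   kvs.get(key)             # -> 'draft'
#   kvs.delete(key)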
|
pprett/statsmodels | refs/heads/master | statsmodels/examples/ex_shrink_pickle.py | 1 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 09 16:00:27 2012
Author: Josef Perktold
"""
import numpy as np
import statsmodels.api as sm
nobs = 10000
np.random.seed(987689)
x = np.random.randn(nobs, 3)
x = sm.add_constant(x, prepend=True)
y = x.sum(1) + np.random.randn(nobs)
xf = 0.25 * np.ones((2,4))
model = sm.OLS(y, x)
#y_count = np.random.poisson(np.exp(x.sum(1)-x.mean()))
#model = sm.Poisson(y_count, x)#, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
results = model.fit()
#print results.predict(xf)
print results.model.predict(results.params, xf)
results.summary()
shrinkit = 1
if shrinkit:
results.remove_data()
import pickle
fname = 'try_shrink%d_ols.pickle' % shrinkit
fh = open(fname, 'w')
pickle.dump(results._results, fh) #pickling wrapper doesn't work
fh.close()
fh = open(fname, 'r')
results2 = pickle.load(fh)
fh.close()
print results2.predict(xf)
print results2.model.predict(results.params, xf)
y_count = np.random.poisson(np.exp(x.sum(1)-x.mean()))
model = sm.Poisson(y_count, x)#, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
results = model.fit(method='bfgs')
results.summary()
print results.model.predict(results.params, xf, exposure=1, offset=0)
if shrinkit:
results.remove_data()
else:
#work around pickling bug
results.mle_settings['callback'] = None
import pickle
fname = 'try_shrink%d_poisson.pickle' % shrinkit
fh = open(fname, 'w')
pickle.dump(results._results, fh) #pickling wrapper doesn't work
fh.close()
fh = open(fname, 'r')
results3 = pickle.load(fh)
fh.close()
print results3.predict(xf, exposure=1, offset=0)
print results3.model.predict(results.params, xf, exposure=1, offset=0)
def check_pickle(obj):
import StringIO
fh = StringIO.StringIO()
pickle.dump(obj, fh)
plen = fh.pos
fh.seek(0,0)
res = pickle.load(fh)
fh.close()
return res, plen
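# check_pickle round-trips an object through an in-memory pickle and also
# returns the pickle's length; test_remove_data_pickle below uses that length
# as a rough proxy for whether remove_data() actually dropped the large
# nobs-sized arrays.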
def test_remove_data_pickle(results, xf):
res, l = check_pickle(results)
#Note: 10000 is just a guess for the limit on the length of the pickle
np.testing.assert_(l < 10000, msg='pickle length not %d < %d' % (l, 10000))
pred1 = results.predict(xf, exposure=1, offset=0)
pred2 = res.predict(xf, exposure=1, offset=0)
np.testing.assert_equal(pred2, pred1)
test_remove_data_pickle(results._results, xf)
|
Wengex/PyDirectory | refs/heads/master | pydirectory/directory/objects/types.py | 1 | import importlib
class object(object):
_objtype = {}
def __init__(self,objects,data,objid=None,readonly=False):
if objid == None:
self._id = id(self)
else:
self._id = objid
self._is_readonly = readonly
self._objects = objects
self._exceptions = self._objects._exceptions
self._attributes = importlib.import_module("pydirectory.%(type)s.objects.attributes" % {'type':self._objects._engine._settings.type})
self._attrs = {}
self._initload = True
self._update(data)
self._initload = False
self._drops = []
def __setitem__(self,key,value):
if str == bytes:
if (type(value) == str) or (type(value) == unicode):
if value.strip() == '':
value = None
else:
if (type(value) == str) or (type(value) == bytes):
if value.strip() == '':
value = None
if not self._initload:
if value == None:
if self.attributes.get(key,False):
del self[key]
return
def lower_name(s,obj):
for item in dir(obj):
if item.lower() == s.lower():
return item
try:
attribute = getattr(self._attributes,str(lower_name(key,self._attributes)))
except AttributeError:
attribute = self._attributes.attribute
if self._initload:
self._attrs[key.lower()] = attribute(value,self._objects,objid=self._id,modify=False)
else:
if not attribute._is_readonly:
self._attrs[key.lower()] = attribute(value,self._objects,objid=self._id,modify=True)
else:
raise self._exceptions.AttributeisReadOnly
def __getitem__(self,key):
return self._attrs[key]
def __delitem__(self,key):
self._drops.append(key)
del self._attrs[key]
def __getattribute__(self,key):
if key.find('_') != 0:
if key in self._attrs:
return self[key]
try:
return super(object,self).__getattribute__(key)
except AttributeError:
return None
def __setattr__(self,key,value):
if key.find('_') == 0:
super(object,self).__setattr__(key,value)
else:
self[key] = value
    def __delattr__(self,key):
        if key.find('_') == 0:
            super(object,self).__delattr__(key)
        else:
            del self[key]
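    # Note: the underscore convention above routes '_'-prefixed names through
    # normal Python attribute access, while every other name is treated as a
    # directory attribute backed by self._attrs.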
def __len__(self):
return len(self._attrs)
def __iter__(self):
return iter(self._attrs)
def __dir__(self):
return self._attrs.keys()
def _update(self,data):
for attr,value in data.items():
self[attr] = value
def __str__(self):
return str(self._attrs['dn'])
def __repr__(self):
return repr(self._attrs['dn'])
def items(self):
return self._attrs.items()
@property
def attributes(self):
return self._attrs
def delete(self):
if not self._is_readonly:
return self._delete()
else:
raise self._exceptions.IsReadOnly
def save(self):
if not self._is_readonly:
return self._save()
else:
raise self._exceptions.IsReadOnly
def reset(self):
self._delattr = []
self._addattr = []
self._reset()
|
hoehnp/navit_test | refs/heads/master | lib/python2.7/site-packages/py/_code/code.py | 4 | import py
import sys
from inspect import CO_VARARGS, CO_VARKEYWORDS
builtin_repr = repr
reprlib = py.builtin._tryimport('repr', 'reprlib')
if sys.version_info[0] >= 3:
from traceback import format_exception_only
else:
from py._code._py2traceback import format_exception_only
class Code(object):
""" wrapper around Python code objects """
def __init__(self, rawcode):
if not hasattr(rawcode, "co_filename"):
rawcode = py.code.getrawcode(rawcode)
try:
self.filename = rawcode.co_filename
self.firstlineno = rawcode.co_firstlineno - 1
self.name = rawcode.co_name
except AttributeError:
raise TypeError("not a code object: %r" %(rawcode,))
self.raw = rawcode
def __eq__(self, other):
return self.raw == other.raw
def __ne__(self, other):
return not self == other
@property
def path(self):
""" return a path object pointing to source code (note that it
might not point to an actually existing file). """
p = py.path.local(self.raw.co_filename)
# maybe don't try this checking
if not p.check():
# XXX maybe try harder like the weird logic
# in the standard lib [linecache.updatecache] does?
p = self.raw.co_filename
return p
@property
def fullsource(self):
""" return a py.code.Source object for the full source file of the code
"""
from py._code import source
full, _ = source.findsource(self.raw)
return full
def source(self):
""" return a py.code.Source object for the code object's source only
"""
# return source only for that part of code
return py.code.Source(self.raw)
def getargs(self, var=False):
""" return a tuple with the argument names for the code object
if 'var' is set True also return the names of the variable and
keyword arguments when present
"""
        # handy shortcut for getting args
raw = self.raw
argcount = raw.co_argcount
if var:
argcount += raw.co_flags & CO_VARARGS
argcount += raw.co_flags & CO_VARKEYWORDS
return raw.co_varnames[:argcount]
class Frame(object):
"""Wrapper around a Python frame holding f_locals and f_globals
in which expressions can be evaluated."""
def __init__(self, frame):
self.lineno = frame.f_lineno - 1
self.f_globals = frame.f_globals
self.f_locals = frame.f_locals
self.raw = frame
self.code = py.code.Code(frame.f_code)
@property
def statement(self):
""" statement this frame is at """
if self.code.fullsource is None:
return py.code.Source("")
return self.code.fullsource.getstatement(self.lineno)
def eval(self, code, **vars):
""" evaluate 'code' in the frame
'vars' are optional additional local variables
returns the result of the evaluation
"""
f_locals = self.f_locals.copy()
f_locals.update(vars)
return eval(code, self.f_globals, f_locals)
def exec_(self, code, **vars):
""" exec 'code' in the frame
            'vars' are optional additional local variables
"""
f_locals = self.f_locals.copy()
f_locals.update(vars)
py.builtin.exec_(code, self.f_globals, f_locals )
def repr(self, object):
""" return a 'safe' (non-recursive, one-line) string repr for 'object'
"""
return py.io.saferepr(object)
def is_true(self, object):
return object
def getargs(self, var=False):
""" return a list of tuples (name, value) for all arguments
if 'var' is set True also include the variable and keyword
arguments when present
"""
retval = []
for arg in self.code.getargs(var):
try:
retval.append((arg, self.f_locals[arg]))
except KeyError:
pass # this can occur when using Psyco
return retval
class TracebackEntry(object):
""" a single entry in a traceback """
_repr_style = None
exprinfo = None
def __init__(self, rawentry):
self._rawentry = rawentry
self.lineno = rawentry.tb_lineno - 1
def set_repr_style(self, mode):
assert mode in ("short", "long")
self._repr_style = mode
@property
def frame(self):
return py.code.Frame(self._rawentry.tb_frame)
@property
def relline(self):
return self.lineno - self.frame.code.firstlineno
def __repr__(self):
return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1)
@property
def statement(self):
""" py.code.Source object for the current statement """
source = self.frame.code.fullsource
return source.getstatement(self.lineno)
@property
def path(self):
""" path to the source code """
return self.frame.code.path
def getlocals(self):
return self.frame.f_locals
    locals = property(getlocals, None, None, "locals of underlying frame")
def reinterpret(self):
"""Reinterpret the failing statement and returns a detailed information
about what operations are performed."""
if self.exprinfo is None:
source = str(self.statement).strip()
x = py.code._reinterpret(source, self.frame, should_fail=True)
if not isinstance(x, str):
raise TypeError("interpret returned non-string %r" % (x,))
self.exprinfo = x
return self.exprinfo
def getfirstlinesource(self):
# on Jython this firstlineno can be -1 apparently
return max(self.frame.code.firstlineno, 0)
def getsource(self, astcache=None):
""" return failing source code. """
# we use the passed in astcache to not reparse asttrees
# within exception info printing
from py._code.source import getstatementrange_ast
source = self.frame.code.fullsource
if source is None:
return None
key = astnode = None
if astcache is not None:
key = self.frame.code.path
if key is not None:
astnode = astcache.get(key, None)
start = self.getfirstlinesource()
try:
astnode, _, end = getstatementrange_ast(self.lineno, source,
astnode=astnode)
except SyntaxError:
end = self.lineno + 1
else:
if key is not None:
astcache[key] = astnode
return source[start:end]
source = property(getsource)
def ishidden(self):
""" return True if the current frame has a var __tracebackhide__
resolving to True
mostly for internal use
"""
try:
return self.frame.f_locals['__tracebackhide__']
except KeyError:
try:
return self.frame.f_globals['__tracebackhide__']
except KeyError:
return False
def __str__(self):
try:
fn = str(self.path)
except py.error.Error:
fn = '???'
name = self.frame.code.name
try:
line = str(self.statement).lstrip()
except KeyboardInterrupt:
raise
except:
line = "???"
return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line)
def name(self):
return self.frame.code.raw.co_name
    name = property(name, None, None, "co_name of underlying code")
class Traceback(list):
""" Traceback objects encapsulate and offer higher level
access to Traceback entries.
"""
Entry = TracebackEntry
def __init__(self, tb):
""" initialize from given python traceback object. """
if hasattr(tb, 'tb_next'):
def f(cur):
while cur is not None:
yield self.Entry(cur)
cur = cur.tb_next
list.__init__(self, f(tb))
else:
list.__init__(self, tb)
def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
""" return a Traceback instance wrapping part of this Traceback
        by providing any combination of path, lineno and firstlineno, the
first frame to start the to-be-returned traceback is determined
this allows cutting the first part of a Traceback instance e.g.
for formatting reasons (removing some uninteresting bits that deal
with handling of the exception/traceback)
"""
for x in self:
code = x.frame.code
codepath = code.path
if ((path is None or codepath == path) and
(excludepath is None or not hasattr(codepath, 'relto') or
not codepath.relto(excludepath)) and
(lineno is None or x.lineno == lineno) and
(firstlineno is None or x.frame.code.firstlineno == firstlineno)):
return Traceback(x._rawentry)
return self
def __getitem__(self, key):
val = super(Traceback, self).__getitem__(key)
if isinstance(key, type(slice(0))):
val = self.__class__(val)
return val
def filter(self, fn=lambda x: not x.ishidden()):
""" return a Traceback instance with certain items removed
fn is a function that gets a single argument, a TracebackItem
instance, and should return True when the item should be added
to the Traceback, False when not
by default this removes all the TracebackItems which are hidden
(see ishidden() above)
"""
return Traceback(filter(fn, self))
def getcrashentry(self):
""" return last non-hidden traceback entry that lead
to the exception of a traceback.
"""
for i in range(-1, -len(self)-1, -1):
entry = self[i]
if not entry.ishidden():
return entry
return self[-1]
def recursionindex(self):
""" return the index of the frame/TracebackItem where recursion
originates if appropriate, None if no recursion occurred
"""
cache = {}
for i, entry in enumerate(self):
# id for the code.raw is needed to work around
# the strange metaprogramming in the decorator lib from pypi
# which generates code objects that have hash/value equality
#XXX needs a test
key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
#print "checking for recursion at", key
l = cache.setdefault(key, [])
if l:
f = entry.frame
loc = f.f_locals
for otherloc in l:
if f.is_true(f.eval(co_equal,
__recursioncache_locals_1=loc,
__recursioncache_locals_2=otherloc)):
return i
l.append(entry.frame.f_locals)
return None
co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
'?', 'eval')
class ExceptionInfo(object):
""" wraps sys.exc_info() objects and offers
help for navigating the traceback.
"""
_striptext = ''
def __init__(self, tup=None, exprinfo=None):
if tup is None:
tup = sys.exc_info()
if exprinfo is None and isinstance(tup[1], AssertionError):
exprinfo = getattr(tup[1], 'msg', None)
if exprinfo is None:
exprinfo = str(tup[1])
if exprinfo and exprinfo.startswith('assert '):
self._striptext = 'AssertionError: '
self._excinfo = tup
#: the exception class
self.type = tup[0]
#: the exception instance
self.value = tup[1]
#: the exception raw traceback
self.tb = tup[2]
#: the exception type name
self.typename = self.type.__name__
#: the exception traceback (py.code.Traceback instance)
self.traceback = py.code.Traceback(self.tb)
def __repr__(self):
return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
def exconly(self, tryshort=False):
""" return the exception as a string
when 'tryshort' resolves to True, and the exception is a
py.code._AssertionError, only the actual exception part of
the exception representation is returned (so 'AssertionError: ' is
removed from the beginning)
"""
lines = format_exception_only(self.type, self.value)
text = ''.join(lines)
text = text.rstrip()
if tryshort:
if text.startswith(self._striptext):
text = text[len(self._striptext):]
return text
def errisinstance(self, exc):
""" return True if the exception is an instance of exc """
return isinstance(self.value, exc)
def _getreprcrash(self):
exconly = self.exconly(tryshort=True)
entry = self.traceback.getcrashentry()
path, lineno = entry.frame.code.raw.co_filename, entry.lineno
return ReprFileLocation(path, lineno+1, exconly)
def getrepr(self, showlocals=False, style="long",
abspath=False, tbfilter=True, funcargs=False):
""" return str()able representation of this exception info.
showlocals: show locals per traceback entry
style: long|short|no|native traceback style
tbfilter: hide entries (where __tracebackhide__ is true)
in case of style==native, tbfilter and showlocals is ignored.
"""
if style == 'native':
return ReprExceptionInfo(ReprTracebackNative(
py.std.traceback.format_exception(
self.type,
self.value,
self.traceback[0]._rawentry,
)), self._getreprcrash())
fmt = FormattedExcinfo(showlocals=showlocals, style=style,
abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
return fmt.repr_excinfo(self)
def __str__(self):
entry = self.traceback[-1]
loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
return str(loc)
def __unicode__(self):
entry = self.traceback[-1]
loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
return loc.__unicode__()
class FormattedExcinfo(object):
""" presenting information about failing Functions and Generators. """
# for traceback entries
flow_marker = ">"
fail_marker = "E"
def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
self.showlocals = showlocals
self.style = style
self.tbfilter = tbfilter
self.funcargs = funcargs
self.abspath = abspath
self.astcache = {}
def _getindent(self, source):
# figure out indent for given source
try:
s = str(source.getstatement(len(source)-1))
except KeyboardInterrupt:
raise
except:
try:
s = str(source[-1])
except KeyboardInterrupt:
raise
except:
return 0
return 4 + (len(s) - len(s.lstrip()))
def _getentrysource(self, entry):
source = entry.getsource(self.astcache)
if source is not None:
source = source.deindent()
return source
def _saferepr(self, obj):
return py.io.saferepr(obj)
def repr_args(self, entry):
if self.funcargs:
args = []
for argname, argvalue in entry.frame.getargs(var=True):
args.append((argname, self._saferepr(argvalue)))
return ReprFuncArgs(args)
def get_source(self, source, line_index=-1, excinfo=None, short=False):
""" return formatted and marked up source lines. """
lines = []
if source is None or line_index >= len(source.lines):
source = py.code.Source("???")
line_index = 0
if line_index < 0:
line_index += len(source)
space_prefix = " "
if short:
lines.append(space_prefix + source.lines[line_index].strip())
else:
for line in source.lines[:line_index]:
lines.append(space_prefix + line)
lines.append(self.flow_marker + " " + source.lines[line_index])
for line in source.lines[line_index+1:]:
lines.append(space_prefix + line)
if excinfo is not None:
indent = 4 if short else self._getindent(source)
lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
return lines
def get_exconly(self, excinfo, indent=4, markall=False):
lines = []
indent = " " * indent
# get the real exception information out
exlines = excinfo.exconly(tryshort=True).split('\n')
failindent = self.fail_marker + indent[1:]
for line in exlines:
lines.append(failindent + line)
if not markall:
failindent = indent
return lines
def repr_locals(self, locals):
if self.showlocals:
lines = []
keys = [loc for loc in locals if loc[0] != "@"]
keys.sort()
for name in keys:
value = locals[name]
if name == '__builtins__':
lines.append("__builtins__ = <builtins>")
else:
# This formatting could all be handled by the
# _repr() function, which is only reprlib.Repr in
# disguise, so is very configurable.
str_repr = self._saferepr(value)
#if len(str_repr) < 70 or not isinstance(value,
# (list, tuple, dict)):
lines.append("%-10s = %s" %(name, str_repr))
#else:
# self._line("%-10s =\\" % (name,))
# # XXX
# py.std.pprint.pprint(value, stream=self.excinfowriter)
return ReprLocals(lines)
def repr_traceback_entry(self, entry, excinfo=None):
source = self._getentrysource(entry)
if source is None:
source = py.code.Source("???")
line_index = 0
else:
# entry.getfirstlinesource() can be -1, should be 0 on jython
line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
lines = []
style = entry._repr_style
if style is None:
style = self.style
if style in ("short", "long"):
short = style == "short"
reprargs = self.repr_args(entry) if not short else None
s = self.get_source(source, line_index, excinfo, short=short)
lines.extend(s)
if short:
message = "in %s" %(entry.name)
else:
message = excinfo and excinfo.typename or ""
path = self._makepath(entry.path)
filelocrepr = ReprFileLocation(path, entry.lineno+1, message)
localsrepr = None
if not short:
localsrepr = self.repr_locals(entry.locals)
return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
if excinfo:
lines.extend(self.get_exconly(excinfo, indent=4))
return ReprEntry(lines, None, None, None, style)
def _makepath(self, path):
if not self.abspath:
try:
np = py.path.local().bestrelpath(path)
except OSError:
return path
if len(np) < len(str(path)):
path = np
return path
def repr_traceback(self, excinfo):
traceback = excinfo.traceback
if self.tbfilter:
traceback = traceback.filter()
recursionindex = None
if excinfo.errisinstance(RuntimeError):
if "maximum recursion depth exceeded" in str(excinfo.value):
recursionindex = traceback.recursionindex()
last = traceback[-1]
entries = []
extraline = None
for index, entry in enumerate(traceback):
einfo = (last == entry) and excinfo or None
reprentry = self.repr_traceback_entry(entry, einfo)
entries.append(reprentry)
if index == recursionindex:
extraline = "!!! Recursion detected (same locals & position)"
break
return ReprTraceback(entries, extraline, style=self.style)
def repr_excinfo(self, excinfo):
reprtraceback = self.repr_traceback(excinfo)
reprcrash = excinfo._getreprcrash()
return ReprExceptionInfo(reprtraceback, reprcrash)
class TerminalRepr:
def __str__(self):
s = self.__unicode__()
if sys.version_info[0] < 3:
s = s.encode('utf-8')
return s
def __unicode__(self):
# FYI this is called from pytest-xdist's serialization of exception
# information.
io = py.io.TextIO()
tw = py.io.TerminalWriter(file=io)
self.toterminal(tw)
return io.getvalue().strip()
def __repr__(self):
return "<%s instance at %0x>" %(self.__class__, id(self))
class ReprExceptionInfo(TerminalRepr):
def __init__(self, reprtraceback, reprcrash):
self.reprtraceback = reprtraceback
self.reprcrash = reprcrash
self.sections = []
def addsection(self, name, content, sep="-"):
self.sections.append((name, content, sep))
def toterminal(self, tw):
self.reprtraceback.toterminal(tw)
for name, content, sep in self.sections:
tw.sep(sep, name)
tw.line(content)
class ReprTraceback(TerminalRepr):
entrysep = "_ "
def __init__(self, reprentries, extraline, style):
self.reprentries = reprentries
self.extraline = extraline
self.style = style
def toterminal(self, tw):
# the entries might have different styles
last_style = None
for i, entry in enumerate(self.reprentries):
if entry.style == "long":
tw.line("")
entry.toterminal(tw)
if i < len(self.reprentries) - 1:
next_entry = self.reprentries[i+1]
if entry.style == "long" or \
entry.style == "short" and next_entry.style == "long":
tw.sep(self.entrysep)
if self.extraline:
tw.line(self.extraline)
class ReprTracebackNative(ReprTraceback):
def __init__(self, tblines):
self.style = "native"
self.reprentries = [ReprEntryNative(tblines)]
self.extraline = None
class ReprEntryNative(TerminalRepr):
style = "native"
def __init__(self, tblines):
self.lines = tblines
def toterminal(self, tw):
tw.write("".join(self.lines))
class ReprEntry(TerminalRepr):
localssep = "_ "
def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
self.lines = lines
self.reprfuncargs = reprfuncargs
self.reprlocals = reprlocals
self.reprfileloc = filelocrepr
self.style = style
def toterminal(self, tw):
if self.style == "short":
self.reprfileloc.toterminal(tw)
for line in self.lines:
red = line.startswith("E ")
tw.line(line, bold=True, red=red)
#tw.line("")
return
if self.reprfuncargs:
self.reprfuncargs.toterminal(tw)
for line in self.lines:
red = line.startswith("E ")
tw.line(line, bold=True, red=red)
if self.reprlocals:
#tw.sep(self.localssep, "Locals")
tw.line("")
self.reprlocals.toterminal(tw)
if self.reprfileloc:
if self.lines:
tw.line("")
self.reprfileloc.toterminal(tw)
def __str__(self):
return "%s\n%s\n%s" % ("\n".join(self.lines),
self.reprlocals,
self.reprfileloc)
class ReprFileLocation(TerminalRepr):
def __init__(self, path, lineno, message):
self.path = str(path)
self.lineno = lineno
self.message = message
def toterminal(self, tw):
# filename and lineno output for each entry,
        # using an output format that most editors understand
msg = self.message
i = msg.find("\n")
if i != -1:
msg = msg[:i]
tw.line("%s:%s: %s" %(self.path, self.lineno, msg))
class ReprLocals(TerminalRepr):
def __init__(self, lines):
self.lines = lines
def toterminal(self, tw):
for line in self.lines:
tw.line(line)
class ReprFuncArgs(TerminalRepr):
def __init__(self, args):
self.args = args
def toterminal(self, tw):
if self.args:
linesofar = ""
for name, value in self.args:
ns = "%s = %s" %(name, value)
if len(ns) + len(linesofar) + 2 > tw.fullwidth:
if linesofar:
tw.line(linesofar)
linesofar = ns
else:
if linesofar:
linesofar += ", " + ns
else:
linesofar = ns
if linesofar:
tw.line(linesofar)
tw.line("")
oldbuiltins = {}
def patch_builtins(assertion=True, compile=True):
""" put compile and AssertionError builtins to Python's builtins. """
if assertion:
from py._code import assertion
l = oldbuiltins.setdefault('AssertionError', [])
l.append(py.builtin.builtins.AssertionError)
py.builtin.builtins.AssertionError = assertion.AssertionError
if compile:
l = oldbuiltins.setdefault('compile', [])
l.append(py.builtin.builtins.compile)
py.builtin.builtins.compile = py.code.compile
def unpatch_builtins(assertion=True, compile=True):
""" remove compile and AssertionError builtins from Python builtins. """
if assertion:
py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop()
if compile:
py.builtin.builtins.compile = oldbuiltins['compile'].pop()
def getrawcode(obj, trycall=True):
""" return code object for given function. """
try:
return obj.__code__
except AttributeError:
obj = getattr(obj, 'im_func', obj)
obj = getattr(obj, 'func_code', obj)
obj = getattr(obj, 'f_code', obj)
obj = getattr(obj, '__code__', obj)
if trycall and not hasattr(obj, 'co_firstlineno'):
if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
x = getrawcode(obj.__call__, trycall=False)
if hasattr(x, 'co_firstlineno'):
return x
return obj
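# Usage sketch: these wrappers are normally reached through the py.code
# namespace, e.g.
#
#   try:
#       1 / 0
#   except ZeroDivisionError:
#       excinfo = py.code.ExceptionInfo()   # wraps sys.exc_info()
#   excinfo.typename                        # -> 'ZeroDivisionError'
#   excinfo.getrepr(style='short')          # TerminalRepr of the traceback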
|
ky822/scikit-learn | refs/heads/master | examples/svm/plot_weighted_samples.py | 188 | """
=====================
SVM: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each
point is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
|
obi-two/Rebelion | refs/heads/master | data/scripts/templates/object/static/item/shared_item_shisha.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/item/shared_item_shisha.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
sparkslabs/kamaelia | refs/heads/master | Sketches/MPS/Examples/Checkers/Checkers.py | 9 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
===================
Checkers board game
===================
A 3D version of the checkers (draughts) boardgame.
Only basic game rules are implemented (pieces cannot be stacked on top of
one another and may only be placed on black fields). The implementation of
more advanced game rules is left up to the reader :) .
"""
import Axon
import sys
from Kamaelia.UI.OpenGL.OpenGLDisplay import OpenGLDisplay
from Kamaelia.UI.OpenGL.OpenGLComponent import OpenGLComponent
from CheckersBoard import CheckersBoard
from CheckersPiece import CheckersPiece
from CheckersInteractor import CheckersInteractor
class Checkers(Axon.AdaptiveCommsComponent.AdaptiveCommsComponent):
Inboxes = {
"inbox": "not used",
"control": "receive shutdown messages",
}
Outboxes = {
"outbox": "not used",
}
def initialiseComponent(self):
# listen to shutdown events
ogl_display = OpenGLDisplay.getDisplayService()[0]
self.link( (ogl_display, "signal"), (self, "control") )
# create board
self.boardvis = CheckersBoard(position=(0,0,-15)).activate()
self.interactor_comms = {}
self.board = {}
for i in range(8):
self.board[i] = {}
for j in range(8):
self.board[i][j] = None
# create black pieces
self.blackPieces = []
self.blackInteractors = []
for i in range(8):
for j in range(3):
if (i+j) %2 == 0:
x = float(i)-3.5
y = float(j)-3.5
piece = CheckersPiece(position=(x, y, -15), colour=(0.6,0,0)).activate()
self.blackPieces.append(piece)
interactor = CheckersInteractor(target=piece, colour='B').activate()
self.blackInteractors.append(interactor)
intcomms = self.addOutbox("interactor_comms")
self.interactor_comms[id(interactor)] = intcomms
self.link( (self, intcomms), (interactor, "inbox"))
self.link( (interactor, "outbox"), (self, "inbox"))
self.board[i][j] = 'B'
# create white pieces
self.whitePieces = []
self.whiteInteractors = []
for i in range(8):
for j in range(5,8):
if (i+j) %2 == 0:
x = float(i)-3.5
y = float(j)-3.5
piece = CheckersPiece(position=(x, y, -15), colour=(0,0,0.6)).activate()
self.whitePieces.append(piece)
interactor = CheckersInteractor(target=piece, colour='W').activate()
self.whiteInteractors.append(interactor)
intcomms = self.addOutbox("interactor_comms")
self.interactor_comms[id(interactor)] = intcomms
self.link( (self, intcomms), (interactor, "inbox"))
self.link( (interactor, "outbox"), (self, "inbox"))
self.board[i][j] = 'W'
return 1
def mainBody(self):
while self.dataReady("inbox"):
msg = self.recv("inbox")
if msg.get("PLACEMENT", None):
objectid = msg.get("objectid")
fr = msg.get("from")
to = msg.get("to")
colour = msg.get("colour")
if to[0] < 0 or to[0] > 7 or to[1] < 0 or to[1] > 7 or (to[0] + to[1]) % 2 != 0 or self.board[to[0]][to[1]] is not None:
self.send("INVALID", self.interactor_comms[objectid])
else:
self.board[fr[0]][fr[1]] = None
self.board[to[0]][to[1]] = colour
self.send("ACK", self.interactor_comms[objectid])
while self.dataReady("control"):
cmsg = self.recv("control")
if isinstance(cmsg, Axon.Ipc.shutdownMicroprocess):
# dirty way to terminate program
sys.exit(0)
return 1
if __name__=='__main__':
# initialise display, change point of view
ogl_display = OpenGLDisplay(viewerposition=(0,-10,0), lookat=(0,0,-15), limit_fps=100)
ogl_display.activate()
OpenGLDisplay.setDisplayService(ogl_display)
Checkers().activate()
Axon.Scheduler.scheduler.run.runThreads()
# Licensed to the BBC under a Contributor Agreement: THF
|
andyzsf/edx | refs/heads/master | lms/djangoapps/django_comment_client/management/commands/create_roles_for_existing.py | 229 | """
This must be run only after seed_permissions_roles.py!
Creates default roles for all users currently in the database. Just runs through
Enrollments.
"""
from django.core.management.base import BaseCommand, CommandError
from student.models import CourseEnrollment
from django_comment_common.models import assign_default_role_on_enrollment
class Command(BaseCommand):
args = 'course_id'
help = 'Seed default permissions and roles'
def handle(self, *args, **options):
if len(args) != 0:
raise CommandError("This Command takes no arguments")
print "Updated roles for ",
for i, enrollment in enumerate(CourseEnrollment.objects.filter(is_active=1), start=1):
assign_default_role_on_enrollment(None, enrollment)
if i % 1000 == 0:
print "{0}...".format(i),
print
|
jayceyxc/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/tests/utils.py | 114 | from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
# function that will pass a test.
def pass_test(*args): return
def no_backend(test_func, backend):
"Use this decorator to disable test on specified backend."
if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1] == backend:
return pass_test
else:
return test_func
# Decorators to disable entire test functions for specific
# spatial backends.
def no_oracle(func): return no_backend(func, 'oracle')
def no_postgis(func): return no_backend(func, 'postgis')
def no_mysql(func): return no_backend(func, 'mysql')
def no_spatialite(func): return no_backend(func, 'spatialite')
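# Illustrative use of the decorators above (hypothetical test method): the
# decorated test is replaced with the no-op pass_test when the default
# database uses the named backend.
#
#   @no_mysql
#   def test_srid_lookup(self):
#       ...  # becomes pass_test under a MySQL default backend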
# Shortcut booleans to omit only portions of tests.
_default_db = settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1]
oracle = _default_db == 'oracle'
postgis = _default_db == 'postgis'
mysql = _default_db == 'mysql'
spatialite = _default_db == 'spatialite'
HAS_SPATIALREFSYS = True
if oracle and 'gis' in settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE']:
from django.contrib.gis.db.backends.oracle.models import SpatialRefSys
elif postgis:
from django.contrib.gis.db.backends.postgis.models import SpatialRefSys
elif spatialite:
from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys
else:
HAS_SPATIALREFSYS = False
SpatialRefSys = None
def has_spatial_db():
# All databases must have spatial backends to run GeoDjango tests.
spatial_dbs = [name for name, db_dict in settings.DATABASES.items()
if db_dict['ENGINE'].startswith('django.contrib.gis')]
return len(spatial_dbs) == len(settings.DATABASES)
HAS_SPATIAL_DB = has_spatial_db()
|
smalyshev/pywikibot-core | refs/heads/master | pywikibot/userinterfaces/__init__.py | 4 | # -*- coding: utf-8 -*-
"""User interfaces."""
#
# (C) Pywikibot team, 2007
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
|
CLVsol/clvsol_odoo_addons | refs/heads/12.0 | clv_summary/__init__.py | 13 | # -*- coding: utf-8 -*-
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import models
|
hortonworks/hortonworks-sandbox | refs/heads/master | desktop/core/ext-py/Twisted/twisted/test/stdio_test_loseconn.py | 2 | # -*- test-case-name: twisted.test.test_stdio.StandardInputOutputTestCase.test_loseConnection -*-
# Copyright (c) 2006-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Main program for the child process run by
L{twisted.test.test_stdio.StandardInputOutputTestCase.test_loseConnection} to
test that ITransport.loseConnection() works for process transports.
"""
import sys
from twisted.internet import stdio, protocol
from twisted.python import reflect
class LoseConnChild(protocol.Protocol):
def connectionMade(self):
self.transport.loseConnection()
def connectionLost(self, reason):
reactor.stop()
if __name__ == '__main__':
reflect.namedAny(sys.argv[1]).install()
from twisted.internet import reactor
stdio.StandardIO(LoseConnChild())
reactor.run()
|
chipx86/reviewboard | refs/heads/master | reviewboard/rb_platform.py | 9 | """Global configuration for deployment paths and settings.
These values may need to be modified for deployment on different operating
system platforms. Packagers should review this file and patch it appropriately
for their systems.
"""
from __future__ import unicode_literals
#: Location of the sitelist file. This file maintains a list of the installed
#: Review Board sites on this machine and is used when performing a site
#: upgrade to ensure all sites are upgraded together.
SITELIST_FILE_UNIX = "/etc/reviewboard/sites"
#: Default location of the cache directory. This path is used in
#: :command:`rb-site install` if using a file-based cache instead of a
#: memcached-based cache.
DEFAULT_FS_CACHE_PATH = "/tmp/reviewboard_cache"
#: Preferred location of the Review Board sites. If the :program:`rb-site`
#: tool is passed a site name instead of a full path, it will be prepended
#: with this path.
INSTALLED_SITE_PATH = "/var/www"
|
devs1991/test_edx_docmode | refs/heads/master | venv/lib/python2.7/site-packages/lepl/support/_test/timer.py | 2 |
# The contents of this file are subject to the Mozilla Public License
# (MPL) Version 1.1 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License
# at http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and
# limitations under the License.
#
# The Original Code is LEPL (http://www.acooke.org/lepl)
# The Initial Developer of the Original Code is Andrew Cooke.
# Portions created by the Initial Developer are Copyright (C) 2009-2010
# Andrew Cooke (andrew@acooke.org). All Rights Reserved.
#
# Alternatively, the contents of this file may be used under the terms
# of the LGPL license (the GNU Lesser General Public License,
# http://www.gnu.org/licenses/lgpl.html), in which case the provisions
# of the LGPL License are applicable instead of those above.
#
# If you wish to allow use of your version of this file only under the
# terms of the LGPL License and not to allow others to use your version
# of this file under the MPL, indicate your decision by deleting the
# provisions above and replace them with the notice and other provisions
# required by the LGPL License. If you do not delete the provisions
# above, a recipient may use your version of this file under either the
# MPL or the LGPL License.
'''
Tests for the lepl.support.timer module.
'''
from unittest import TestCase
from lepl import *
from lepl.support.lib import StringIO
class TimerTest(TestCase):
def test_luca(self):
'''
See mailing list.
'''
integer = Token(Integer()) >> int
uletter = Token(Upper())
real = Token(Real()) >> float
data_line = Line(integer & uletter & real[6])
table = data_line[1:]
source = '''1 G 0.0 0.0 0.0 0.0 0.0 0.0
2 G 0.0 0.0 0.0 0.0 0.0 0.0
3 G 0.0 0.0 0.0 0.0 0.0 0.0
4 G 0.0 0.0 0.0 0.0 0.0 0.0
5 G 0.0 0.0 0.0 0.0 0.0 0.0
6 G 0.0 0.0 0.0 0.0 0.0 0.0
7 G 0.0 0.0 0.0 0.0 0.0 0.0
8 G 0.0 0.0 0.0 0.0 0.0 0.0
9 G 0.0 0.0 -9.856000E-05 -1.444699E-17 1.944000E-03 0.0
10 G 0.0 0.0 -9.856000E-05 -1.427843E-17 1.944000E-03 0.0
11 G 0.0 0.0 -1.085216E-02 -2.749537E-16 1.874400E-02 0.0
12 G 0.0 0.0 -1.085216E-02 -2.748317E-16 1.874400E-02 0.0
13 G 0.0 0.0 -3.600576E-02 -6.652665E-16 3.074400E-02 0.0
14 G 0.0 0.0 -3.600576E-02 -6.717988E-16 3.074400E-02 0.0
15 G 0.0 0.0 -7.075936E-02 -8.592844E-16 3.794400E-02 0.0
16 G 0.0 0.0 -7.075936E-02 -8.537008E-16 3.794400E-02 0.0
17 G 0.0 0.0 -1.103130E-01 -9.445027E-16 4.034400E-02 0.0
18 G 0.0 0.0 -1.103130E-01 -9.538811E-16 4.034400E-02 0.0
100 G 0.0 0.0 0.0 0.0 0.0 0.0
200 G 0.0 0.0 0.0 0.0 0.0 0.0
'''
out = StringIO()
print_timing(source,
{'Real()': table.clone().config.lines().matcher,
'Real() no memoize': table.clone().config.lines().no_memoize().matcher},
count_compile=1, out=out)
table = out.getvalue()
print(table)
assert 'Timing Results' in table, table
|
xuxiao19910803/edx-platform | refs/heads/master | lms/djangoapps/instructor/views/coupons.py | 103 | """
E-commerce Tab Instructor Dashboard Coupons Operations views
"""
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _
from util.json_request import JsonResponse
from shoppingcart.models import Coupon, CourseRegistrationCode
from opaque_keys.edx.locations import SlashSeparatedCourseKey
import datetime
import pytz
import logging
log = logging.getLogger(__name__)
@require_POST
@login_required
def remove_coupon(request, course_id): # pylint: disable=unused-argument
"""
Deactivate the coupon with the given coupon id by setting its
is_active flag to False.
"""
coupon_id = request.POST.get('id', None)
if not coupon_id:
return JsonResponse({
'message': _('coupon id is None')
}, status=400) # status code 400: Bad Request
try:
coupon = Coupon.objects.get(id=coupon_id)
except ObjectDoesNotExist:
return JsonResponse({
'message': _('coupon with the coupon id ({coupon_id}) does not exist').format(coupon_id=coupon_id)
}, status=400) # status code 400: Bad Request
if not coupon.is_active:
return JsonResponse({
'message': _('coupon with the coupon id ({coupon_id}) is already inactive').format(coupon_id=coupon_id)
}, status=400) # status code 400: Bad Request
coupon.is_active = False
coupon.save()
return JsonResponse({
'message': _('coupon with the coupon id ({coupon_id}) updated successfully').format(coupon_id=coupon_id)
}) # status code 200: OK by default
@require_POST
@login_required
def add_coupon(request, course_id): # pylint: disable=unused-argument
"""
add coupon in the Coupons Table
"""
code = request.POST.get('code')
# check if the code is already in the Coupons Table and active
try:
course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)
coupon = Coupon.objects.get(is_active=True, code=code, course_id=course_id)
except Coupon.DoesNotExist:
# check if the coupon code is in the CourseRegistrationCode Table
course_registration_code = CourseRegistrationCode.objects.filter(code=code)
if course_registration_code:
return JsonResponse(
{'message': _("The code ({code}) that you have tried to define is already in use as a registration code").format(code=code)},
status=400) # status code 400: Bad Request
description = request.POST.get('description')
course_id = request.POST.get('course_id')
try:
discount = int(request.POST.get('discount'))
except ValueError:
return JsonResponse({
'message': _("Please Enter the Integer Value for Coupon Discount")
}, status=400) # status code 400: Bad Request
if discount > 100 or discount < 0:
return JsonResponse({
'message': _("Please Enter the Coupon Discount Value Less than or Equal to 100")
}, status=400) # status code 400: Bad Request
expiration_date = None
if request.POST.get('expiration_date'):
expiration_date = request.POST.get('expiration_date')
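# A day is added after parsing (below), so the stored expiration is midnight
# UTC of the following day and the coupon stays valid through the end of the
# entered date (an inference from the code, not from upstream docs).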
try:
expiration_date = datetime.datetime.strptime(expiration_date, "%m/%d/%Y").replace(tzinfo=pytz.UTC) + datetime.timedelta(days=1)
except ValueError:
return JsonResponse({
'message': _("Please enter the date in this format i-e month/day/year")
}, status=400) # status code 400: Bad Request
coupon = Coupon(
code=code, description=description,
course_id=course_id,
percentage_discount=discount,
created_by_id=request.user.id,
expiration_date=expiration_date
)
coupon.save()
return JsonResponse(
{'message': _("coupon with the coupon code ({code}) added successfully").format(code=code)}
)
if coupon:
return JsonResponse(
{'message': _("coupon with the coupon code ({code}) already exists for this course").format(code=code)},
status=400) # status code 400: Bad Request
@require_POST
@login_required
def update_coupon(request, course_id): # pylint: disable=unused-argument
"""
update the coupon object in the database
"""
coupon_id = request.POST.get('coupon_id', None)
if not coupon_id:
return JsonResponse({'message': _("coupon id not found")}, status=400) # status code 400: Bad Request
try:
coupon = Coupon.objects.get(pk=coupon_id)
except ObjectDoesNotExist:
return JsonResponse(
{'message': _("coupon with the coupon id ({coupon_id}) DoesNotExist").format(coupon_id=coupon_id)},
status=400) # status code 400: Bad Request
description = request.POST.get('description')
coupon.description = description
coupon.save()
return JsonResponse(
{'message': _("coupon with the coupon id ({coupon_id}) updated Successfully").format(coupon_id=coupon_id)}
)
@require_POST
@login_required
def get_coupon_info(request, course_id): # pylint: disable=unused-argument
"""
get the coupon information to display in the pop up form
"""
coupon_id = request.POST.get('id', None)
if not coupon_id:
return JsonResponse({
'message': _("coupon id not found")
}, status=400) # status code 400: Bad Request
try:
coupon = Coupon.objects.get(id=coupon_id)
except ObjectDoesNotExist:
return JsonResponse({
'message': _("coupon with the coupon id ({coupon_id}) DoesNotExist").format(coupon_id=coupon_id)
}, status=400) # status code 400: Bad Request
if not coupon.is_active:
return JsonResponse({
'message': _("coupon with the coupon id ({coupon_id}) is already inactive").format(coupon_id=coupon_id)
}, status=400) # status code 400: Bad Request
expiry_date = coupon.display_expiry_date
return JsonResponse({
'coupon_code': coupon.code,
'coupon_description': coupon.description,
'coupon_course_id': coupon.course_id.to_deprecated_string(),
'coupon_discount': coupon.percentage_discount,
'expiry_date': expiry_date,
'message': _('coupon with the coupon id ({coupon_id}) fetched successfully').format(coupon_id=coupon_id)
}) # status code 200: OK by default
|
neilhan/tensorflow | refs/heads/master | tensorflow/contrib/lookup/__init__.py | 16 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for lookup operations.
@@string_to_index
@@index_to_string
@@LookupInterface
@@InitializableLookupTableBase
@@HashTable
@@MutableHashTable
@@TableInitializerBase
@@KeyValueTensorInitializer
@@TextFileIndex
@@TextFileInitializer
@@TextFileIdTableInitializer
@@TextFileStringTableInitializer
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.lookup.lookup_ops import *
|
alu0100207385/dsi_3Django | refs/heads/master | build/lib.linux-i686-2.7/django/db/backends/postgresql_psycopg2/version.py | 331 | """
Extracts the version of the PostgreSQL server.
"""
import re
# This reg-exp is intentionally fairly flexible here.
# Needs to be able to handle stuff like:
# PostgreSQL 8.3.6
# EnterpriseDB 8.3
# PostgreSQL 8.3 beta4
# PostgreSQL 8.4beta1
VERSION_RE = re.compile(r'\S+ (\d+)\.(\d+)\.?(\d+)?')
def _parse_version(text):
"Internal parsing method. Factored out for testing purposes."
major, major2, minor = VERSION_RE.search(text).groups()
try:
return int(major) * 10000 + int(major2) * 100 + int(minor)
except (ValueError, TypeError):
return int(major) * 10000 + int(major2) * 100
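# Sanity examples (illustrative, derived from VERSION_RE above):
#   _parse_version('PostgreSQL 8.3.6') -> 80306
#   _parse_version('PostgreSQL 9.1beta2') -> 90100 (the missing minor part
#   takes the TypeError fallback branch)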
def get_version(connection):
"""
Returns an integer representing the major, minor and revision number of the
server. Format is the one used for the return value of libpq
PQServerVersion()/``server_version`` connection attribute (available in
newer psycopg2 versions.)
For example, 80304 for 8.3.4. The last two digits will be 00 in the case of
releases (e.g., 80400 for 'PostgreSQL 8.4') or in the case of beta and
prereleases (e.g. 90100 for 'PostgreSQL 9.1beta2').
PQServerVersion()/``server_version`` doesn't execute a query so try that
first, then fallback to a ``SELECT version()`` query.
"""
if hasattr(connection, 'server_version'):
return connection.server_version
else:
cursor = connection.cursor()
cursor.execute("SELECT version()")
return _parse_version(cursor.fetchone()[0])
|
hackers-terabit/portage | refs/heads/master | repoman/pym/repoman/qa_data.py | 1 | # -*- coding:utf-8 -*-
import logging
from _emerge.Package import Package
# import our initialized portage instance
from repoman._portage import portage
max_desc_len = 80
allowed_filename_chars = "a-zA-Z0-9._-+:"
qahelp = {
"CVS/Entries.IO_error": (
"Attempting to commit, and an IO error was encountered access the"
" Entries file"),
"ebuild.invalidname": (
"Ebuild files with a non-parseable or syntactically incorrect name"
" (or using 2.1 versioning extensions)"),
"ebuild.namenomatch": (
"Ebuild files that do not have the same name as their parent"
" directory"),
"changelog.ebuildadded": (
"An ebuild was added but the ChangeLog was not modified"),
"changelog.missing": (
"Missing ChangeLog files"),
"ebuild.notadded": (
"Ebuilds that exist but have not been added to cvs"),
"ebuild.patches": (
"PATCHES variable should be a bash array to ensure white space safety"),
"changelog.notadded": (
"ChangeLogs that exist but have not been added to cvs"),
"dependency.bad": (
"User-visible ebuilds with unsatisfied dependencies"
" (matched against *visible* ebuilds)"),
"dependency.badmasked": (
"Masked ebuilds with unsatisfied dependencies"
" (matched against *all* ebuilds)"),
"dependency.badindev": (
"User-visible ebuilds with unsatisfied dependencies"
" (matched against *visible* ebuilds) in developing arch"),
"dependency.badmaskedindev": (
"Masked ebuilds with unsatisfied dependencies"
" (matched against *all* ebuilds) in developing arch"),
"dependency.badtilde": (
"Uses the ~ dep operator with a non-zero revision part,"
" which is useless (the revision is ignored)"),
"dependency.missingslot": (
"RDEPEND matches more than one SLOT but does not specify a "
"slot and/or use the := or :* slot operator"),
"dependency.perlcore": (
"This ebuild directly depends on a package in perl-core;"
" it should use the corresponding virtual instead."),
"dependency.syntax": (
"Syntax error in dependency string"
" (usually an extra/missing space/parenthesis)"),
"dependency.unknown": (
"Ebuild has a dependency that refers to an unknown package"
" (which may be valid if it is a blocker for a renamed/removed package,"
" or is an alternative choice provided by an overlay)"),
"dependency.badslotop": (
"RDEPEND contains ':=' slot operator under '||' dependency."),
"file.executable": (
"Ebuilds, digests, metadata.xml, Manifest, and ChangeLog do not need"
" the executable bit"),
"file.size": (
"Files in the files directory must be under 20 KiB"),
"file.size.fatal": (
"Files in the files directory must be under 60 KiB"),
"file.name": (
"File/dir name must be composed"
" of only the following chars: %s " % allowed_filename_chars),
"file.UTF8": (
"File is not UTF8 compliant"),
"inherit.deprecated": (
"Ebuild inherits a deprecated eclass"),
"inherit.missing": (
"Ebuild uses functions from an eclass but does not inherit it"),
"inherit.unused": (
"Ebuild inherits an eclass but does not use it"),
"java.eclassesnotused": (
"With virtual/jdk in DEPEND you must inherit a java eclass"),
"wxwidgets.eclassnotused": (
"Ebuild DEPENDs on x11-libs/wxGTK without inheriting wxwidgets.eclass"),
"KEYWORDS.dropped": (
"Ebuilds that appear to have dropped KEYWORDS for some arch"),
"KEYWORDS.missing": (
"Ebuilds that have a missing or empty KEYWORDS variable"),
"KEYWORDS.stable": (
"Ebuilds that have been added directly with stable KEYWORDS"),
"KEYWORDS.stupid": (
"Ebuilds that use KEYWORDS=-* instead of package.mask"),
"LICENSE.missing": (
"Ebuilds that have a missing or empty LICENSE variable"),
"LICENSE.virtual": (
"Virtuals that have a non-empty LICENSE variable"),
"DESCRIPTION.missing": (
"Ebuilds that have a missing or empty DESCRIPTION variable"),
"DESCRIPTION.toolong": (
"DESCRIPTION is over %d characters" % max_desc_len),
"EAPI.definition": (
"EAPI definition does not conform to PMS section 7.3.1"
" (first non-comment, non-blank line)"),
"EAPI.deprecated": (
"Ebuilds that use features that are deprecated in the current EAPI"),
"EAPI.incompatible": (
"Ebuilds that use features that are only available with a different"
" EAPI"),
"EAPI.unsupported": (
"Ebuilds that have an unsupported EAPI version"
" (you must upgrade portage)"),
"SLOT.invalid": (
"Ebuilds that have a missing or invalid SLOT variable value"),
"HOMEPAGE.missing": (
"Ebuilds that have a missing or empty HOMEPAGE variable"),
"HOMEPAGE.virtual": (
"Virtuals that have a non-empty HOMEPAGE variable"),
"PDEPEND.suspect": (
"PDEPEND contains a package that usually only belongs in DEPEND."),
"LICENSE.syntax": (
"Syntax error in LICENSE"
" (usually an extra/missing space/parenthesis)"),
"PROVIDE.syntax": (
"Syntax error in PROVIDE"
" (usually an extra/missing space/parenthesis)"),
"PROPERTIES.syntax": (
"Syntax error in PROPERTIES"
" (usually an extra/missing space/parenthesis)"),
"RESTRICT.syntax": (
"Syntax error in RESTRICT"
" (usually an extra/missing space/parenthesis)"),
"REQUIRED_USE.syntax": (
"Syntax error in REQUIRED_USE"
" (usually an extra/missing space/parenthesis)"),
"SRC_URI.syntax": (
"Syntax error in SRC_URI"
" (usually an extra/missing space/parenthesis)"),
"SRC_URI.mirror": (
"A uri listed in profiles/thirdpartymirrors is found in SRC_URI"),
"ebuild.syntax": (
"Error generating cache entry for ebuild;"
" typically caused by ebuild syntax error"
" or digest verification failure"),
"ebuild.output": (
"A simple sourcing of the ebuild produces output;"
" this breaks ebuild policy."),
"ebuild.nesteddie": (
"Placing 'die' inside ( ) prints an error,"
" but doesn't stop the ebuild."),
"variable.invalidchar": (
"A variable contains an invalid character"
" that is not part of the ASCII character set"),
"variable.readonly": (
"Assigning a readonly variable"),
"variable.usedwithhelpers": (
"Ebuild uses D, ROOT, ED, EROOT or EPREFIX with helpers"),
"LIVEVCS.stable": (
"This ebuild is a live checkout from a VCS but has stable keywords."),
"LIVEVCS.unmasked": (
"This ebuild is a live checkout from a VCS but has keywords"
" and is not masked in the global package.mask."),
"IUSE.invalid": (
"This ebuild has a variable in IUSE"
" that is not in the use.desc or its metadata.xml file"),
"IUSE.missing": (
"This ebuild has a USE conditional"
" which references a flag that is not listed in IUSE"),
"IUSE.rubydeprecated": (
"The ebuild has set a ruby interpreter in USE_RUBY,"
" that is not available as a ruby target anymore"),
"LICENSE.invalid": (
"This ebuild is listing a license"
" that doesnt exist in portages license/ dir."),
"LICENSE.deprecated": (
"This ebuild is listing a deprecated license."),
"KEYWORDS.invalid": (
"This ebuild contains KEYWORDS"
" that are not listed in profiles/arch.list"
" or for which no valid profile was found"),
"RDEPEND.implicit": (
"RDEPEND is unset in the ebuild"
" which triggers implicit RDEPEND=$DEPEND assignment"
" (prior to EAPI 4)"),
"RDEPEND.suspect": (
"RDEPEND contains a package that usually only belongs in DEPEND."),
"RESTRICT.invalid": (
"This ebuild contains invalid RESTRICT values."),
"digest.assumed": (
"Existing digest must be assumed correct (Package level only)"),
"digest.missing": (
"Some files listed in SRC_URI aren't referenced in the Manifest"),
"digest.unused": (
"Some files listed in the Manifest aren't referenced in SRC_URI"),
"ebuild.majorsyn": (
"This ebuild has a major syntax error"
" that may cause the ebuild to fail partially or fully"),
"ebuild.minorsyn": (
"This ebuild has a minor syntax error"
" that contravenes gentoo coding style"),
"ebuild.badheader": (
"This ebuild has a malformed header"),
"manifest.bad": (
"Manifest has missing or incorrect digests"),
"metadata.missing": (
"Missing metadata.xml files"),
"metadata.bad": (
"Bad metadata.xml files"),
"metadata.warning": (
"Warnings in metadata.xml files"),
"portage.internal": (
"The ebuild uses an internal Portage function or variable"),
"repo.eapi.banned": (
"The ebuild uses an EAPI which is"
" banned by the repository's metadata/layout.conf settings"),
"repo.eapi.deprecated": (
"The ebuild uses an EAPI which is"
" deprecated by the repository's metadata/layout.conf settings"),
"virtual.oldstyle": (
"The ebuild PROVIDEs an old-style virtual (see GLEP 37)"),
"virtual.suspect": (
"Ebuild contains a package"
" that usually should be pulled via virtual/, not directly."),
"usage.obsolete": (
"The ebuild makes use of an obsolete construct"),
"upstream.workaround": (
"The ebuild works around an upstream bug,"
" an upstream bug should be filed and tracked in bugs.gentoo.org")
}
qacats = list(qahelp)
qacats.sort()
qawarnings = set((
"changelog.missing",
"changelog.notadded",
"dependency.unknown",
"digest.assumed",
"digest.unused",
"ebuild.notadded",
"ebuild.nesteddie",
"dependency.badmasked",
"dependency.badindev",
"dependency.badmaskedindev",
"dependency.badtilde",
"dependency.missingslot",
"dependency.perlcore",
"DESCRIPTION.toolong",
"EAPI.deprecated",
"HOMEPAGE.virtual",
"LICENSE.deprecated",
"LICENSE.virtual",
"KEYWORDS.dropped",
"KEYWORDS.stupid",
"KEYWORDS.missing",
"PDEPEND.suspect",
"RDEPEND.implicit",
"RDEPEND.suspect",
"virtual.suspect",
"RESTRICT.invalid",
"ebuild.minorsyn",
"ebuild.badheader",
"ebuild.patches",
"file.size",
"inherit.unused",
"inherit.deprecated",
"java.eclassesnotused",
"wxwidgets.eclassnotused",
"metadata.warning",
"portage.internal",
"repo.eapi.deprecated",
"usage.obsolete",
"upstream.workaround",
"IUSE.rubydeprecated",
))
missingvars = ["KEYWORDS", "LICENSE", "DESCRIPTION", "HOMEPAGE"]
allvars = set(x for x in portage.auxdbkeys if not x.startswith("UNUSED_"))
allvars.update(Package.metadata_keys)
allvars = sorted(allvars)
for x in missingvars:
x += ".missing"
if x not in qacats:
logging.warning('* missingvars values need to be added to qahelp ("%s")' % x)
qacats.append(x)
qawarnings.add(x)
valid_restrict = frozenset([
"binchecks", "bindist", "fetch", "installsources", "mirror",
"preserve-libs", "primaryuri", "splitdebug", "strip", "test", "userpriv"])
suspect_rdepend = frozenset([
"app-arch/cabextract",
"app-arch/rpm2targz",
"app-doc/doxygen",
"dev-lang/nasm",
"dev-lang/swig",
"dev-lang/yasm",
"dev-perl/extutils-pkgconfig",
"dev-util/byacc",
"dev-util/cmake",
"dev-util/ftjam",
"dev-util/gperf",
"dev-util/gtk-doc",
"dev-util/gtk-doc-am",
"dev-util/intltool",
"dev-util/jam",
"dev-util/pkg-config-lite",
"dev-util/pkgconf",
"dev-util/pkgconfig",
"dev-util/pkgconfig-openbsd",
"dev-util/scons",
"dev-util/unifdef",
"dev-util/yacc",
"media-gfx/ebdftopcf",
"sys-apps/help2man",
"sys-devel/autoconf",
"sys-devel/automake",
"sys-devel/bin86",
"sys-devel/bison",
"sys-devel/dev86",
"sys-devel/flex",
"sys-devel/m4",
"sys-devel/pmake",
"virtual/linux-sources",
"virtual/pkgconfig",
"x11-misc/bdftopcf",
"x11-misc/imake",
])
suspect_virtual = {
"dev-util/pkg-config-lite": "virtual/pkgconfig",
"dev-util/pkgconf": "virtual/pkgconfig",
"dev-util/pkgconfig": "virtual/pkgconfig",
"dev-util/pkgconfig-openbsd": "virtual/pkgconfig",
"dev-libs/libusb": "virtual/libusb",
"dev-libs/libusbx": "virtual/libusb",
"dev-libs/libusb-compat": "virtual/libusb",
}
ruby_deprecated = frozenset([
"ruby_targets_ree18",
"ruby_targets_ruby18",
"ruby_targets_ruby19",
])
# file.executable
no_exec = frozenset(["Manifest", "ChangeLog", "metadata.xml"])
def format_qa_output(
formatter, fails, dofull, dofail, options, qawarnings):
"""Helper function that formats output properly
@param formatter: an instance of Formatter
@type formatter: Formatter
@param fails: dict of qa status failures
@type fails: dict
@param dofull: Whether to print full results or a summary
@type dofull: boolean
@param dofail: Whether failure was hard or soft
@type dofail: boolean
@param options: The command-line options provided to repoman
@type options: Namespace
@param qawarnings: the set of warning types
@type qawarnings: set
@return: None (modifies formatter)
"""
full = options.mode == 'full'
# we only want key value pairs where value > 0
for category in sorted(fails):
number = len(fails[category])
formatter.add_literal_data(" " + category)
spacing_width = 30 - len(category)
if category in qawarnings:
formatter.push_style("WARN")
else:
formatter.push_style("BAD")
formatter.add_literal_data(" [fatal]")
spacing_width -= 8
formatter.add_literal_data(" " * spacing_width)
formatter.add_literal_data("%s" % number)
formatter.pop_style()
formatter.add_line_break()
if not dofull:
if not full and dofail and category in qawarnings:
# warnings are considered noise when there are failures
continue
fails_list = fails[category]
if not full and len(fails_list) > 12:
fails_list = fails_list[:12]
for failure in fails_list:
formatter.add_literal_data(" " + failure)
formatter.add_line_break()
def format_qa_output_column(
formatter, fails, dofull, dofail, options, qawarnings):
"""Helper function that formats output in a machine-parseable column format
@param formatter: an instance of Formatter
@type formatter: Formatter
@param fails: dict of qa status failures
@type fails: dict
@param dofull: Whether to print full results or a summary
@type dofull: boolean
@param dofail: Whether failure was hard or soft
@type dofail: boolean
@param options: The command-line options provided to repoman
@type options: Namespace
@param qawarnings: the set of warning types
@type qawarnings: set
@return: None (modifies formatter)
"""
full = options.mode == 'full'
for category in sorted(fails):
number = len(fails[category])
formatter.add_literal_data("NumberOf " + category + " ")
if category in qawarnings:
formatter.push_style("WARN")
else:
formatter.push_style("BAD")
formatter.add_literal_data("%s" % number)
formatter.pop_style()
formatter.add_line_break()
if not dofull:
if not full and dofail and category in qawarnings:
# warnings are considered noise when there are failures
continue
fails_list = fails[category]
if not full and len(fails_list) > 12:
fails_list = fails_list[:12]
for failure in fails_list:
formatter.add_literal_data(category + " " + failure)
formatter.add_line_break()
|
pkdevbox/trac | refs/heads/master | trac/versioncontrol/api.py | 1 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2011 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
import os.path
import time
from abc import ABCMeta, abstractmethod
from datetime import datetime
from trac.admin import AdminCommandError, IAdminCommandProvider, get_dir_list
from trac.config import ConfigSection, ListOption, Option
from trac.core import *
from trac.resource import IResourceManager, Resource, ResourceNotFound
from trac.util import as_bool
from trac.util.concurrency import threading
from trac.util.datefmt import utc
from trac.util.text import printout, to_unicode, exception_to_unicode
from trac.util.translation import _
from trac.web.api import IRequestFilter
def is_default(reponame):
"""Check whether `reponame` is the default repository."""
return not reponame or reponame in ('(default)', _('(default)'))
class InvalidRepository(TracError):
"""Exception raised when a repository is invalid."""
class IRepositoryConnector(Interface):
"""Provide support for a specific version control system."""
error = None # place holder for storing relevant error message
def get_supported_types():
"""Return the types of version control systems that are supported.
Yields `(repotype, priority)` pairs, where `repotype` is used to
match against the configured `[trac] repository_type` value in TracIni.
If multiple providers match a given type, the `priority` is used to
choose between them (highest number is highest priority).
If the `priority` returned is negative, this indicates that the
connector for the given `repotype` indeed exists but can't be
used for some reason. The `error` property can then be used to
store an error message or exception relevant to the problem detected.
"""
def get_repository(repos_type, repos_dir, params):
"""Return a Repository instance for the given repository type and dir.
"""
class IRepositoryProvider(Interface):
"""Provide known named instances of Repository."""
def get_repositories():
"""Generate repository information for known repositories.
Repository information is a key/value pair, where the value is
a dictionary which must contain at least one of
the following entries:
- `'dir'`: the repository directory which can be used by the
connector to create a `Repository` instance. This
defines a "real" repository.
- `'alias'`: the name of another repository. This defines an
alias to another (real) repository.
Optional entries:
- `'type'`: the type of the repository (if not given, the
default repository type will be used).
- `'description'`: a description of the repository (can
contain WikiFormatting).
- `'hidden'`: if set to `'true'`, the repository is hidden
from the repository index.
- `'url'`: the base URL for checking out the repository.
"""
class IRepositoryChangeListener(Interface):
"""Listen for changes in repositories."""
def changeset_added(repos, changeset):
"""Called after a changeset has been added to a repository."""
def changeset_modified(repos, changeset, old_changeset):
"""Called after a changeset has been modified in a repository.
The `old_changeset` argument contains the metadata of the changeset
prior to the modification. It is `None` if the old metadata cannot
be retrieved.
"""
class DbRepositoryProvider(Component):
"""Component providing repositories registered in the DB."""
implements(IRepositoryProvider, IAdminCommandProvider)
repository_attrs = ('alias', 'description', 'dir', 'hidden', 'name',
'sync_per_request', 'type', 'url')
# IRepositoryProvider methods
def get_repositories(self):
"""Retrieve repositories specified in the repository DB table."""
repos = {}
for id, name, value in self.env.db_query(
"SELECT id, name, value FROM repository WHERE name IN (%s)"
% ",".join("'%s'" % each for each in self.repository_attrs)):
if value is not None:
repos.setdefault(id, {})[name] = value
reponames = {}
for id, info in repos.iteritems():
if 'name' in info and ('dir' in info or 'alias' in info):
info['id'] = id
reponames[info['name']] = info
return reponames.iteritems()
# IAdminCommandProvider methods
def get_admin_commands(self):
yield ('repository add', '<repos> <dir> [type]',
"Add a source repository",
self._complete_add, self._do_add)
yield ('repository alias', '<name> <target>',
"Create an alias for a repository",
self._complete_alias, self._do_alias)
yield ('repository remove', '<repos>',
"Remove a source repository",
self._complete_repos, self._do_remove)
yield ('repository set', '<repos> <key> <value>',
"""Set an attribute of a repository
The following keys are supported: %s
""" % ', '.join(self.repository_attrs),
self._complete_set, self._do_set)
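# Example trac-admin invocations for the commands yielded above (environment
# path and repository names are illustrative):
#   trac-admin /path/to/env repository add main /srv/svn/main svn
#   trac-admin /path/to/env repository alias trunk main
#   trac-admin /path/to/env repository set main description "Main repo"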
def get_reponames(self):
rm = RepositoryManager(self.env)
return [reponame or '(default)' for reponame
in rm.get_all_repositories()]
def _complete_add(self, args):
if len(args) == 2:
return get_dir_list(args[-1], True)
elif len(args) == 3:
return RepositoryManager(self.env).get_supported_types()
def _complete_alias(self, args):
if len(args) == 2:
return self.get_reponames()
def _complete_repos(self, args):
if len(args) == 1:
return self.get_reponames()
def _complete_set(self, args):
if len(args) == 1:
return self.get_reponames()
elif len(args) == 2:
return self.repository_attrs
def _do_add(self, reponame, dir, type_=None):
self.add_repository(reponame, os.path.abspath(dir), type_)
def _do_alias(self, reponame, target):
self.add_alias(reponame, target)
def _do_remove(self, reponame):
self.remove_repository(reponame)
def _do_set(self, reponame, key, value):
if key not in self.repository_attrs:
raise AdminCommandError(_('Invalid key "%(key)s"', key=key))
if key == 'dir':
value = os.path.abspath(value)
if key in ('hidden', 'sync_per_request'):
value = '1' if as_bool(value) else None
self.modify_repository(reponame, {key: value})
if not reponame:
reponame = '(default)'
if key == 'dir':
printout(_('You should now run "repository resync %(name)s".',
name=reponame))
elif key == 'type':
printout(_('You may have to run "repository resync %(name)s".',
name=reponame))
# Public interface
def add_repository(self, reponame, dir, type_=None):
"""Add a repository."""
if not os.path.isabs(dir):
raise TracError(_("The repository directory must be absolute"))
if is_default(reponame):
reponame = ''
rm = RepositoryManager(self.env)
if type_ and type_ not in rm.get_supported_types():
raise TracError(_("The repository type '%(type)s' is not "
"supported", type=type_))
with self.env.db_transaction as db:
id = rm.get_repository_id(reponame)
db.executemany(
"INSERT INTO repository (id, name, value) VALUES (%s, %s, %s)",
[(id, 'dir', dir),
(id, 'type', type_ or '')])
rm.reload_repositories()
def add_alias(self, reponame, target):
"""Create an alias repository."""
if is_default(reponame):
reponame = ''
if is_default(target):
target = ''
rm = RepositoryManager(self.env)
repositories = rm.get_all_repositories()
if target not in repositories:
raise TracError(_("Repository \"%(repo)s\" doesn't exist",
repo=target or '(default)'))
if 'alias' in repositories[target]:
raise TracError(_('Cannot create an alias to the alias "%(repo)s"',
repo=target or '(default)'))
with self.env.db_transaction as db:
id = rm.get_repository_id(reponame)
db.executemany(
"INSERT INTO repository (id, name, value) VALUES (%s, %s, %s)",
[(id, 'dir', None),
(id, 'alias', target)])
rm.reload_repositories()
def remove_repository(self, reponame):
"""Remove a repository."""
if is_default(reponame):
reponame = ''
rm = RepositoryManager(self.env)
repositories = rm.get_all_repositories()
if any(reponame == repos.get('alias')
for repos in repositories.itervalues()):
raise TracError(_('Cannot remove the repository "%(repos)s" used '
'in aliases', repos=reponame or '(default)'))
with self.env.db_transaction as db:
id = rm.get_repository_id(reponame)
db("DELETE FROM repository WHERE id=%s", (id,))
db("DELETE FROM revision WHERE repos=%s", (id,))
db("DELETE FROM node_change WHERE repos=%s", (id,))
rm.reload_repositories()
def modify_repository(self, reponame, changes):
"""Modify attributes of a repository."""
if is_default(reponame):
reponame = ''
new_reponame = changes.get('name', reponame)
if is_default(new_reponame):
new_reponame = ''
rm = RepositoryManager(self.env)
if reponame != new_reponame:
repositories = rm.get_all_repositories()
if any(reponame == repos.get('alias')
for repos in repositories.itervalues()):
raise TracError(_('Cannot rename the repository "%(repos)s" '
'used in aliases',
repos=reponame or '(default)'))
with self.env.db_transaction as db:
id = rm.get_repository_id(reponame)
if reponame != new_reponame:
if db("""SELECT id FROM repository WHERE name='name' AND
value=%s""", (new_reponame,)):
raise TracError(_('The repository "%(name)s" already '
'exists.',
name=new_reponame or '(default)'))
for (k, v) in changes.iteritems():
if k not in self.repository_attrs:
continue
if k in ('alias', 'name') and is_default(v):
v = ''
if k == 'dir' and not os.path.isabs(v):
raise TracError(_("The repository directory must be "
"absolute"))
db("UPDATE repository SET value=%s WHERE id=%s AND name=%s",
(v, id, k))
if not db(
"SELECT value FROM repository WHERE id=%s AND name=%s",
(id, k)):
db("""INSERT INTO repository (id, name, value)
VALUES (%s, %s, %s)
""", (id, k, v))
rm.reload_repositories()
class RepositoryManager(Component):
"""Version control system manager."""
implements(IRequestFilter, IResourceManager, IRepositoryProvider)
changeset_realm = 'changeset'
source_realm = 'source'
repository_realm = 'repository'
connectors = ExtensionPoint(IRepositoryConnector)
providers = ExtensionPoint(IRepositoryProvider)
change_listeners = ExtensionPoint(IRepositoryChangeListener)
repositories_section = ConfigSection('repositories',
"""One of the alternatives for registering new repositories is to
populate the `[repositories]` section of the `trac.ini`.
This is especially suited for setting up convenience aliases,
short-lived repositories, or during the initial phases of an
installation.
See [TracRepositoryAdmin#Intrac.ini TracRepositoryAdmin] for details
about the format adopted for this section and the rest of that page for
the other alternatives.
(''since 0.12'')""")
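# A minimal sketch of the trac.ini syntax this section describes, as parsed
# by RepositoryManager.get_repositories() below (hypothetical names/paths):
#   [repositories]
#   main.dir = /srv/svn/main
#   main.type = svn
#   trunk.alias = main   (or the shorthand: trunk = main)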
default_repository_type = Option('versioncontrol',
'default_repository_type', 'svn',
"""Default repository connector type.
This is used as the default repository type for repositories defined
in the [[TracIni#repositories-section repositories]] section or using
the "Repositories" admin panel. (''since 0.12'')
""")
def __init__(self):
self._cache = {}
self._lock = threading.Lock()
self._connectors = None
self._all_repositories = None
# IRequestFilter methods
def pre_process_request(self, req, handler):
from trac.web.chrome import Chrome, add_warning
if handler is not Chrome(self.env):
for repo_info in self.get_all_repositories().values():
if not as_bool(repo_info.get('sync_per_request')):
continue
start = time.time()
repo_name = repo_info['name'] or '(default)'
try:
repo = self.get_repository(repo_info['name'])
repo.sync()
except TracError as e:
add_warning(req,
_("Can't synchronize with repository \"%(name)s\" "
"(%(error)s). Look in the Trac log for more "
"information.", name=repo_name,
error=to_unicode(e)))
except Exception as e:
add_warning(req,
_("Failed to sync with repository \"%(name)s\": "
"%(error)s; repository information may be out of "
"date. Look in the Trac log for more information "
"including mitigation strategies.",
name=repo_name, error=to_unicode(e)))
self.log.error(
"Failed to sync with repository \"%s\"; You may be "
"able to reduce the impact of this issue by "
"configuring the sync_per_request option; see "
"http://trac.edgewall.org/wiki/TracRepositoryAdmin"
"#ExplicitSync for more detail: %s", repo_name,
exception_to_unicode(e, traceback=True))
self.log.info("Synchronized '%s' repository in %0.2f seconds",
repo_name, time.time() - start)
return handler
def post_process_request(self, req, template, data, content_type):
return (template, data, content_type)
# IResourceManager methods
def get_resource_realms(self):
yield self.changeset_realm
yield self.source_realm
yield self.repository_realm
def get_resource_description(self, resource, format=None, **kwargs):
if resource.realm == self.changeset_realm:
parent = resource.parent
reponame = parent and parent.id
id = resource.id
if reponame:
return _("Changeset %(rev)s in %(repo)s", rev=id, repo=reponame)
else:
return _("Changeset %(rev)s", rev=id)
elif resource.realm == self.source_realm:
parent = resource.parent
reponame = parent and parent.id
id = resource.id
version = ''
if format == 'summary':
repos = self.get_repository(reponame)
node = repos.get_node(resource.id, resource.version)
if node.isdir:
kind = _("directory")
elif node.isfile:
kind = _("file")
if resource.version:
version = _(" at version %(rev)s", rev=resource.version)
else:
kind = _("path")
if resource.version:
version = '@%s' % resource.version
in_repo = _(" in %(repo)s", repo=reponame) if reponame else ''
# TRANSLATOR: file /path/to/file.py at version 13 in reponame
return _('%(kind)s %(id)s%(at_version)s%(in_repo)s',
kind=kind, id=id, at_version=version, in_repo=in_repo)
elif resource.realm == self.repository_realm:
if not resource.id:
return _("Default repository")
return _("Repository %(repo)s", repo=resource.id)
def get_resource_url(self, resource, href, **kwargs):
if resource.realm == self.changeset_realm:
parent = resource.parent
return href.changeset(resource.id, parent and parent.id or None)
elif resource.realm == self.source_realm:
parent = resource.parent
return href.browser(parent and parent.id or None, resource.id,
rev=resource.version or None)
elif resource.realm == self.repository_realm:
return href.browser(resource.id or None)
def resource_exists(self, resource):
if resource.realm == self.repository_realm:
reponame = resource.id
else:
reponame = resource.parent.id
repos = self.env.get_repository(reponame)
if not repos:
return False
if resource.realm == self.changeset_realm:
try:
repos.get_changeset(resource.id)
return True
except NoSuchChangeset:
return False
elif resource.realm == self.source_realm:
try:
repos.get_node(resource.id, resource.version)
return True
except NoSuchNode:
return False
elif resource.realm == self.repository_realm:
return True
# IRepositoryProvider methods
def get_repositories(self):
"""Retrieve repositories specified in TracIni.
The `[repositories]` section can be used to specify a list
of repositories.
"""
repositories = self.repositories_section
reponames = {}
# first pass to gather the <name>.dir entries
for option in repositories:
if option.endswith('.dir') and repositories.get(option):
reponames[option[:-4]] = {'sync_per_request': False}
# second pass to gather aliases
for option in repositories:
alias = repositories.get(option)
if '.' not in option: # Support <alias> = <repo> syntax
option += '.alias'
if option.endswith('.alias') and alias in reponames:
reponames.setdefault(option[:-6], {})['alias'] = alias
# third pass to gather the <name>.<detail> entries
for option in repositories:
if '.' in option:
name, detail = option.rsplit('.', 1)
if name in reponames and detail != 'alias':
reponames[name][detail] = repositories.get(option)
for reponame, info in reponames.iteritems():
yield (reponame, info)
# Public API methods
def get_supported_types(self):
"""Return the list of supported repository types."""
types = set(type_ for connector in self.connectors
for (type_, prio) in connector.get_supported_types() or []
if prio >= 0)
return list(types)
def get_repositories_by_dir(self, directory):
"""Retrieve the repositories based on the given directory.
:param directory: the key for identifying the repositories.
:return: list of `Repository` instances.
"""
directory = os.path.join(os.path.normcase(directory), '')
repositories = []
for reponame, repoinfo in self.get_all_repositories().iteritems():
dir = repoinfo.get('dir')
if dir:
dir = os.path.join(os.path.normcase(dir), '')
if dir.startswith(directory):
repos = self.get_repository(reponame)
if repos:
repositories.append(repos)
return repositories
def get_repository_id(self, reponame):
"""Return a unique id for the given repository name.
This will create and save a new id if none is found.
Note: this should probably be renamed as we're dealing
exclusively with *db* repository ids here.
"""
with self.env.db_transaction as db:
for id, in db(
"SELECT id FROM repository WHERE name='name' AND value=%s",
(reponame,)):
return id
id = db("SELECT COALESCE(MAX(id), 0) FROM repository")[0][0] + 1
db("INSERT INTO repository (id, name, value) VALUES (%s, %s, %s)",
(id, 'name', reponame))
return id
def get_repository(self, reponame):
"""Retrieve the appropriate `Repository` for the given
repository name.
:param reponame: the key for specifying the repository.
If no name is given, take the default
repository.
:return: if no corresponding repository was defined,
simply return `None`.
:raises InvalidRepository: if the repository cannot be opened.
"""
reponame = reponame or ''
repoinfo = self.get_all_repositories().get(reponame, {})
if 'alias' in repoinfo:
reponame = repoinfo['alias']
repoinfo = self.get_all_repositories().get(reponame, {})
rdir = repoinfo.get('dir')
if not rdir:
return None
rtype = repoinfo.get('type') or self.default_repository_type
# get a Repository for the reponame (use a thread-level cache)
with self.env.db_transaction: # prevent possible deadlock, see #4465
with self._lock:
tid = threading._get_ident()
if tid in self._cache:
repositories = self._cache[tid]
else:
repositories = self._cache[tid] = {}
repos = repositories.get(reponame)
if not repos:
if not os.path.isabs(rdir):
rdir = os.path.join(self.env.path, rdir)
connector = self._get_connector(rtype)
repos = connector.get_repository(rtype, rdir,
repoinfo.copy())
repositories[reponame] = repos
return repos
def get_repository_by_path(self, path):
"""Retrieve a matching `Repository` for the given `path`.
:param path: the eventually scoped repository-scoped path
:return: a `(reponame, repos, path)` triple, where `path` is
the remaining part of `path` once the `reponame` has
been truncated, if needed.
"""
matches = []
path = path.strip('/') + '/' if path else '/'
for reponame in self.get_all_repositories().keys():
stripped_reponame = reponame.strip('/') + '/'
if path.startswith(stripped_reponame):
matches.append((len(stripped_reponame), reponame))
if matches:
matches.sort()
length, reponame = matches[-1]
path = path[length:]
else:
reponame = ''
return (reponame, self.get_repository(reponame),
path.rstrip('/') or '/')
def get_default_repository(self, context):
"""Recover the appropriate repository from the current context.
Lookup the closest source or changeset resource in the context
hierarchy and return the name of its associated repository.
"""
while context:
if context.resource.realm in (self.source_realm,
self.changeset_realm):
return context.resource.parent.id
context = context.parent
def get_all_repositories(self):
"""Return a dictionary of repository information, indexed by name."""
if not self._all_repositories:
all_repositories = {}
for provider in self.providers:
for reponame, info in provider.get_repositories() or []:
if reponame in all_repositories:
self.log.warn("Discarding duplicate repository '%s'",
reponame)
else:
info['name'] = reponame
if 'id' not in info:
info['id'] = self.get_repository_id(reponame)
all_repositories[reponame] = info
self._all_repositories = all_repositories
return self._all_repositories
def get_real_repositories(self):
"""Return a set of all real repositories (i.e. excluding aliases)."""
repositories = set()
for reponame in self.get_all_repositories():
try:
repos = self.get_repository(reponame)
if repos is not None:
repositories.add(repos)
except TracError:
pass # Skip invalid repositories
return repositories
def reload_repositories(self):
"""Reload the repositories from the providers."""
with self._lock:
# FIXME: trac-admin doesn't reload the environment
self._cache = {}
self._all_repositories = None
self.config.touch() # Force environment reload
def notify(self, event, reponame, revs):
"""Notify repositories and change listeners about repository events.
The supported events are the names of the methods defined in the
`IRepositoryChangeListener` interface.
"""
self.log.debug("Event %s on repository '%s' for changesets %r",
event, reponame or '(default)', revs)
# Notify a repository by name, and all repositories with the same
# base, or all repositories by base or by repository dir
repos = self.get_repository(reponame)
repositories = []
if repos:
base = repos.get_base()
else:
dir = os.path.abspath(reponame)
repositories = self.get_repositories_by_dir(dir)
if repositories:
base = None
else:
base = reponame
if base:
repositories = [r for r in self.get_real_repositories()
if r.get_base() == base]
if not repositories:
self.log.warn("Found no repositories matching '%s' base.",
base or reponame)
return
errors = []
for repos in sorted(repositories, key=lambda r: r.reponame):
reponame = repos.reponame or '(default)'
repos.sync()
for rev in revs:
args = []
if event == 'changeset_modified':
try:
old_changeset = repos.sync_changeset(rev)
except NoSuchChangeset as e:
errors.append(exception_to_unicode(e))
self.log.warn(
"No changeset '%s' found in repository '%s'. "
"Skipping subscribers for event %s",
rev, reponame, event)
continue
else:
args.append(old_changeset)
try:
changeset = repos.get_changeset(rev)
except NoSuchChangeset:
try:
repos.sync_changeset(rev)
changeset = repos.get_changeset(rev)
except NoSuchChangeset as e:
errors.append(exception_to_unicode(e))
self.log.warn(
"No changeset '%s' found in repository '%s'. "
"Skipping subscribers for event %s",
rev, reponame, event)
continue
self.log.debug("Event %s on repository '%s' for revision '%s'",
event, reponame, rev)
for listener in self.change_listeners:
getattr(listener, event)(repos, changeset, *args)
return errors
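# Example (illustrative): a post-commit hook could call
# RepositoryManager(env).notify('changeset_added', 'projA', ['1234'])
# to resync the cache and dispatch the event to every registered
# IRepositoryChangeListener implementation.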
def shutdown(self, tid=None):
"""Free `Repository` instances bound to a given thread identifier"""
if tid:
assert tid == threading._get_ident()
with self._lock:
repositories = self._cache.pop(tid, {})
for reponame, repos in repositories.iteritems():
repos.close()
# private methods
def _get_connector(self, rtype):
"""Retrieve the appropriate connector for the given repository type.
Note that the self._lock must be held when calling this method.
"""
if self._connectors is None:
# build an environment-level cache for the preferred connectors
self._connectors = {}
for connector in self.connectors:
for type_, prio in connector.get_supported_types() or []:
keep = (connector, prio)
if type_ in self._connectors and \
prio <= self._connectors[type_][1]:
keep = None
if keep:
self._connectors[type_] = keep
if rtype in self._connectors:
connector, prio = self._connectors[rtype]
if prio >= 0: # no error condition
return connector
else:
raise TracError(
_('Unsupported version control system "%(name)s"'
': %(error)s', name=rtype,
error=to_unicode(connector.error)))
else:
raise TracError(
_('Unsupported version control system "%(name)s": '
'Can\'t find an appropriate component, maybe the '
'corresponding plugin was not enabled?', name=rtype))
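# Example of connector selection (hypothetical priorities): if two
# connectors both advertise support for 'svn', one with priority 4 and
# one with priority 8, the priority-8 connector wins; a negative
# priority marks a connector whose backend failed to load, and its
# error is raised as a TracError instead.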
class NoSuchChangeset(ResourceNotFound):
def __init__(self, rev):
ResourceNotFound.__init__(self,
_('No changeset %(rev)s in the repository',
rev=rev),
_('No such changeset'))
class NoSuchNode(ResourceNotFound):
def __init__(self, path, rev, msg=None):
if msg is None:
msg = _("No node %(path)s at revision %(rev)s", path=path, rev=rev)
else:
msg = _("%(msg)s: No node %(path)s at revision %(rev)s",
msg=msg, path=path, rev=rev)
ResourceNotFound.__init__(self, msg, _('No such node'))
class Repository(object):
"""Base class for a repository provided by a version control system."""
__metaclass__ = ABCMeta
has_linear_changesets = False
scope = '/'
realm = RepositoryManager.repository_realm
@property
def resource(self):
return Resource(self.realm, self.reponame)
def __init__(self, name, params, log):
"""Initialize a repository.
:param name: a unique name identifying the repository, usually a
type-specific prefix followed by the path to the
repository.
:param params: a `dict` of parameters for the repository. Contains
the name of the repository under the key "name" and
the surrogate key that identifies the repository in
the database under the key "id".
:param log: a logger instance.
:raises InvalidRepository: if the repository cannot be opened.
"""
self.name = name
self.params = params
self.reponame = params['name']
self.id = params['id']
self.log = log
def __repr__(self):
return '<%s %r %r %r>' % (self.__class__.__name__,
self.id, self.name, self.scope)
@abstractmethod
def close(self):
"""Close the connection to the repository."""
pass
def get_base(self):
"""Return the name of the base repository for this repository.
This function returns the name of the base repository to which scoped
repositories belong. For non-scoped repositories, it returns the
repository name.
"""
return self.name
def clear(self, youngest_rev=None):
"""Clear any data that may have been cached in instance properties.
`youngest_rev` can be specified as a way to force the value
of the `youngest_rev` property (''will change in 0.12'').
"""
pass
def sync(self, rev_callback=None, clean=False):
"""Perform a sync of the repository cache, if relevant.
If given, `rev_callback` must be a callable taking a `rev` parameter.
The backend will call this function for each `rev` it decided to
synchronize, once the synchronization changes are committed to the
cache. When `clean` is `True`, the cache is cleaned first.
"""
pass
def sync_changeset(self, rev):
"""Resync the repository cache for the given `rev`, if relevant.
Returns a "metadata-only" changeset containing the metadata prior to
the resync, or `None` if the old values cannot be retrieved (typically
when the repository is not cached).
"""
return None
def get_quickjump_entries(self, rev):
"""Generate a list of interesting places in the repository.
`rev` might be used to restrict the list of available locations,
but in general it's best to produce all known locations.
The generated results must be of the form (category, name, path, rev).
"""
return []
def get_path_url(self, path, rev):
"""Return the repository URL for the given path and revision.
The returned URL can be `None`, meaning that no URL has been specified
for the repository, an absolute URL, or a scheme-relative URL starting
with `//`, in which case the scheme of the request should be prepended.
"""
return None
@abstractmethod
def get_changeset(self, rev):
"""Retrieve a Changeset corresponding to the given revision `rev`."""
pass
def get_changeset_uid(self, rev):
"""Return a globally unique identifier for the ''rev'' changeset.
Two changesets from different repositories can sometimes refer to
the ''very same'' changeset (e.g. the repositories are clones).
"""
def get_changesets(self, start, stop):
"""Generate Changeset belonging to the given time period (start, stop).
"""
rev = self.youngest_rev
while rev:
chgset = self.get_changeset(rev)
if chgset.date < start:
return
if chgset.date < stop:
yield chgset
rev = self.previous_rev(rev)
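# Note: this default implementation walks backwards from youngest_rev
# via previous_rev(), which assumes an essentially linear history;
# backends with branching histories typically override this method.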
def has_node(self, path, rev=None):
"""Tell if there's a node at the specified (path,rev) combination.
When `rev` is `None`, the latest revision is implied.
"""
try:
self.get_node(path, rev)
return True
except TracError:
return False
@abstractmethod
def get_node(self, path, rev=None):
"""Retrieve a Node from the repository at the given path.
A Node represents a directory or a file at a given revision in the
repository.
If the `rev` parameter is specified, the Node corresponding to that
revision is returned, otherwise the Node corresponding to the youngest
revision is returned.
"""
pass
@abstractmethod
def get_oldest_rev(self):
"""Return the oldest revision stored in the repository."""
pass
oldest_rev = property(lambda self: self.get_oldest_rev())
@abstractmethod
def get_youngest_rev(self):
"""Return the youngest revision in the repository."""
pass
youngest_rev = property(lambda self: self.get_youngest_rev())
@abstractmethod
def previous_rev(self, rev, path=''):
"""Return the revision immediately preceding the specified revision.
If `path` is given, filter out ancestor revisions having no changes
below `path`.
In presence of multiple parents, this follows the first parent.
"""
pass
@abstractmethod
def next_rev(self, rev, path=''):
"""Return the revision immediately following the specified revision.
If `path` is given, filter out descendant revisions having no changes
below `path`.
In presence of multiple children, this follows the first child.
"""
pass
def parent_revs(self, rev):
"""Return a list of parents of the specified revision."""
parent = self.previous_rev(rev)
return [parent] if parent is not None else []
@abstractmethod
def rev_older_than(self, rev1, rev2):
"""Provides a total order over revisions.
Return `True` if `rev1` is an ancestor of `rev2`.
"""
pass
# @abstractmethod
def get_path_history(self, path, rev=None, limit=None):
"""Retrieve all the revisions containing this path.
If given, `rev` is used as a starting point (i.e. no revision
''newer'' than `rev` should be returned).
The result format should be the same as the one of Node.get_history()
:since 1.1.2: The method should be implemented in subclasses since
it will be made abstract in Trac 1.3.1. A `TypeError`
will result when instantiating classes that don't
implement the method.
"""
raise NotImplementedError
@abstractmethod
def normalize_path(self, path):
"""Return a canonical representation of path in the repos."""
pass
@abstractmethod
def normalize_rev(self, rev):
"""Return a (unique) canonical representation of a revision.
It's up to the backend to decide which string values of `rev`
(usually provided by the user) should be accepted, and how they
should be normalized. Some backends may for instance want to match
against known tags or branch names.
In addition, if `rev` is `None` or '', the youngest revision should
be returned.
:raise NoSuchChangeset: If the given `rev` isn't found.
"""
pass
def short_rev(self, rev):
"""Return a compact string representation of a revision in the
repos.
:raise NoSuchChangeset: If the given `rev` isn't found.
:since 1.2: Always returns a string or `None`.
"""
norm_rev = self.normalize_rev(rev)
return str(norm_rev) if norm_rev is not None else norm_rev
def display_rev(self, rev):
"""Return a string representation of a revision in the repos for
displaying to the user.
This can be a shortened revision string, e.g. for repositories
using long hashes.
:raise NoSuchChangeset: If the given `rev` isn't found.
:since 1.2: Always returns a string or `None`.
"""
norm_rev = self.normalize_rev(rev)
return str(norm_rev) if norm_rev is not None else norm_rev
@abstractmethod
def get_changes(self, old_path, old_rev, new_path, new_rev,
ignore_ancestry=1):
"""Generates changes corresponding to generalized diffs.
Generator that yields change tuples (old_node, new_node, kind, change)
for each node change between the two arbitrary (path,rev) pairs.
The old_node is assumed to be None when the change is an ADD,
the new_node is assumed to be None when the change is a DELETE.
"""
pass
def is_viewable(self, perm):
"""Return True if view permission is granted on the repository."""
return 'BROWSER_VIEW' in perm(self.resource.child('source', '/'))
can_view = is_viewable # 0.12 compatibility
class Node(object):
"""Represents a directory or file in the repository at a given revision."""
__metaclass__ = ABCMeta
DIRECTORY = "dir"
FILE = "file"
realm = RepositoryManager.source_realm
@property
def resource(self):
return Resource(self.realm, self.path, self.rev, self.repos.resource)
# created_path and created_rev properties refer to the Node "creation"
# in the Subversion meaning of a Node in a versioned tree (see #3340).
#
# Those properties must be set by subclasses.
#
created_rev = None
created_path = None
def __init__(self, repos, path, rev, kind):
assert kind in (Node.DIRECTORY, Node.FILE), \
"Unknown node kind %s" % kind
self.repos = repos
self.path = to_unicode(path)
self.rev = rev
self.kind = kind
def __repr__(self):
name = u'%s:%s' % (self.repos.name, self.path)
if self.rev is not None:
name += '@' + unicode(self.rev)
return '<%s %r>' % (self.__class__.__name__, name)
@abstractmethod
def get_content(self):
"""Return a stream for reading the content of the node.
This method will return `None` for directories.
The returned object must support a `read([len])` method.
"""
pass
def get_processed_content(self, keyword_substitution=True, eol_hint=None):
"""Return a stream for reading the content of the node, with some
standard processing applied.
:param keyword_substitution: if `True`, meta-data keywords
present in the content like ``$Rev$`` are substituted
(which keywords are substituted and how they are
substituted is backend specific)
:param eol_hint: which style of line ending is expected if
`None` was explicitly specified for the file itself in
the version control backend (for example in Subversion,
if it was set to ``'native'``). It can be `None`,
``'LF'``, ``'CR'`` or ``'CRLF'``.
"""
return self.get_content()
@abstractmethod
def get_entries(self):
"""Generator that yields the immediate child entries of a directory.
The entries are returned in no particular order.
If the node is a file, this method returns `None`.
"""
pass
@abstractmethod
def get_history(self, limit=None):
"""Provide backward history for this Node.
Generator that yields `(path, rev, chg)` tuples, one for each revision
in which the node was changed. This generator will follow copies and
moves of a node (if the underlying version control system supports
that), which will be indicated by the first element of the tuple
(i.e. the path) changing.
Starts with an entry for the current revision.
:param limit: if given, yield at most ``limit`` results.
"""
pass
def get_previous(self):
"""Return the change event corresponding to the previous revision.
This returns a `(path, rev, chg)` tuple.
"""
# get_history(2) yields the current revision first and its
# predecessor second; skip the first entry and return the second.
skip = True
for p in self.get_history(2):
if skip:
skip = False
else:
return p
@abstractmethod
def get_annotations(self):
"""Provide detailed backward history for the content of this Node.
Retrieve an array of revisions, one `rev` for each line of content
for that node.
Only expected to work on (text) FILE nodes, of course.
"""
pass
@abstractmethod
def get_properties(self):
"""Returns the properties (meta-data) of the node, as a dictionary.
The set of properties depends on the version control system.
"""
pass
@abstractmethod
def get_content_length(self):
"""The length in bytes of the content.
Will be `None` for a directory.
"""
pass
content_length = property(lambda self: self.get_content_length())
@abstractmethod
def get_content_type(self):
"""The MIME type corresponding to the content, if known.
Will be `None` for a directory.
"""
pass
content_type = property(lambda self: self.get_content_type())
def get_name(self):
return self.path.split('/')[-1]
name = property(lambda self: self.get_name())
@abstractmethod
def get_last_modified(self):
pass
last_modified = property(lambda self: self.get_last_modified())
isdir = property(lambda self: self.kind == Node.DIRECTORY)
isfile = property(lambda self: self.kind == Node.FILE)
def is_viewable(self, perm):
"""Return True if view permission is granted on the node."""
return ('BROWSER_VIEW' if self.isdir else 'FILE_VIEW') \
in perm(self.resource)
can_view = is_viewable # 0.12 compatibility
class Changeset(object):
"""Represents a set of changes committed at once in a repository."""
__metaclass__ = ABCMeta
ADD = 'add'
COPY = 'copy'
DELETE = 'delete'
EDIT = 'edit'
MOVE = 'move'
# change types which can have a diff associated with them
DIFF_CHANGES = (EDIT, COPY, MOVE) # MERGE
OTHER_CHANGES = (ADD, DELETE)
ALL_CHANGES = DIFF_CHANGES + OTHER_CHANGES
realm = RepositoryManager.changeset_realm
@property
def resource(self):
return Resource(self.realm, self.rev, parent=self.repos.resource)
def __init__(self, repos, rev, message, author, date):
self.repos = repos
self.rev = rev
self.message = message or ''
self.author = author or ''
self.date = date
def __repr__(self):
name = u'%s@%s' % (self.repos.name, self.rev)
return '<%s %r>' % (self.__class__.__name__, name)
def get_properties(self):
"""Returns the properties (meta-data) of the node, as a dictionary.
The set of properties depends on the version control system.
Warning: this used to yield 4-element tuples (besides `name` and
`text`, there were `wikiflag` and `htmlclass` values).
This is now replaced by the usage of IPropertyRenderer (see #1601).
"""
return []
@abstractmethod
def get_changes(self):
"""Generator that produces a tuple for every change in the changeset.
The tuple will contain `(path, kind, change, base_path, base_rev)`,
where `change` can be one of Changeset.ADD, Changeset.COPY,
Changeset.DELETE, Changeset.EDIT or Changeset.MOVE,
and `kind` is one of Node.FILE or Node.DIRECTORY.
The `path` is the targeted path for the `change` (which is
the ''deleted'' path for a DELETE change).
The `base_path` and `base_rev` are the source path and rev for the
action (`None` and `-1` in the case of an ADD change).
"""
pass
def get_branches(self):
"""Yield branches to which this changeset belong.
Each branch is given as a pair `(name, head)`, where `name` is
the branch name and `head` a flag set if the changeset is a head
for this branch (i.e. if it has no child changesets).
"""
return []
def get_tags(self):
"""Yield tags associated with this changeset.
.. versionadded :: 1.0
"""
return []
def get_bookmarks(self):
"""Yield bookmarks associated with this changeset.
.. versionadded :: 1.1.5
"""
return []
def is_viewable(self, perm):
"""Return True if view permission is granted on the changeset."""
return 'CHANGESET_VIEW' in perm(self.resource)
can_view = is_viewable # 0.12 compatibility
class EmptyChangeset(Changeset):
"""Changeset that contains no changes. This is typically used when the
changeset can't be retrieved."""
def __init__(self, repos, rev, message=None, author=None, date=None):
if date is None:
date = datetime(1970, 1, 1, tzinfo=utc)
super(EmptyChangeset, self).__init__(repos, rev, message, author,
date)
def get_changes(self):
return iter([])
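# Typical use (a sketch): when a changeset cannot be retrieved from the
# backend, callers can substitute EmptyChangeset(repos, rev) so that
# code iterating over get_changes() still works and simply sees no
# changes.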
# Note: Since Trac 0.12, Exception PermissionDenied class is gone,
# and class Authorizer is gone as well.
#
# Fine-grained permissions are now handled via normal permission policies.
|
wnt-zhp/hufce | refs/heads/master | django/conf/locale/nl/formats.py | 329 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y' # '20 januari 2009'
TIME_FORMAT = 'H:i' # '15:23'
DATETIME_FORMAT = 'j F Y H:i' # '20 januari 2009 15:23'
YEAR_MONTH_FORMAT = 'F Y' # 'januari 2009'
MONTH_DAY_FORMAT = 'j F' # '20 januari'
SHORT_DATE_FORMAT = 'j-n-Y' # '20-1-2009'
SHORT_DATETIME_FORMAT = 'j-n-Y H:i' # '20-1-2009 15:23'
FIRST_DAY_OF_WEEK = 1 # Monday (in Dutch 'maandag')
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d-%m-%Y', '%d-%m-%y', '%Y-%m-%d', # '20-01-2009', '20-01-09', '2009-01-20'
# '%d %b %Y', '%d %b %y', # '20 jan 2009', '20 jan 09'
# '%d %B %Y', '%d %B %y', # '20 januari 2009', '20 januari 09'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '15:23:35'
'%H.%M:%S', # '15.23:35'
'%H.%M', # '15.23'
'%H:%M', # '15:23'
)
DATETIME_INPUT_FORMATS = (
# With time in %H:%M:%S :
'%d-%m-%Y %H:%M:%S', '%d-%m-%y %H:%M:%S', '%Y-%m-%d %H:%M:%S', # '20-01-2009 15:23:35', '20-01-09 15:23:35', '2009-01-20 15:23:35'
# '%d %b %Y %H:%M:%S', '%d %b %y %H:%M:%S', # '20 jan 2009 15:23:35', '20 jan 09 15:23:35'
# '%d %B %Y %H:%M:%S', '%d %B %y %H:%M:%S', # '20 januari 2009 15:23:35', '20 januari 2009 15:23:35'
# With time in %H.%M:%S :
'%d-%m-%Y %H.%M:%S', '%d-%m-%y %H.%M:%S', # '20-01-2009 15.23:35', '20-01-09 15.23:35'
# '%d %b %Y %H.%M:%S', '%d %b %y %H.%M:%S', # '20 jan 2009 15.23:35', '20 jan 09 15.23:35'
# '%d %B %Y %H.%M:%S', '%d %B %y %H.%M:%S', # '20 januari 2009 15.23:35', '20 januari 2009 15.23:35'
# With time in %H:%M :
'%d-%m-%Y %H:%M', '%d-%m-%y %H:%M', '%Y-%m-%d %H:%M', # '20-01-2009 15:23', '20-01-09 15:23', '2009-01-20 15:23'
# '%d %b %Y %H:%M', '%d %b %y %H:%M', # '20 jan 2009 15:23', '20 jan 09 15:23'
# '%d %B %Y %H:%M', '%d %B %y %H:%M', # '20 januari 2009 15:23', '20 januari 2009 15:23'
# With time in %H.%M :
'%d-%m-%Y %H.%M', '%d-%m-%y %H.%M', # '20-01-2009 15.23', '20-01-09 15.23'
# '%d %b %Y %H.%M', '%d %b %y %H.%M', # '20 jan 2009 15.23', '20 jan 09 15.23'
# '%d %B %Y %H.%M', '%d %B %y %H.%M', # '20 januari 2009 15.23', '20 januari 2009 15.23'
# Without time :
'%d-%m-%Y', '%d-%m-%y', '%Y-%m-%d', # '20-01-2009', '20-01-09', '2009-01-20'
# '%d %b %Y', '%d %b %y', # '20 jan 2009', '20 jan 09'
# '%d %B %Y', '%d %B %y', # '20 januari 2009', '20 januari 2009'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
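# Example (a sketch, not part of this locale file): Django form fields try
# each input format in turn, roughly equivalent to:
#
# from datetime import datetime
# datetime.strptime('20-01-2009', '%d-%m-%Y') # -> datetime(2009, 1, 20, 0, 0)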
|
suncycheng/intellij-community | refs/heads/master | python/testData/refactoring/rename/renameUpdatesImportReferences/after/bar.py | 83 | import baz
from baz import f
|
PatrickChrist/scikit-learn | refs/heads/master | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
# Signature output is intentionally suppressed: the early return
# below short-circuits the rest of the method, which is kept here
# for reference.
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
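# Example output (a sketch, for a hypothetical class SomeClass whose
# 'Methods' entries all exist on the object):
#
# .. rubric:: Methods
#
# .. autosummary::
#
# ~SomeClass.fit
# ~SomeClass.transform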
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
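# Example (a sketch; 'some_object' is any class, function or module with a
# NumPy-style docstring):
#
# doc = get_doc_object(some_object)
# rst = str(doc) # reST output suitable for Sphinx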
|
jffm/pyanalyzer | refs/heads/master | rules/AvoidMultiImport.py | 1 | # Copyright (c) 2008-2009 Junior (Frederic) FLEURIAL MONFILS
#
# This file is part of PyAnalyzer.
#
# PyAnalyzer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# or see <http://www.opensource.org/licenses/gpl-3.0.html>
#
# Contact:
# Junior FLEURIAL MONFILS <frederic dot fleurialmonfils at cetic dot be>
__author__ = "Frederic F. MONFILS"
__version__ = "$Revision: $".split()[1]
__revision__ = __version__
# $Source: $
__date__ = "$Date: $"
__copyright__ = "Copyright (c) 2008-2009 Junior (Frederic) FLEURIAL MONFILS"
__license__ = "GPLv3"
__contact__ = "ffm at cetic.be"
import sys
from core.rule import Rule
class AvoidMultiImport(Rule):
"""Multiple imports on same line 'import %(modules)s'
Rationale: For readability, it is preferable to put every import
on a line by itself.
"""
class config:
severity = "convention"
code = 201
message = dict(modules="module1, module2")
def visitImport(self, node, *args):
if len(node.names) > 1:
self.report(
node,
dict(modules=", ".join([module for (module, alias) in node.names]))
)
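# Example (illustrative): this rule reports a statement such as
#
# import os, sys
#
# because the compiler Import node then carries two (module, alias)
# pairs, whereas one import per line passes.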
|
pombredanne/pants | refs/heads/master | contrib/go/tests/python/pants_test/contrib/go/targets/test_go_binary.py | 25 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants_test.base_test import BaseTest
from pants_test.contrib.go.targets.go_local_source_test_base import GoLocalSourceTestBase
from pants.contrib.go.targets.go_binary import GoBinary
class GoBinaryTest(GoLocalSourceTestBase, BaseTest):
@property
def target_type(self):
return GoBinary
|
apporc/nova | refs/heads/master | nova/tests/functional/test_servers.py | 16 | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import time
import zlib
from oslo_log import log as logging
from oslo_utils import timeutils
from nova import context
from nova import exception
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
import nova.virt.fake
LOG = logging.getLogger(__name__)
class ServersTestBase(integrated_helpers._IntegratedTestBase):
_api_version = 'v2'
_force_delete_parameter = 'forceDelete'
_image_ref_parameter = 'imageRef'
_flavor_ref_parameter = 'flavorRef'
_access_ipv4_parameter = 'accessIPv4'
_access_ipv6_parameter = 'accessIPv6'
_return_resv_id_parameter = 'return_reservation_id'
_min_count_parameter = 'min_count'
def setUp(self):
super(ServersTestBase, self).setUp()
self.conductor = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
def _wait_for_state_change(self, server, from_status):
for i in range(0, 50):
server = self.api.get_server(server['id'])
if server['status'] != from_status:
break
time.sleep(.1)
return server
def _restart_compute_service(self, *args, **kwargs):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
self.compute = self.start_service('compute', *args, **kwargs)
def _wait_for_deletion(self, server_id):
# Wait (briefly) for deletion
for _retries in range(50):
try:
found_server = self.api.get_server(server_id)
except client.OpenStackApiNotFoundException:
found_server = None
LOG.debug("Got 404, proceeding")
break
LOG.debug("Found_server=%s" % found_server)
# TODO(justinsb): Mock doesn't yet do accurate state changes
# if found_server['status'] != 'deleting':
# break
time.sleep(.1)
# Should be gone
self.assertFalse(found_server)
def _delete_server(self, server_id):
# Delete the server
self.api.delete_server(server_id)
self._wait_for_deletion(server_id)
def _get_access_ips_params(self):
return {self._access_ipv4_parameter: "172.19.0.2",
self._access_ipv6_parameter: "fe80::2"}
def _verify_access_ips(self, server):
self.assertEqual('172.19.0.2',
server[self._access_ipv4_parameter])
self.assertEqual('fe80::2', server[self._access_ipv6_parameter])
class ServersTest(ServersTestBase):
def test_get_servers(self):
# Simple check that listing servers works.
servers = self.api.get_servers()
for server in servers:
LOG.debug("server: %s" % server)
def test_create_server_with_error(self):
# Create a server which will enter error state.
fake_network.set_stub_network_methods(self.stubs)
def throw_error(*args, **kwargs):
raise exception.BuildAbortException(reason='',
instance_uuid='fake')
self.stubs.Set(nova.virt.fake.FakeDriver, 'spawn', throw_error)
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
found_server = self._wait_for_state_change(found_server, 'BUILD')
self.assertEqual('ERROR', found_server['status'])
self._delete_server(created_server_id)
def test_create_and_delete_server(self):
# Creates and deletes a server.
fake_network.set_stub_network_methods(self.stubs)
# Create server
# Build the server data gradually, checking errors along the way
server = {}
good_server = self._build_minimal_create_server_request()
post = {'server': server}
# Without an imageRef, this throws 500.
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# With an invalid imageRef, this throws 500.
server[self._image_ref_parameter] = self.get_invalid_image()
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# Add a valid imageRef
server[self._image_ref_parameter] = good_server.get(
self._image_ref_parameter)
# Without flavorRef, this throws 500
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
server[self._flavor_ref_parameter] = good_server.get(
self._flavor_ref_parameter)
# Without a name, this throws 500
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# Set a valid server name
server['name'] = good_server['name']
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Check it's there
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
# It should also be in the all-servers list
servers = self.api.get_servers()
server_ids = [s['id'] for s in servers]
self.assertIn(created_server_id, server_ids)
found_server = self._wait_for_state_change(found_server, 'BUILD')
# It should be available...
# TODO(justinsb): Mock doesn't yet do this...
self.assertEqual('ACTIVE', found_server['status'])
servers = self.api.get_servers(detail=True)
for server in servers:
self.assertIn("image", server)
self.assertIn("flavor", server)
self._delete_server(created_server_id)
def _force_reclaim(self):
# Make sure that compute manager thinks the instance is
# old enough to be expired
the_past = timeutils.utcnow() + datetime.timedelta(hours=1)
timeutils.set_time_override(override_time=the_past)
self.addCleanup(timeutils.clear_time_override)
ctxt = context.get_admin_context()
self.compute._reclaim_queued_deletes(ctxt)
def test_deferred_delete(self):
# Creates, deletes and waits for server to be reclaimed.
self.flags(reclaim_instance_interval=1)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Cannot restore unless instance is deleted
self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, created_server_id,
{'restore': {}})
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SOFT_DELETED', found_server['status'])
self._force_reclaim()
# Wait for real deletion
self._wait_for_deletion(created_server_id)
def test_deferred_delete_restore(self):
# Creates, deletes and restores a server.
self.flags(reclaim_instance_interval=3600)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SOFT_DELETED', found_server['status'])
# Restore server
self.api.post_server_action(created_server_id, {'restore': {}})
# Wait for server to become active again
found_server = self._wait_for_state_change(found_server, 'DELETED')
self.assertEqual('ACTIVE', found_server['status'])
def test_deferred_delete_force(self):
# Creates, deletes and force deletes a server.
self.flags(reclaim_instance_interval=3600)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SOFT_DELETED', found_server['status'])
# Force delete server
self.api.post_server_action(created_server_id,
{self._force_delete_parameter: {}})
# Wait for real deletion
self._wait_for_deletion(created_server_id)
def test_create_server_with_metadata(self):
# Creates a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# Build the server data gradually, checking errors along the way
server = self._build_minimal_create_server_request()
metadata = {}
for i in range(30):
metadata['key_%s' % i] = 'value_%s' % i
server['metadata'] = metadata
post = {'server': server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual(metadata, found_server.get('metadata'))
# The server should also be in the all-servers details list
servers = self.api.get_servers(detail=True)
server_map = {server['id']: server for server in servers}
found_server = server_map.get(created_server_id)
self.assertTrue(found_server)
# Details do include metadata
self.assertEqual(metadata, found_server.get('metadata'))
# The server should also be in the all-servers summary list
servers = self.api.get_servers(detail=False)
server_map = {server['id']: server for server in servers}
found_server = server_map.get(created_server_id)
self.assertTrue(found_server)
# Summary should not include metadata
self.assertFalse(found_server.get('metadata'))
# Cleanup
self._delete_server(created_server_id)
def test_create_and_rebuild_server(self):
# Rebuild a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# create a server with initially has no metadata
server = self._build_minimal_create_server_request()
server_post = {'server': server}
metadata = {}
for i in range(30):
metadata['key_%s' % i] = 'value_%s' % i
server_post['server']['metadata'] = metadata
created_server = self.api.post_server(server_post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
created_server = self._wait_for_state_change(created_server, 'BUILD')
# rebuild the server with metadata and other server attributes
post = {}
post['rebuild'] = {
self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"name": "blah",
self._access_ipv4_parameter: "172.19.0.2",
self._access_ipv6_parameter: "fe80::2",
"metadata": {'some': 'thing'},
}
post['rebuild'].update(self._get_access_ips_params())
self.api.post_server_action(created_server_id, post)
LOG.debug("rebuilt server: %s" % created_server)
self.assertTrue(created_server['id'])
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({'some': 'thing'}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
self.assertEqual(post['rebuild'][self._image_ref_parameter],
found_server.get('image')['id'])
self._verify_access_ips(found_server)
# rebuild the server with empty metadata and nothing else
post = {}
post['rebuild'] = {
self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"metadata": {},
}
self.api.post_server_action(created_server_id, post)
LOG.debug("rebuilt server: %s" % created_server)
self.assertTrue(created_server['id'])
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
self.assertEqual(post['rebuild'][self._image_ref_parameter],
found_server.get('image')['id'])
self._verify_access_ips(found_server)
# Cleanup
self._delete_server(created_server_id)
def test_rename_server(self):
# Test building and renaming a server.
fake_network.set_stub_network_methods(self.stubs)
# Create a server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
server_id = created_server['id']
self.assertTrue(server_id)
# Rename the server to 'new-name'
self.api.put_server(server_id, {'server': {'name': 'new-name'}})
# Check the name of the server
created_server = self.api.get_server(server_id)
self.assertEqual(created_server['name'], 'new-name')
# Cleanup
self._delete_server(server_id)
def test_create_multiple_servers(self):
# Creates multiple servers and checks for reservation_id.
# Create 2 servers, setting 'return_reservation_id, which should
# return a reservation_id
server = self._build_minimal_create_server_request()
server[self._min_count_parameter] = 2
server[self._return_resv_id_parameter] = True
post = {'server': server}
response = self.api.post_server(post)
self.assertIn('reservation_id', response)
reservation_id = response['reservation_id']
self.assertNotIn(reservation_id, ['', None])
# Create 1 more server, which should not return a reservation_id
server = self._build_minimal_create_server_request()
post = {'server': server}
created_server = self.api.post_server(post)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# lookup servers created by the first request.
servers = self.api.get_servers(detail=True,
search_opts={'reservation_id': reservation_id})
server_map = {server['id']: server for server in servers}
found_server = server_map.get(created_server_id)
# The server from the 2nd request should not be there.
self.assertIsNone(found_server)
# Should have found 2 servers.
self.assertEqual(len(server_map), 2)
# Cleanup
self._delete_server(created_server_id)
for server_id in server_map:
self._delete_server(server_id)
def test_create_server_with_injected_files(self):
# Creates a server with injected_files.
fake_network.set_stub_network_methods(self.stubs)
personality = []
# Inject a text file
data = 'Hello, World!'
personality.append({
'path': '/helloworld.txt',
'contents': data.encode('base64'),
})
# Inject a binary file
data = zlib.compress('Hello, World!')
personality.append({
'path': '/helloworld.zip',
'contents': data.encode('base64'),
})
# Create server
server = self._build_minimal_create_server_request()
server['personality'] = personality
post = {'server': server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Check it's there
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
found_server = self._wait_for_state_change(found_server, 'BUILD')
self.assertEqual('ACTIVE', found_server['status'])
# Cleanup
self._delete_server(created_server_id)
class ServersTestV21(ServersTest):
_api_version = 'v2.1'
|
ilo10/scikit-learn | refs/heads/master | sklearn/manifold/isomap.py | 229 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
# Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
# This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
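# Example usage (a minimal sketch; assumes numpy is available):
#
# import numpy as np
# X = np.random.RandomState(0).rand(100, 5)
# emb = Isomap(n_neighbors=10, n_components=2).fit_transform(X)
# emb.shape # -> (100, 2)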
|
thomastu/pyEIA | refs/heads/master | eia/constants.py | 1 | from enum import Enum
class Category(Enum):
root = 371
electricity = 0
seds = 40203
petroleum = 714755
natural_gas = 714804
total_energy = 711224
coal = 717234
steo = 829714
aeo = 964164
crude_oil = 1292190
international_energy = 2134384
usesod = 2123635
co2_emissions = 2251604
us_nuclear_outages = 2889994
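# Example (illustrative): these IDs identify EIA API categories, e.g.
# Category.root.value == 371 and Category.petroleum.value == 714755.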
|
shepdelacreme/ansible | refs/heads/devel | lib/ansible/modules/network/aci/aci_interface_policy_ospf.py | 15 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_interface_policy_ospf
short_description: Manage OSPF interface policies (ospf:IfPol)
description:
- Manage OSPF interface policies on Cisco ACI fabrics.
notes:
- More information about the internal APIC class B(ospf:IfPol) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.7'
options:
tenant:
description:
- The name of the Tenant the OSPF interface policy should belong to.
required: yes
aliases: [ tenant_name ]
ospf:
description:
- The OSPF interface policy name.
- This name can be between 1 and 64 alphanumeric characters.
- Note that you cannot change this name after the object has been saved.
required: yes
aliases: [ ospf_interface, name ]
description:
description:
- The description for the OSPF interface.
aliases: [ descr ]
network_type:
description:
- The OSPF interface policy network type.
- OSPF supports broadcast and point-to-point.
- The APIC defaults to C(unspecified) when unset during creation.
choices: [ bcast, p2p ]
cost:
description:
- The OSPF cost of the interface.
- The cost (also called metric) of an interface in OSPF is an indication of
the overhead required to send packets across a certain interface. The
cost of an interface is inversely proportional to the bandwidth of that
interface. A higher bandwidth indicates a lower cost. There is more
overhead (higher cost) and time delays involved in crossing a 56k serial
line than crossing a 10M ethernet line. The formula used to calculate the
cost is C(cost= 10000 0000/bandwith in bps) For example, it will cost
10 EXP8/10 EXP7 = 10 to cross a 10M Ethernet line and will cost
10 EXP8/1544000 = 64 to cross a T1 line.
- By default, the cost of an interface is calculated based on the bandwidth;
you can force the cost of an interface with the ip ospf cost value
interface subconfiguration mode command.
- Accepted values range between C(1) and C(450).
- The APIC defaults to C(0) when unset during creation.
controls:
description:
- The interface policy controls.
- 'This is a list of one or more of the following controls:'
- C(advert-subnet) -- Advertise IP subnet instead of a host mask in the router LSA.
- C(bfd) -- Bidirectional Forwarding Detection
- C(mtu-ignore) -- Disables MTU mismatch detection on an interface.
- C(passive) -- The interface does not participate in the OSPF protocol and
will not establish adjacencies or send routing updates. However the
interface is announced as part of the routing network.
type: list
choices: [ advert-subnet, bfd, mtu-ignore, passive ]
dead_interval:
description:
- The time period without received hello packets from a neighbor before
the router declares the neighbor as down.
- This value must be the same for all networking devices on a specific network.
- Specifying a smaller dead interval (seconds) will give faster detection
of a neighbor being down and improve convergence, but might cause more
routing instability.
- Accepted values range between C(1) and C(65535).
- The APIC defaults to C(40) when unset during creation.
type: int
hello_interval:
description:
- The interval between hello packets that OSPF sends on the interface.
- Note that the smaller the hello interval, the faster topological changes will be detected, but more routing traffic will ensue.
- This value must be the same for all routers and access servers on a specific network.
- Accepted values range between C(1) and C(65535).
- The APIC defaults to C(10) when unset during creation.
type: int
prefix_suppression:
description:
- Whether prefix suppressions is enabled or disabled.
- The APIC defaults to C(inherit) when unset during creation.
type: bool
priority:
description:
- The priority for the OSPF interface profile.
- Accepted values range between C(0) and C(255).
- The APIC defaults to C(1) when unset during creation.
type: int
retransmit_interval:
description:
- The interval between LSA retransmissions.
- The retransmit interval occurs while the router is waiting for an acknowledgement from the neighbor router that it received the LSA.
- If no acknowledgement is received at the end of the interval, then the LSA is resent.
- Accepted values range between C(1) and C(65535).
- The APIC defaults to C(5) when unset during creation.
type: int
transmit_delay:
description:
- The delay time needed to send an LSA update packet.
- OSPF increments the LSA age time by the transmit delay amount before transmitting the LSA update.
- You should take into account the transmission and propagation delays for the interface when you set this value.
- Accepted values range between C(1) and C(450).
- The APIC defaults to C(1) when unset during creation.
type: int
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Ensure ospf interface policy exists
aci_interface_policy_ospf:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
ospf: ospf1
state: present
delegate_to: localhost
- name: Ensure ospf interface policy does not exist
aci_interface_policy_ospf:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
ospf: ospf1
state: absent
delegate_to: localhost
- name: Query an ospf interface policy
aci_interface_policy_ospf:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
ospf: ospf1
state: query
delegate_to: localhost
register: query_result
- name: Query all ospf interface policies in tenant production
aci_interface_policy_ospf:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for querying all objects
ospf=dict(type='str', required=False, aliases=['ospf_interface', 'name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
network_type=dict(type='str', choices=['bcast', 'p2p']),
cost=dict(type='int'),
controls=dict(type='list', choices=['advert-subnet', 'bfd', 'mtu-ignore', 'passive']),
dead_interval=dict(type='int'),
hello_interval=dict(type='int'),
prefix_suppression=dict(type='bool'),
priority=dict(type='int'),
retransmit_interval=dict(type='int'),
transmit_delay=dict(type='int'),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['ospf', 'tenant']],
['state', 'present', ['ospf', 'tenant']],
],
)
aci = ACIModule(module)
tenant = module.params['tenant']
ospf = module.params['ospf']
description = module.params['description']
if module.params['controls'] is None:
controls = None
else:
controls = ','.join(module.params['controls'])
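# e.g. ['bfd', 'passive'] joins to 'bfd,passive', the comma-separated ctrl
# string pushed to the APIC in the ospfIfPol payload below.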
cost = module.params['cost']
if cost is not None and cost not in range(1, 451):
module.fail_json(msg="Parameter 'cost' is only valid in range between 1 and 450.")
dead_interval = module.params['dead_interval']
if dead_interval is not None and dead_interval not in range(1, 65536):
module.fail_json(msg="Parameter 'dead_interval' is only valid in range between 1 and 65536.")
hello_interval = module.params['hello_interval']
if hello_interval is not None and hello_interval not in range(1, 65536):
module.fail_json(msg="Parameter 'hello_interval' is only valid in range between 1 and 65536.")
network_type = module.params['network_type']
prefix_suppression = aci.boolean(module.params['prefix_suppression'], 'enabled', 'disabled')
priority = module.params['priority']
if priority is not None and priority not in range(0, 256):
module.fail_json(msg="Parameter 'priority' is only valid in range between 1 and 255.")
retransmit_interval = module.params['retransmit_interval']
if retransmit_interval is not None and retransmit_interval not in range(1, 65536):
module.fail_json(msg="Parameter 'retransmit_interval' is only valid in range between 1 and 65536.")
transmit_delay = module.params['transmit_delay']
if transmit_delay is not None and transmit_delay not in range(1, 451):
module.fail_json(msg="Parameter 'transmit_delay' is only valid in range between 1 and 450.")
state = module.params['state']
aci.construct_url(
root_class=dict(
aci_class='ospfIfPol',
aci_rn='tn-{0}/ospfIfPol-{1}'.format(tenant, ospf),
module_object=ospf,
target_filter={'name': ospf},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='ospfIfPol',
class_config=dict(
name=ospf,
descr=description,
cost=cost,
ctrl=controls,
deadIntvl=dead_interval,
helloIntvl=hello_interval,
nwT=network_type,
pfxSuppress=prefix_suppression,
prio=priority,
rexmitIntvl=retransmit_interval,
xmitDelay=transmit_delay,
),
)
aci.get_diff(aci_class='ospfIfPol')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
|
dleecefft/pcapstats | refs/heads/master | utils/simpletcphoneypot.py | 1 | #!/usr/bin/env python
# A simple lure that accepts command line args for listening port, IP address and log file.
# This should be completely detectable by a human, but an automated probe might waste some time.
# The log file captures events to assist with profiling.
# Creates a new log every day, named with a date stamp.
#
# Credits:
# Borrows heavily from Justin Seitz's Black Hat Python, and some smart folks in Canada :-)
# https://www.nostarch.com/blackhatpython
# All those folks who write RFCs, thank you for your patience. https://tools.ietf.org/html/rfc5424#page-11
import socket, threading, os, sys, getopt, errno
from datetime import datetime
def datelogprefix():
now=datetime.now()
logdatepre = now.strftime("%Y-%m-%d")
return logdatepre
def writelog(logsfx,logevt):
# Get the time stamp as soon as possible
tstamp = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
# This log directory is hard coded here and checked at runtime below; change it in both places if you need to move it.
# Builds a new log file for every day the honeypot sees activity, so it is easier to find event data.
thislog = "/var/tmp/hpotlogs/" + datelogprefix() + "_" + logsfx
# write events in RFC 5424 syslog format so other tools may be able to ingest them; add TZ info if really needed
# local 0, notice
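# PRI <133> = facility * 8 + severity: local0 is facility 16 and notice is
# severity 5, so 16 * 8 + 5 = 133 (see RFC 5424 section 6.2.1).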
# keep fractional seconds in the timestamp for fidelity, milliseconds are helpful
logevthdr = "<133>" + str(tstamp) + hname
if os.path.exists(thislog):
wmode = 'ab+'
else:
wmode = 'w+'
with open(thislog,wmode) as fh:
fh.write(logevthdr + "tcphoneypot: " + logevt + "\n")
return
def handle_client(client_socket):
req = client_socket.recv(1024)
logstring = "Recieved data %s" % req
writelog(logfile,logstring)
# acknowledge and quit
client_socket.send("ACK!")
client_socket.close()
def runserver():
while True:
client,addr = tcpserver.accept()
logstring = "Accepted connection from %s:%d" % (addr[0],addr[1])
writelog(logfile,logstring)
client_handler = threading.Thread(target=handle_client,args=(client,))
client_handler.start()
if __name__ == "__main__":
# input and output files
listenip=''
listenport=''
logfile=''
# Use getopt to avoid param order errors
if len(sys.argv) != 7:
print("Usage: %s -i 198.51.100.99 -p 3389 -l rdphoney.log" % sys.argv[0])
exit()
opts, args = getopt.getopt(sys.argv[1:],"i:p:l:")
for o, a in opts:
if o == '-i':
listenip=a
elif o == '-p':
listenport=int(a)
elif o == '-l':
logfile=a
elif o == '-h':
print("Usage: %s -i 198.51.100.99 -p 3389 -l rdphoney.log" % sys.argv[0])
else:
print("Usage: %s -i 198.51.100.99 -p 3389 -l rdphoney.log" % sys.argv[0])
# need a default spot to place honeypot logs, make group writeable if you have a few users running them
d = "/var/tmp/hpotlogs"
if not os.path.exists(d):
os.mkdir(d,0770)
# hostname with a leading space and a following space, make it once
hname = " " + socket.gethostname() + " "
tcpserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
tcpserver.bind((listenip,listenport))
except socket.error, v:
errorcode=v[0]
# in the long run we should do a netstat and confirm the IP and port are legit and not already in use, advise the user, then bail.
print errorcode
raise
try:
tcpserver.listen(12) # a dozen backlog should be plenty but check, auto scanners are aggressive.
logstring = "Listening on %s:%d" % (listenip,listenport)
writelog(logfile,logstring)
except socket.error, v:
errorcode=v[0]
# assuming the bind above worked, this should never fire, so leave it messy.
print errorcode
raise
runserver() |
MTG/essentia | refs/heads/master | test/src/unittests/standard/test_tensornormalize.py | 1 | #!/usr/bin/env python
# Copyright (C) 2006-2021 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from sklearn.preprocessing import MinMaxScaler, StandardScaler
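# scikit-learn's scalers serve as the reference implementations here:
# StandardScaler computes (x - mean) / std and MinMaxScaler computes
# (x - min) / (max - min) over the flattened values.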
class TestTensorNormalize(TestCase):
def scalerSingleValue(self, scaler_arg=('standard', StandardScaler())):
scaler_name, scaler = scaler_arg
original = numpy.arange(1, dtype='float32')
# Use scikit-learn to generate the expected results
expected = scaler.fit_transform(original.reshape(-1, 1))
# Add singleton dimensions to transform the input vector into a tensor
original = numpy.expand_dims(original, axis=[0, 1, 2])
result = TensorNormalize(scaler=scaler_name, axis=-1)(original)
self.assertAlmostEqualVector(result.flatten(), expected.flatten(), 1e-6)
def scalerOverall(self, scaler_arg=('standard', StandardScaler())):
scaler_name, scaler = scaler_arg
original = numpy.arange(3, dtype='float32')
expected = scaler.fit_transform(original.reshape(-1, 1))
original = numpy.expand_dims(original, axis=[0, 1, 2])
result = TensorNormalize(scaler=scaler_name, axis=-1)(original)
self.assertAlmostEqualVector(result.flatten(), expected.flatten(), 1e-6)
def scalerAlongAxis(self, axis=0, scaler_arg=('standard', StandardScaler())):
scaler_name, scaler = scaler_arg
dims, length = 4, 2
original = numpy.arange(length ** dims, dtype='float32').reshape([length] * dims)
expected = numpy.empty(original.shape)
for i in range(original.shape[0]):
# Swap the axes before and after so we can test all the dimensions
# using the same operator ([i, :, :, :]).
original = numpy.swapaxes(original, 0, axis)
expected = numpy.swapaxes(expected, 0, axis)
tmp = scaler.fit_transform(original[i, :, :, :].reshape(-1, 1))
expected[i, :, :, :] = tmp.reshape([1] + [length] * (dims - 1))
original = numpy.swapaxes(original, 0, axis)
expected = numpy.swapaxes(expected, 0, axis)
result = TensorNormalize(scaler=scaler_name, axis=axis)(original)
self.assertAlmostEqualVector(result.flatten(), expected.flatten(), 1e-6)
def scalerOverallConstantValue(self, scaler_arg=('standard', StandardScaler())):
scaler_name, scaler = scaler_arg
original = numpy.ones([2, 2], dtype='float32')
# Use scikit-learn to generate the expected results
expected = scaler.fit_transform(original.reshape(-1, 1))
# Add singleton dimensions to transform the input vector into a tensor
original = numpy.expand_dims(original, axis=[0, 1])
result = TensorNormalize(scaler=scaler_name, axis=-1)(original)
self.assertAlmostEqualVector(result.flatten(), expected.flatten(), 1e-6)
def scalerAlongAxisConstantValue(self, axis=0, scaler_arg=('standard', StandardScaler())):
scaler_name, scaler = scaler_arg
dims, length = 4, 2
original = numpy.arange(length ** dims, dtype='float32').reshape([length] * dims)
# Constant along one of the axes
original[0,:,:,:] = numpy.ones(length ** (dims - 1), dtype='float32').reshape([length] * (dims - 1))
expected = numpy.empty(original.shape)
for i in range(original.shape[0]):
# Swap the axes before and after so we can test all the dimensions
# using the same operator ([i, :, :, :]).
original = numpy.swapaxes(original, 0, axis)
expected = numpy.swapaxes(expected, 0, axis)
tmp = scaler.fit_transform(original[i, :, :, :].reshape(-1, 1))
expected[i, :, :, :] = tmp.reshape([1] + [length] * (dims - 1))
original = numpy.swapaxes(original, 0, axis)
expected = numpy.swapaxes(expected, 0, axis)
result = TensorNormalize(scaler=scaler_name, axis=axis)(original)
self.assertAlmostEqualVector(result.flatten(), expected.flatten(), 1e-6)
def testStandardScalerOverall(self):
self.scalerOverall(scaler_arg=('standard', StandardScaler()))
def testMinMaxScalerOverall(self):
self.scalerOverall(scaler_arg=('minMax', MinMaxScaler()))
def testStandardScalerAlongAxis(self):
for i in range(4):
self.scalerAlongAxis(axis=i, scaler_arg=('standard', StandardScaler()))
def testMinMaxScalerAlongAxis(self):
for i in range(4):
self.scalerAlongAxis(axis=i, scaler_arg=('minMax', MinMaxScaler()))
def testStandardScalerOverallConstantValue(self):
self.scalerOverallConstantValue(scaler_arg=('standard', StandardScaler()))
def testMinMaxScalerOverallConstantValue(self):
self.scalerOverallConstantValue(scaler_arg=('minMax', MinMaxScaler()))
def testStandardScalerAlongAxisConstantValue(self):
for i in range(4):
self.scalerAlongAxisConstantValue(axis=i, scaler_arg=('standard', StandardScaler()))
def testMinMaxScalerAlongAxisConstantValue(self):
for i in range(4):
self.scalerAlongAxisConstantValue(axis=i, scaler_arg=('minMax', MinMaxScaler()))
def testInvalidParam(self):
self.assertConfigureFails(TensorNormalize(), { 'axis': -2 })
self.assertConfigureFails(TensorNormalize(), { 'axis': 5 })
self.assertConfigureFails(TensorNormalize(), { 'scaler': 'MAXMIN' })
suite = allTests(TestTensorNormalize)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
benoitsteiner/tensorflow-opencl | refs/heads/master | tensorflow/python/keras/datasets/mnist/__init__.py | 73 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MNIST handwritten digits classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras.datasets.mnist import load_data
del absolute_import
del division
del print_function
|
yuanagain/seniorthesis | refs/heads/master | venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_fitpack.py | 100 | from __future__ import division, print_function, absolute_import
import os
import numpy as np
from numpy.testing import (assert_equal, assert_allclose, assert_,
TestCase, assert_raises, run_module_suite, assert_almost_equal,
assert_array_almost_equal)
from numpy import array, asarray, pi, sin, cos, arange, dot, ravel, sqrt, round
from scipy import interpolate
from scipy.interpolate.fitpack import (splrep, splev, bisplrep, bisplev,
sproot, splprep, splint, spalde, splder, splantider, insert, dblint)
def data_file(basename):
return os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', basename)
def norm2(x):
return sqrt(dot(x.T,x))
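# f1 evaluates the d-th derivative of sin at x (or returns a name when x or d
# is None); derivatives of sin cycle with period 4 (sin, cos, -sin, -cos),
# hence the d % 4 dispatch below.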
def f1(x,d=0):
if d is None:
return "sin"
if x is None:
return "sin(x)"
if d % 4 == 0:
return sin(x)
if d % 4 == 1:
return cos(x)
if d % 4 == 2:
return -sin(x)
if d % 4 == 3:
return -cos(x)
def f2(x,y=0,dx=0,dy=0):
if x is None:
return "sin(x+y)"
d = dx+dy
if d % 4 == 0:
return sin(x+y)
if d % 4 == 1:
return cos(x+y)
if d % 4 == 2:
return -sin(x+y)
if d % 4 == 3:
return -cos(x+y)
def makepairs(x, y):
"""Helper function to create an array of pairs of x and y."""
# Or itertools.product (>= python 2.6)
xy = array([[a, b] for a in asarray(x) for b in asarray(y)])
return xy.T
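# e.g. makepairs([1, 2], [3, 4]) gives array([[1, 1, 2, 2], [3, 4, 3, 4]]),
# i.e. row 0 holds the x of each pair and row 1 the matching y.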
def put(*a):
"""Produce some output if file run directly"""
import sys
if hasattr(sys.modules['__main__'], '__put_prints'):
sys.stderr.write("".join(map(str, a)) + "\n")
class TestSmokeTests(TestCase):
"""
Smoke tests (with a few asserts) for fitpack routines -- mostly
check that they are runnable
"""
def check_1(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,at=0,xb=None,xe=None):
if xb is None:
xb = a
if xe is None:
xe = b
x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
x1 = a+(b-a)*arange(1,N,dtype=float)/float(N-1) # middle points of the nodes
v,v1 = f(x),f(x1)
nk = []
def err_est(k, d):
# Assume f has all derivatives < 1
h = 1.0/float(N)
tol = 5 * h**(.75*(k-d))
if s > 0:
tol += 1e5*s
return tol
for k in range(1,6):
tck = splrep(x,v,s=s,per=per,k=k,xe=xe)
if at:
t = tck[0][k:-k]
else:
t = x1
nd = []
for d in range(k+1):
tol = err_est(k, d)
err = norm2(f(t,d)-splev(t,tck,d)) / norm2(f(t,d))
assert_(err < tol, (k, d, err, tol))
nd.append((err, tol))
nk.append(nd)
put("\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]" % (f(None),
repr(round(xb,3)),repr(round(xe,3)),
repr(round(a,3)),repr(round(b,3))))
if at:
str = "at knots"
else:
str = "at the middle of nodes"
put(" per=%d s=%s Evaluation %s" % (per,repr(s),str))
put(" k : |f-s|^2 |f'-s'| |f''-.. |f'''-. |f''''- |f'''''")
k = 1
for l in nk:
put(' %d : ' % k)
for r in l:
put(' %.1e %.1e' % r)
put('\n')
k = k+1
def check_2(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None,
ia=0,ib=2*pi,dx=0.2*pi):
if xb is None:
xb = a
if xe is None:
xe = b
x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
v = f(x)
def err_est(k, d):
# Assume f has all derivatives < 1
h = 1.0/float(N)
tol = 5 * h**(.75*(k-d))
if s > 0:
tol += 1e5*s
return tol
nk = []
for k in range(1,6):
tck = splrep(x,v,s=s,per=per,k=k,xe=xe)
nk.append([splint(ia,ib,tck),spalde(dx,tck)])
put("\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]" % (f(None),
repr(round(xb,3)),repr(round(xe,3)),
repr(round(a,3)),repr(round(b,3))))
put(" per=%d s=%s N=%d [a, b] = [%s, %s] dx=%s" % (per,repr(s),N,repr(round(ia,3)),repr(round(ib,3)),repr(round(dx,3))))
put(" k : int(s,[a,b]) Int.Error Rel. error of s^(d)(dx) d = 0, .., k")
k = 1
for r in nk:
if r[0] < 0:
sr = '-'
else:
sr = ' '
put(" %d %s%.8f %.1e " % (k,sr,abs(r[0]),
abs(r[0]-(f(ib,-1)-f(ia,-1)))))
d = 0
for dr in r[1]:
err = abs(1-dr/f(dx,d))
tol = err_est(k, d)
assert_(err < tol, (k, d))
put(" %.1e %.1e" % (err, tol))
d = d+1
put("\n")
k = k+1
def check_3(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None,
ia=0,ib=2*pi,dx=0.2*pi):
if xb is None:
xb = a
if xe is None:
xe = b
x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
v = f(x)
put(" k : Roots of s(x) approx %s x in [%s,%s]:" %
(f(None),repr(round(a,3)),repr(round(b,3))))
for k in range(1,6):
tck = splrep(x, v, s=s, per=per, k=k, xe=xe)
if k == 3:
roots = sproot(tck)
assert_allclose(splev(roots, tck), 0, atol=1e-10, rtol=1e-10)
assert_allclose(roots, pi*array([1, 2, 3, 4]), rtol=1e-3)
put(' %d : %s' % (k, repr(roots.tolist())))
else:
assert_raises(ValueError, sproot, tck)
def check_4(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None,
ia=0,ib=2*pi,dx=0.2*pi):
if xb is None:
xb = a
if xe is None:
xe = b
x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
x1 = a + (b-a)*arange(1,N,dtype=float)/float(N-1) # middle points of the nodes
v,v1 = f(x),f(x1)
put(" u = %s N = %d" % (repr(round(dx,3)),N))
put(" k : [x(u), %s(x(u))] Error of splprep Error of splrep " % (f(0,None)))
for k in range(1,6):
tckp,u = splprep([x,v],s=s,per=per,k=k,nest=-1)
tck = splrep(x,v,s=s,per=per,k=k)
uv = splev(dx,tckp)
err1 = abs(uv[1]-f(uv[0]))
err2 = abs(splev(uv[0],tck)-f(uv[0]))
assert_(err1 < 1e-2)
assert_(err2 < 1e-2)
put(" %d : %s %.1e %.1e" %
(k,repr([round(z,3) for z in uv]),
err1,
err2))
put("Derivatives of parametric cubic spline at u (first function):")
k = 3
tckp,u = splprep([x,v],s=s,per=per,k=k,nest=-1)
for d in range(1,k+1):
uv = splev(dx,tckp,d)
put(" %s " % (repr(uv[0])))
def check_5(self,f=f2,kx=3,ky=3,xb=0,xe=2*pi,yb=0,ye=2*pi,Nx=20,Ny=20,s=0):
x = xb+(xe-xb)*arange(Nx+1,dtype=float)/float(Nx)
y = yb+(ye-yb)*arange(Ny+1,dtype=float)/float(Ny)
xy = makepairs(x,y)
tck = bisplrep(xy[0],xy[1],f(xy[0],xy[1]),s=s,kx=kx,ky=ky)
tt = [tck[0][kx:-kx],tck[1][ky:-ky]]
t2 = makepairs(tt[0],tt[1])
v1 = bisplev(tt[0],tt[1],tck)
v2 = f2(t2[0],t2[1])
v2.shape = len(tt[0]),len(tt[1])
err = norm2(ravel(v1-v2))
assert_(err < 1e-2, err)
put(err)
def test_smoke_splrep_splev(self):
put("***************** splrep/splev")
self.check_1(s=1e-6)
self.check_1()
self.check_1(at=1)
self.check_1(per=1)
self.check_1(per=1,at=1)
self.check_1(b=1.5*pi)
self.check_1(b=1.5*pi,xe=2*pi,per=1,s=1e-1)
def test_smoke_splint_spalde(self):
put("***************** splint/spalde")
self.check_2()
self.check_2(per=1)
self.check_2(ia=0.2*pi,ib=pi)
self.check_2(ia=0.2*pi,ib=pi,N=50)
def test_smoke_sproot(self):
put("***************** sproot")
self.check_3(a=0.1,b=15)
def test_smoke_splprep_splrep_splev(self):
put("***************** splprep/splrep/splev")
self.check_4()
self.check_4(N=50)
def test_smoke_bisplrep_bisplev(self):
put("***************** bisplev")
self.check_5()
class TestSplev(TestCase):
def test_1d_shape(self):
x = [1,2,3,4,5]
y = [4,5,6,7,8]
tck = splrep(x, y)
z = splev([1], tck)
assert_equal(z.shape, (1,))
z = splev(1, tck)
assert_equal(z.shape, ())
def test_2d_shape(self):
x = [1, 2, 3, 4, 5]
y = [4, 5, 6, 7, 8]
tck = splrep(x, y)
t = np.array([[1.0, 1.5, 2.0, 2.5],
[3.0, 3.5, 4.0, 4.5]])
z = splev(t, tck)
z0 = splev(t[0], tck)
z1 = splev(t[1], tck)
assert_equal(z, np.row_stack((z0, z1)))
def test_extrapolation_modes(self):
# test extrapolation modes
# * if ext=0, return the extrapolated value.
# * if ext=1, return 0
# * if ext=2, raise a ValueError
# * if ext=3, return the boundary value.
x = [1,2,3]
y = [0,2,4]
tck = splrep(x, y, k=1)
rstl = [[-2, 6], [0, 0], None, [0, 4]]
for ext in (0, 1, 3):
assert_array_almost_equal(splev([0, 4], tck, ext=ext), rstl[ext])
assert_raises(ValueError, splev, [0, 4], tck, ext=2)
class TestSplder(object):
def __init__(self):
# non-uniform grid, just to make it sure
x = np.linspace(0, 1, 100)**3
y = np.sin(20 * x)
self.spl = splrep(x, y)
# double check that knots are non-uniform
assert_(np.diff(self.spl[0]).ptp() > 0)
def test_inverse(self):
# Check that antiderivative + derivative is identity.
for n in range(5):
spl2 = splantider(self.spl, n)
spl3 = splder(spl2, n)
assert_allclose(self.spl[0], spl3[0])
assert_allclose(self.spl[1], spl3[1])
assert_equal(self.spl[2], spl3[2])
def test_splder_vs_splev(self):
# Check derivative vs. FITPACK
for n in range(3+1):
# Also extrapolation!
xx = np.linspace(-1, 2, 2000)
if n == 3:
# ... except that FITPACK extrapolates strangely for
# order 0, so let's not check that.
xx = xx[(xx >= 0) & (xx <= 1)]
dy = splev(xx, self.spl, n)
spl2 = splder(self.spl, n)
dy2 = splev(xx, spl2)
if n == 1:
assert_allclose(dy, dy2, rtol=2e-6)
else:
assert_allclose(dy, dy2)
def test_splantider_vs_splint(self):
# Check antiderivative vs. FITPACK
spl2 = splantider(self.spl)
# no extrapolation, splint assumes function is zero outside
# range
xx = np.linspace(0, 1, 20)
for x1 in xx:
for x2 in xx:
y1 = splint(x1, x2, self.spl)
y2 = splev(x2, spl2) - splev(x1, spl2)
assert_allclose(y1, y2)
def test_order0_diff(self):
assert_raises(ValueError, splder, self.spl, 4)
def test_kink(self):
# Should refuse to differentiate splines with kinks
spl2 = insert(0.5, self.spl, m=2)
splder(spl2, 2) # Should work
assert_raises(ValueError, splder, spl2, 3)
spl2 = insert(0.5, self.spl, m=3)
splder(spl2, 1) # Should work
assert_raises(ValueError, splder, spl2, 2)
spl2 = insert(0.5, self.spl, m=4)
assert_raises(ValueError, splder, spl2, 1)
class TestBisplrep(object):
def test_overflow(self):
a = np.linspace(0, 1, 620)
b = np.linspace(0, 1, 620)
x, y = np.meshgrid(a, b)
z = np.random.rand(*x.shape)
assert_raises(OverflowError, bisplrep, x.ravel(), y.ravel(), z.ravel(), s=0)
def test_regression_1310(self):
# Regression test for gh-1310
data = np.load(data_file('bug-1310.npz'))['data']
# Shouldn't crash -- the input data triggers work array sizes
# that caused previously some data to not be aligned on
# sizeof(double) boundaries in memory, which made the Fortran
# code to crash when compiled with -O3
bisplrep(data[:,0], data[:,1], data[:,2], kx=3, ky=3, s=0,
full_output=True)
def test_dblint():
# Basic test to see it runs and gives the correct result on a trivial
# problem. Note that `dblint` is not exposed in the interpolate namespace.
x = np.linspace(0, 1)
y = np.linspace(0, 1)
xx, yy = np.meshgrid(x, y)
rect = interpolate.RectBivariateSpline(x, y, 4 * xx * yy)
tck = list(rect.tck)
tck.extend(rect.degrees)
assert_almost_equal(dblint(0, 1, 0, 1, tck), 1)
assert_almost_equal(dblint(0, 0.5, 0, 1, tck), 0.25)
assert_almost_equal(dblint(0.5, 1, 0, 1, tck), 0.75)
assert_almost_equal(dblint(-100, 100, -100, 100, tck), 1)
def test_splev_der_k():
# regression test for gh-2188: splev(x, tck, der=k) gives garbage or crashes
# for x outside of knot range
# test case from gh-2188
tck = (np.array([0., 0., 2.5, 2.5]),
np.array([-1.56679978, 2.43995873, 0., 0.]),
1)
t, c, k = tck
x = np.array([-3, 0, 2.5, 3])
# an explicit form of the linear spline
assert_allclose(splev(x, tck), c[0] + (c[1] - c[0]) * x/t[2])
assert_allclose(splev(x, tck, 1), (c[1]-c[0]) / t[2])
# now check a random spline vs splder
np.random.seed(1234)
x = np.sort(np.random.random(30))
y = np.random.random(30)
t, c, k = splrep(x, y)
x = [t[0] - 1., t[-1] + 1.]
tck2 = splder((t, c, k), k)
assert_allclose(splev(x, (t, c, k), k), splev(x, tck2))
if __name__ == "__main__":
run_module_suite()
|
zjx20/googletest | refs/heads/master | scripts/upload_gtest.py | 1963 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
--cc=googletestframework@googlegroups.com to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = 'googletestframework@googlegroups.com'
def main():
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Test discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GTEST_GROUP not in cc_list:
cc_list.append(GTEST_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GTEST_GROUP)
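# Either way the gtest group ends up cc'd, e.g. '--cc=a@example.com'
# becomes '--cc=a@example.com,googletestframework@googlegroups.com'.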
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
|
ool2016-seclab/quarantineSystem | refs/heads/master | ryu/app/simple_switch_igmp.py | 58 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from ryu.base import app_manager
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.lib import addrconv
from ryu.lib import igmplib
from ryu.lib.dpid import str_to_dpid
class SimpleSwitchIgmp(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION]
_CONTEXTS = {'igmplib': igmplib.IgmpLib}
def __init__(self, *args, **kwargs):
super(SimpleSwitchIgmp, self).__init__(*args, **kwargs)
self.mac_to_port = {}
self._snoop = kwargs['igmplib']
# if you want a switch to operate as a querier,
# set up as follows:
self._snoop.set_querier_mode(
dpid=str_to_dpid('0000000000000001'), server_port=2)
# dpid the datapath id that will operate as a querier.
# server_port a port number which connect to the multicast
# server.
#
# NOTE: you can set up only one querier.
# when you call this method several times,
# only the last one becomes effective.
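# add_flow installs an exact match on (in_port, dl_dst) so that subsequent
# frames to a learned destination are forwarded without another packet-in.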
def add_flow(self, datapath, in_port, dst, actions):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch(in_port=in_port,
dl_dst=addrconv.mac.text_to_bin(dst))
mod = parser.OFPFlowMod(
datapath=datapath, match=match, cookie=0,
command=ofproto.OFPFC_ADD, actions=actions)
datapath.send_msg(mod)
@set_ev_cls(igmplib.EventPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
(dst_, src_, _eth_type) = struct.unpack_from(
'!6s6sH', buffer(msg.data), 0)
src = addrconv.mac.bin_to_text(src_)
dst = addrconv.mac.bin_to_text(dst_)
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
self.logger.info("packet in %s %s %s %s",
dpid, src, dst, msg.in_port)
# learn a mac address to avoid FLOOD next time.
self.mac_to_port[dpid][src] = msg.in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
actions = [datapath.ofproto_parser.OFPActionOutput(out_port)]
# install a flow to avoid packet_in next time
if out_port != ofproto.OFPP_FLOOD:
self.add_flow(datapath, msg.in_port, dst, actions)
out = datapath.ofproto_parser.OFPPacketOut(
datapath=datapath, buffer_id=msg.buffer_id, in_port=msg.in_port,
actions=actions)
datapath.send_msg(out)
@set_ev_cls(igmplib.EventMulticastGroupStateChanged,
MAIN_DISPATCHER)
def _status_changed(self, ev):
msg = {
igmplib.MG_GROUP_ADDED: 'Multicast Group Added',
igmplib.MG_MEMBER_CHANGED: 'Multicast Group Member Changed',
igmplib.MG_GROUP_REMOVED: 'Multicast Group Removed',
}
self.logger.info("%s: [%s] querier:[%s] hosts:%s",
msg.get(ev.reason), ev.address, ev.src,
ev.dsts)
|
kenshay/ImageScripter | refs/heads/master | ProgramData/SystemFiles/Python/Lib/site-packages/rope/refactor/functionutils.py | 24 | import rope.base.exceptions
import rope.base.pyobjects
from rope.base.builtins import Lambda
from rope.base import worder
class DefinitionInfo(object):
def __init__(self, function_name, is_method, args_with_defaults,
args_arg, keywords_arg):
self.function_name = function_name
self.is_method = is_method
self.args_with_defaults = args_with_defaults
self.args_arg = args_arg
self.keywords_arg = keywords_arg
def to_string(self):
return '%s(%s)' % (self.function_name, self.arguments_to_string())
def arguments_to_string(self, from_index=0):
params = []
for arg, default in self.args_with_defaults:
if default is not None:
params.append('%s=%s' % (arg, default))
else:
params.append(arg)
if self.args_arg is not None:
params.append('*' + self.args_arg)
if self.keywords_arg:
params.append('**' + self.keywords_arg)
return ', '.join(params[from_index:])
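# e.g. a DefinitionInfo for "def f(a, b=1, *args, **kwds)" renders via
# to_string() as "f(a, b=1, *args, **kwds)".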
@staticmethod
def _read(pyfunction, code):
kind = pyfunction.get_kind()
is_method = kind == 'method'
is_lambda = kind == 'lambda'
info = _FunctionParser(code, is_method, is_lambda)
args, keywords = info.get_parameters()
args_arg = None
keywords_arg = None
if args and args[-1].startswith('**'):
keywords_arg = args[-1][2:]
del args[-1]
if args and args[-1].startswith('*'):
args_arg = args[-1][1:]
del args[-1]
args_with_defaults = [(name, None) for name in args]
args_with_defaults.extend(keywords)
return DefinitionInfo(info.get_function_name(), is_method,
args_with_defaults, args_arg, keywords_arg)
@staticmethod
def read(pyfunction):
pymodule = pyfunction.get_module()
word_finder = worder.Worder(pymodule.source_code)
lineno = pyfunction.get_ast().lineno
start = pymodule.lines.get_line_start(lineno)
if isinstance(pyfunction, Lambda):
call = word_finder.get_lambda_and_args(start)
else:
call = word_finder.get_function_and_args_in_header(start)
return DefinitionInfo._read(pyfunction, call)
class CallInfo(object):
def __init__(self, function_name, args, keywords, args_arg,
keywords_arg, implicit_arg, constructor):
self.function_name = function_name
self.args = args
self.keywords = keywords
self.args_arg = args_arg
self.keywords_arg = keywords_arg
self.implicit_arg = implicit_arg
self.constructor = constructor
def to_string(self):
function = self.function_name
if self.implicit_arg:
function = self.args[0] + '.' + self.function_name
params = []
start = 0
if self.implicit_arg or self.constructor:
start = 1
if self.args[start:]:
params.extend(self.args[start:])
if self.keywords:
params.extend(['%s=%s' % (name, value)
for name, value in self.keywords])
if self.args_arg is not None:
params.append('*' + self.args_arg)
if self.keywords_arg:
params.append('**' + self.keywords_arg)
return '%s(%s)' % (function, ', '.join(params))
@staticmethod
def read(primary, pyname, definition_info, code):
is_method_call = CallInfo._is_method_call(primary, pyname)
is_constructor = CallInfo._is_class(pyname)
is_classmethod = CallInfo._is_classmethod(pyname)
info = _FunctionParser(code, is_method_call or is_classmethod)
args, keywords = info.get_parameters()
args_arg = None
keywords_arg = None
if args and args[-1].startswith('**'):
keywords_arg = args[-1][2:]
del args[-1]
if args and args[-1].startswith('*'):
args_arg = args[-1][1:]
del args[-1]
if is_constructor:
args.insert(0, definition_info.args_with_defaults[0][0])
return CallInfo(info.get_function_name(), args, keywords, args_arg,
keywords_arg, is_method_call or is_classmethod,
is_constructor)
@staticmethod
def _is_method_call(primary, pyname):
return primary is not None and \
isinstance(primary.get_object().get_type(),
rope.base.pyobjects.PyClass) and \
CallInfo._is_method(pyname)
@staticmethod
def _is_class(pyname):
return pyname is not None and \
isinstance(pyname.get_object(),
rope.base.pyobjects.PyClass)
@staticmethod
def _is_method(pyname):
if pyname is not None and \
isinstance(pyname.get_object(), rope.base.pyobjects.PyFunction):
return pyname.get_object().get_kind() == 'method'
return False
@staticmethod
def _is_classmethod(pyname):
if pyname is not None and \
isinstance(pyname.get_object(), rope.base.pyobjects.PyFunction):
return pyname.get_object().get_kind() == 'classmethod'
return False
class ArgumentMapping(object):
def __init__(self, definition_info, call_info):
self.call_info = call_info
self.param_dict = {}
self.keyword_args = []
self.args_arg = []
for index, value in enumerate(call_info.args):
if index < len(definition_info.args_with_defaults):
name = definition_info.args_with_defaults[index][0]
self.param_dict[name] = value
else:
self.args_arg.append(value)
for name, value in call_info.keywords:
for pair in definition_info.args_with_defaults:
if pair[0] == name:
self.param_dict[name] = value
break
else:
self.keyword_args.append((name, value))
def to_call_info(self, definition_info):
args = []
keywords = []
for index in range(len(definition_info.args_with_defaults)):
name = definition_info.args_with_defaults[index][0]
if name in self.param_dict:
args.append(self.param_dict[name])
else:
for i in range(index, len(definition_info.args_with_defaults)):
name = definition_info.args_with_defaults[i][0]
if name in self.param_dict:
keywords.append((name, self.param_dict[name]))
break
args.extend(self.args_arg)
keywords.extend(self.keyword_args)
return CallInfo(self.call_info.function_name, args, keywords,
self.call_info.args_arg, self.call_info.keywords_arg,
self.call_info.implicit_arg,
self.call_info.constructor)
class _FunctionParser(object):
def __init__(self, call, implicit_arg, is_lambda=False):
self.call = call
self.implicit_arg = implicit_arg
self.word_finder = worder.Worder(self.call)
if is_lambda:
self.last_parens = self.call.rindex(':')
else:
self.last_parens = self.call.rindex(')')
self.first_parens = self.word_finder._find_parens_start(
self.last_parens)
def get_parameters(self):
args, keywords = self.word_finder.get_parameters(self.first_parens,
self.last_parens)
if self.is_called_as_a_method():
instance = self.call[:self.call.rindex('.', 0, self.first_parens)]
args.insert(0, instance.strip())
return args, keywords
def get_instance(self):
if self.is_called_as_a_method():
return self.word_finder.get_primary_at(
self.call.rindex('.', 0, self.first_parens) - 1)
def get_function_name(self):
if self.is_called_as_a_method():
return self.word_finder.get_word_at(self.first_parens - 1)
else:
return self.word_finder.get_primary_at(self.first_parens - 1)
def is_called_as_a_method(self):
return self.implicit_arg and '.' in self.call[:self.first_parens]
|
infoxchange/lettuce | refs/heads/master | tests/integration/lib/Django-1.3/django/db/backends/mysql/creation.py | 311 | from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated MySQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'integer AUTO_INCREMENT',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer UNSIGNED',
'PositiveSmallIntegerField': 'smallint UNSIGNED',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'longtext',
'TimeField': 'time',
}
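# e.g. a CharField(max_length=100) interpolates to the column type 'varchar(100)'.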
def sql_table_creation_suffix(self):
suffix = []
if self.connection.settings_dict['TEST_CHARSET']:
suffix.append('CHARACTER SET %s' % self.connection.settings_dict['TEST_CHARSET'])
if self.connection.settings_dict['TEST_COLLATION']:
suffix.append('COLLATE %s' % self.connection.settings_dict['TEST_COLLATION'])
return ' '.join(suffix)
def sql_for_inline_foreign_key_references(self, field, known_models, style):
"All inline references are pending under MySQL"
return [], True
def sql_for_inline_many_to_many_references(self, model, field, style):
from django.db import models
opts = model._meta
qn = self.connection.ops.quote_name
table_output = [
' %s %s %s,' %
(style.SQL_FIELD(qn(field.m2m_column_name())),
style.SQL_COLTYPE(models.ForeignKey(model).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL')),
' %s %s %s,' %
(style.SQL_FIELD(qn(field.m2m_reverse_name())),
style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL'))
]
deferred = [
(field.m2m_db_table(), field.m2m_column_name(), opts.db_table,
opts.pk.column),
(field.m2m_db_table(), field.m2m_reverse_name(),
field.rel.to._meta.db_table, field.rel.to._meta.pk.column)
]
return table_output, deferred
|
yuhangwang/meson | refs/heads/master | test cases/common/64 custom header generator/makeheader.py | 9 | #!/usr/bin/env python3
# NOTE: this file does not have the executable bit set. This tests that
# Meson can automatically parse shebang lines.
import sys
template = '#define RET_VAL %s\n'
output = template % (open(sys.argv[1]).readline().strip())
open(sys.argv[2], 'w').write(output)
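# e.g. an input file whose first line is "42" produces "#define RET_VAL 42".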
|
tedder/ansible | refs/heads/devel | lib/ansible/modules/crypto/acme/acme_certificate_revoke.py | 9 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016 Michael Gruener <michael.gruener@chaosmoon.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: acme_certificate_revoke
author: "Felix Fontein (@felixfontein)"
version_added: "2.7"
short_description: Revoke certificates with the ACME protocol
description:
- "Allows to revoke certificates issued by a CA supporting the
L(ACME protocol,https://tools.ietf.org/html/draft-ietf-acme-acme-18),
such as L(Let's Encrypt,https://letsencrypt.org/)."
notes:
- "Exactly one of C(account_key_src), C(account_key_content),
C(private_key_src) or C(private_key_content) must be specified."
- "Trying to revoke an already revoked certificate
should result in an unchanged status, even if the revocation reason
was different than the one specified here. Also, depending on the
server, it can happen that some other error is returned if the
certificate has already been revoked."
seealso:
- name: The Let's Encrypt documentation
description: Documentation for the Let's Encrypt Certification Authority.
Provides useful information for example on rate limits.
link: https://letsencrypt.org/docs/
- name: Automatic Certificate Management Environment (ACME)
description: The current draft specification of the ACME protocol.
link: https://tools.ietf.org/html/draft-ietf-acme-acme-18
- module: acme_inspect
description: Allows to debug problems.
extends_documentation_fragment:
- acme
options:
certificate:
description:
- "Path to the certificate to revoke."
required: yes
account_key_src:
description:
- "Path to a file containing the ACME account RSA or Elliptic Curve
key."
- "RSA keys can be created with C(openssl rsa ...). Elliptic curve keys can
be created with C(openssl ecparam -genkey ...). Any other tool creating
private keys in PEM format can be used as well."
- "Mutually exclusive with C(account_key_content)."
- "Required if C(account_key_content) is not used."
account_key_content:
description:
- "Content of the ACME account RSA or Elliptic Curve key."
- "Note that exactly one of C(account_key_src), C(account_key_content),
C(private_key_src) or C(private_key_content) must be specified."
- "I(Warning): the content will be written into a temporary file, which will
be deleted by Ansible when the module completes. Since this is an
important private key — it can be used to change the account key,
or to revoke your certificates without knowing their private keys
—, this might not be acceptable."
- "In case C(cryptography) is used, the content is not written into a
temporary file. It can still happen that it is written to disk by
Ansible in the process of moving the module with its argument to
the node where it is executed."
private_key_src:
description:
- "Path to the certificate's private key."
- "Note that exactly one of C(account_key_src), C(account_key_content),
C(private_key_src) or C(private_key_content) must be specified."
private_key_content:
description:
- "Content of the certificate's private key."
- "Note that exactly one of C(account_key_src), C(account_key_content),
C(private_key_src) or C(private_key_content) must be specified."
- "I(Warning): the content will be written into a temporary file, which will
be deleted by Ansible when the module completes. Since this is an
important private key — it can be used to change the account key,
or to revoke your certificates without knowing their private keys
—, this might not be acceptable."
- "In case C(cryptography) is used, the content is not written into a
temporary file. It can still happen that it is written to disk by
Ansible in the process of moving the module with its argument to
the node where it is executed."
revoke_reason:
description:
- "One of the revocation reasonCodes defined in
L(https://tools.ietf.org/html/rfc5280#section-5.3.1, Section 5.3.1 of RFC5280)."
- "Possible values are C(0) (unspecified), C(1) (keyCompromise),
C(2) (cACompromise), C(3) (affiliationChanged), C(4) (superseded),
C(5) (cessationOfOperation), C(6) (certificateHold),
C(8) (removeFromCRL), C(9) (privilegeWithdrawn),
C(10) (aACompromise)"
'''
EXAMPLES = '''
- name: Revoke certificate with account key
acme_certificate_revoke:
account_key_src: /etc/pki/cert/private/account.key
certificate: /etc/httpd/ssl/sample.com.crt
- name: Revoke certificate with certificate's private key
acme_certificate_revoke:
private_key_src: /etc/httpd/ssl/sample.com.key
certificate: /etc/httpd/ssl/sample.com.crt
'''
RETURN = '''
'''
from ansible.module_utils.acme import (
ModuleFailException, ACMEAccount, nopad_b64, pem_to_der, set_crypto_backend,
)
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
account_key_src=dict(type='path', aliases=['account_key']),
account_key_content=dict(type='str', no_log=True),
account_uri=dict(required=False, type='str'),
acme_directory=dict(required=False, default='https://acme-staging.api.letsencrypt.org/directory', type='str'),
acme_version=dict(required=False, default=1, choices=[1, 2], type='int'),
validate_certs=dict(required=False, default=True, type='bool'),
private_key_src=dict(type='path'),
private_key_content=dict(type='str', no_log=True),
certificate=dict(required=True, type='path'),
revoke_reason=dict(required=False, type='int'),
select_crypto_backend=dict(required=False, choices=['auto', 'openssl', 'cryptography'], default='auto', type='str'),
),
required_one_of=(
['account_key_src', 'account_key_content', 'private_key_src', 'private_key_content'],
),
mutually_exclusive=(
['account_key_src', 'account_key_content', 'private_key_src', 'private_key_content'],
),
supports_check_mode=False,
)
set_crypto_backend(module)
if not module.params.get('validate_certs'):
module.warn(warning='Disabling certificate validation for communications with ACME endpoint. ' +
'This should only be done for testing against a local ACME server for ' +
'development purposes, but *never* for production purposes.')
try:
account = ACMEAccount(module)
# Load certificate
certificate = pem_to_der(module.params.get('certificate'))
certificate = nopad_b64(certificate)
# Construct payload
payload = {
'certificate': certificate
}
if module.params.get('revoke_reason') is not None:
payload['reason'] = module.params.get('revoke_reason')
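# e.g. revoking a superseded certificate sends
# {"certificate": "<base64url DER>", "reason": 4}.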
# Determine endpoint
if module.params.get('acme_version') == 1:
endpoint = account.directory['revoke-cert']
payload['resource'] = 'revoke-cert'
else:
endpoint = account.directory['revokeCert']
# Get hold of private key (if available) and make sure it comes from disk
private_key = module.params.get('private_key_src')
private_key_content = module.params.get('private_key_content')
# Revoke certificate
if private_key or private_key_content:
# Step 1: load and parse private key
error, private_key_data = account.parse_key(private_key, private_key_content)
if error:
raise ModuleFailException("error while parsing private key: %s" % error)
# Step 2: sign revocation request with private key
jws_header = {
"alg": private_key_data['alg'],
"jwk": private_key_data['jwk'],
}
result, info = account.send_signed_request(endpoint, payload, key_data=private_key_data, jws_header=jws_header)
else:
# Step 1: get hold of account URI
created, account_data = account.setup_account(allow_creation=False)
if created:
raise AssertionError('Unwanted account creation')
if account_data is None:
raise ModuleFailException(msg='Account does not exist or is deactivated.')
# Step 2: sign revocation request with account key
result, info = account.send_signed_request(endpoint, payload)
if info['status'] != 200:
already_revoked = False
# Standardized error from draft 14 on (https://tools.ietf.org/html/draft-ietf-acme-acme-18#section-7.6)
if result.get('type') == 'urn:ietf:params:acme:error:alreadyRevoked':
already_revoked = True
else:
# Hack for Boulder errors
if module.params.get('acme_version') == 1:
error_type = 'urn:acme:error:malformed'
else:
error_type = 'urn:ietf:params:acme:error:malformed'
if result.get('type') == error_type and result.get('detail') == 'Certificate already revoked':
# Fallback: boulder returns this in case the certificate was already revoked.
already_revoked = True
# If we know the certificate was already revoked, we don't fail,
# but successfully terminate while indicating no change
if already_revoked:
module.exit_json(changed=False)
raise ModuleFailException('Error revoking certificate: {0} {1}'.format(info['status'], result))
module.exit_json(changed=True)
except ModuleFailException as e:
e.do_fail(module)
if __name__ == '__main__':
main()
|
ZHAW-INES/rioxo-uClinux-dist | refs/heads/rtsp | user/python/python-2.4.4/Lib/plat-mac/Carbon/OSAconst.py | 81 | # Generated from 'OSA.h'
def FOUR_CHAR_CODE(x): return x
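# FOUR_CHAR_CODE is the identity here: the four-character OSType literals
# below (e.g. 'osa ') are already plain strings on the Python side.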
from Carbon.AppleEvents import *
kAEUseStandardDispatch = -1
kOSAComponentType = FOUR_CHAR_CODE('osa ')
kOSAGenericScriptingComponentSubtype = FOUR_CHAR_CODE('scpt')
kOSAFileType = FOUR_CHAR_CODE('osas')
kOSASuite = FOUR_CHAR_CODE('ascr')
kOSARecordedText = FOUR_CHAR_CODE('recd')
kOSAScriptIsModified = FOUR_CHAR_CODE('modi')
kOSAScriptIsTypeCompiledScript = FOUR_CHAR_CODE('cscr')
kOSAScriptIsTypeScriptValue = FOUR_CHAR_CODE('valu')
kOSAScriptIsTypeScriptContext = FOUR_CHAR_CODE('cntx')
kOSAScriptBestType = FOUR_CHAR_CODE('best')
kOSACanGetSource = FOUR_CHAR_CODE('gsrc')
typeOSADialectInfo = FOUR_CHAR_CODE('difo')
keyOSADialectName = FOUR_CHAR_CODE('dnam')
keyOSADialectCode = FOUR_CHAR_CODE('dcod')
keyOSADialectLangCode = FOUR_CHAR_CODE('dlcd')
keyOSADialectScriptCode = FOUR_CHAR_CODE('dscd')
kOSANullScript = 0L
kOSANullMode = 0
kOSAModeNull = 0
kOSASupportsCompiling = 0x0002
kOSASupportsGetSource = 0x0004
kOSASupportsAECoercion = 0x0008
kOSASupportsAESending = 0x0010
kOSASupportsRecording = 0x0020
kOSASupportsConvenience = 0x0040
kOSASupportsDialects = 0x0080
kOSASupportsEventHandling = 0x0100
kOSASelectLoad = 0x0001
kOSASelectStore = 0x0002
kOSASelectExecute = 0x0003
kOSASelectDisplay = 0x0004
kOSASelectScriptError = 0x0005
kOSASelectDispose = 0x0006
kOSASelectSetScriptInfo = 0x0007
kOSASelectGetScriptInfo = 0x0008
kOSASelectSetActiveProc = 0x0009
kOSASelectGetActiveProc = 0x000A
kOSASelectScriptingComponentName = 0x0102
kOSASelectCompile = 0x0103
kOSASelectCopyID = 0x0104
kOSASelectCopyScript = 0x0105
kOSASelectGetSource = 0x0201
kOSASelectCoerceFromDesc = 0x0301
kOSASelectCoerceToDesc = 0x0302
kOSASelectSetSendProc = 0x0401
kOSASelectGetSendProc = 0x0402
kOSASelectSetCreateProc = 0x0403
kOSASelectGetCreateProc = 0x0404
kOSASelectSetDefaultTarget = 0x0405
kOSASelectStartRecording = 0x0501
kOSASelectStopRecording = 0x0502
kOSASelectLoadExecute = 0x0601
kOSASelectCompileExecute = 0x0602
kOSASelectDoScript = 0x0603
kOSASelectSetCurrentDialect = 0x0701
kOSASelectGetCurrentDialect = 0x0702
kOSASelectAvailableDialects = 0x0703
kOSASelectGetDialectInfo = 0x0704
kOSASelectAvailableDialectCodeList = 0x0705
kOSASelectSetResumeDispatchProc = 0x0801
kOSASelectGetResumeDispatchProc = 0x0802
kOSASelectExecuteEvent = 0x0803
kOSASelectDoEvent = 0x0804
kOSASelectMakeContext = 0x0805
kOSADebuggerCreateSession = 0x0901
kOSADebuggerGetSessionState = 0x0902
kOSADebuggerSessionStep = 0x0903
kOSADebuggerDisposeSession = 0x0904
kOSADebuggerGetStatementRanges = 0x0905
kOSADebuggerGetBreakpoint = 0x0910
kOSADebuggerSetBreakpoint = 0x0911
kOSADebuggerGetDefaultBreakpoint = 0x0912
kOSADebuggerGetCurrentCallFrame = 0x0906
kOSADebuggerGetCallFrameState = 0x0907
kOSADebuggerGetVariable = 0x0908
kOSADebuggerSetVariable = 0x0909
kOSADebuggerGetPreviousCallFrame = 0x090A
kOSADebuggerDisposeCallFrame = 0x090B
kOSADebuggerCountVariables = 0x090C
kOSASelectComponentSpecificStart = 0x1001
kOSAModePreventGetSource = 0x00000001
kOSAModeNeverInteract = kAENeverInteract
kOSAModeCanInteract = kAECanInteract
kOSAModeAlwaysInteract = kAEAlwaysInteract
kOSAModeDontReconnect = kAEDontReconnect
kOSAModeCantSwitchLayer = 0x00000040
kOSAModeDoRecord = 0x00001000
kOSAModeCompileIntoContext = 0x00000002
kOSAModeAugmentContext = 0x00000004
kOSAModeDisplayForHumans = 0x00000008
kOSAModeDontStoreParent = 0x00010000
kOSAModeDispatchToDirectObject = 0x00020000
kOSAModeDontGetDataForArguments = 0x00040000
kOSAScriptResourceType = kOSAGenericScriptingComponentSubtype
typeOSAGenericStorage = kOSAScriptResourceType
kOSAErrorNumber = keyErrorNumber
kOSAErrorMessage = keyErrorString
kOSAErrorBriefMessage = FOUR_CHAR_CODE('errb')
kOSAErrorApp = FOUR_CHAR_CODE('erap')
kOSAErrorPartialResult = FOUR_CHAR_CODE('ptlr')
kOSAErrorOffendingObject = FOUR_CHAR_CODE('erob')
kOSAErrorExpectedType = FOUR_CHAR_CODE('errt')
kOSAErrorRange = FOUR_CHAR_CODE('erng')
typeOSAErrorRange = FOUR_CHAR_CODE('erng')
keyOSASourceStart = FOUR_CHAR_CODE('srcs')
keyOSASourceEnd = FOUR_CHAR_CODE('srce')
kOSAUseStandardDispatch = kAEUseStandardDispatch
kOSANoDispatch = kAENoDispatch
kOSADontUsePhac = 0x0001
eNotStarted = 0
eRunnable = 1
eRunning = 2
eStopped = 3
eTerminated = 4
eStepOver = 0
eStepIn = 1
eStepOut = 2
eRun = 3
eLocal = 0
eGlobal = 1
eProperties = 2
keyProgramState = FOUR_CHAR_CODE('dsps')
typeStatementRange = FOUR_CHAR_CODE('srng')
keyProcedureName = FOUR_CHAR_CODE('dfnm')
keyStatementRange = FOUR_CHAR_CODE('dfsr')
keyLocalsNames = FOUR_CHAR_CODE('dfln')
keyGlobalsNames = FOUR_CHAR_CODE('dfgn')
keyParamsNames = FOUR_CHAR_CODE('dfpn')
|
0xffea/keystone | refs/heads/master | setup.py | 1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import setuptools
from keystone.openstack.common import setup
requires = setup.parse_requirements()
depend_links = setup.parse_dependency_links()
project = 'keystone'
setuptools.setup(
name=project,
version=setup.get_version(project, '2013.1'),
description="Authentication service for OpenStack",
license='Apache License (2.0)',
author='OpenStack, LLC.',
author_email='openstack@lists.launchpad.net',
url='http://www.openstack.org',
cmdclass=setup.get_cmdclass(),
packages=setuptools.find_packages(exclude=['test', 'bin']),
include_package_data=True,
scripts=['bin/keystone-all', 'bin/keystone-manage'],
zip_safe=False,
install_requires=requires,
dependency_links=depend_links,
test_suite='nose.collector',
classifiers=[
'Environment :: OpenStack',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
)
|
voidspace/juju | refs/heads/master | scripts/jujuman.py | 27 | """Functions for generating the manpage using the juju command."""
import subprocess
import textwrap
import time
class JujuMan(object):
def __init__(self):
self.version = self._version()
def get_filename(self, options):
"""Provides name of manpage"""
return 'juju.1'
def run_juju(self, *args):
cmd = ['juju'] + list(args)
return subprocess.check_output(cmd).strip()
def _version(self):
juju_version = self.run_juju('version')
return juju_version.split('-')[0]
def commands(self):
commands = self.run_juju('help', 'commands')
result = []
for line in commands.split('\n'):
name, short_help = line.split(' ', 1)
if 'alias for' in short_help:
continue
result.append((name, short_help.strip()))
return result
def write_documentation(self, options, outfile):
"""Assembles a man page"""
t = time.time()
tt = time.gmtime(t)
params = {
"cmd": "juju",
"datestamp": time.strftime("%Y-%m-%d",tt),
"timestamp": time.strftime("%Y-%m-%d %H:%M:%S +0000",tt),
"version": self.version,
}
outfile.write(man_preamble % params)
outfile.write(man_escape(man_head % params))
outfile.write(man_escape(self.getcommand_list(params)))
outfile.write("".join(environment_variables()))
outfile.write(man_escape(man_foot % params))
def getcommand_list(self, params):
"""Builds summary help for command names in manpage format"""
output = '.SH "COMMAND OVERVIEW"\n'
for cmd_name, short_help in self.commands():
tmp = '.TP\n.B "%s %s"\n%s\n' % (params['cmd'], cmd_name, short_help)
output = output + tmp
return output
ENVIRONMENT = (
('JUJU_ENV', textwrap.dedent("""\
Provides a way for the shell environment to specify the current Juju
environment to use. If the environment is specified explicitly using
-e ENV, this takes precedence.
""")),
('JUJU_HOME', textwrap.dedent("""\
Overrides the default Juju configuration directory of ~/.juju.
""")),
('AWS_ACCESS_KEY_ID', textwrap.dedent("""\
The access-key for your AWS account.
""")),
('AWS_SECRET_ACCESS_KEY', textwrap.dedent("""\
The secret-key for your AWS account.
""")),
('OS_USERNAME', textwrap.dedent("""\
Your openstack username.
""")),
('OS_PASSWORD', textwrap.dedent("""\
Your openstack password.
""")),
('OS_TENANT_NAME', textwrap.dedent("""\
Your openstack tenant name.
""")),
('OS_REGION_NAME', textwrap.dedent("""\
Your openstack region name.
""")),
)
def man_escape(string):
"""Escapes strings for man page compatibility"""
result = string.replace("\\","\\\\")
result = result.replace("`","\\'")
result = result.replace("'","\\*(Aq")
result = result.replace("-","\\-")
return result
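# A quick illustration of the escaping above (sketch, not part of the
# original module): straight quotes and hyphens become groff escapes, e.g.
#   man_escape("juju's --help text") == "juju\\*(Aqs \\-\\-help text"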
def environment_variables():
yield ".SH \"ENVIRONMENT\"\n"
for k, desc in ENVIRONMENT:
yield ".TP\n"
yield ".I \"%s\"\n" % k
yield man_escape(desc) + "\n"
man_preamble = """\
.\\\"Man page for Juju (%(cmd)s)
.\\\"
.\\\" Large parts of this file are autogenerated from the output of
.\\\" \"%(cmd)s help commands\"
.\\\" \"%(cmd)s help <cmd>\"
.\\\"
.\\\" Generation time: %(timestamp)s
.\\\"
.ie \\n(.g .ds Aq \\(aq
.el .ds Aq '
"""
man_head = """\
.TH %(cmd)s 1 "%(datestamp)s" "%(version)s" "Juju"
.SH "NAME"
%(cmd)s - Juju -- devops distilled
.SH "SYNOPSIS"
.B "%(cmd)s"
.I "command"
[
.I "command_options"
]
.br
.B "%(cmd)s"
.B "help"
.br
.B "%(cmd)s"
.B "help"
.I "command"
.SH "DESCRIPTION"
Juju provides easy, intelligent service orchestration on top of environments
such as OpenStack, Amazon AWS, or bare metal.
"""
man_foot = """\
.SH "FILES"
.TP
.I "~/.juju/environments.yaml"
This is the Juju config file, which you can use to specify multiple
environments in which to deploy.
A config file can be created using
.B juju init
which you can then edit to provide the secret keys, or use environment
variables to provide the secret values.
.SH "SEE ALSO"
.UR https://juju.ubuntu.com/
.BR https://juju.ubuntu.com/
"""
|
shishaochen/TensorFlow-0.8-Win | refs/heads/master | tensorflow/python/kernel_tests/sparse_xent_op_test.py | 7 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseSoftmaxCrossEntropyWithLogits op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import sparse_ops
class SparseXentTest(tf.test.TestCase):
def _npXent(self, features, labels):
batch_dim = 0
class_dim = 1
batch_size = features.shape[batch_dim]
e = np.exp(features -
np.reshape(np.amax(features, axis=class_dim), [batch_size, 1]))
probs = e / np.reshape(np.sum(e, axis=class_dim), [batch_size, 1])
labels_mat = np.zeros_like(probs).astype(probs.dtype)
labels_mat[np.arange(batch_size), labels] = 1.0
bp = (probs - labels_mat)
l = -np.sum(labels_mat * np.log(probs + 1.0e-20), axis=1)
return l, bp
def _testXent(self, np_features, np_labels, use_gpu=False):
np_loss, np_backprop = self._npXent(np_features, np_labels)
with self.test_session(use_gpu=use_gpu) as sess:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
np_features, np_labels)
backprop = loss.op.outputs[1]
tf_loss, tf_backprop = sess.run([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def _testAll(self, features, labels):
self._testXent(features, labels, use_gpu=False)
self._testXent(features, labels, use_gpu=True)
def _testSingleClass(self, use_gpu=False):
for label_dtype in np.int32, np.int64:
with self.test_session(use_gpu=use_gpu) as sess:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
np.array([[1.], [-1.], [0.]]).astype(np.float32),
np.array([0, 0, 0]).astype(label_dtype))
backprop = loss.op.outputs[1]
tf_loss, tf_backprop = sess.run([loss, backprop])
self.assertAllClose([0.0, 0.0, 0.0], tf_loss)
self.assertAllClose([[0.0], [0.0], [0.0]], tf_backprop)
def testSingleClass(self):
self._testSingleClass(use_gpu=True)
self._testSingleClass(use_gpu=False)
def testRankTooLarge(self):
np_features = np.array(
[[[1., 1., 1., 1.]], [[1., 2., 3., 4.]]]).astype(np.float32)
np_labels = np.array([1, 2])
self.assertRaisesRegexp(
ValueError, "must have rank 2",
tf.nn.sparse_softmax_cross_entropy_with_logits, np_features, np_labels)
def testNpXent(self):
# We create 2 batches of logits for testing.
# batch 0 is the boring uniform distribution: 1, 1, 1, 1, with target 3.
# batch 1 has a bit of difference: 1, 2, 3, 4, with target 0.
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
labels = [3, 0]
# For batch 0, we expect the uniform distribution: 0.25, 0.25, 0.25, 0.25
# With a hard target 3, the backprop is [0.25, 0.25, 0.25, -0.75]
# The loss for this batch is -log(0.25) = 1.386
#
# For batch 1, we have:
# exp(0) = 1
# exp(1) = 2.718
# exp(2) = 7.389
# exp(3) = 20.085
# SUM = 31.192
# So we have as probabilities:
# exp(0) / SUM = 0.032
# exp(1) / SUM = 0.087
# exp(2) / SUM = 0.237
# exp(3) / SUM = 0.644
# With a hard target 0, the backprop is [0.032 - 1.0 = -0.968, 0.087, 0.237, 0.644]
# The loss for this batch is [1.0 * -log(0.25), 1.0 * -log(0.032)]
# = [1.3862, 3.4420]
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
self.assertAllClose(np.array([[0.25, 0.25, 0.25, -0.75],
[-0.968, 0.087, 0.237, 0.6439]]),
np_backprop,
rtol=1.e-3, atol=1.e-3)
self.assertAllClose(np.array([1.3862, 3.4420]), np_loss,
rtol=1.e-3, atol=1.e-3)
def testShapeMismatch(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.nn.sparse_softmax_cross_entropy_with_logits(
[[0., 1.], [2., 3.]], [[0, 2]])
def testNotMatrix(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.nn.sparse_softmax_cross_entropy_with_logits(
[0., 1., 2., 3.], [0, 2])
def testFloat(self):
for label_dtype in np.int32, np.int64:
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32),
np.array([3, 0]).astype(label_dtype))
def testDouble(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
np.array([0, 3]).astype(label_dtype),
use_gpu=False)
def testHalf(self):
for label_dtype in np.int32, np.int64:
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([3, 0]).astype(label_dtype))
def testGradient(self):
with self.test_session():
l = tf.constant([3, 0, 1], name="l")
f = tf.constant([0.1, 0.2, 0.3, 0.4,
0.1, 0.4, 0.9, 1.6,
0.1, 0.8, 2.7, 6.4], shape=[3, 4],
dtype=tf.float64, name="f")
x = tf.nn.sparse_softmax_cross_entropy_with_logits(f, l, name="xent")
err = tf.test.compute_gradient_error(f, [3, 4], x, [3])
print("cross entropy gradient err = ", err)
self.assertLess(err, 5e-8)
def _sparse_vs_dense_xent_benchmark_dense(labels, logits):
labels = tf.identity(labels)
logits = tf.identity(logits)
with tf.device("/cpu:0"): # Sparse-to-dense must be on CPU
batch_size = tf.shape(logits)[0]
num_entries = tf.shape(logits)[1]
length = batch_size * num_entries
labels += num_entries * tf.range(batch_size)
target = sparse_ops.sparse_to_dense(
labels, tf.pack([length]), 1.0, 0.0)
target = tf.reshape(target, tf.pack([-1, num_entries]))
crossent = tf.nn.softmax_cross_entropy_with_logits(
logits, target, name="SequenceLoss/CrossEntropy")
crossent_sum = tf.reduce_sum(crossent)
grads = tf.gradients([crossent_sum], [logits])[0]
return (crossent_sum, grads)
def _sparse_vs_dense_xent_benchmark_sparse(labels, logits):
# Using sparse_softmax_cross_entropy_with_logits
labels = labels.astype(np.int64)
labels = tf.identity(labels)
logits = tf.identity(logits)
crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, labels, name="SequenceLoss/CrossEntropy")
crossent_sum = tf.reduce_sum(crossent)
grads = tf.gradients([crossent_sum], [logits])[0]
return (crossent_sum, grads)
def sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu):
config = tf.ConfigProto()
config.allow_soft_placement = True
config.gpu_options.per_process_gpu_memory_fraction = 0.3
labels = np.random.randint(num_entries, size=batch_size).astype(np.int32)
logits = np.random.randn(batch_size, num_entries).astype(np.float32)
def _timer(sess, ops):
# Warm up
for _ in range(20):
sess.run(ops)
# Timing run
start = time.time()
for _ in range(20):
sess.run(ops)
end = time.time()
return (end - start)/20.0 # Average runtime per iteration
# Using sparse_to_dense and softmax_cross_entropy_with_logits
with tf.Session(config=config) as sess:
if not use_gpu:
with tf.device("/cpu:0"):
ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
else:
ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
delta_dense = _timer(sess, ops)
# Using sparse_softmax_cross_entropy_with_logits
with tf.Session(config=config) as sess:
if not use_gpu:
with tf.device("/cpu:0"):
ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
else:
ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
delta_sparse = _timer(sess, ops)
print(
"%d \t %d \t %s \t %f \t %f \t %f"
% (batch_size, num_entries, use_gpu, delta_dense, delta_sparse,
delta_sparse/delta_dense))
def main(_):
print("Sparse Xent vs. SparseToDense + Xent")
print("batch \t depth \t gpu \t dt(dense) \t dt(sparse) "
"\t dt(sparse)/dt(dense)")
for use_gpu in (False, True):
for batch_size in (32, 64, 128):
for num_entries in (100, 1000, 10000):
sparse_vs_dense_xent_benchmark(
batch_size, num_entries, use_gpu)
sparse_vs_dense_xent_benchmark(
32, 100000, use_gpu)
sparse_vs_dense_xent_benchmark(
8, 1000000, use_gpu)
if __name__ == "__main__":
if "--benchmarks" in sys.argv:
sys.argv.remove("--benchmarks")
tf.app.run()
else:
tf.test.main()
|
lucasvo/django-oembepdlugin | refs/heads/master | bootstrap.py | 21 | ##############################################################################
#
# Copyright (c) 2006 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
$Id$
"""
import os, shutil, sys, tempfile, urllib2
tmpeggs = tempfile.mkdtemp()
is_jython = sys.platform.startswith('java')
try:
import pkg_resources
except ImportError:
ez = {}
exec urllib2.urlopen('http://peak.telecommunity.com/dist/ez_setup.py'
).read() in ez
ez['use_setuptools'](to_dir=tmpeggs, download_delay=0)
import pkg_resources
if sys.platform == 'win32':
def quote(c):
if ' ' in c:
return '"%s"' % c # work around spawn lamosity on windows
else:
return c
else:
def quote(c):
return c
cmd = 'from setuptools.command.easy_install import main; main()'
ws = pkg_resources.working_set
if len(sys.argv) > 2 and sys.argv[1] == '--version':
VERSION = '==%s' % sys.argv[2]
args = sys.argv[3:] + ['bootstrap']
else:
VERSION = ''
args = sys.argv[1:] + ['bootstrap']
if is_jython:
import subprocess
assert subprocess.Popen([sys.executable] + ['-c', quote(cmd), '-mqNxd',
quote(tmpeggs), 'zc.buildout' + VERSION],
env=dict(os.environ,
PYTHONPATH=
ws.find(pkg_resources.Requirement.parse('setuptools')).location
),
).wait() == 0
else:
assert os.spawnle(
os.P_WAIT, sys.executable, quote (sys.executable),
'-c', quote (cmd), '-mqNxd', quote (tmpeggs), 'zc.buildout' + VERSION,
dict(os.environ,
PYTHONPATH=
ws.find(pkg_resources.Requirement.parse('setuptools')).location
),
) == 0
ws.add_entry(tmpeggs)
ws.require('zc.buildout' + VERSION)
import zc.buildout.buildout
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
|
Code4Maine/volunteer-coordination | refs/heads/master | volunteerhub/urls.py | 1 | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
(r'^admin/', include(admin.site.urls)),
(r'^accounts/', include('allauth.urls')),
(r'^ckeditor/', include('ckeditor.urls')),
(r'^', include('volunteers.urls')),
)
|
jgrocha/QGIS | refs/heads/master | python/plugins/processing/algs/gdal/rasterize_over.py | 14 | # -*- coding: utf-8 -*-
"""
***************************************************************************
rasterize_over.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterField,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterNumber,
QgsProcessingParameterString,
QgsProcessingParameterBoolean,
QgsProcessingOutputRasterLayer)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class rasterize_over(GdalAlgorithm):
INPUT = 'INPUT'
FIELD = 'FIELD'
INPUT_RASTER = 'INPUT_RASTER'
ADD = 'ADD'
EXTRA = 'EXTRA'
OUTPUT = 'OUTPUT'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Input vector layer')))
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT_RASTER,
self.tr('Input raster layer')))
self.addParameter(QgsProcessingParameterField(self.FIELD,
self.tr('Field to use for burn in value'),
None,
self.INPUT,
QgsProcessingParameterField.Numeric,
optional=False))
params = [
QgsProcessingParameterBoolean(self.ADD,
self.tr('Add burn in values to existing raster values'),
defaultValue=False,
),
QgsProcessingParameterString(self.EXTRA,
self.tr('Additional command-line parameters'),
defaultValue=None,
optional=True)
]
for p in params:
p.setFlags(p.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(p)
self.addOutput(QgsProcessingOutputRasterLayer(self.OUTPUT,
self.tr('Rasterized')))
def name(self):
return 'rasterize_over'
def displayName(self):
return self.tr('Rasterize (overwrite with attribute)')
def group(self):
return self.tr('Vector conversion')
def groupId(self):
return 'vectorconversion'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'rasterize.png'))
def commandName(self):
return 'gdal_rasterize'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
ogrLayer, layerName = self.getOgrCompatibleSource(self.INPUT, parameters, context, feedback, executing)
inLayer = self.parameterAsRasterLayer(parameters, self.INPUT_RASTER, context)
if inLayer is None:
raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT_RASTER))
fieldName = self.parameterAsString(parameters, self.FIELD, context)
self.setOutputValue(self.OUTPUT, inLayer.source())
arguments = [
'-l',
layerName,
'-a',
fieldName
]
if self.parameterAsBool(parameters, self.ADD, context):
arguments.append('-add')
if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
extra = self.parameterAsString(parameters, self.EXTRA, context)
arguments.append(extra)
arguments.append(ogrLayer)
arguments.append(inLayer.source())
return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
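# A sketch of the assembled command line (layer, field, and paths hypothetical;
# '-add' only appears when the ADD option is checked):
#   gdal_rasterize -l mylayer -a DN -add input.shp existing.tif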
|
bhargav2408/python-for-android | refs/heads/master | python3-alpha/python3-src/Tools/unicode/comparecodecs.py | 45 | #!/usr/bin/env python3
""" Compare the output of two codecs.
(c) Copyright 2005, Marc-Andre Lemburg (mal@lemburg.com).
Licensed to PSF under a Contributor Agreement.
"""
import sys
def compare_codecs(encoding1, encoding2):
print('Comparing encoding/decoding of %r and %r' % (encoding1, encoding2))
mismatch = 0
# Check encoding
for i in range(sys.maxunicode + 1):
u = chr(i)
try:
c1 = u.encode(encoding1)
except UnicodeError as reason:
c1 = '<undefined>'
try:
c2 = u.encode(encoding2)
except UnicodeError as reason:
c2 = '<undefined>'
if c1 != c2:
print(' * encoding mismatch for 0x%04X: %-14r != %r' % \
(i, c1, c2))
mismatch += 1
# Check decoding
for i in range(256):
b = bytes([i])
try:
u1 = b.decode(encoding1)
except UnicodeError:
u1 = '<undefined>'
try:
u2 = b.decode(encoding2)
except UnicodeError:
u2 = '<undefined>'
if u1 != u2:
print(' * decoding mismatch for 0x%04X: %-14r != %r' % \
(i, u1, u2))
mismatch += 1
if mismatch:
print()
print('Found %i mismatches' % mismatch)
else:
print('-> Codecs are identical.')
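# Example invocation (illustrative): comparing two aliases of the same codec
# should report no mismatches:
#   python comparecodecs.py latin-1 iso8859-1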
if __name__ == '__main__':
compare_codecs(sys.argv[1], sys.argv[2])
|
openstack/senlin | refs/heads/master | senlin/objects/requests/webhooks.py | 2 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.objects import base
from senlin.objects import fields
@base.SenlinObjectRegistry.register
class WebhookTriggerRequestParamsInBody(base.SenlinObject):
fields = {
'identity': fields.StringField(),
'body': fields.JsonField(nullable=True, default={})
}
@base.SenlinObjectRegistry.register
class WebhookTriggerRequest(base.SenlinObject):
fields = {
'identity': fields.StringField(),
'body': fields.ObjectField('WebhookTriggerRequestBody')
}
@base.SenlinObjectRegistry.register
class WebhookTriggerRequestBody(base.SenlinObject):
fields = {
'params': fields.JsonField(nullable=True, default={})
}
|
luogangyi/Ceilometer-oVirt | refs/heads/stable/juno | build/lib/ceilometer/neutron_client.py | 4 | # Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from neutronclient.common import exceptions
from neutronclient.v2_0 import client as clientv20
from oslo.config import cfg
from ceilometer.openstack.common import log
service_types_opts = [
cfg.StrOpt('neutron',
default='network',
help='Neutron service type.'),
]
cfg.CONF.register_opts(service_types_opts, group='service_types')
cfg.CONF.import_opt('http_timeout', 'ceilometer.service')
cfg.CONF.import_group('service_credentials', 'ceilometer.service')
LOG = log.getLogger(__name__)
def logged(func):
@functools.wraps(func)
def with_logging(*args, **kwargs):
try:
return func(*args, **kwargs)
except exceptions.NeutronClientException as e:
# handles 404's when services are disabled in neutron
LOG.warn(e)
return []
except Exception as e:
LOG.exception(e)
raise
return with_logging
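# Sketch of the decorator's effect (call site hypothetical): a listing call
# that raises NeutronClientException -- e.g. a 404 because the service is
# disabled -- is logged and yields [] instead of propagating:
#   pools = Client().pool_get_all()   # [] when LBaaS is not deployed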
class Client(object):
"""A client which gets information via python-neutronclient."""
def __init__(self):
conf = cfg.CONF.service_credentials
params = {
'insecure': conf.insecure,
'ca_cert': conf.os_cacert,
'username': conf.os_username,
'password': conf.os_password,
'auth_url': conf.os_auth_url,
'region_name': conf.os_region_name,
'endpoint_type': conf.os_endpoint_type,
'timeout': cfg.CONF.http_timeout,
'service_type': cfg.CONF.service_types.neutron,
}
if conf.os_tenant_id:
params['tenant_id'] = conf.os_tenant_id
else:
params['tenant_name'] = conf.os_tenant_name
self.client = clientv20.Client(**params)
@logged
def network_get_all(self):
"""Returns all networks."""
resp = self.client.list_networks()
return resp.get('networks')
@logged
def port_get_all(self):
resp = self.client.list_ports()
return resp.get('ports')
@logged
def vip_get_all(self):
resp = self.client.list_vips()
return resp.get('vips')
@logged
def pool_get_all(self):
resp = self.client.list_pools()
return resp.get('pools')
@logged
def member_get_all(self):
resp = self.client.list_members()
return resp.get('members')
@logged
def health_monitor_get_all(self):
resp = self.client.list_health_monitors()
return resp.get('health_monitors')
@logged
def pool_stats(self, pool):
return self.client.retrieve_pool_stats(pool)
@logged
def vpn_get_all(self):
resp = self.client.list_vpnservices()
return resp.get('vpnservices')
@logged
def ipsec_site_connections_get_all(self):
resp = self.client.list_ipsec_site_connections()
return resp.get('ipsec_site_connections')
@logged
def firewall_get_all(self):
resp = self.client.list_firewalls()
return resp.get('firewalls')
@logged
def fw_policy_get_all(self):
resp = self.client.list_firewall_policies()
return resp.get('firewall_policies')
|
CERT-Solucom/certitude | refs/heads/master | components/scanner/flatevaluators/process.py | 2 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
CERTitude: the seeker of IOC
Copyright (c) 2016 CERT-W
Contact: cert@wavestone.com
Contributors: @iansus, @nervous, @fschwebel
CERTitude is under licence GPL-2.0:
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
import template
class Evaluator(template.EvaluatorInterface):
evalList = ['pid', 'parentpid', 'UserSID', 'Username', 'name', 'path', 'HandleList/Handle/Type', 'HandleList/Handle/Name']
def __init__(self, logger, ioc, remoteCommand, keepFiles, confidential, dirname):
template.EvaluatorInterface.__init__(self, logger, ioc, remoteCommand, keepFiles, confidential, dirname)
self.setEvaluatorParams(evalList=Evaluator.evalList, name='process', command='collector getprocess') |
cbeck88/fifengine | refs/heads/master | demos/rocket_demo/rocket_demo.py | 2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2013 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
import sys, os, re
fife_path = os.path.join('..','..','engine','python')
if os.path.isdir(fife_path) and fife_path not in sys.path:
sys.path.insert(0,fife_path)
from fife import fife
import rocket
print "Using the FIFE python module found here: ", os.path.dirname(fife.__file__)
from fife.extensions.librocket.rocketbasicapplication import RocketApplicationBase
from fife.extensions.librocket.rocketbasicapplication import RocketEventListener
from fife.extensions.fife_timer import init
class RocketScriptMediator(object):
def __init__(self):
self.docs_to_close = []
def initialize(self, application):
self.application = application
def closeDocument(self, doc):
self.docs_to_close.append(doc)
def closeDocuments(self):
for doc in self.docs_to_close:
doc.Hide()
self.application.rocketcontext.UnloadDocument(doc)
self.docs_to_close = []
rocketscriptmediator = RocketScriptMediator()
class RocketDemoEventListener(RocketEventListener):
def __init__(self, app):
super(RocketDemoEventListener, self).__init__(app)
self.debuggeractive = True
class RocketDemo(RocketApplicationBase):
def __init__(self):
super(RocketDemo, self).__init__()
rocketscriptmediator.initialize(self)
self.guimanager.showDebugger()
self._loadFonts()
self._loadDocuments()
def _loadFonts(self):
font_dir = 'fonts/'
fonts = [
'FreeSans.ttf'
]
for font in fonts:
self.guimanager.loadFont(font_dir + font)
def _loadDocuments(self):
doc_dir = 'gui/RML/'
docs = [
'buttons.rml',
]
self._documents = [self.rocketcontext.LoadDocument(doc_dir + doc) for doc in docs]
def _pump(self):
"""
Overloaded this function to check for quit message. Quit if message
is received.
"""
rocketscriptmediator.closeDocuments()
if self._listener.quitrequested:
self.quit()
def run(self):
#use the id of a document to locate it. This is defined in the body tag of the document using the id attribute.
self.rocketcontext.documents['buttons_demo'].Show()
super(RocketDemo, self).run()
def quit(self):
for doc in self._documents:
self.rocketcontext.UnloadDocument(doc)
self._documents = []
#unload documents that may have been loaded using scripts in RML.
self.rocketcontext.UnloadAllDocuments()
super(RocketDemo, self).quit()
def createListener(self):
self._listener = RocketDemoEventListener(self)
return self._listener
if __name__ == '__main__':
app = RocketDemo()
app.run()
|
avadacatavra/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/third_party/pluggy/pluggy/__init__.py | 41 | import inspect
import warnings
from .callers import _multicall, HookCallError, _Result, _legacymulticall
__version__ = '0.5.3.dev'
__all__ = ["PluginManager", "PluginValidationError", "HookCallError",
"HookspecMarker", "HookimplMarker"]
class PluginValidationError(Exception):
""" plugin failed validation. """
class HookspecMarker(object):
""" Decorator helper class for marking functions as hook specifications.
You can instantiate it with a project_name to get a decorator.
Calling PluginManager.add_hookspecs later will discover all marked functions
if the PluginManager uses the same project_name.
"""
def __init__(self, project_name):
self.project_name = project_name
def __call__(self, function=None, firstresult=False, historic=False):
""" if passed a function, directly sets attributes on the function
which will make it discoverable to add_hookspecs(). If passed no
function, returns a decorator which can be applied to a function
later using the attributes supplied.
If firstresult is True the 1:N hook call (N being the number of registered
hook implementation functions) will stop at I<=N when the I'th function
returns a non-None result.
If historic is True calls to a hook will be memorized and replayed
on later registered plugins.
"""
def setattr_hookspec_opts(func):
if historic and firstresult:
raise ValueError("cannot have a historic firstresult hook")
setattr(func, self.project_name + "_spec",
dict(firstresult=firstresult, historic=historic))
return func
if function is not None:
return setattr_hookspec_opts(function)
else:
return setattr_hookspec_opts
class HookimplMarker(object):
""" Decorator helper class for marking functions as hook implementations.
You can instantiate with a project_name to get a decorator.
Calling PluginManager.register later will discover all marked functions
if the PluginManager uses the same project_name.
"""
def __init__(self, project_name):
self.project_name = project_name
def __call__(self, function=None, hookwrapper=False, optionalhook=False,
tryfirst=False, trylast=False):
""" if passed a function, directly sets attributes on the function
which will make it discoverable to register(). If passed no function,
returns a decorator which can be applied to a function later using
the attributes supplied.
If optionalhook is True a missing matching hook specification will not result
in an error (by default it is an error if no matching spec is found).
If tryfirst is True this hook implementation will run as early as possible
in the chain of N hook implementations for a specification.
If trylast is True this hook implementation will run as late as possible
in the chain of N hook implementations.
If hookwrapper is True the hook implementations needs to execute exactly
one "yield". The code before the yield is run early before any non-hookwrapper
function is run. The code after the yield is run after all non-hookwrapper
function have run. The yield receives a ``_Result`` object representing
the exception or result outcome of the inner calls (including other hookwrapper
calls).
"""
def setattr_hookimpl_opts(func):
setattr(func, self.project_name + "_impl",
dict(hookwrapper=hookwrapper, optionalhook=optionalhook,
tryfirst=tryfirst, trylast=trylast))
return func
if function is None:
return setattr_hookimpl_opts
else:
return setattr_hookimpl_opts(function)
def normalize_hookimpl_opts(opts):
opts.setdefault("tryfirst", False)
opts.setdefault("trylast", False)
opts.setdefault("hookwrapper", False)
opts.setdefault("optionalhook", False)
class _TagTracer(object):
def __init__(self):
self._tag2proc = {}
self.writer = None
self.indent = 0
def get(self, name):
return _TagTracerSub(self, (name,))
def format_message(self, tags, args):
if isinstance(args[-1], dict):
extra = args[-1]
args = args[:-1]
else:
extra = {}
content = " ".join(map(str, args))
indent = " " * self.indent
lines = [
"%s%s [%s]\n" % (indent, content, ":".join(tags))
]
for name, value in extra.items():
lines.append("%s %s: %s\n" % (indent, name, value))
return lines
def processmessage(self, tags, args):
if self.writer is not None and args:
lines = self.format_message(tags, args)
self.writer(''.join(lines))
try:
self._tag2proc[tags](tags, args)
except KeyError:
pass
def setwriter(self, writer):
self.writer = writer
def setprocessor(self, tags, processor):
if isinstance(tags, str):
tags = tuple(tags.split(":"))
else:
assert isinstance(tags, tuple)
self._tag2proc[tags] = processor
class _TagTracerSub(object):
def __init__(self, root, tags):
self.root = root
self.tags = tags
def __call__(self, *args):
self.root.processmessage(self.tags, args)
def setmyprocessor(self, processor):
self.root.setprocessor(self.tags, processor)
def get(self, name):
return self.__class__(self.root, self.tags + (name,))
class _TracedHookExecution(object):
def __init__(self, pluginmanager, before, after):
self.pluginmanager = pluginmanager
self.before = before
self.after = after
self.oldcall = pluginmanager._inner_hookexec
assert not isinstance(self.oldcall, _TracedHookExecution)
self.pluginmanager._inner_hookexec = self
def __call__(self, hook, hook_impls, kwargs):
self.before(hook.name, hook_impls, kwargs)
outcome = _Result.from_call(lambda: self.oldcall(hook, hook_impls, kwargs))
self.after(outcome, hook.name, hook_impls, kwargs)
return outcome.get_result()
def undo(self):
self.pluginmanager._inner_hookexec = self.oldcall
class PluginManager(object):
""" Core Pluginmanager class which manages registration
of plugin objects and 1:N hook calling.
You can register new hooks by calling ``add_hookspec(module_or_class)``.
You can register plugin objects (which contain hooks) by calling
``register(plugin)``. The Pluginmanager is initialized with a
prefix that is searched for in the names of the dict of registered
plugin objects. An optional excludefunc allows blacklisting names which
are not considered as hooks despite a matching prefix.
For debugging purposes you can call ``enable_tracing()``
which will subsequently send debug information to the trace helper.
"""
def __init__(self, project_name, implprefix=None):
""" if implprefix is given implementation functions
will be recognized if their name matches the implprefix. """
self.project_name = project_name
self._name2plugin = {}
self._plugin2hookcallers = {}
self._plugin_distinfo = []
self.trace = _TagTracer().get("pluginmanage")
self.hook = _HookRelay(self.trace.root.get("hook"))
self._implprefix = implprefix
self._inner_hookexec = lambda hook, methods, kwargs: \
hook.multicall(
methods, kwargs,
firstresult=hook.spec_opts.get('firstresult'),
)
def _hookexec(self, hook, methods, kwargs):
# called from all hookcaller instances.
# enable_tracing will set its own wrapping function at self._inner_hookexec
return self._inner_hookexec(hook, methods, kwargs)
def register(self, plugin, name=None):
""" Register a plugin and return its canonical name or None if the name
is blocked from registering. Raise a ValueError if the plugin is already
registered. """
plugin_name = name or self.get_canonical_name(plugin)
if plugin_name in self._name2plugin or plugin in self._plugin2hookcallers:
if self._name2plugin.get(plugin_name, -1) is None:
return # blocked plugin, return None to indicate no registration
raise ValueError("Plugin already registered: %s=%s\n%s" %
(plugin_name, plugin, self._name2plugin))
# XXX if an error happens we should make sure no state has been
# changed at point of return
self._name2plugin[plugin_name] = plugin
# register matching hook implementations of the plugin
self._plugin2hookcallers[plugin] = hookcallers = []
for name in dir(plugin):
hookimpl_opts = self.parse_hookimpl_opts(plugin, name)
if hookimpl_opts is not None:
normalize_hookimpl_opts(hookimpl_opts)
method = getattr(plugin, name)
hookimpl = HookImpl(plugin, plugin_name, method, hookimpl_opts)
hook = getattr(self.hook, name, None)
if hook is None:
hook = _HookCaller(name, self._hookexec)
setattr(self.hook, name, hook)
elif hook.has_spec():
self._verify_hook(hook, hookimpl)
hook._maybe_apply_history(hookimpl)
hook._add_hookimpl(hookimpl)
hookcallers.append(hook)
return plugin_name
def parse_hookimpl_opts(self, plugin, name):
method = getattr(plugin, name)
if not inspect.isroutine(method):
return
try:
res = getattr(method, self.project_name + "_impl", None)
except Exception:
res = {}
if res is not None and not isinstance(res, dict):
# false positive
res = None
elif res is None and self._implprefix and name.startswith(self._implprefix):
res = {}
return res
def unregister(self, plugin=None, name=None):
""" unregister a plugin object and all its contained hook implementations
from internal data structures. """
if name is None:
assert plugin is not None, "one of name or plugin needs to be specified"
name = self.get_name(plugin)
if plugin is None:
plugin = self.get_plugin(name)
# if self._name2plugin[name] == None registration was blocked: ignore
if self._name2plugin.get(name):
del self._name2plugin[name]
for hookcaller in self._plugin2hookcallers.pop(plugin, []):
hookcaller._remove_plugin(plugin)
return plugin
def set_blocked(self, name):
""" block registrations of the given name, unregister if already registered. """
self.unregister(name=name)
self._name2plugin[name] = None
def is_blocked(self, name):
""" return True if the name blogs registering plugins of that name. """
return name in self._name2plugin and self._name2plugin[name] is None
def add_hookspecs(self, module_or_class):
""" add new hook specifications defined in the given module_or_class.
Functions are recognized if they have been decorated accordingly. """
names = []
for name in dir(module_or_class):
spec_opts = self.parse_hookspec_opts(module_or_class, name)
if spec_opts is not None:
hc = getattr(self.hook, name, None)
if hc is None:
hc = _HookCaller(name, self._hookexec, module_or_class, spec_opts)
setattr(self.hook, name, hc)
else:
# plugins registered this hook without knowing the spec
hc.set_specification(module_or_class, spec_opts)
for hookfunction in (hc._wrappers + hc._nonwrappers):
self._verify_hook(hc, hookfunction)
names.append(name)
if not names:
raise ValueError("did not find any %r hooks in %r" %
(self.project_name, module_or_class))
def parse_hookspec_opts(self, module_or_class, name):
method = getattr(module_or_class, name)
return getattr(method, self.project_name + "_spec", None)
def get_plugins(self):
""" return the set of registered plugins. """
return set(self._plugin2hookcallers)
def is_registered(self, plugin):
""" Return True if the plugin is already registered. """
return plugin in self._plugin2hookcallers
def get_canonical_name(self, plugin):
""" Return canonical name for a plugin object. Note that a plugin
may be registered under a different name which was specified
by the caller of register(plugin, name). To obtain the name
of a registered plugin use ``get_name(plugin)`` instead."""
return getattr(plugin, "__name__", None) or str(id(plugin))
def get_plugin(self, name):
""" Return a plugin or None for the given name. """
return self._name2plugin.get(name)
def has_plugin(self, name):
""" Return True if a plugin with the given name is registered. """
return self.get_plugin(name) is not None
def get_name(self, plugin):
""" Return name for registered plugin or None if not registered. """
for name, val in self._name2plugin.items():
if plugin == val:
return name
def _verify_hook(self, hook, hookimpl):
if hook.is_historic() and hookimpl.hookwrapper:
raise PluginValidationError(
"Plugin %r\nhook %r\nhistoric incompatible to hookwrapper" %
(hookimpl.plugin_name, hook.name))
# positional arg checking
notinspec = set(hookimpl.argnames) - set(hook.argnames)
if notinspec:
raise PluginValidationError(
"Plugin %r for hook %r\nhookimpl definition: %s\n"
"Argument(s) %s are declared in the hookimpl but "
"can not be found in the hookspec" %
(hookimpl.plugin_name, hook.name,
_formatdef(hookimpl.function), notinspec)
)
def check_pending(self):
""" Verify that all hooks which have not been verified against
a hook specification are optional, otherwise raise PluginValidationError"""
for name in self.hook.__dict__:
if name[0] != "_":
hook = getattr(self.hook, name)
if not hook.has_spec():
for hookimpl in (hook._wrappers + hook._nonwrappers):
if not hookimpl.optionalhook:
raise PluginValidationError(
"unknown hook %r in plugin %r" %
(name, hookimpl.plugin))
def load_setuptools_entrypoints(self, entrypoint_name):
""" Load modules from querying the specified setuptools entrypoint name.
Return the number of loaded plugins. """
from pkg_resources import (iter_entry_points, DistributionNotFound,
VersionConflict)
for ep in iter_entry_points(entrypoint_name):
# is the plugin registered or blocked?
if self.get_plugin(ep.name) or self.is_blocked(ep.name):
continue
try:
plugin = ep.load()
except DistributionNotFound:
continue
except VersionConflict as e:
raise PluginValidationError(
"Plugin %r could not be loaded: %s!" % (ep.name, e))
self.register(plugin, name=ep.name)
self._plugin_distinfo.append((plugin, ep.dist))
return len(self._plugin_distinfo)
def list_plugin_distinfo(self):
""" return list of distinfo/plugin tuples for all setuptools registered
plugins. """
return list(self._plugin_distinfo)
def list_name_plugin(self):
""" return list of name/plugin pairs. """
return list(self._name2plugin.items())
def get_hookcallers(self, plugin):
""" get all hook callers for the specified plugin. """
return self._plugin2hookcallers.get(plugin)
def add_hookcall_monitoring(self, before, after):
""" add before/after tracing functions for all hooks
and return an undo function which, when called,
will remove the added tracers.
``before(hook_name, hook_impls, kwargs)`` will be called ahead
of all hook calls and receive a hookcaller instance, a list
of HookImpl instances and the keyword arguments for the hook call.
``after(outcome, hook_name, hook_impls, kwargs)`` receives the
same arguments as ``before`` but also a :py:class:`_Result`` object
which represents the result of the overall hook call.
"""
return _TracedHookExecution(self, before, after).undo
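# A minimal tracer pair as described above (sketch; helpers hypothetical):
#   def before(hook_name, hook_impls, kwargs):
#       print("calling %s with %s" % (hook_name, sorted(kwargs)))
#   def after(outcome, hook_name, hook_impls, kwargs):
#       print("%s -> %r" % (hook_name, outcome.get_result()))
#   undo = pm.add_hookcall_monitoring(before, after)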
def enable_tracing(self):
""" enable tracing of hook calls and return an undo function. """
hooktrace = self.hook._trace
def before(hook_name, methods, kwargs):
hooktrace.root.indent += 1
hooktrace(hook_name, kwargs)
def after(outcome, hook_name, methods, kwargs):
if outcome.excinfo is None:
hooktrace("finish", hook_name, "-->", outcome.get_result())
hooktrace.root.indent -= 1
return self.add_hookcall_monitoring(before, after)
def subset_hook_caller(self, name, remove_plugins):
""" Return a new _HookCaller instance for the named method
which manages calls to all registered plugins except the
ones from remove_plugins. """
orig = getattr(self.hook, name)
plugins_to_remove = [plug for plug in remove_plugins if hasattr(plug, name)]
if plugins_to_remove:
hc = _HookCaller(orig.name, orig._hookexec, orig._specmodule_or_class,
orig.spec_opts)
for hookimpl in (orig._wrappers + orig._nonwrappers):
plugin = hookimpl.plugin
if plugin not in plugins_to_remove:
hc._add_hookimpl(hookimpl)
# we also keep track of this hook caller so it
# gets properly removed on plugin unregistration
self._plugin2hookcallers.setdefault(plugin, []).append(hc)
return hc
return orig
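# Illustrative use (plugin object hypothetical): call every registered
# implementation of "myhook" except the one from noisy_plugin:
#   hc = pm.subset_hook_caller("myhook", remove_plugins=[noisy_plugin])
#   hc(config=config)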
def varnames(func):
"""Return tuple of positional and keywrord argument names for a function,
method, class or callable.
In case of a class, its ``__init__`` method is considered.
For methods the ``self`` parameter is not included.
"""
cache = getattr(func, "__dict__", {})
try:
return cache["_varnames"]
except KeyError:
pass
if inspect.isclass(func):
try:
func = func.__init__
except AttributeError:
return (), ()
elif not inspect.isroutine(func): # callable object?
try:
func = getattr(func, '__call__', func)
except Exception:
return (), ()  # keep the (args, defaults) pair contract
try: # func MUST be a function or method here or we won't parse any args
spec = _getargspec(func)
except TypeError:
return (), ()
args, defaults = tuple(spec.args), spec.defaults
if defaults:
index = -len(defaults)
args, defaults = args[:index], tuple(args[index:])
else:
defaults = ()
# strip any implicit instance arg
if args:
if inspect.ismethod(func) or (
'.' in getattr(func, '__qualname__', ()) and args[0] == 'self'
):
args = args[1:]
assert "self" not in args # best naming practises check?
try:
cache["_varnames"] = args, defaults
except TypeError:
pass
return args, defaults
class _HookRelay(object):
""" hook holder object for performing 1:N hook calls where N is the number
of registered plugins.
"""
def __init__(self, trace):
self._trace = trace
class _HookCaller(object):
def __init__(self, name, hook_execute, specmodule_or_class=None,
spec_opts=None):
self.name = name
self._wrappers = []
self._nonwrappers = []
self._hookexec = hook_execute
self._specmodule_or_class = None
self.argnames = None
self.kwargnames = None
self.multicall = _multicall
self.spec_opts = spec_opts or {}
if specmodule_or_class is not None:
self.set_specification(specmodule_or_class, spec_opts)
def has_spec(self):
return self._specmodule_or_class is not None
def set_specification(self, specmodule_or_class, spec_opts):
assert not self.has_spec()
self._specmodule_or_class = specmodule_or_class
specfunc = getattr(specmodule_or_class, self.name)
# get spec arg signature
argnames, self.kwargnames = varnames(specfunc)
self.argnames = ["__multicall__"] + list(argnames)
self.spec_opts.update(spec_opts)
if spec_opts.get("historic"):
self._call_history = []
def is_historic(self):
return hasattr(self, "_call_history")
def _remove_plugin(self, plugin):
def remove(wrappers):
for i, method in enumerate(wrappers):
if method.plugin == plugin:
del wrappers[i]
return True
if remove(self._wrappers) is None:
if remove(self._nonwrappers) is None:
raise ValueError("plugin %r not found" % (plugin,))
def _add_hookimpl(self, hookimpl):
"""A an implementation to the callback chain.
"""
if hookimpl.hookwrapper:
methods = self._wrappers
else:
methods = self._nonwrappers
if hookimpl.trylast:
methods.insert(0, hookimpl)
elif hookimpl.tryfirst:
methods.append(hookimpl)
else:
# find last non-tryfirst method
i = len(methods) - 1
while i >= 0 and methods[i].tryfirst:
i -= 1
methods.insert(i + 1, hookimpl)
if '__multicall__' in hookimpl.argnames:
warnings.warn(
"Support for __multicall__ is now deprecated and will be"
"removed in an upcoming release.",
DeprecationWarning
)
self.multicall = _legacymulticall
def __repr__(self):
return "<_HookCaller %r>" % (self.name,)
def __call__(self, *args, **kwargs):
if args:
raise TypeError("hook calling supports only keyword arguments")
assert not self.is_historic()
if self.argnames:
notincall = set(self.argnames) - set(['__multicall__']) - set(
kwargs.keys())
if notincall:
warnings.warn(
"Argument(s) {} which are declared in the hookspec "
"can not be found in this hook call"
.format(tuple(notincall)),
stacklevel=2,
)
return self._hookexec(self, self._nonwrappers + self._wrappers, kwargs)
def call_historic(self, proc=None, kwargs=None):
""" call the hook with given ``kwargs`` for all registered plugins and
for all plugins which will be registered afterwards.
If ``proc`` is not None it will be called for each non-None result
obtained from a hook implementation.
"""
self._call_history.append((kwargs or {}, proc))
# historizing hooks don't return results
res = self._hookexec(self, self._nonwrappers + self._wrappers, kwargs)
if proc is not None:
for x in res or []:
proc(x)
def call_extra(self, methods, kwargs):
""" Call the hook with some additional temporarily participating
methods using the specified kwargs as call parameters. """
old = list(self._nonwrappers), list(self._wrappers)
for method in methods:
opts = dict(hookwrapper=False, trylast=False, tryfirst=False)
hookimpl = HookImpl(None, "<temp>", method, opts)
self._add_hookimpl(hookimpl)
try:
return self(**kwargs)
finally:
self._nonwrappers, self._wrappers = old
def _maybe_apply_history(self, method):
"""Apply call history to a new hookimpl if it is marked as historic.
"""
if self.is_historic():
for kwargs, proc in self._call_history:
res = self._hookexec(self, [method], kwargs)
if res and proc is not None:
proc(res[0])
class HookImpl(object):
def __init__(self, plugin, plugin_name, function, hook_impl_opts):
self.function = function
self.argnames, self.kwargnames = varnames(self.function)
self.plugin = plugin
self.opts = hook_impl_opts
self.plugin_name = plugin_name
self.__dict__.update(hook_impl_opts)
if hasattr(inspect, 'getfullargspec'):
def _getargspec(func):
return inspect.getfullargspec(func)
else:
def _getargspec(func):
return inspect.getargspec(func)
if hasattr(inspect, 'signature'):
def _formatdef(func):
return "%s%s" % (
func.__name__,
str(inspect.signature(func))
)
else:
def _formatdef(func):
return "%s%s" % (
func.__name__,
inspect.formatargspec(*inspect.getargspec(func))
)
|
pkuyym/Paddle | refs/heads/develop | python/paddle/trainer_config_helpers/tests/configs/test_fc.py | 7 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *
settings(batch_size=1000, learning_rate=1e-5)
din = data_layer(name='data', size=100)
trans = trans_layer(input=din)
hidden = fc_layer(input=trans, size=100, bias_attr=False)
mask = data_layer(name='mask', size=100)
hidden_sel = selective_fc_layer(
input=din, select=mask, size=100, act=SigmoidActivation())
outputs(hidden, hidden_sel)
|
TansyArron/pants | refs/heads/master | src/python/pants/util/xml_parser.py | 34 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from xml.dom.minidom import parse
class XmlParser(object):
"""Parse .xml files."""
class XmlError(Exception):
"""Raise when parsing the xml results in error."""
@classmethod
def _parse(cls, xml_path):
"""Parse .xml file and return parsed text as a DOM Document.
:param string xml_path: File path of xml file to be parsed.
:returns xml.dom.minidom.Document parsed_xml: Document instance containing parsed xml.
"""
try:
parsed_xml = parse(xml_path)
# Minidom is a frontend for various parsers, only Exception covers ill-formed .xml for them all.
except Exception as e:
raise cls.XmlError('Error parsing xml file at {0}: {1}'.format(xml_path, e))
return parsed_xml
@classmethod
def from_file(cls, xml_path):
"""Parse .xml file and create a XmlParser object."""
try:
parsed_xml = cls._parse(xml_path)
except OSError as e:
raise XmlParser.XmlError("Problem reading xml file at {}: {}".format(xml_path, e))
return cls(xml_path, parsed_xml)
def __init__(self, xml_path, parsed_xml):
"""XmlParser object.
:param string xml_path: File path to original .xml file.
:param xml.dom.minidom.Document parsed_xml: Document instance containing parsed xml.
"""
self.xml_path = xml_path
self.parsed = parsed_xml
def get_attribute(self, element, attribute):
"""Retrieve the value of an attribute that is contained by the tag element.
:param string element: Name of an xml element.
:param string attribute: Name of the attribute that is to be returned.
:return: Desired attribute value.
:rtype: string
"""
parsed_element = self.parsed.getElementsByTagName(element)
if not parsed_element:
raise self.XmlError("There is no '{0}' element in "
"xml file at: {1}".format(element, self.xml_path))
parsed_attribute = parsed_element[0].getAttribute(attribute)
if not parsed_attribute:
raise self.XmlError("There is no '{0}' attribute in "
"xml at: {1}".format(attribute, self.xml_path))
return parsed_attribute
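# Illustrative call (file and names hypothetical):
#   parser = XmlParser.from_file('AndroidManifest.xml')
#   package = parser.get_attribute('manifest', 'package')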
def get_optional_attribute(self, element, attribute):
"""Attempt to retrieve an optional attribute from the xml and return None on failure."""
try:
return self.get_attribute(element, attribute)
except self.XmlError:
return None
|
yongtang/tensorflow | refs/heads/master | tensorflow/tools/pip_package/simple_console.py | 603 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Start a simple interactive console with TensorFlow available."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import code
import sys
def main(_):
"""Run an interactive console."""
code.interact()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
quxiaolong1504/django | refs/heads/master | django/contrib/gis/db/models/sql/__init__.py | 476 | from django.contrib.gis.db.models.sql.conversion import (
AreaField, DistanceField, GeomField, GMLField,
)
__all__ = [
'AreaField', 'DistanceField', 'GeomField', 'GMLField'
]
|
joebowen/LogMyRocket_API | refs/heads/master | LogMyRocket/libraries/sys_packages/py/py/_code/_assertionold.py | 218 | import py
import sys, inspect
from compiler import parse, ast, pycodegen
from py._code.assertion import BuiltinAssertionError, _format_explanation
passthroughex = py.builtin._sysex
class Failure:
def __init__(self, node):
self.exc, self.value, self.tb = sys.exc_info()
self.node = node
class View(object):
"""View base class.
If C is a subclass of View, then C(x) creates a proxy object around
the object x. The actual class of the proxy is not C in general,
but a *subclass* of C determined by the rules below. To avoid confusion
we call view class the class of the proxy (a subclass of C, so of View)
and object class the class of x.
Attributes and methods not found in the proxy are automatically read on x.
Other operations like setting attributes are performed on the proxy, as
determined by its view class. The object x is available from the proxy
as its __obj__ attribute.
The view class selection is determined by the __view__ tuples and the
optional __viewkey__ method. By default, the selected view class is the
most specific subclass of C whose __view__ mentions the class of x.
If no such subclass is found, the search proceeds with the parent
object classes. For example, C(True) will first look for a subclass
of C with __view__ = (..., bool, ...); only if it doesn't find any will it
look for one with __view__ = (..., int, ...), and finally (..., object, ...).
If everything fails, the class C itself is used as the default.
Alternatively, the view class selection can be driven by another aspect
of the object x, instead of the class of x, by overriding __viewkey__.
See the last example at the end of this module.
"""
_viewcache = {}
__view__ = ()
def __new__(rootclass, obj, *args, **kwds):
self = object.__new__(rootclass)
self.__obj__ = obj
self.__rootclass__ = rootclass
key = self.__viewkey__()
try:
self.__class__ = self._viewcache[key]
except KeyError:
self.__class__ = self._selectsubclass(key)
return self
def __getattr__(self, attr):
# attributes not found in the normal hierarchy rooted on View
# are looked up in the object's real class
return getattr(self.__obj__, attr)
def __viewkey__(self):
return self.__obj__.__class__
def __matchkey__(self, key, subclasses):
if inspect.isclass(key):
keys = inspect.getmro(key)
else:
keys = [key]
for key in keys:
result = [C for C in subclasses if key in C.__view__]
if result:
return result
return []
def _selectsubclass(self, key):
subclasses = list(enumsubclasses(self.__rootclass__))
for C in subclasses:
if not isinstance(C.__view__, tuple):
C.__view__ = (C.__view__,)
choices = self.__matchkey__(key, subclasses)
if not choices:
return self.__rootclass__
elif len(choices) == 1:
return choices[0]
else:
# combine the multiple choices
return type('?', tuple(choices), {})
def __repr__(self):
return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__)
def enumsubclasses(cls):
for subcls in cls.__subclasses__():
for subsubclass in enumsubclasses(subcls):
yield subsubclass
yield cls
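# Illustrative sketch (not part of the original module): because bool is a
# subclass of int, the view class selection matches a bool-specific
# subclass before falling back to an int-specific one.
def _view_selection_demo():
class NumberView(View):
pass
class IntView(NumberView):
__view__ = int
class BoolView(NumberView):
__view__ = bool
assert isinstance(NumberView(True), BoolView)
assert isinstance(NumberView(7), IntView)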
class Interpretable(View):
"""A parse tree node with a few extra methods."""
explanation = None
def is_builtin(self, frame):
return False
def eval(self, frame):
# fall-back for unknown expression nodes
try:
expr = ast.Expression(self.__obj__)
expr.filename = '<eval>'
self.__obj__.filename = '<eval>'
co = pycodegen.ExpressionCodeGenerator(expr).getCode()
result = frame.eval(co)
except passthroughex:
raise
except:
raise Failure(self)
self.result = result
self.explanation = self.explanation or frame.repr(self.result)
def run(self, frame):
# fall-back for unknown statement nodes
try:
expr = ast.Module(None, ast.Stmt([self.__obj__]))
expr.filename = '<run>'
co = pycodegen.ModuleCodeGenerator(expr).getCode()
frame.exec_(co)
except passthroughex:
raise
except:
raise Failure(self)
def nice_explanation(self):
return _format_explanation(self.explanation)
class Name(Interpretable):
__view__ = ast.Name
def is_local(self, frame):
source = '%r in locals() is not globals()' % self.name
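# note the comparison chaining: this evaluates as
# (name in locals()) and (locals() is not globals())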
try:
return frame.is_true(frame.eval(source))
except passthroughex:
raise
except:
return False
def is_global(self, frame):
source = '%r in globals()' % self.name
try:
return frame.is_true(frame.eval(source))
except passthroughex:
raise
except:
return False
def is_builtin(self, frame):
source = '%r not in locals() and %r not in globals()' % (
self.name, self.name)
try:
return frame.is_true(frame.eval(source))
except passthroughex:
raise
except:
return False
def eval(self, frame):
super(Name, self).eval(frame)
if not self.is_local(frame):
self.explanation = self.name
class Compare(Interpretable):
__view__ = ast.Compare
def eval(self, frame):
expr = Interpretable(self.expr)
expr.eval(frame)
for operation, expr2 in self.ops:
if hasattr(self, 'result'):
# shortcutting in chained expressions
if not frame.is_true(self.result):
break
expr2 = Interpretable(expr2)
expr2.eval(frame)
self.explanation = "%s %s %s" % (
expr.explanation, operation, expr2.explanation)
source = "__exprinfo_left %s __exprinfo_right" % operation
try:
self.result = frame.eval(source,
__exprinfo_left=expr.result,
__exprinfo_right=expr2.result)
except passthroughex:
raise
except:
raise Failure(self)
expr = expr2
class And(Interpretable):
__view__ = ast.And
def eval(self, frame):
explanations = []
for expr in self.nodes:
expr = Interpretable(expr)
expr.eval(frame)
explanations.append(expr.explanation)
self.result = expr.result
if not frame.is_true(expr.result):
break
self.explanation = '(' + ' and '.join(explanations) + ')'
class Or(Interpretable):
__view__ = ast.Or
def eval(self, frame):
explanations = []
for expr in self.nodes:
expr = Interpretable(expr)
expr.eval(frame)
explanations.append(expr.explanation)
self.result = expr.result
if frame.is_true(expr.result):
break
self.explanation = '(' + ' or '.join(explanations) + ')'
# == Unary operations ==
keepalive = []
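# the classes generated in the loops below are rebound on each iteration and
# are otherwise only reachable through View.__subclasses__(), which holds
# weak references; appending them to keepalive pins them in memory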
for astclass, astpattern in {
ast.Not : 'not __exprinfo_expr',
ast.Invert : '(~__exprinfo_expr)',
}.items():
class UnaryArith(Interpretable):
__view__ = astclass
def eval(self, frame, astpattern=astpattern):
expr = Interpretable(self.expr)
expr.eval(frame)
self.explanation = astpattern.replace('__exprinfo_expr',
expr.explanation)
try:
self.result = frame.eval(astpattern,
__exprinfo_expr=expr.result)
except passthroughex:
raise
except:
raise Failure(self)
keepalive.append(UnaryArith)
# == Binary operations ==
for astclass, astpattern in {
ast.Add : '(__exprinfo_left + __exprinfo_right)',
ast.Sub : '(__exprinfo_left - __exprinfo_right)',
ast.Mul : '(__exprinfo_left * __exprinfo_right)',
ast.Div : '(__exprinfo_left / __exprinfo_right)',
ast.Mod : '(__exprinfo_left % __exprinfo_right)',
ast.Power : '(__exprinfo_left ** __exprinfo_right)',
}.items():
class BinaryArith(Interpretable):
__view__ = astclass
def eval(self, frame, astpattern=astpattern):
left = Interpretable(self.left)
left.eval(frame)
right = Interpretable(self.right)
right.eval(frame)
self.explanation = (astpattern
.replace('__exprinfo_left', left .explanation)
.replace('__exprinfo_right', right.explanation))
try:
self.result = frame.eval(astpattern,
__exprinfo_left=left.result,
__exprinfo_right=right.result)
except passthroughex:
raise
except:
raise Failure(self)
keepalive.append(BinaryArith)
class CallFunc(Interpretable):
__view__ = ast.CallFunc
def is_bool(self, frame):
source = 'isinstance(__exprinfo_value, bool)'
try:
return frame.is_true(frame.eval(source,
__exprinfo_value=self.result))
except passthroughex:
raise
except:
return False
def eval(self, frame):
node = Interpretable(self.node)
node.eval(frame)
explanations = []
vars = {'__exprinfo_fn': node.result}
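# argument values are handed to frame.eval() under __exprinfo_* names,
# chosen to be unlikely to collide with names in the user's frame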
source = '__exprinfo_fn('
for a in self.args:
if isinstance(a, ast.Keyword):
keyword = a.name
a = a.expr
else:
keyword = None
a = Interpretable(a)
a.eval(frame)
argname = '__exprinfo_%d' % len(vars)
vars[argname] = a.result
if keyword is None:
source += argname + ','
explanations.append(a.explanation)
else:
source += '%s=%s,' % (keyword, argname)
explanations.append('%s=%s' % (keyword, a.explanation))
if self.star_args:
star_args = Interpretable(self.star_args)
star_args.eval(frame)
argname = '__exprinfo_star'
vars[argname] = star_args.result
source += '*' + argname + ','
explanations.append('*' + star_args.explanation)
if self.dstar_args:
dstar_args = Interpretable(self.dstar_args)
dstar_args.eval(frame)
argname = '__exprinfo_kwds'
vars[argname] = dstar_args.result
source += '**' + argname + ','
explanations.append('**' + dstar_args.explanation)
self.explanation = "%s(%s)" % (
node.explanation, ', '.join(explanations))
if source.endswith(','):
source = source[:-1]
source += ')'
try:
self.result = frame.eval(source, **vars)
except passthroughex:
raise
except:
raise Failure(self)
if not node.is_builtin(frame) or not self.is_bool(frame):
r = frame.repr(self.result)
self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
class Getattr(Interpretable):
__view__ = ast.Getattr
def eval(self, frame):
expr = Interpretable(self.expr)
expr.eval(frame)
source = '__exprinfo_expr.%s' % self.attrname
try:
self.result = frame.eval(source, __exprinfo_expr=expr.result)
except passthroughex:
raise
except:
raise Failure(self)
self.explanation = '%s.%s' % (expr.explanation, self.attrname)
# if the attribute comes from the instance, its value is interesting
source = ('hasattr(__exprinfo_expr, "__dict__") and '
'%r in __exprinfo_expr.__dict__' % self.attrname)
try:
from_instance = frame.is_true(
frame.eval(source, __exprinfo_expr=expr.result))
except passthroughex:
raise
except:
from_instance = True
if from_instance:
r = frame.repr(self.result)
self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
# == Re-interpretation of full statements ==
class Assert(Interpretable):
__view__ = ast.Assert
def run(self, frame):
test = Interpretable(self.test)
test.eval(frame)
# simplify 'assert False where False = ...'
if (test.explanation.startswith('False\n{False = ') and
test.explanation.endswith('\n}')):
test.explanation = test.explanation[15:-2]
# print the result as 'assert <explanation>'
self.result = test.result
self.explanation = 'assert ' + test.explanation
if not frame.is_true(test.result):
try:
raise BuiltinAssertionError
except passthroughex:
raise
except:
raise Failure(self)
class Assign(Interpretable):
__view__ = ast.Assign
def run(self, frame):
expr = Interpretable(self.expr)
expr.eval(frame)
self.result = expr.result
self.explanation = '... = ' + expr.explanation
# fall-back-run the rest of the assignment
ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr'))
mod = ast.Module(None, ast.Stmt([ass]))
mod.filename = '<run>'
co = pycodegen.ModuleCodeGenerator(mod).getCode()
try:
frame.exec_(co, __exprinfo_expr=expr.result)
except passthroughex:
raise
except:
raise Failure(self)
class Discard(Interpretable):
__view__ = ast.Discard
def run(self, frame):
expr = Interpretable(self.expr)
expr.eval(frame)
self.result = expr.result
self.explanation = expr.explanation
class Stmt(Interpretable):
__view__ = ast.Stmt
def run(self, frame):
for stmt in self.nodes:
stmt = Interpretable(stmt)
stmt.run(frame)
def report_failure(e):
explanation = e.node.nice_explanation()
if explanation:
explanation = ", in: " + explanation
else:
explanation = ""
sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation))
def check(s, frame=None):
if frame is None:
frame = sys._getframe(1)
frame = py.code.Frame(frame)
expr = parse(s, 'eval')
assert isinstance(expr, ast.Expression)
node = Interpretable(expr.node)
try:
node.eval(frame)
except passthroughex:
raise
except Failure:
e = sys.exc_info()[1]
report_failure(e)
else:
if not frame.is_true(node.result):
sys.stderr.write("assertion failed: %s\n" % node.nice_explanation())
###########################################################
# API / Entry points
###########################################################
def interpret(source, frame, should_fail=False):
module = Interpretable(parse(source, 'exec').node)
#print "got module", module
if isinstance(frame, py.std.types.FrameType):
frame = py.code.Frame(frame)
try:
module.run(frame)
except Failure:
e = sys.exc_info()[1]
return getfailure(e)
except passthroughex:
raise
except:
import traceback
traceback.print_exc()
if should_fail:
return ("(assertion failed, but when it was re-run for "
"printing intermediate values, it did not fail. Suggestions: "
"compute assert expression before the assert or use --nomagic)")
else:
return None
def getmsg(excinfo):
if isinstance(excinfo, tuple):
excinfo = py.code.ExceptionInfo(excinfo)
#frame, line = gettbline(tb)
#frame = py.code.Frame(frame)
#return interpret(line, frame)
tb = excinfo.traceback[-1]
source = str(tb.statement).strip()
x = interpret(source, tb.frame, should_fail=True)
if not isinstance(x, str):
raise TypeError("interpret returned non-string %r" % (x,))
return x
def getfailure(e):
explanation = e.node.nice_explanation()
if str(e.value):
lines = explanation.split('\n')
lines[0] += " << %s" % (e.value,)
explanation = '\n'.join(lines)
text = "%s: %s" % (e.exc.__name__, explanation)
if text.startswith('AssertionError: assert '):
text = text[16:]
return text
def run(s, frame=None):
if frame is None:
frame = sys._getframe(1)
frame = py.code.Frame(frame)
module = Interpretable(parse(s, 'exec').node)
try:
module.run(frame)
except Failure:
e = sys.exc_info()[1]
report_failure(e)
if __name__ == '__main__':
# example:
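# (most of the checks and runs below intentionally fail, to exercise the
# failure-reporting machinery)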
def f():
return 5
def g():
return 3
def h(x):
return 'never'
check("f() * g() == 5")
check("not f()")
check("not (f() and g() or 0)")
check("f() == g()")
i = 4
check("i == f()")
check("len(f()) == 0")
check("isinstance(2+3+4, float)")
run("x = i")
check("x == 5")
run("assert not f(), 'oops'")
run("a, b, c = 1, 2")
run("a, b, c = f()")
check("max([f(),g()]) == 4")
check("'hello'[g()] == 'h'")
run("'guk%d' % h(f())")
|
languagelab/Django-Currencies | refs/heads/master | currencies/views.py | 1 | from django.http import HttpResponseRedirect
from currencies.models import Currency
def set_currency(request):
if request.method == 'POST':
currency_code = request.POST.get('currency', None)
next = request.POST.get('next', None)
else:
currency_code = request.GET.get('currency', None)
next = request.GET.get('next', None)
if not next:
next = request.META.get('HTTP_REFERER', None)
if not next:
next = '/'
response = HttpResponseRedirect(next)
if currency_code:
if hasattr(request, 'session'):
request.session['currency'] = \
Currency.objects.get(code__exact=currency_code)
else:
response.set_cookie('currency', currency_code)
return response
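# Hedged usage sketch (assumed URL name, not part of this file): templates
# would typically POST to this view with a currency code and a redirect
# target, e.g.
#
# <form action="{% url set_currency %}" method="POST">
# {% csrf_token %}
# <input type="hidden" name="next" value="{{ request.path }}" />
# <select name="currency">...</select>
# <input type="submit" value="Set currency" />
# </form>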
|
zouzhberk/ambaridemo | refs/heads/master | demo-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs.py | 1 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management import *
import sys
import os
def hdfs(name=None):
import params
# On some OSes this directory may not exist, so create it before pushing files into it
Directory(params.limits_conf_dir,
recursive=True,
owner='root',
group='root'
)
File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
owner='root',
group='root',
mode=0644,
content=Template("hdfs.conf.j2")
)
if params.security_enabled:
tc_mode = 0644
tc_owner = "root"
else:
tc_mode = None
tc_owner = params.hdfs_user
if "hadoop-policy" in params.config['configurations']:
XmlConfig("hadoop-policy.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['hadoop-policy'],
configuration_attributes=params.config['configuration_attributes']['hadoop-policy'],
owner=params.hdfs_user,
group=params.user_group
)
XmlConfig("hdfs-site.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['hdfs-site'],
configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
owner=params.hdfs_user,
group=params.user_group
)
XmlConfig("core-site.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['core-site'],
configuration_attributes=params.config['configuration_attributes']['core-site'],
owner=params.hdfs_user,
group=params.user_group,
mode=0644
)
File(os.path.join(params.hadoop_conf_dir, 'slaves'),
owner=tc_owner,
content=Template("slaves.j2")
)
|
ic-hep/DIRAC | refs/heads/rel-v6r15 | ConfigurationSystem/Client/Helpers/__init__.py | 19 | # $HeadURL$
"""
DIRAC.ConfigurationSystem.Client.Helpers package
"""
__RCSID__ = "$Id$"
from DIRAC.ConfigurationSystem.Client.Helpers.Path import *
from DIRAC.ConfigurationSystem.Client.Helpers.CSGlobals import getCSExtensions, getInstalledExtensions, getVO
|
ThiefMaster/indico | refs/heads/master | indico/modules/events/abstracts/__init__.py | 3 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import render_template, session
from indico.core import signals
from indico.core.config import config
from indico.core.logger import Logger
from indico.core.permissions import ManagementPermission
from indico.modules.events.abstracts.clone import AbstractSettingsCloner
from indico.modules.events.abstracts.notifications import ContributionTypeCondition, StateCondition, TrackCondition
from indico.modules.events.features.base import EventFeature
from indico.modules.events.models.events import Event, EventType
from indico.modules.events.timetable.models.breaks import Break
from indico.modules.events.tracks.models.tracks import Track
from indico.util.i18n import _
from indico.util.placeholders import Placeholder
from indico.web.flask.templating import template_hook
from indico.web.flask.util import url_for
from indico.web.menu import SideMenuItem
logger = Logger.get('events.abstracts')
@signals.event.updated.connect
@signals.event.contribution_created.connect
@signals.event.contribution_updated.connect
@signals.event.contribution_deleted.connect
@signals.event.session_deleted.connect
@signals.event.session_updated.connect
@signals.event.person_updated.connect
@signals.event.times_changed.connect
def _clear_boa_cache(sender, obj=None, **kwargs):
from indico.modules.events.abstracts.util import clear_boa_cache
if isinstance(obj, Break):
# breaks do not show up in the BoA
return
event = (obj or sender).event
clear_boa_cache(event)
@signals.menu.items.connect_via('event-management-sidemenu')
def _extend_event_management_menu(sender, event, **kwargs):
if not event.can_manage(session.user) or not AbstractsFeature.is_allowed_for_event(event):
return
return SideMenuItem('abstracts', _('Call for Abstracts'), url_for('abstracts.management', event),
section='workflows', weight=30)
@signals.event.get_feature_definitions.connect
def _get_feature_definitions(sender, **kwargs):
return AbstractsFeature
@signals.event_management.get_cloners.connect
def _get_cloners(sender, **kwargs):
yield AbstractSettingsCloner
@signals.users.merged.connect
def _merge_users(target, source, **kwargs):
from indico.modules.events.abstracts.models.abstracts import Abstract
from indico.modules.events.abstracts.models.comments import AbstractComment
from indico.modules.events.abstracts.models.reviews import AbstractReview
from indico.modules.events.abstracts.settings import abstracts_settings
Abstract.query.filter_by(submitter_id=source.id).update({Abstract.submitter_id: target.id})
Abstract.query.filter_by(modified_by_id=source.id).update({Abstract.modified_by_id: target.id})
Abstract.query.filter_by(judge_id=source.id).update({Abstract.judge_id: target.id})
AbstractComment.query.filter_by(user_id=source.id).update({AbstractComment.user_id: target.id})
AbstractComment.query.filter_by(modified_by_id=source.id).update({AbstractComment.modified_by_id: target.id})
AbstractReview.query.filter_by(user_id=source.id).update({AbstractReview.user_id: target.id})
abstracts_settings.acls.merge_users(target, source)
@signals.get_conditions.connect_via('abstract-notifications')
def _get_abstract_notification_rules(sender, **kwargs):
yield StateCondition
yield TrackCondition
yield ContributionTypeCondition
class AbstractsFeature(EventFeature):
name = 'abstracts'
friendly_name = _('Call for Abstracts')
description = _('Gives event managers the opportunity to open a "Call for Abstracts" and use the abstract '
'reviewing workflow.')
@classmethod
def is_allowed_for_event(cls, event):
return event.type_ == EventType.conference
@signals.acl.get_management_permissions.connect_via(Event)
def _get_event_management_permissions(sender, **kwargs):
yield AbstractReviewerPermission
yield GlobalReviewPermission
@signals.acl.get_management_permissions.connect_via(Track)
def _get_track_management_permissions(sender, **kwargs):
yield ReviewPermission
class GlobalReviewPermission(ManagementPermission):
name = 'review_all_abstracts'
friendly_name = _('Review for all tracks')
description = _('Grants abstract reviewing rights to all tracks of the event.')
class ReviewPermission(ManagementPermission):
name = 'review'
friendly_name = _('Review')
description = _('Grants track reviewer rights in a track.')
user_selectable = True
color = 'orange'
default = True
class AbstractReviewerPermission(ManagementPermission):
name = 'abstract_reviewer'
friendly_name = _('Reviewer')
description = _('Grants abstract reviewing rights on an event.')
@signals.get_placeholders.connect_via('abstract-notification-email')
def _get_notification_placeholders(sender, **kwargs):
from indico.modules.events.abstracts import placeholders
for name in placeholders.__all__:
obj = getattr(placeholders, name)
if issubclass(obj, Placeholder):
yield obj
@signals.menu.items.connect_via('event-editing-sidemenu')
def _extend_editing_menu(sender, event, **kwargs):
if event.has_feature('abstracts'):
yield SideMenuItem('abstracts', _('Call for Abstracts'), url_for('abstracts.call_for_abstracts', event))
@signals.event.sidemenu.connect
def _extend_event_menu(sender, **kwargs):
from indico.modules.events.abstracts.util import has_user_tracks
from indico.modules.events.contributions import contribution_settings
from indico.modules.events.layout.util import MenuEntryData
def _boa_visible(event):
return (event.has_feature('abstracts') and contribution_settings.get(event, 'published')
and (config.LATEX_ENABLED or event.has_custom_boa))
def _reviewing_area_visible(event):
if not session.user or not event.has_feature('abstracts'):
return False
return has_user_tracks(event, session.user)
yield MenuEntryData(title=_("Book of Abstracts"), name='abstracts_book', endpoint='abstracts.export_boa',
position=9, visible=_boa_visible, static_site=True)
yield MenuEntryData(title=_("Call for Abstracts"), name='call_for_abstracts',
endpoint='abstracts.call_for_abstracts', position=2,
visible=lambda event: event.has_feature('abstracts'))
yield MenuEntryData(title=_("Reviewing Area"), name='abstract_reviewing_area',
endpoint='abstracts.display_reviewable_tracks', position=0, parent='call_for_abstracts',
visible=_reviewing_area_visible)
@template_hook('conference-home-info')
def _inject_cfa_announcement(event, **kwargs):
if (event.has_feature('abstracts') and
(event.cfa.is_open or (session.user and event.cfa.can_submit_abstracts(session.user)))):
return render_template('events/abstracts/display/conference_home.html', event=event)
|
JFriel/honours_project | refs/heads/master | venv/lib/python2.7/site-packages/setuptools/command/install_scripts.py | 285 | from distutils.command.install_scripts import install_scripts \
as _install_scripts
from pkg_resources import Distribution, PathMetadata, ensure_directory
import os
from distutils import log
class install_scripts(_install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
_install_scripts.initialize_options(self)
self.no_ep = False
def run(self):
from setuptools.command.easy_install import get_script_args
from setuptools.command.easy_install import sys_executable
self.run_command("egg_info")
if self.distribution.scripts:
_install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
executable = getattr(bs_cmd,'executable',sys_executable)
is_wininst = getattr(
self.get_finalized_command("bdist_wininst"), '_is_running', False
)
for args in get_script_args(dist, executable, is_wininst):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target,"w"+mode)
f.write(contents)
f.close()
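# 0x1FF is 0777; subtracting the umask captured above yields the same
# permission bits a shell would grant a newly created executable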
chmod(target, 0x1FF-mask) # 0777
|
bobsilverberg/oneanddone-sugardough | refs/heads/master | oneanddone/tasks/tests/test_views.py | 1 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.core.urlresolvers import reverse
from mock import Mock, patch
from nose.tools import eq_, ok_, assert_dict_contains_subset
from tower import ugettext as _
from oneanddone.base.tests import TestCase
from oneanddone.tasks import views
from oneanddone.tasks.models import TaskAttempt
from oneanddone.tasks.tests import (TaskAttemptFactory, TaskFactory,
TaskKeywordFactory)
from oneanddone.tasks.tests.test_forms import get_filled_taskform
from oneanddone.users.tests import UserFactory
class CreateTaskViewTests(TestCase):
def setUp(self):
self.view = views.CreateTaskView()
def test_get_context_data_returns_add_action_and_url(self):
"""
The 'Add' action and correct cancel_url
should be included in the context data.
"""
with patch('oneanddone.tasks.views.generic.CreateView.get_context_data') as get_context_data:
get_context_data.return_value = {}
ctx = self.view.get_context_data()
eq_(ctx['action'], 'Add')
eq_(ctx['cancel_url'], reverse('tasks.list'))
def test_get_form_kwargs_sets_initial_owner_to_current_user(self):
"""
The initial owner for the form should be set to the current user.
"""
user = UserFactory.create()
self.view.request = Mock(user=user)
self.view.kwargs = {}
with patch('oneanddone.tasks.views.generic.CreateView.get_form_kwargs') as get_form_kwargs:
get_form_kwargs.return_value = {'initial': {}}
kwargs = self.view.get_form_kwargs()
eq_(kwargs['initial']['owner'], user)
def test_get_form_kwargs_populates_form_with_data_to_be_cloned(self):
"""
When accessed via the tasks.clone url, the view displays a form
whose initial data is that of the task being cloned, except for
the 'name' field, which should be prefixed with 'Copy of '
"""
user = UserFactory.create()
original_task = TaskFactory.create()
TaskKeywordFactory.create_batch(3, task=original_task)
original_data = get_filled_taskform(original_task).data
self.view.kwargs = {'clone': original_task.pk}
self.view.request = Mock(user=user)
with patch('oneanddone.tasks.views.generic.CreateView.get_form_kwargs') as get_form_kwargs:
get_form_kwargs.return_value = {'initial': {}}
initial = self.view.get_form_kwargs()['initial']
eq_(initial['keywords'], original_task.keywords_list)
eq_(initial['name'], ' '.join(['Copy of', original_task.name]))
del original_data['name']
assert_dict_contains_subset(original_data, initial)
class RandomTasksViewTests(TestCase):
def setUp(self):
self.view = views.RandomTasksView()
def test_get_context_data_returns_slice(self):
"""
A subset of 5 items should be returned when Random tasks are viewed.
"""
with patch('oneanddone.tasks.views.generic.ListView.get_context_data') as get_context_data:
get_context_data.return_value = {'object_list': [i for i in range(0, 10)]}
ctx = self.view.get_context_data()
eq_(len(ctx['random_task_list']), 5)
class StartTaskViewTests(TestCase):
def setUp(self):
self.view = views.StartTaskView()
self.task = TaskFactory.create()
self.view.get_object = Mock(return_value=self.task)
def test_post_create_attempt(self):
"""
If the task is available and the user doesn't have any tasks in
progress, create a new task attempt and redirect to its page.
"""
user = UserFactory.create()
self.view.request = Mock(user=user)
with patch('oneanddone.tasks.views.redirect') as redirect:
eq_(self.view.post(), redirect.return_value)
redirect.assert_called_with(self.task)
ok_(TaskAttempt.objects.filter(user=user, task=self.task, state=TaskAttempt.STARTED)
.exists())
def test_post_existing_attempts(self):
"""
If the user has an existing task attempt, redirect them to the
profile detail page.
"""
attempt = TaskAttemptFactory.create()
self.view.request = Mock(user=attempt.user)
with patch('oneanddone.tasks.views.redirect') as redirect:
eq_(self.view.post(), redirect.return_value)
redirect.assert_called_with('base.home')
ok_(not TaskAttempt.objects.filter(user=attempt.user, task=self.task).exists())
def test_post_unavailable_task(self):
"""
If the task is unavailable, redirect to the available tasks view
without creating an attempt.
"""
self.task.is_draft = True
self.task.save()
user = UserFactory.create()
self.view.request = Mock(user=user)
with patch('oneanddone.tasks.views.redirect') as redirect:
eq_(self.view.post(), redirect.return_value)
redirect.assert_called_with('tasks.available')
ok_(not TaskAttempt.objects.filter(user=user, task=self.task).exists())
class TaskDetailViewTests(TestCase):
def setUp(self):
self.view = views.TaskDetailView()
self.view.request = Mock()
self.view.object = Mock()
self.view.object.name = 'name'
def test_get_context_data_authenticated(self):
"""
If the current user is authenticated, fetch their attempt for
the current task using get_object_or_none.
"""
self.view.request.user.is_authenticated.return_value = True
get_object_patch = patch('oneanddone.tasks.views.get_object_or_none')
context_patch = patch('oneanddone.tasks.views.generic.DetailView.get_context_data')
with get_object_patch as get_object_or_none, context_patch as get_context_data:
get_context_data.return_value = {}
ctx = self.view.get_context_data()
eq_(ctx['attempt'], get_object_or_none.return_value)
get_object_or_none.assert_called_with(TaskAttempt, user=self.view.request.user,
task=self.view.object, state=TaskAttempt.STARTED)
def test_get_context_data_available_task(self):
"""
If the task is available, correct values should be added to the context.
"""
self.view.request.user.is_authenticated.return_value = False
self.view.object.is_taken = False
self.view.object.is_completed = False
with patch('oneanddone.tasks.views.generic.DetailView.get_context_data') as get_context_data:
get_context_data.return_value = {}
ctx = self.view.get_context_data()
eq_(ctx['gs_button_label'], _('Get Started'))
eq_(ctx['gs_button_disabled'], False)
def test_get_context_data_completed_task(self):
"""
If the task is completed, correct values should be added to the context.
"""
self.view.request.user.is_authenticated.return_value = False
self.view.object.is_taken = False
self.view.object.is_completed = True
with patch('oneanddone.tasks.views.generic.DetailView.get_context_data') as get_context_data:
get_context_data.return_value = {}
ctx = self.view.get_context_data()
eq_(ctx['gs_button_label'], _('Completed'))
eq_(ctx['gs_button_disabled'], True)
def test_get_context_data_not_authenticated(self):
"""
If the current user isn't authenticated, don't include an
attempt in the context.
"""
self.view.request.user.is_authenticated.return_value = False
with patch('oneanddone.tasks.views.generic.DetailView.get_context_data') as get_context_data:
get_context_data.return_value = {}
ctx = self.view.get_context_data()
ok_('attempt' not in ctx)
def test_get_context_data_taken_task(self):
"""
If the task is taken, correct values should be added to the context.
"""
self.view.request.user.is_authenticated.return_value = False
self.view.object.is_taken = True
with patch('oneanddone.tasks.views.generic.DetailView.get_context_data') as get_context_data:
get_context_data.return_value = {}
ctx = self.view.get_context_data()
eq_(ctx['gs_button_label'], _('Taken'))
eq_(ctx['gs_button_disabled'], True)
class UpdateTaskViewTests(TestCase):
def setUp(self):
self.view = views.UpdateTaskView()
def test_get_context_data_returns_update_action_and_url(self):
"""
The 'Update' action and correct cancel_url
should be included in the context data.
"""
get_object_patch = patch('oneanddone.tasks.views.generic.UpdateView.get_object')
context_patch = patch('oneanddone.tasks.views.generic.UpdateView.get_context_data')
with get_object_patch as get_object, context_patch as get_context_data:
get_object.return_value = Mock(id=1)
get_context_data.return_value = {}
ctx = self.view.get_context_data()
eq_(ctx['action'], 'Update')
eq_(ctx['cancel_url'], reverse('tasks.detail', args=[1]))
|
felixfontein/ansible | refs/heads/devel | test/support/windows-integration/plugins/modules/win_file.py | 52 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_file
version_added: "1.9.2"
short_description: Creates, touches or removes files or directories
description:
- Creates (empty) files, updates file modification stamps of existing files,
and can create or remove directories.
- Unlike M(file), does not modify ownership, permissions or manipulate links.
- For non-Windows targets, use the M(file) module instead.
options:
path:
description:
- Path to the file being managed.
required: yes
type: path
aliases: [ dest, name ]
state:
description:
- If C(directory), all immediate subdirectories will be created if they
do not exist.
- If C(file), the file will NOT be created if it does not exist, see the M(copy)
or M(template) module if you want that behavior.
- If C(absent), directories will be recursively deleted, and files will be removed.
- If C(touch), an empty file will be created if the C(path) does not
exist, while an existing file or directory will receive updated file access and
modification times (similar to the way C(touch) works from the command line).
type: str
choices: [ absent, directory, file, touch ]
seealso:
- module: file
- module: win_acl
- module: win_acl_inheritance
- module: win_owner
- module: win_stat
author:
- Jon Hawkesworth (@jhawkesworth)
'''
EXAMPLES = r'''
- name: Touch a file (creates if not present, updates modification time if present)
win_file:
path: C:\Temp\foo.conf
state: touch
- name: Remove a file, if present
win_file:
path: C:\Temp\foo.conf
state: absent
- name: Create directory structure
win_file:
path: C:\Temp\folder\subfolder
state: directory
- name: Remove directory structure
win_file:
path: C:\Temp
state: absent
'''
|
yephper/django | refs/heads/master | tests/template_tests/filter_tests/test_center.py | 1 | from django.template.defaultfilters import center
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class CenterTests(SimpleTestCase):
@setup({'center01':
'{% autoescape off %}.{{ a|center:"5" }}. .{{ b|center:"5" }}.{% endautoescape %}'})
def test_center01(self):
output = self.engine.render_to_string('center01', {"a": "a&b", "b": mark_safe("a&b")})
self.assertEqual(output, ". a&b . . a&b .")
@setup({'center02': '.{{ a|center:"5" }}. .{{ b|center:"5" }}.'})
def test_center02(self):
output = self.engine.render_to_string('center02', {"a": "a&b", "b": mark_safe("a&b")})
self.assertEqual(output, ". a&b . . a&b .")
class FunctionTests(SimpleTestCase):
def test_center(self):
self.assertEqual(center('test', 6), ' test ')
def test_non_string_input(self):
self.assertEqual(center(123, 5), ' 123 ')
|
havard024/prego | refs/heads/master | venv/bin/rst2man.py | 1 | #!/var/www/django/treeio/venv/bin/python
# Author:
# Contact: grubert@users.sf.net
# Copyright: This module has been placed in the public domain.
"""
man.py
======
This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
from docutils.writers import manpage
description = ("Generates plain unix manual documents. " + default_description)
publish_cmdline(writer=manpage.Writer(), description=description)
|
PennartLoettring/Poettrix | refs/heads/master | rootfs/usr/lib/python3.4/idlelib/textView.py | 12 | """Simple text browser for IDLE
"""
from tkinter import *
import tkinter.messagebox as tkMessageBox
class TextViewer(Toplevel):
"""A simple text viewer dialog for IDLE
"""
def __init__(self, parent, title, text, modal=True):
"""Show the given text in a scrollable window with a 'close' button
"""
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
self.geometry("=%dx%d+%d+%d" % (625, 500,
parent.winfo_rootx() + 10,
parent.winfo_rooty() + 10))
#elguavas - config placeholders until config stuff is completed
self.bg = '#ffffff'
self.fg = '#000000'
self.CreateWidgets()
self.title(title)
self.protocol("WM_DELETE_WINDOW", self.Ok)
self.parent = parent
self.textView.focus_set()
#key bindings for this dialog
self.bind('<Return>',self.Ok) #dismiss dialog
self.bind('<Escape>',self.Ok) #dismiss dialog
self.textView.insert(0.0, text)
self.textView.config(state=DISABLED)
if modal:
self.transient(parent)
self.grab_set()
self.wait_window()
def CreateWidgets(self):
frameText = Frame(self, relief=SUNKEN, height=700)
frameButtons = Frame(self)
self.buttonOk = Button(frameButtons, text='Close',
command=self.Ok, takefocus=FALSE)
self.scrollbarView = Scrollbar(frameText, orient=VERTICAL,
takefocus=FALSE, highlightthickness=0)
self.textView = Text(frameText, wrap=WORD, highlightthickness=0,
fg=self.fg, bg=self.bg)
self.scrollbarView.config(command=self.textView.yview)
self.textView.config(yscrollcommand=self.scrollbarView.set)
self.buttonOk.pack()
self.scrollbarView.pack(side=RIGHT,fill=Y)
self.textView.pack(side=LEFT,expand=TRUE,fill=BOTH)
frameButtons.pack(side=BOTTOM,fill=X)
frameText.pack(side=TOP,expand=TRUE,fill=BOTH)
def Ok(self, event=None):
self.destroy()
def view_text(parent, title, text, modal=True):
return TextViewer(parent, title, text, modal)
def view_file(parent, title, filename, encoding=None, modal=True):
try:
with open(filename, 'r', encoding=encoding) as file:
contents = file.read()
except OSError:
tkMessageBox.showerror(title='File Load Error',
message='Unable to load file %r .' % filename,
parent=parent)
else:
return view_text(parent, title, contents, modal)
if __name__ == '__main__':
#test the dialog
root=Tk()
root.title('textView test')
filename = './textView.py'
with open(filename, 'r') as f:
text = f.read()
btn1 = Button(root, text='view_text',
command=lambda:view_text(root, 'view_text', text))
btn1.pack(side=LEFT)
btn2 = Button(root, text='view_file',
command=lambda:view_file(root, 'view_file', filename))
btn2.pack(side=LEFT)
btn3 = Button(root, text='nonmodal view_text',
command=lambda:view_text(root, 'nonmodal view_text', text,
modal=False))
btn3.pack(side=LEFT)
close = Button(root, text='Close', command=root.destroy)
close.pack(side=RIGHT)
root.mainloop()
|
sobercoder/gem5 | refs/heads/master | configs/ruby/GPU_VIPER.py | 12 | #
# Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Lisa Hsu
#
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
from Ruby import send_evicts
from topologies.Cluster import Cluster
from topologies.Crossbar import Crossbar
class CntrlBase:
_seqs = 0
@classmethod
def seqCount(cls):
# Use SeqCount not class since we need global count
CntrlBase._seqs += 1
return CntrlBase._seqs - 1
_cntrls = 0
@classmethod
def cntrlCount(cls):
# Use CntlCount not class since we need global count
CntrlBase._cntrls += 1
return CntrlBase._cntrls - 1
_version = 0
@classmethod
def versionCount(cls):
cls._version += 1 # Use count for this particular type
return cls._version - 1
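# Note: seqCount() and cntrlCount() increment counters on CntrlBase itself,
# so those ids are global across all controller types, while versionCount()
# increments a counter on each concrete subclass.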
class L1Cache(RubyCache):
resourceStalls = False
dataArrayBanks = 2
tagArrayBanks = 2
dataAccessLatency = 1
tagAccessLatency = 1
def create(self, size, assoc, options):
self.size = MemorySize(size)
self.assoc = assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class L2Cache(RubyCache):
resourceStalls = False
assoc = 16
dataArrayBanks = 16
tagArrayBanks = 16
def create(self, size, assoc, options):
self.size = MemorySize(size)
self.assoc = assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class CPCntrl(CorePair_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1Icache = L1Cache()
self.L1Icache.create(options.l1i_size, options.l1i_assoc, options)
self.L1D0cache = L1Cache()
self.L1D0cache.create(options.l1d_size, options.l1d_assoc, options)
self.L1D1cache = L1Cache()
self.L1D1cache.create(options.l1d_size, options.l1d_assoc, options)
self.L2cache = L2Cache()
self.L2cache.create(options.l2_size, options.l2_assoc, options)
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1Icache
self.sequencer.dcache = self.L1D0cache
self.sequencer.ruby_system = ruby_system
self.sequencer.coreid = 0
self.sequencer.is_cpu_sequencer = True
self.sequencer1 = RubySequencer()
self.sequencer1.version = self.seqCount()
self.sequencer1.icache = self.L1Icache
self.sequencer1.dcache = self.L1D1cache
self.sequencer1.ruby_system = ruby_system
self.sequencer1.coreid = 1
self.sequencer1.is_cpu_sequencer = True
self.issue_latency = options.cpu_to_dir_latency
self.send_evictions = send_evicts(options)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class TCPCache(RubyCache):
size = "16kB"
assoc = 16
dataArrayBanks = 16 #number of data banks
tagArrayBanks = 16 #number of tag banks
dataAccessLatency = 4
tagAccessLatency = 1
def create(self, options):
self.size = MemorySize(options.tcp_size)
self.assoc = options.tcp_assoc
self.resourceStalls = options.no_tcc_resource_stalls
self.replacement_policy = PseudoLRUReplacementPolicy()
class TCPCntrl(TCP_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = TCPCache(tagAccessLatency = options.TCP_latency,
dataAccessLatency = options.TCP_latency)
self.L1cache.resourceStalls = options.no_resource_stalls
self.L1cache.create(options)
self.issue_latency = 1
self.coalescer = VIPERCoalescer()
self.coalescer.version = self.seqCount()
self.coalescer.icache = self.L1cache
self.coalescer.dcache = self.L1cache
self.coalescer.ruby_system = ruby_system
self.coalescer.support_inst_reqs = False
self.coalescer.is_cpu_sequencer = False
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.is_cpu_sequencer = True
self.use_seq_not_coal = False
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def createCP(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = TCPCache(tagAccessLatency = options.TCP_latency,
dataAccessLatency = options.TCP_latency)
self.L1cache.resourceStalls = options.no_resource_stalls
self.L1cache.create(options)
self.issue_latency = 1
self.coalescer = VIPERCoalescer()
self.coalescer.version = self.seqCount()
self.coalescer.icache = self.L1cache
self.coalescer.dcache = self.L1cache
self.coalescer.ruby_system = ruby_system
self.coalescer.support_inst_reqs = False
self.coalescer.is_cpu_sequencer = False
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.is_cpu_sequencer = True
self.use_seq_not_coal = True
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class SQCCache(RubyCache):
dataArrayBanks = 8
tagArrayBanks = 8
dataAccessLatency = 1
tagAccessLatency = 1
def create(self, options):
self.size = MemorySize(options.sqc_size)
self.assoc = options.sqc_assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class SQCCntrl(SQC_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = SQCCache()
self.L1cache.create(options)
self.L1cache.resourceStalls = options.no_resource_stalls
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.support_data_reqs = False
self.sequencer.is_cpu_sequencer = False
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class TCC(RubyCache):
size = MemorySize("256kB")
assoc = 16
dataAccessLatency = 8
tagAccessLatency = 2
resourceStalls = True
def create(self, options):
self.assoc = options.tcc_assoc
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
s = options.num_compute_units
tcc_size = s * 128
tcc_size = str(tcc_size)+'kB'
self.size = MemorySize(tcc_size)
self.dataArrayBanks = 64
self.tagArrayBanks = 64
else:
self.size = MemorySize(options.tcc_size)
self.dataArrayBanks = 256 / options.num_tccs #number of data banks
self.tagArrayBanks = 256 / options.num_tccs #number of tag banks
self.size.value = self.size.value / options.num_tccs
if ((self.size.value / long(self.assoc)) < 128):
self.size.value = long(128 * self.assoc)
self.start_index_bit = math.log(options.cacheline_size, 2) + \
math.log(options.num_tccs, 2)
self.replacement_policy = PseudoLRUReplacementPolicy()
class TCCCntrl(TCC_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L2cache = TCC()
self.L2cache.create(options)
self.L2cache.resourceStalls = options.no_tcc_resource_stalls
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class L3Cache(RubyCache):
dataArrayBanks = 16
tagArrayBanks = 16
def create(self, options, ruby_system, system):
self.size = MemorySize(options.l3_size)
self.size.value /= options.num_dirs
self.assoc = options.l3_assoc
self.dataArrayBanks /= options.num_dirs
self.tagArrayBanks /= options.num_dirs
self.dataAccessLatency = options.l3_data_latency
self.tagAccessLatency = options.l3_tag_latency
self.resourceStalls = False
self.replacement_policy = PseudoLRUReplacementPolicy()
class L3Cntrl(L3Cache_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L3cache = L3Cache()
self.L3cache.create(options, ruby_system, system)
self.l3_response_latency = max(self.L3cache.dataAccessLatency, self.L3cache.tagAccessLatency)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
req_to_l3, probe_to_l3, resp_to_l3):
self.reqToDir = req_to_dir
self.respToDir = resp_to_dir
self.l3UnblockToDir = l3_unblock_to_dir
self.reqToL3 = req_to_l3
self.probeToL3 = probe_to_l3
self.respToL3 = resp_to_l3
class DirMem(RubyDirectoryMemory, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
phys_mem_size = AddrRange(options.mem_size).size()
mem_module_size = phys_mem_size / options.num_dirs
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
self.size = dir_size
class DirCntrl(Directory_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.response_latency = 30
self.directory = DirMem()
self.directory.create(options, ruby_system, system)
self.L3CacheMemory = L3Cache()
self.L3CacheMemory.create(options, ruby_system, system)
self.l3_hit_latency = max(self.L3CacheMemory.dataAccessLatency,
self.L3CacheMemory.tagAccessLatency)
self.number_of_TBEs = options.num_tbes
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
req_to_l3, probe_to_l3, resp_to_l3):
self.reqToDir = req_to_dir
self.respToDir = resp_to_dir
self.l3UnblockToDir = l3_unblock_to_dir
self.reqToL3 = req_to_l3
self.probeToL3 = probe_to_l3
self.respToL3 = resp_to_l3
def define_options(parser):
parser.add_option("--num-subcaches", type = "int", default = 4)
parser.add_option("--l3-data-latency", type = "int", default = 20)
parser.add_option("--l3-tag-latency", type = "int", default = 15)
parser.add_option("--cpu-to-dir-latency", type = "int", default = 120)
parser.add_option("--gpu-to-dir-latency", type = "int", default = 120)
parser.add_option("--no-resource-stalls", action = "store_false",
default = True)
parser.add_option("--no-tcc-resource-stalls", action = "store_false",
default = True)
parser.add_option("--use-L3-on-WT", action = "store_true", default = False)
parser.add_option("--num-tbes", type = "int", default = 256)
parser.add_option("--l2-latency", type = "int", default = 50) # load to use
parser.add_option("--num-tccs", type = "int", default = 1,
help = "number of TCC banks in the GPU")
parser.add_option("--sqc-size", type = 'string', default = '32kB',
help = "SQC cache size")
parser.add_option("--sqc-assoc", type = 'int', default = 8,
help = "SQC cache assoc")
parser.add_option("--WB_L1", action = "store_true", default = False,
help = "writeback L1")
parser.add_option("--WB_L2", action = "store_true", default = False,
help = "writeback L2")
parser.add_option("--TCP_latency", type = "int", default = 4,
help = "TCP latency")
parser.add_option("--TCC_latency", type = "int", default = 16,
help = "TCC latency")
parser.add_option("--tcc-size", type = 'string', default = '256kB',
help = "agregate tcc size")
parser.add_option("--tcc-assoc", type = 'int', default = 16,
help = "tcc assoc")
parser.add_option("--tcp-size", type = 'string', default = '16kB',
help = "tcp size")
parser.add_option("--tcp-assoc", type = 'int', default = 16,
help = "tcp assoc")
parser.add_option("--noL1", action = "store_true", default = False,
help = "bypassL1")
def create_system(options, full_system, system, dma_devices, ruby_system):
if buildEnv['PROTOCOL'] != 'GPU_VIPER':
panic("This script requires the GPU_VIPER protocol to be built.")
cpu_sequencers = []
#
# The ruby network creation expects the list of nodes in the system to be
# consistent with the NetDest list. Therefore the l1 controller nodes
# must be listed before the directory nodes and directory nodes before
# dma nodes, etc.
#
cp_cntrl_nodes = []
tcp_cntrl_nodes = []
sqc_cntrl_nodes = []
tcc_cntrl_nodes = []
dir_cntrl_nodes = []
l3_cntrl_nodes = []
#
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
# For an odd number of CPUs, still create the right number of controllers
TCC_bits = int(math.log(options.num_tccs, 2))
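# (this effectively assumes num_tccs is a power of two; the bits are used
# by the TCP/SQC/directory controllers to select a TCC bank)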
# This is the base crossbar that connects the L3s, Dirs, and cpu/gpu
# Clusters
crossbar_bw = None
mainCluster = None
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
#Assuming a 2GHz clock
crossbar_bw = 16 * options.num_compute_units * options.bw_scalor
mainCluster = Cluster(intBW=crossbar_bw)
else:
mainCluster = Cluster(intBW=8) # 16 GB/s
for i in xrange(options.num_dirs):
dir_cntrl = DirCntrl(noTCCdir = True, TCC_select_num_bits = TCC_bits)
dir_cntrl.create(options, ruby_system, system)
dir_cntrl.number_of_TBEs = options.num_tbes
dir_cntrl.useL3OnWT = options.use_L3_on_WT
# the number_of_TBEs is inclusive of TBEs below
# Connect the Directory controller to the ruby network
dir_cntrl.requestFromCores = MessageBuffer(ordered = True)
dir_cntrl.requestFromCores.slave = ruby_system.network.master
dir_cntrl.responseFromCores = MessageBuffer()
dir_cntrl.responseFromCores.slave = ruby_system.network.master
dir_cntrl.unblockFromCores = MessageBuffer()
dir_cntrl.unblockFromCores.slave = ruby_system.network.master
dir_cntrl.probeToCore = MessageBuffer()
dir_cntrl.probeToCore.master = ruby_system.network.slave
dir_cntrl.responseToCore = MessageBuffer()
dir_cntrl.responseToCore.master = ruby_system.network.slave
dir_cntrl.triggerQueue = MessageBuffer(ordered = True)
dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True)
dir_cntrl.responseFromMemory = MessageBuffer()
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
mainCluster.add(dir_cntrl)
cpuCluster = None
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
cpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
else:
cpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
for i in xrange((options.num_cpus + 1) / 2):
cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system)
exec("ruby_system.cp_cntrl%d = cp_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])
# Connect the CP controllers and the network
cp_cntrl.requestFromCore = MessageBuffer()
cp_cntrl.requestFromCore.master = ruby_system.network.slave
cp_cntrl.responseFromCore = MessageBuffer()
cp_cntrl.responseFromCore.master = ruby_system.network.slave
cp_cntrl.unblockFromCore = MessageBuffer()
cp_cntrl.unblockFromCore.master = ruby_system.network.slave
cp_cntrl.probeToCore = MessageBuffer()
cp_cntrl.probeToCore.slave = ruby_system.network.master
cp_cntrl.responseToCore = MessageBuffer()
cp_cntrl.responseToCore.slave = ruby_system.network.master
cp_cntrl.mandatoryQueue = MessageBuffer()
cp_cntrl.triggerQueue = MessageBuffer(ordered = True)
cpuCluster.add(cp_cntrl)
gpuCluster = None
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
else:
gpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
for i in xrange(options.num_compute_units):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
issue_latency = 1,
number_of_TBEs = 2560)
# TBEs set to max outstanding requests
tcp_cntrl.create(options, ruby_system, system)
tcp_cntrl.WB = options.WB_L1
tcp_cntrl.disableL1 = options.noL1
tcp_cntrl.L1cache.tagAccessLatency = options.TCP_latency
tcp_cntrl.L1cache.dataAccessLatency = options.TCP_latency
exec("ruby_system.tcp_cntrl%d = tcp_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(tcp_cntrl.coalescer)
tcp_cntrl_nodes.append(tcp_cntrl)
# Connect the TCP controller to the ruby network
tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
tcp_cntrl.unblockFromCore = MessageBuffer()
tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
tcp_cntrl.probeToTCP.slave = ruby_system.network.master
tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseToTCP.slave = ruby_system.network.master
tcp_cntrl.mandatoryQueue = MessageBuffer()
gpuCluster.add(tcp_cntrl)
for i in xrange(options.num_sqc):
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system)
exec("ruby_system.sqc_cntrl%d = sqc_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(sqc_cntrl.sequencer)
# Connect the SQC controller to the ruby network
sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True)
sqc_cntrl.requestFromSQC.master = ruby_system.network.slave
sqc_cntrl.probeToSQC = MessageBuffer(ordered = True)
sqc_cntrl.probeToSQC.slave = ruby_system.network.master
sqc_cntrl.responseToSQC = MessageBuffer(ordered = True)
sqc_cntrl.responseToSQC.slave = ruby_system.network.master
sqc_cntrl.mandatoryQueue = MessageBuffer()
# SQC also in GPU cluster
gpuCluster.add(sqc_cntrl)
for i in xrange(options.num_cp):
tcp_ID = options.num_compute_units + i
sqc_ID = options.num_sqc + i
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
issue_latency = 1,
number_of_TBEs = 2560)
# TBEs set to max outstanding requests
tcp_cntrl.createCP(options, ruby_system, system)
tcp_cntrl.WB = options.WB_L1
tcp_cntrl.disableL1 = options.noL1
tcp_cntrl.L1cache.tagAccessLatency = options.TCP_latency
tcp_cntrl.L1cache.dataAccessLatency = options.TCP_latency
exec("ruby_system.tcp_cntrl%d = tcp_cntrl" % tcp_ID)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(tcp_cntrl.sequencer)
tcp_cntrl_nodes.append(tcp_cntrl)
# Connect the CP (TCP) controllers to the ruby network
tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
tcp_cntrl.unblockFromCore = MessageBuffer(ordered = True)
tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
tcp_cntrl.probeToTCP.slave = ruby_system.network.master
tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseToTCP.slave = ruby_system.network.master
tcp_cntrl.mandatoryQueue = MessageBuffer()
gpuCluster.add(tcp_cntrl)
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system)
exec("ruby_system.sqc_cntrl%d = sqc_cntrl" % sqc_ID)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(sqc_cntrl.sequencer)
# SQC also in GPU cluster
gpuCluster.add(sqc_cntrl)
for i in xrange(options.num_tccs):
tcc_cntrl = TCCCntrl(l2_response_latency = options.TCC_latency)
tcc_cntrl.create(options, ruby_system, system)
tcc_cntrl.l2_request_latency = options.gpu_to_dir_latency
tcc_cntrl.l2_response_latency = options.TCC_latency
tcc_cntrl_nodes.append(tcc_cntrl)
tcc_cntrl.WB = options.WB_L2
tcc_cntrl.number_of_TBEs = 2560 * options.num_compute_units
# the number_of_TBEs is inclusive of TBEs below
# Connect the TCC controllers to the ruby network
tcc_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcc_cntrl.requestFromTCP.slave = ruby_system.network.master
tcc_cntrl.responseToCore = MessageBuffer(ordered = True)
tcc_cntrl.responseToCore.master = ruby_system.network.slave
tcc_cntrl.probeFromNB = MessageBuffer()
tcc_cntrl.probeFromNB.slave = ruby_system.network.master
tcc_cntrl.responseFromNB = MessageBuffer()
tcc_cntrl.responseFromNB.slave = ruby_system.network.master
tcc_cntrl.requestToNB = MessageBuffer(ordered = True)
tcc_cntrl.requestToNB.master = ruby_system.network.slave
tcc_cntrl.responseToNB = MessageBuffer()
tcc_cntrl.responseToNB.master = ruby_system.network.slave
tcc_cntrl.unblockToNB = MessageBuffer()
tcc_cntrl.unblockToNB.master = ruby_system.network.slave
tcc_cntrl.triggerQueue = MessageBuffer(ordered = True)
exec("ruby_system.tcc_cntrl%d = tcc_cntrl" % i)
# connect all of the wire buffers between the L3 and the directories
# TCC cntrls added to the GPU cluster
gpuCluster.add(tcc_cntrl)
# Assuming no DMA devices
assert(len(dma_devices) == 0)
# Add cpu/gpu clusters to main cluster
mainCluster.add(cpuCluster)
mainCluster.add(gpuCluster)
ruby_system.network.number_of_virtual_networks = 10
return (cpu_sequencers, dir_cntrl_nodes, mainCluster)
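# (editor's note, assumption) Callers in gem5's Ruby configuration glue are
# expected to unpack this tuple and hand the returned mainCluster -- with the
# CPU and GPU clusters nested inside it -- to the network topology builder;
# the exact call site lives outside this file and is not shown here.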
|
tboyce021/home-assistant | refs/heads/dev | homeassistant/components/mqtt_json/__init__.py | 36 | """The mqtt_json component."""
|
marcocaccin/scikit-learn | refs/heads/master | examples/feature_selection/feature_selection_pipeline.py | 342 | """
==================
Pipeline Anova SVM
==================
Simple usage of a Pipeline that successively runs univariate
feature selection with ANOVA and then trains a C-SVM on the selected features.
"""
print(__doc__)
from sklearn import svm
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import make_pipeline
# import some data to play with
X, y = samples_generator.make_classification(
n_features=20, n_informative=3, n_redundant=0, n_classes=4,
n_clusters_per_class=2)
# ANOVA SVM-C
# 1) anova filter, take 3 best ranked features
anova_filter = SelectKBest(f_regression, k=3)
# 2) svm
clf = svm.SVC(kernel='linear')
anova_svm = make_pipeline(anova_filter, clf)
anova_svm.fit(X, y)
anova_svm.predict(X)
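# A minimal extension (editor's sketch, not in the original example): report
# the training accuracy and inspect which of the 20 features the ANOVA step
# kept. Step names follow make_pipeline's lowercased-class-name convention.
print(anova_svm.score(X, y))
print(anova_svm.named_steps['selectkbest'].get_support())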
|
DestrinStorm/deep-learning | refs/heads/master | transfer-learning/tensorflow_vgg/test_vgg19.py | 152 | import numpy as np
import tensorflow as tf
from tensorflow_vgg import vgg19
from tensorflow_vgg import utils
img1 = utils.load_image("./test_data/tiger.jpeg")
img2 = utils.load_image("./test_data/puzzle.jpeg")
batch1 = img1.reshape((1, 224, 224, 3))
batch2 = img2.reshape((1, 224, 224, 3))
batch = np.concatenate((batch1, batch2), 0)
# with tf.Session(config=tf.ConfigProto(gpu_options=(tf.GPUOptions(per_process_gpu_memory_fraction=0.7)))) as sess:
with tf.device('/cpu:0'):
with tf.Session() as sess:
images = tf.placeholder("float", [2, 224, 224, 3])
feed_dict = {images: batch}
vgg = vgg19.Vgg19()
with tf.name_scope("content_vgg"):
vgg.build(images)
prob = sess.run(vgg.prob, feed_dict=feed_dict)
print(prob)
utils.print_prob(prob[0], './synset.txt')
utils.print_prob(prob[1], './synset.txt')
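# Editor's sketch (hypothetical addition, not in the original file): the
# top-5 class indices can also be read straight off the probability vector.
top5 = np.argsort(prob[0])[-5:][::-1]
print("top-5 class indices for the tiger image:", top5)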
|
DGrady/pandas | refs/heads/master | pandas/tests/series/test_analytics.py | 2 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from itertools import product
from distutils.version import LooseVersion
import pytest
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, Categorical, DataFrame, isna, notna,
bdate_range, date_range, _np_version_under1p10)
from pandas.core.index import MultiIndex
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.indexes.timedeltas import Timedelta
import pandas.core.config as cf
import pandas.core.nanops as nanops
from pandas.compat import lrange, range, is_platform_windows
from pandas import compat
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, assert_index_equal)
import pandas.util.testing as tm
from .common import TestData
skip_if_bottleneck_on_windows = (is_platform_windows() and
nanops._USE_BOTTLENECK)
class TestSeriesAnalytics(TestData):
def test_sum_zero(self):
arr = np.array([])
assert nanops.nansum(arr) == 0
arr = np.empty((10, 0))
assert (nanops.nansum(arr, axis=1) == 0).all()
# GH #844
s = Series([], index=[])
assert s.sum() == 0
df = DataFrame(np.empty((10, 0)))
assert (df.sum(1) == 0).all()
def test_nansum_buglet(self):
s = Series([1.0, np.nan], index=[0, 1])
result = np.nansum(s)
assert_almost_equal(result, 1)
def test_overflow(self):
# GH 6915
# overflowing on the smaller int dtypes
for dtype in ['int32', 'int64']:
v = np.arange(5000000, dtype=dtype)
s = Series(v)
# no bottleneck
result = s.sum(skipna=False)
assert int(result) == v.sum(dtype='int64')
result = s.min(skipna=False)
assert int(result) == 0
result = s.max(skipna=False)
assert int(result) == v[-1]
for dtype in ['float32', 'float64']:
v = np.arange(5000000, dtype=dtype)
s = Series(v)
# no bottleneck
result = s.sum(skipna=False)
assert result == v.sum(dtype=dtype)
result = s.min(skipna=False)
assert np.allclose(float(result), 0.0)
result = s.max(skipna=False)
assert np.allclose(float(result), v[-1])
@pytest.mark.xfail(
skip_if_bottleneck_on_windows,
reason="buggy bottleneck with sum overflow on windows")
def test_overflow_with_bottleneck(self):
# GH 6915
# overflowing on the smaller int dtypes
for dtype in ['int32', 'int64']:
v = np.arange(5000000, dtype=dtype)
s = Series(v)
# use bottleneck if available
result = s.sum()
assert int(result) == v.sum(dtype='int64')
result = s.min()
assert int(result) == 0
result = s.max()
assert int(result) == v[-1]
for dtype in ['float32', 'float64']:
v = np.arange(5000000, dtype=dtype)
s = Series(v)
# use bottleneck if available
result = s.sum()
assert result == v.sum(dtype=dtype)
result = s.min()
assert np.allclose(float(result), 0.0)
result = s.max()
assert np.allclose(float(result), v[-1])
@pytest.mark.xfail(
skip_if_bottleneck_on_windows,
reason="buggy bottleneck with sum overflow on windows")
def test_sum(self):
self._check_stat_op('sum', np.sum, check_allna=True)
def test_sum_inf(self):
import pandas.core.nanops as nanops
s = Series(np.random.randn(10))
s2 = s.copy()
s[5:8] = np.inf
s2[5:8] = np.nan
assert np.isinf(s.sum())
arr = np.random.randn(100, 100).astype('f4')
arr[:, 2] = np.inf
with cf.option_context("mode.use_inf_as_na", True):
assert_almost_equal(s.sum(), s2.sum())
res = nanops.nansum(arr, axis=1)
assert np.isinf(res).all()
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_median(self):
self._check_stat_op('median', np.median)
# test with integers, test failure
int_ts = Series(np.ones(10, dtype=int), index=lrange(10))
tm.assert_almost_equal(np.median(int_ts), int_ts.median())
def test_mode(self):
# No mode should be found.
exp = Series([], dtype=np.float64)
tm.assert_series_equal(Series([]).mode(), exp)
exp = Series([1], dtype=np.int64)
tm.assert_series_equal(Series([1]).mode(), exp)
exp = Series(['a', 'b', 'c'], dtype=np.object)
tm.assert_series_equal(Series(['a', 'b', 'c']).mode(), exp)
# Test numerical data types.
exp_single = [1]
data_single = [1] * 5 + [2] * 3
exp_multi = [1, 3]
data_multi = [1] * 5 + [2] * 3 + [3] * 5
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
s = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_series_equal(s.mode(), exp)
s = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_series_equal(s.mode(), exp)
# Test string and object types.
exp = ['b']
data = ['a'] * 2 + ['b'] * 3
s = Series(data, dtype='c')
exp = Series(exp, dtype='c')
tm.assert_series_equal(s.mode(), exp)
exp = ['bar']
data = ['foo'] * 2 + ['bar'] * 3
for dt in [str, object]:
s = Series(data, dtype=dt)
exp = Series(exp, dtype=dt)
tm.assert_series_equal(s.mode(), exp)
# Test datetime types.
exp = Series(['1900-05-03', '2011-01-03',
'2013-01-02'], dtype='M8[ns]')
s = Series(['2011-01-03', '2013-01-02',
'1900-05-03'], dtype='M8[ns]')
tm.assert_series_equal(s.mode(), exp)
exp = Series(['2011-01-03', '2013-01-02'], dtype='M8[ns]')
s = Series(['2011-01-03', '2013-01-02', '1900-05-03',
'2011-01-03', '2013-01-02'], dtype='M8[ns]')
tm.assert_series_equal(s.mode(), exp)
# gh-5986: Test timedelta types.
exp = Series(['-1 days', '0 days', '1 days'], dtype='timedelta64[ns]')
s = Series(['1 days', '-1 days', '0 days'],
dtype='timedelta64[ns]')
tm.assert_series_equal(s.mode(), exp)
exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]')
s = Series(['1 day', '1 day', '-1 day', '-1 day 2 min',
'2 min', '2 min'], dtype='timedelta64[ns]')
tm.assert_series_equal(s.mode(), exp)
# Test mixed dtype.
exp = Series(['foo'])
s = Series([1, 'foo', 'foo'])
tm.assert_series_equal(s.mode(), exp)
# Test for uint64 overflow.
exp = Series([2**63], dtype=np.uint64)
s = Series([1, 2**63, 2**63], dtype=np.uint64)
tm.assert_series_equal(s.mode(), exp)
exp = Series([1, 2**63], dtype=np.uint64)
s = Series([1, 2**63], dtype=np.uint64)
tm.assert_series_equal(s.mode(), exp)
# Test category dtype.
c = Categorical([1, 2])
exp = Categorical([1, 2], categories=[1, 2])
exp = Series(exp, dtype='category')
tm.assert_series_equal(Series(c).mode(), exp)
c = Categorical([1, 'a', 'a'])
exp = Categorical(['a'], categories=[1, 'a'])
exp = Series(exp, dtype='category')
tm.assert_series_equal(Series(c).mode(), exp)
c = Categorical([1, 1, 2, 3, 3])
exp = Categorical([1, 3], categories=[1, 2, 3])
exp = Series(exp, dtype='category')
tm.assert_series_equal(Series(c).mode(), exp)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_min(self):
self._check_stat_op('min', np.min, check_objects=True)
def test_max(self):
self._check_stat_op('max', np.max, check_objects=True)
def test_var_std(self):
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
result = self.ts.std(ddof=4)
expected = np.std(self.ts.values, ddof=4)
assert_almost_equal(result, expected)
result = self.ts.var(ddof=4)
expected = np.var(self.ts.values, ddof=4)
assert_almost_equal(result, expected)
# 1 - element series with ddof=1
s = self.ts.iloc[[0]]
result = s.var(ddof=1)
assert isna(result)
result = s.std(ddof=1)
assert isna(result)
def test_sem(self):
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
result = self.ts.sem(ddof=4)
expected = np.std(self.ts.values,
ddof=4) / np.sqrt(len(self.ts.values))
assert_almost_equal(result, expected)
# 1 - element series with ddof=1
s = self.ts.iloc[[0]]
result = s.sem(ddof=1)
assert isna(result)
def test_skew(self):
tm._skip_if_no_scipy()
from scipy.stats import skew
alt = lambda x: skew(x, bias=False)
self._check_stat_op('skew', alt)
# test corner cases: skew() returns NaN unless there are at least 3
# values
min_N = 3
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
assert np.isnan(s.skew())
assert np.isnan(df.skew()).all()
else:
assert 0 == s.skew()
assert (df.skew() == 0).all()
def test_kurt(self):
tm._skip_if_no_scipy()
from scipy.stats import kurtosis
alt = lambda x: kurtosis(x, bias=False)
self._check_stat_op('kurt', alt)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
tm.assert_almost_equal(s.kurt(), s.kurt(level=0)['bar'])
# test corner cases: kurt() returns NaN unless there are at least 4
# values
min_N = 4
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
assert np.isnan(s.kurt())
assert np.isnan(df.kurt()).all()
else:
assert 0 == s.kurt()
assert (df.kurt() == 0).all()
def test_describe(self):
s = Series([0, 1, 2, 3, 4], name='int_data')
result = s.describe()
expected = Series([5, 2, s.std(), 0, 1, 2, 3, 4],
name='int_data',
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_series_equal(result, expected)
s = Series([True, True, False, False, False], name='bool_data')
result = s.describe()
expected = Series([5, 2, False, 3], name='bool_data',
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
s = Series(['a', 'a', 'b', 'c', 'd'], name='str_data')
result = s.describe()
expected = Series([5, 4, 'a', 2], name='str_data',
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
def test_argsort(self):
self._check_accum_op('argsort', check_dtype=False)
argsorted = self.ts.argsort()
assert issubclass(argsorted.dtype.type, np.integer)
# GH 2967 (introduced bug in 0.11-dev I think)
s = Series([Timestamp('201301%02d' % (i + 1)) for i in range(5)])
assert s.dtype == 'datetime64[ns]'
shifted = s.shift(-1)
assert shifted.dtype == 'datetime64[ns]'
assert isna(shifted[4])
result = s.argsort()
expected = Series(lrange(5), dtype='int64')
assert_series_equal(result, expected)
result = shifted.argsort()
expected = Series(lrange(4) + [-1], dtype='int64')
assert_series_equal(result, expected)
def test_argsort_stable(self):
s = Series(np.random.randint(0, 100, size=10000))
mindexer = s.argsort(kind='mergesort')
qindexer = s.argsort()
mexpected = np.argsort(s.values, kind='mergesort')
qexpected = np.argsort(s.values, kind='quicksort')
tm.assert_series_equal(mindexer, Series(mexpected),
check_dtype=False)
tm.assert_series_equal(qindexer, Series(qexpected),
check_dtype=False)
pytest.raises(AssertionError, tm.assert_numpy_array_equal,
qindexer, mindexer)
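# (editor's note) mergesort is stable while quicksort is not, so with this
# many tied keys the two argsort indexers are expected to differ somewhere,
# which is what the raised AssertionError above verifies.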
def test_cumsum(self):
self._check_accum_op('cumsum')
def test_cumprod(self):
self._check_accum_op('cumprod')
def test_cummin(self):
tm.assert_numpy_array_equal(self.ts.cummin().values,
np.minimum.accumulate(np.array(self.ts)))
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.cummin()[1::2]
expected = np.minimum.accumulate(ts.valid())
tm.assert_series_equal(result, expected)
def test_cummax(self):
tm.assert_numpy_array_equal(self.ts.cummax().values,
np.maximum.accumulate(np.array(self.ts)))
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.cummax()[1::2]
expected = np.maximum.accumulate(ts.valid())
tm.assert_series_equal(result, expected)
def test_cummin_datetime64(self):
s = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT', '2000-1-1',
'NaT', '2000-1-3']))
expected = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT',
'2000-1-1', 'NaT', '2000-1-1']))
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', '2000-1-2', '2000-1-1', '2000-1-1', '2000-1-1'
]))
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummax_datetime64(self):
s = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT', '2000-1-1',
'NaT', '2000-1-3']))
expected = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT',
'2000-1-2', 'NaT', '2000-1-3']))
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-3'
]))
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummin_timedelta64(self):
s = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'1 min',
'NaT',
'3 min', ]))
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'1 min',
'NaT',
'1 min', ]))
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'2 min',
'1 min',
'1 min',
'1 min', ]))
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummax_timedelta64(self):
s = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'1 min',
'NaT',
'3 min', ]))
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'2 min',
'NaT',
'3 min', ]))
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'2 min',
'2 min',
'2 min',
'3 min', ]))
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_npdiff(self):
pytest.skip("skipping due to Series no longer being an "
"ndarray")
# no longer works as the return type of np.diff is now np.ndarray
s = Series(np.arange(5))
r = np.diff(s)
assert_series_equal(Series([nan, 0, 0, 0, nan]), r)
def _check_stat_op(self, name, alternate, check_objects=False,
check_allna=False):
import pandas.core.nanops as nanops
def testit():
f = getattr(Series, name)
# add some NaNs
self.series[5:15] = np.NaN
# idxmax, idxmin, min, and max are valid for dates
if name not in ['max', 'min']:
ds = Series(date_range('1/1/2001', periods=10))
pytest.raises(TypeError, f, ds)
# skipna or no
assert notna(f(self.series))
assert isna(f(self.series, skipna=False))
# check the result is correct
nona = self.series.dropna()
assert_almost_equal(f(nona), alternate(nona.values))
assert_almost_equal(f(self.series), alternate(nona.values))
allna = self.series * nan
if check_allna:
# xref 9422
# bottleneck >= 1.0 gives 0.0 for the sum of an all-NA Series
try:
assert nanops._USE_BOTTLENECK
import bottleneck as bn # noqa
assert bn.__version__ >= LooseVersion('1.0')
assert f(allna) == 0.0
except:
assert np.isnan(f(allna))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
f(s)
# 2888
l = [0]
l.extend(lrange(2 ** 40, 2 ** 40 + 1000))
s = Series(l, dtype='int64')
assert_almost_equal(float(f(s)), float(alternate(s.values)))
# check date range
if check_objects:
s = Series(bdate_range('1/1/2000', periods=10))
res = f(s)
exp = alternate(s)
assert res == exp
# check on string data
if name not in ['sum', 'min', 'max']:
pytest.raises(TypeError, f, Series(list('abc')))
# Invalid axis.
pytest.raises(ValueError, f, self.series, axis=1)
# Unimplemented numeric_only parameter.
if 'numeric_only' in compat.signature(f).args:
tm.assert_raises_regex(NotImplementedError, name, f,
self.series, numeric_only=True)
testit()
try:
import bottleneck as bn # noqa
nanops._USE_BOTTLENECK = False
testit()
nanops._USE_BOTTLENECK = True
except ImportError:
pass
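# (editor's note) The toggle above deliberately re-runs the same assertions
# with bottleneck's fast paths disabled, so both the bottleneck-accelerated
# and the pure-numpy nanops code paths get exercised.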
def _check_accum_op(self, name, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(func(self.ts).values,
func(np.array(self.ts)),
check_dtype=check_dtype)
# with missing values
ts = self.ts.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.valid()))
tm.assert_numpy_array_equal(result.values, expected,
check_dtype=False)
def test_compress(self):
cond = [True, False, True, False, False]
s = Series([1, -1, 5, 8, 7],
index=list('abcde'), name='foo')
expected = Series(s.values.compress(cond),
index=list('ac'), name='foo')
tm.assert_series_equal(s.compress(cond), expected)
def test_numpy_compress(self):
cond = [True, False, True, False, False]
s = Series([1, -1, 5, 8, 7],
index=list('abcde'), name='foo')
expected = Series(s.values.compress(cond),
index=list('ac'), name='foo')
tm.assert_series_equal(np.compress(cond, s), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.compress,
cond, s, axis=1)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.compress,
cond, s, out=s)
def test_round(self):
self.ts.index.name = "index_name"
result = self.ts.round(2)
expected = Series(np.round(self.ts.values, 2),
index=self.ts.index, name='ts')
assert_series_equal(result, expected)
assert result.name == self.ts.name
def test_numpy_round(self):
# See gh-12600
s = Series([1.53, 1.36, 0.06])
out = np.round(s, decimals=0)
expected = Series([2., 1., 0.])
assert_series_equal(out, expected)
msg = "the 'out' parameter is not supported"
with tm.assert_raises_regex(ValueError, msg):
np.round(s, decimals=0, out=s)
def test_built_in_round(self):
if not compat.PY3:
pytest.skip(
'built-in round cannot be overridden prior to Python 3')
s = Series([1.123, 2.123, 3.123], index=lrange(3))
result = round(s)
expected_rounded0 = Series([1., 2., 3.], index=lrange(3))
tm.assert_series_equal(result, expected_rounded0)
decimals = 2
expected_rounded = Series([1.12, 2.12, 3.12], index=lrange(3))
result = round(s, decimals)
tm.assert_series_equal(result, expected_rounded)
def test_prod_numpy16_bug(self):
s = Series([1., 1., 1.], index=lrange(3))
result = s.prod()
assert not isinstance(result, Series)
def test_all_any(self):
ts = tm.makeTimeSeries()
bool_series = ts > 0
assert not bool_series.all()
assert bool_series.any()
# Alternative types, with implicit 'object' dtype.
s = Series(['abc', True])
assert 'abc' == s.any() # 'abc' || True => 'abc'
def test_all_any_params(self):
# Check skipna, with implicit 'object' dtype.
s1 = Series([np.nan, True])
s2 = Series([np.nan, False])
assert s1.all(skipna=False) # nan && True => True
assert s1.all(skipna=True)
assert np.isnan(s2.any(skipna=False)) # nan || False => nan
assert not s2.any(skipna=True)
# Check level.
s = pd.Series([False, False, True, True, False, True],
index=[0, 0, 1, 1, 2, 2])
assert_series_equal(s.all(level=0), Series([False, True, False]))
assert_series_equal(s.any(level=0), Series([False, True, True]))
# bool_only is not implemented with level option.
pytest.raises(NotImplementedError, s.any, bool_only=True, level=0)
pytest.raises(NotImplementedError, s.all, bool_only=True, level=0)
# bool_only is not implemented alone.
pytest.raises(NotImplementedError, s.any, bool_only=True)
pytest.raises(NotImplementedError, s.all, bool_only=True)
def test_modulo(self):
with np.errstate(all='ignore'):
# GH3590, modulo as ints
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] % p['second']
expected = Series(p['first'].values % p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.nan
assert_series_equal(result, expected)
result = p['first'] % 0
expected = Series(np.nan, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] % p['second']
expected = Series(p['first'].values % p['second'].values)
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] % p['second']
result2 = p['second'] % p['first']
assert not np.array_equal(result, result2)
# GH 9144
s = Series([0, 1])
result = s % 0
expected = Series([nan, nan])
assert_series_equal(result, expected)
result = 0 % s
expected = Series([nan, 0.0])
assert_series_equal(result, expected)
def test_ops_consistency_on_empty(self):
# GH 7869
# consistency on empty
# float
result = Series(dtype=float).sum()
assert result == 0
result = Series(dtype=float).mean()
assert isna(result)
result = Series(dtype=float).median()
assert isna(result)
# timedelta64[ns]
result = Series(dtype='m8[ns]').sum()
assert result == Timedelta(0)
result = Series(dtype='m8[ns]').mean()
assert result is pd.NaT
result = Series(dtype='m8[ns]').median()
assert result is pd.NaT
def test_corr(self):
tm._skip_if_no_scipy()
import scipy.stats as stats
# full overlap
tm.assert_almost_equal(self.ts.corr(self.ts), 1)
# partial overlap
tm.assert_almost_equal(self.ts[:15].corr(self.ts[5:]), 1)
assert isna(self.ts[:15].corr(self.ts[5:], min_periods=12))
ts1 = self.ts[:15].reindex(self.ts.index)
ts2 = self.ts[5:].reindex(self.ts.index)
assert isna(ts1.corr(ts2, min_periods=12))
# No overlap
assert np.isnan(self.ts[::2].corr(self.ts[1::2]))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
assert isna(cp.corr(cp))
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
result = A.corr(B)
expected, _ = stats.pearsonr(A, B)
tm.assert_almost_equal(result, expected)
def test_corr_rank(self):
tm._skip_if_no_scipy()
import scipy
import scipy.stats as stats
# kendall and spearman
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
A[-5:] = A[:5]
result = A.corr(B, method='kendall')
expected = stats.kendalltau(A, B)[0]
tm.assert_almost_equal(result, expected)
result = A.corr(B, method='spearman')
expected = stats.spearmanr(A, B)[0]
tm.assert_almost_equal(result, expected)
# these methods got rewritten in 0.8
if scipy.__version__ < LooseVersion('0.9'):
pytest.skip("skipping corr rank because of scipy version "
"{0}".format(scipy.__version__))
# results from R
A = Series(
[-0.89926396, 0.94209606, -1.03289164, -0.95445587, 0.76910310, -
0.06430576, -2.09704447, 0.40660407, -0.89926396, 0.94209606])
B = Series(
[-1.01270225, -0.62210117, -1.56895827, 0.59592943, -0.01680292,
1.17258718, -1.06009347, -0.10222060, -0.89076239, 0.89372375])
kexp = 0.4319297
sexp = 0.5853767
tm.assert_almost_equal(A.corr(B, method='kendall'), kexp)
tm.assert_almost_equal(A.corr(B, method='spearman'), sexp)
def test_cov(self):
# full overlap
tm.assert_almost_equal(self.ts.cov(self.ts), self.ts.std() ** 2)
# partial overlap
tm.assert_almost_equal(self.ts[:15].cov(self.ts[5:]),
self.ts[5:15].std() ** 2)
# No overlap
assert np.isnan(self.ts[::2].cov(self.ts[1::2]))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
assert isna(cp.cov(cp))
# min_periods
assert isna(self.ts[:15].cov(self.ts[5:], min_periods=12))
ts1 = self.ts[:15].reindex(self.ts.index)
ts2 = self.ts[5:].reindex(self.ts.index)
assert isna(ts1.cov(ts2, min_periods=12))
def test_count(self):
assert self.ts.count() == len(self.ts)
self.ts[::2] = np.NaN
assert self.ts.count() == np.isfinite(self.ts).sum()
mi = MultiIndex.from_arrays([list('aabbcc'), [1, 2, 2, nan, 1, 2]])
ts = Series(np.arange(len(mi)), index=mi)
left = ts.count(level=1)
right = Series([2, 3, 1], index=[1, 2, nan])
assert_series_equal(left, right)
ts.iloc[[0, 3, 5]] = nan
assert_series_equal(ts.count(level=1), right - 1)
def test_dot(self):
a = Series(np.random.randn(4), index=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'],
columns=['p', 'q', 'r', 's']).T
result = a.dot(b)
expected = Series(np.dot(a.values, b.values), index=['1', '2', '3'])
assert_series_equal(result, expected)
# Check index alignment
b2 = b.reindex(index=reversed(b.index))
result = a.dot(b2)
assert_series_equal(result, expected)
# Check ndarray argument
result = a.dot(b.values)
assert np.all(result == expected.values)
assert_almost_equal(a.dot(b['2'].values), expected['2'])
# Check series argument
assert_almost_equal(a.dot(b['1']), expected['1'])
assert_almost_equal(a.dot(b2['1']), expected['1'])
pytest.raises(Exception, a.dot, a.values[:3])
pytest.raises(ValueError, a.dot, b.T)
def test_value_counts_nunique(self):
# basics.rst doc example
series = Series(np.random.randn(500))
series[20:500] = np.nan
series[10:20] = 5000
result = series.nunique()
assert result == 11
def test_unique(self):
# 714 also, dtype=float
s = Series([1.2345] * 100)
s[::2] = np.nan
result = s.unique()
assert len(result) == 2
s = Series([1.2345] * 100, dtype='f4')
s[::2] = np.nan
result = s.unique()
assert len(result) == 2
# NAs in object arrays #714
s = Series(['foo'] * 100, dtype='O')
s[::2] = np.nan
result = s.unique()
assert len(result) == 2
# decision about None
s = Series([1, 2, 3, None, None, None], dtype=object)
result = s.unique()
expected = np.array([1, 2, 3, None], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_drop_duplicates(self):
# check both int and object
for s in [Series([1, 2, 3, 3]), Series(['1', '2', '3', '3'])]:
expected = Series([False, False, False, True])
assert_series_equal(s.duplicated(), expected)
assert_series_equal(s.drop_duplicates(), s[~expected])
sc = s.copy()
sc.drop_duplicates(inplace=True)
assert_series_equal(sc, s[~expected])
expected = Series([False, False, True, False])
assert_series_equal(s.duplicated(keep='last'), expected)
assert_series_equal(s.drop_duplicates(keep='last'), s[~expected])
sc = s.copy()
sc.drop_duplicates(keep='last', inplace=True)
assert_series_equal(sc, s[~expected])
expected = Series([False, False, True, True])
assert_series_equal(s.duplicated(keep=False), expected)
assert_series_equal(s.drop_duplicates(keep=False), s[~expected])
sc = s.copy()
sc.drop_duplicates(keep=False, inplace=True)
assert_series_equal(sc, s[~expected])
for s in [Series([1, 2, 3, 5, 3, 2, 4]),
Series(['1', '2', '3', '5', '3', '2', '4'])]:
expected = Series([False, False, False, False, True, True, False])
assert_series_equal(s.duplicated(), expected)
assert_series_equal(s.drop_duplicates(), s[~expected])
sc = s.copy()
sc.drop_duplicates(inplace=True)
assert_series_equal(sc, s[~expected])
expected = Series([False, True, True, False, False, False, False])
assert_series_equal(s.duplicated(keep='last'), expected)
assert_series_equal(s.drop_duplicates(keep='last'), s[~expected])
sc = s.copy()
sc.drop_duplicates(keep='last', inplace=True)
assert_series_equal(sc, s[~expected])
expected = Series([False, True, True, False, True, True, False])
assert_series_equal(s.duplicated(keep=False), expected)
assert_series_equal(s.drop_duplicates(keep=False), s[~expected])
sc = s.copy()
sc.drop_duplicates(keep=False, inplace=True)
assert_series_equal(sc, s[~expected])
def test_clip(self):
val = self.ts.median()
assert self.ts.clip_lower(val).min() == val
assert self.ts.clip_upper(val).max() == val
assert self.ts.clip(lower=val).min() == val
assert self.ts.clip(upper=val).max() == val
result = self.ts.clip(-0.5, 0.5)
expected = np.clip(self.ts, -0.5, 0.5)
assert_series_equal(result, expected)
assert isinstance(expected, Series)
def test_clip_types_and_nulls(self):
sers = [Series([np.nan, 1.0, 2.0, 3.0]), Series([None, 'a', 'b', 'c']),
Series(pd.to_datetime(
[np.nan, 1, 2, 3], unit='D'))]
for s in sers:
thresh = s[2]
l = s.clip_lower(thresh)
u = s.clip_upper(thresh)
assert l[notna(l)].min() == thresh
assert u[notna(u)].max() == thresh
assert list(isna(s)) == list(isna(l))
assert list(isna(s)) == list(isna(u))
def test_clip_against_series(self):
# GH #6966
s = Series([1.0, 1.0, 4.0])
threshold = Series([1.0, 2.0, 3.0])
assert_series_equal(s.clip_lower(threshold), Series([1.0, 2.0, 4.0]))
assert_series_equal(s.clip_upper(threshold), Series([1.0, 1.0, 3.0]))
lower = Series([1.0, 2.0, 3.0])
upper = Series([1.5, 2.5, 3.5])
assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))
assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("upper", [[1, 2, 3], np.asarray([1, 2, 3])])
def test_clip_against_list_like(self, inplace, upper):
# GH #15390
original = pd.Series([5, 6, 7])
result = original.clip(upper=upper, inplace=inplace)
expected = pd.Series([1, 2, 3])
if inplace:
result = original
tm.assert_series_equal(result, expected, check_exact=True)
def test_clip_with_datetimes(self):
# GH 11838
# naive and tz-aware datetimes
t = Timestamp('2015-12-01 09:30:30')
s = Series([Timestamp('2015-12-01 09:30:00'),
Timestamp('2015-12-01 09:31:00')])
result = s.clip(upper=t)
expected = Series([Timestamp('2015-12-01 09:30:00'),
Timestamp('2015-12-01 09:30:30')])
assert_series_equal(result, expected)
t = Timestamp('2015-12-01 09:30:30', tz='US/Eastern')
s = Series([Timestamp('2015-12-01 09:30:00', tz='US/Eastern'),
Timestamp('2015-12-01 09:31:00', tz='US/Eastern')])
result = s.clip(upper=t)
expected = Series([Timestamp('2015-12-01 09:30:00', tz='US/Eastern'),
Timestamp('2015-12-01 09:30:30', tz='US/Eastern')])
assert_series_equal(result, expected)
def test_cummethods_bool(self):
# GH 6270
# looks like a buggy np.maximum.accumulate for numpy 1.6.1, py 3.2
def cummin(x):
return np.minimum.accumulate(x)
def cummax(x):
return np.maximum.accumulate(x)
a = pd.Series([False, False, False, True, True, False, False])
b = ~a
c = pd.Series([False] * len(b))
d = ~c
methods = {'cumsum': np.cumsum,
'cumprod': np.cumprod,
'cummin': cummin,
'cummax': cummax}
args = product((a, b, c, d), methods)
for s, method in args:
expected = Series(methods[method](s.values))
result = getattr(s, method)()
assert_series_equal(result, expected)
e = pd.Series([False, True, nan, False])
cse = pd.Series([0, 1, nan, 1], dtype=object)
cpe = pd.Series([False, 0, nan, 0])
cmin = pd.Series([False, False, nan, False])
cmax = pd.Series([False, True, nan, True])
expecteds = {'cumsum': cse,
'cumprod': cpe,
'cummin': cmin,
'cummax': cmax}
for method in methods:
res = getattr(e, method)()
assert_series_equal(res, expecteds[method])
def test_isin(self):
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
result = s.isin(['A', 'C'])
expected = Series([True, False, True, False, False, False, True, True])
assert_series_equal(result, expected)
# GH: 16012
# Reproducing this specific issue requires a Series over 1e6 in length,
# and the comparison array (in_list) must be large enough that numpy
# does not fall back to a manual masking trick that would avoid the
# issue altogether
s = Series(list('abcdefghijk' * 10 ** 5))
# If numpy doesn't do the manual comparison/mask, these
# unorderable mixed types are what cause the exception in numpy
in_list = [-1, 'a', 'b', 'G', 'Y', 'Z', 'E',
'K', 'E', 'S', 'I', 'R', 'R'] * 6
assert s.isin(in_list).sum() == 200000
def test_isin_with_string_scalar(self):
# GH4763
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
with pytest.raises(TypeError):
s.isin('a')
with pytest.raises(TypeError):
s = Series(['aaa', 'b', 'c'])
s.isin('aaa')
def test_isin_with_i8(self):
# GH 5021
expected = Series([True, True, False, False, False])
expected2 = Series([False, True, False, False, False])
# datetime64[ns]
s = Series(date_range('jan-01-2013', 'jan-05-2013'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
result = s.isin(s[0:2].values)
assert_series_equal(result, expected)
# fails on dtype conversion in the first place
result = s.isin(s[0:2].values.astype('datetime64[D]'))
assert_series_equal(result, expected)
result = s.isin([s[1]])
assert_series_equal(result, expected2)
result = s.isin([np.datetime64(s[1])])
assert_series_equal(result, expected2)
result = s.isin(set(s[0:2]))
assert_series_equal(result, expected)
# timedelta64[ns]
s = Series(pd.to_timedelta(lrange(5), unit='d'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# see gh-16991
s = Series(["a", "b"])
expected = Series([False, False])
result = s.isin(empty)
tm.assert_series_equal(expected, result)
def test_timedelta64_analytics(self):
from pandas import date_range
# index min/max
td = Series(date_range('2012-1-1', periods=3, freq='D')) - \
Timestamp('20120101')
result = td.idxmin()
assert result == 0
result = td.idxmax()
assert result == 2
# GH 2982
# with NaT
td[0] = np.nan
result = td.idxmin()
assert result == 1
result = td.idxmax()
assert result == 2
# abs
s1 = Series(date_range('20120101', periods=3))
s2 = Series(date_range('20120102', periods=3))
expected = Series(s2 - s1)
# this fails as numpy returns timedelta64[us]
# result = np.abs(s1-s2)
# assert_frame_equal(result,expected)
result = (s1 - s2).abs()
assert_series_equal(result, expected)
# max/min
result = td.max()
expected = Timedelta('2 days')
assert result == expected
result = td.min()
expected = Timedelta('1 days')
assert result == expected
def test_idxmin(self):
# test idxmin
# the _check_stat_op approach cannot be used here because of the isna check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
assert self.series[self.series.idxmin()] == self.series.min()
assert isna(self.series.idxmin(skipna=False))
# no NaNs
nona = self.series.dropna()
assert nona[nona.idxmin()] == nona.min()
assert (nona.index.values.tolist().index(nona.idxmin()) ==
nona.values.argmin())
# all NaNs
allna = self.series * nan
assert isna(allna.idxmin())
# datetime64[ns]
from pandas import date_range
s = Series(date_range('20130102', periods=6))
result = s.idxmin()
assert result == 0
s[0] = np.nan
result = s.idxmin()
assert result == 1
def test_numpy_argmin(self):
# argmin is aliased to idxmin
data = np.random.randint(0, 11, size=10)
result = np.argmin(Series(data))
assert result == np.argmin(data)
if not _np_version_under1p10:
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.argmin,
Series(data), out=data)
def test_idxmax(self):
# test idxmax
# the _check_stat_op approach cannot be used here because of the isna check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
assert self.series[self.series.idxmax()] == self.series.max()
assert isna(self.series.idxmax(skipna=False))
# no NaNs
nona = self.series.dropna()
assert nona[nona.idxmax()] == nona.max()
assert (nona.index.values.tolist().index(nona.idxmax()) ==
nona.values.argmax())
# all NaNs
allna = self.series * nan
assert isna(allna.idxmax())
from pandas import date_range
s = Series(date_range('20130102', periods=6))
result = s.idxmax()
assert result == 5
s[5] = np.nan
result = s.idxmax()
assert result == 4
# Float64Index
# GH 5914
s = pd.Series([1, 2, 3], [1.1, 2.1, 3.1])
result = s.idxmax()
assert result == 3.1
result = s.idxmin()
assert result == 1.1
s = pd.Series(s.index, s.index)
result = s.idxmax()
assert result == 3.1
result = s.idxmin()
assert result == 1.1
def test_numpy_argmax(self):
# argmax is aliased to idxmax
data = np.random.randint(0, 11, size=10)
result = np.argmax(Series(data))
assert result == np.argmax(data)
if not _np_version_under1p10:
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.argmax,
Series(data), out=data)
def test_ptp(self):
N = 1000
arr = np.random.randn(N)
ser = Series(arr)
assert np.ptp(ser) == np.ptp(arr)
# GH11163
s = Series([3, 5, np.nan, -3, 10])
assert s.ptp() == 13
assert pd.isna(s.ptp(skipna=False))
mi = pd.MultiIndex.from_product([['a', 'b'], [1, 2, 3]])
s = pd.Series([1, np.nan, 7, 3, 5, np.nan], index=mi)
expected = pd.Series([6, 2], index=['a', 'b'], dtype=np.float64)
tm.assert_series_equal(s.ptp(level=0), expected)
expected = pd.Series([np.nan, np.nan], index=['a', 'b'])
tm.assert_series_equal(s.ptp(level=0, skipna=False), expected)
with pytest.raises(ValueError):
s.ptp(axis=1)
s = pd.Series(['a', 'b', 'c', 'd', 'e'])
with pytest.raises(TypeError):
s.ptp()
with pytest.raises(NotImplementedError):
s.ptp(numeric_only=True)
def test_empty_timeseries_reductions_return_nat(self):
# covers #11245
for dtype in ('m8[ns]', 'm8[ns]', 'M8[ns]', 'M8[ns, UTC]'):
assert Series([], dtype=dtype).min() is pd.NaT
assert Series([], dtype=dtype).max() is pd.NaT
def test_unique_data_ownership(self):
# it works! #1807
Series(Series(["a", "c", "b"]).unique()).sort_values()
def test_repeat(self):
s = Series(np.random.randn(3), index=['a', 'b', 'c'])
reps = s.repeat(5)
exp = Series(s.values.repeat(5), index=s.index.values.repeat(5))
assert_series_equal(reps, exp)
with tm.assert_produces_warning(FutureWarning):
result = s.repeat(reps=5)
assert_series_equal(result, exp)
to_rep = [2, 3, 4]
reps = s.repeat(to_rep)
exp = Series(s.values.repeat(to_rep),
index=s.index.values.repeat(to_rep))
assert_series_equal(reps, exp)
def test_numpy_repeat(self):
s = Series(np.arange(3), name='x')
expected = Series(s.values.repeat(2), name='x',
index=s.index.values.repeat(2))
assert_series_equal(np.repeat(s, 2), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.repeat, s, 2, axis=0)
def test_searchsorted(self):
s = Series([1, 2, 3])
idx = s.searchsorted(1, side='left')
tm.assert_numpy_array_equal(idx, np.array([0], dtype=np.intp))
idx = s.searchsorted(1, side='right')
tm.assert_numpy_array_equal(idx, np.array([1], dtype=np.intp))
with tm.assert_produces_warning(FutureWarning):
idx = s.searchsorted(v=1, side='left')
tm.assert_numpy_array_equal(idx, np.array([0], dtype=np.intp))
def test_searchsorted_numeric_dtypes_scalar(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted(30)
e = 2
assert r == e
r = s.searchsorted([30])
e = np.array([2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_searchsorted_numeric_dtypes_vector(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted([91, 2e6])
e = np.array([3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_search_sorted_datetime64_scalar(self):
s = Series(pd.date_range('20120101', periods=10, freq='2D'))
v = pd.Timestamp('20120102')
r = s.searchsorted(v)
e = 1
assert r == e
def test_search_sorted_datetime64_list(self):
s = Series(pd.date_range('20120101', periods=10, freq='2D'))
v = [pd.Timestamp('20120102'), pd.Timestamp('20120104')]
r = s.searchsorted(v)
e = np.array([1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_searchsorted_sorter(self):
# GH8490
s = Series([3, 1, 2])
r = s.searchsorted([0, 3], sorter=np.argsort(s))
e = np.array([0, 2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_is_unique(self):
# GH11946
s = Series(np.random.randint(0, 10, size=1000))
assert not s.is_unique
s = Series(np.arange(1000))
assert s.is_unique
def test_is_monotonic(self):
s = Series(np.random.randint(0, 10, size=1000))
assert not s.is_monotonic
s = Series(np.arange(1000))
assert s.is_monotonic
assert s.is_monotonic_increasing
s = Series(np.arange(1000, 0, -1))
assert s.is_monotonic_decreasing
s = Series(pd.date_range('20130101', periods=10))
assert s.is_monotonic
assert s.is_monotonic_increasing
s = Series(list(reversed(s.tolist())))
assert not s.is_monotonic
assert s.is_monotonic_decreasing
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
res = s.sort_index(level='A')
assert_series_equal(backwards, res)
res = s.sort_index(level=['A', 'B'])
assert_series_equal(backwards, res)
res = s.sort_index(level='A', sort_remaining=False)
assert_series_equal(s, res)
res = s.sort_index(level=['A', 'B'], sort_remaining=False)
assert_series_equal(s, res)
def test_apply_categorical(self):
values = pd.Categorical(list('ABBABCD'), categories=list('DCBA'),
ordered=True)
s = pd.Series(values, name='XX', index=list('abcdefg'))
result = s.apply(lambda x: x.lower())
# should be categorical dtype when the number of categories are
# the same
values = pd.Categorical(list('abbabcd'), categories=list('dcba'),
ordered=True)
exp = pd.Series(values, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp.values)
result = s.apply(lambda x: 'A')
exp = pd.Series(['A'] * 7, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
assert result.dtype == np.object
def test_shift_int(self):
ts = self.ts.astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
assert_series_equal(shifted, expected)
def test_shift_categorical(self):
# GH 9416
s = pd.Series(['a', 'b', 'c', 'd'], dtype='category')
assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).valid())
sp1 = s.shift(1)
assert_index_equal(s.index, sp1.index)
assert np.all(sp1.values.codes[:1] == -1)
assert np.all(s.values.codes[:-1] == sp1.values.codes[1:])
sn2 = s.shift(-2)
assert_index_equal(s.index, sn2.index)
assert np.all(sn2.values.codes[-2:] == -1)
assert np.all(s.values.codes[2:] == sn2.values.codes[:-2])
assert_index_equal(s.values.categories, sp1.values.categories)
assert_index_equal(s.values.categories, sn2.values.categories)
def test_reshape_deprecate(self):
x = Series(np.random.random(10), name='x')
tm.assert_produces_warning(FutureWarning, x.reshape, x.shape)
def test_reshape_non_2d(self):
# see gh-4554
with tm.assert_produces_warning(FutureWarning):
x = Series(np.random.random(201), name='x')
assert x.reshape(x.shape, ) is x
# see gh-2719
with tm.assert_produces_warning(FutureWarning):
a = Series([1, 2, 3, 4])
result = a.reshape(2, 2)
expected = a.values.reshape(2, 2)
tm.assert_numpy_array_equal(result, expected)
assert isinstance(result, type(expected))
def test_reshape_2d_return_array(self):
x = Series(np.random.random(201), name='x')
with tm.assert_produces_warning(FutureWarning):
result = x.reshape((-1, 1))
assert not isinstance(result, Series)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result2 = np.reshape(x, (-1, 1))
assert not isinstance(result2, Series)
with tm.assert_produces_warning(FutureWarning):
result = x[:, None]
expected = x.reshape((-1, 1))
tm.assert_almost_equal(result, expected)
def test_reshape_bad_kwarg(self):
a = Series([1, 2, 3, 4])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
msg = "'foo' is an invalid keyword argument for this function"
tm.assert_raises_regex(
TypeError, msg, a.reshape, (2, 2), foo=2)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
msg = r"reshape\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(
TypeError, msg, a.reshape, a.shape, foo=2)
def test_numpy_reshape(self):
a = Series([1, 2, 3, 4])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = np.reshape(a, (2, 2))
expected = a.values.reshape(2, 2)
tm.assert_numpy_array_equal(result, expected)
assert isinstance(result, type(expected))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = np.reshape(a, a.shape)
tm.assert_series_equal(result, a)
def test_unstack(self):
from numpy import nan
index = MultiIndex(levels=[['bar', 'foo'], ['one', 'three', 'two']],
labels=[[1, 1, 0, 0], [0, 1, 0, 2]])
s = Series(np.arange(4.), index=index)
unstacked = s.unstack()
expected = DataFrame([[2., nan, 3.], [0., 1., nan]],
index=['bar', 'foo'],
columns=['one', 'three', 'two'])
assert_frame_equal(unstacked, expected)
unstacked = s.unstack(level=0)
assert_frame_equal(unstacked, expected.T)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
exp_index = MultiIndex(levels=[['one', 'two', 'three'], [0, 1]],
labels=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]])
expected = DataFrame({'bar': s.values},
index=exp_index).sort_index(level=0)
unstacked = s.unstack(0).sort_index()
assert_frame_equal(unstacked, expected)
# GH5873
idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])
ts = pd.Series([1, 2], index=idx)
left = ts.unstack()
right = DataFrame([[nan, 1], [2, nan]], index=[101, 102],
columns=[nan, 3.5])
assert_frame_equal(left, right)
idx = pd.MultiIndex.from_arrays([['cat', 'cat', 'cat', 'dog', 'dog'
], ['a', 'a', 'b', 'a', 'b'],
[1, 2, 1, 1, np.nan]])
ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)
right = DataFrame([[1.0, 1.3], [1.1, nan], [nan, 1.4], [1.2, nan]],
columns=['cat', 'dog'])
tpls = [('a', 1), ('a', 2), ('b', nan), ('b', 1)]
right.index = pd.MultiIndex.from_tuples(tpls)
assert_frame_equal(ts.unstack(level=0), right)
def test_value_counts_datetime(self):
# most dtypes are tested in test_base.py
values = [pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 10:00'),
pd.Timestamp('2011-01-01 11:00'),
pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 11:00')]
exp_idx = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 11:00',
'2011-01-01 10:00'])
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check DatetimeIndex outputs the same result
idx = pd.DatetimeIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_datetime_tz(self):
values = [pd.Timestamp('2011-01-01 09:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 11:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 09:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 09:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 11:00', tz='US/Eastern')]
exp_idx = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 11:00',
'2011-01-01 10:00'], tz='US/Eastern')
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
idx = pd.DatetimeIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_period(self):
values = [pd.Period('2011-01', freq='M'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-01', freq='M'),
pd.Period('2011-01', freq='M'),
pd.Period('2011-03', freq='M')]
exp_idx = pd.PeriodIndex(['2011-01', '2011-03', '2011-02'], freq='M')
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check PeriodIndex outputs the same result
idx = pd.PeriodIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_categorical_ordered(self):
# most dtypes are tested in test_base.py
values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=True)
exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3],
ordered=True)
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check CategoricalIndex outputs the same result
idx = pd.CategoricalIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_categorical_not_ordered(self):
values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=False)
exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3],
ordered=False)
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check CategoricalIndex outputs the same result
idx = pd.CategoricalIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
@pytest.fixture
def s_main_dtypes():
df = pd.DataFrame(
{'datetime': pd.to_datetime(['2003', '2002',
'2001', '2002',
'2005']),
'datetimetz': pd.to_datetime(
['2003', '2002',
'2001', '2002',
'2005']).tz_localize('US/Eastern'),
'timedelta': pd.to_timedelta(['3d', '2d', '1d',
'2d', '5d'])})
for dtype in ['int8', 'int16', 'int32', 'int64',
'float32', 'float64',
'uint8', 'uint16', 'uint32', 'uint64']:
df[dtype] = Series([3, 2, 1, 2, 5], dtype=dtype)
return df
class TestNLargestNSmallest(object):
@pytest.mark.parametrize(
"r", [Series([3., 2, 1, 2, '5'], dtype='object'),
Series([3., 2, 1, 2, 5], dtype='object'),
# not supported on some archs
# Series([3., 2, 1, 2, 5], dtype='complex256'),
Series([3., 2, 1, 2, 5], dtype='complex128'),
Series(list('abcde'), dtype='category'),
Series(list('abcde'))])
def test_error(self, r):
dt = r.dtype
msg = ("Cannot use method 'n(larg|small)est' with "
"dtype {dt}".format(dt=dt))
args = 2, len(r), 0, -1
methods = r.nlargest, r.nsmallest
for method, arg in product(methods, args):
with tm.assert_raises_regex(TypeError, msg):
method(arg)
@pytest.mark.parametrize(
"s",
[v for k, v in s_main_dtypes().iteritems()])
def test_nsmallest_nlargest(self, s):
# float, int, datetime64 (use i8), timedelta64 (same),
# object that are numbers, object that are strings
assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])
assert_series_equal(s.nsmallest(2, keep='last'), s.iloc[[2, 3]])
empty = s.iloc[0:0]
assert_series_equal(s.nsmallest(0), empty)
assert_series_equal(s.nsmallest(-1), empty)
assert_series_equal(s.nlargest(0), empty)
assert_series_equal(s.nlargest(-1), empty)
assert_series_equal(s.nsmallest(len(s)), s.sort_values())
assert_series_equal(s.nsmallest(len(s) + 1), s.sort_values())
assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]])
assert_series_equal(s.nlargest(len(s) + 1),
s.iloc[[4, 0, 1, 3, 2]])
def test_misc(self):
s = Series([3., np.nan, 1, 2, 5])
assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]])
assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]])
msg = 'keep must be either "first", "last"'
with tm.assert_raises_regex(ValueError, msg):
s.nsmallest(keep='invalid')
with tm.assert_raises_regex(ValueError, msg):
s.nlargest(keep='invalid')
# GH 15297
s = Series([1] * 5, index=[1, 2, 3, 4, 5])
expected_first = Series([1] * 3, index=[1, 2, 3])
expected_last = Series([1] * 3, index=[5, 4, 3])
result = s.nsmallest(3)
assert_series_equal(result, expected_first)
result = s.nsmallest(3, keep='last')
assert_series_equal(result, expected_last)
result = s.nlargest(3)
assert_series_equal(result, expected_first)
result = s.nlargest(3, keep='last')
assert_series_equal(result, expected_last)
@pytest.mark.parametrize('n', range(1, 5))
def test_n(self, n):
# GH 13412
s = Series([1, 4, 3, 2], index=[0, 0, 1, 1])
result = s.nlargest(n)
expected = s.sort_values(ascending=False).head(n)
assert_series_equal(result, expected)
result = s.nsmallest(n)
expected = s.sort_values().head(n)
assert_series_equal(result, expected)
|
mderanty/Projet_Insset | refs/heads/master | vendor/doctrine/orm/docs/en/conf.py | 2448 | # -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
if domain == 'dcorm':
return 'http://'
return None
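# If this stub is intended as a sphinx.ext.linkcode hook (an assumption --
# that extension is not enabled above), a fuller resolver might look like
# this hypothetical sketch, mapping a module path to a source URL:
# def linkcode_resolve(domain, info):
#     if domain != 'py' or not info.get('module'):
#         return None
#     path = info['module'].replace('.', '/')
#     return 'https://example.org/source/%s.py' % path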
|
jfdsmit/django-blog-zinnia | refs/heads/master | docs/conf.py | 1 | # -*- coding: utf-8 -*-
#
# django-blog-zinnia documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 21 17:44:20 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from datetime import date
sys.path.append(os.path.abspath('..'))
import zinnia
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Blog Zinnia'
copyright = u'%s, %s' % (date.today().year, zinnia.__author__)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = zinnia.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-blog-zinniadoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-blog-zinnia.tex', u'django-blog-zinnia Documentation',
u'Fantomas42', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-blog-zinnia', u'django-blog-zinnia Documentation',
[u'Fantomas42'], 1)
]
|
GeoMop/GeoMop | refs/heads/master | testing/LayerEditor/test_layer_editor.py | 1 | import LayerEditor.layer_editor as layer_editor
from PyQt5.QtTest import QTest
from PyQt5.QtCore import QTimer, Qt
import testing.LayerEditor.mock.mock_config as mockcfg
import pytest
import pytestqt
import sys
@pytest.mark.qt
def test_err_dialog():
global dialog_result, editor
dialog_result = {}
mockcfg.set_empty_config()
editor = layer_editor.LayerEditor(False)
timer = QTimer(editor.mainwindow)
timer.timeout.connect(lambda: start_dialog(editor))
print("start timer")
timer.start(100)
print("before main")
editor.main()
print("after main")
print("Delete fixture app: ", str(editor._app))
del editor._app
del editor
#sys.exit(0)
assert dialog_result['title'] == "GeoMop Layer Editor - New File"
assert dialog_result['closed_window'] is True
def start_dialog(editor):
print("start_dialog")
global dialog_result
dialog_result['title'] = editor.mainwindow.windowTitle()
# simulate press of 'Ctrl-Q'
QTest.keyPress(editor.mainwindow, Qt.Key_Q, Qt.ControlModifier)
dialog_result['closed_window'] = editor.mainwindow.close()
editor._app.quit()
# def test_region_panel(qtbot, qapp):
# """
# Proof of concept for testing Layer editor GUI.
# :param qtbot: pytest-qt's qtbot fixture, used to drive widgets (see the sketch below)
# :param qapp: common QApplication instance for all tests.
# :return:
# """
|
MrReN/django-oscar | refs/heads/master | sites/demo/settings.py | 1 | """
Settings for Oscar's demo site.
Notes:
* The demo site uses the stores extension which requires a spatial database.
Only the postgis and spatialite backends are tested, but all backends
supported by GeoDjango should work.
"""
import os
# Django settings for oscar project.
PROJECT_DIR = os.path.dirname(__file__)
location = lambda x: os.path.join(
os.path.dirname(os.path.realpath(__file__)), x)
DEBUG = True
TEMPLATE_DEBUG = True
SQL_DEBUG = True
ADMINS = (
)
EMAIL_SUBJECT_PREFIX = '[Oscar demo] '
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
ALLOWED_HOSTS = ['demo.oscarcommerce.com',
'demo.oscar.tangentlabs.co.uk']
MANAGERS = ADMINS
# Use settings_local to override this default
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'oscar_demo',
'USER': 'm',
},
}
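# A hypothetical spatialite alternative for local development (sketch only;
# the module docstring notes the spatialite backend is also tested, but the
# database file name below is an assumption, not part of this project):
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.contrib.gis.db.backends.spatialite',
#         'NAME': location('demo.sqlite3'),
#     },
# }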
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
LANGUAGES = (
('en-gb', 'English'),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = location("public/media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
#ADMIN_MEDIA_PREFIX = '/media/admin/'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
location('static'),
)
STATIC_ROOT = location('public/static')
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '$)a7n&o80u!6y5t-+jrd3)3!%vh&shg$wqpjpxc!ar&p#!)n1a'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# needed by django-treebeard for admin (and potentially other libs)
'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages",
# Oscar specific
'oscar.apps.search.context_processors.search_form',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.checkout.context_processors.checkout',
'oscar.core.context_processors.metadata',
'oscar.apps.customer.notifications.context_processors.notifications',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'oscar.apps.basket.middleware.BasketMiddleware',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False
}
ROOT_URLCONF = 'urls'
from oscar import OSCAR_MAIN_TEMPLATE_DIR
TEMPLATE_DIRS = (
location('templates'),
OSCAR_MAIN_TEMPLATE_DIR,
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'checkout_file': {
'level': 'INFO',
'class': 'oscar.core.logging.handlers.EnvFileHandler',
'filename': 'checkout.log',
'formatter': 'verbose'
},
'error_file': {
'level': 'INFO',
'class': 'oscar.core.logging.handlers.EnvFileHandler',
'filename': 'errors.log',
'formatter': 'verbose'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'filters': ['require_debug_false'],
},
},
'loggers': {
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'INFO',
},
'django.request': {
'handlers': ['mail_admins', 'error_file'],
'level': 'ERROR',
'propagate': False,
},
'oscar.checkout': {
'handlers': ['console', 'checkout_file'],
'propagate': True,
'level': 'INFO',
},
'datacash': {
'handlers': ['console'],
'propagate': True,
'level': 'INFO',
},
'django.db.backends': {
'handlers': ['null'],
'propagate': False,
'level': 'DEBUG',
},
}
}
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.flatpages',
'django.contrib.staticfiles',
'django.contrib.gis',
# Oscar dependencies
'compressor',
'south',
# Oscar extensions
'stores',
'paypal',
'datacash',
# External apps
'django_extensions',
'debug_toolbar',
# For profile testing
'apps.user',
'apps.bigbang',
# Sentry (for live demo site)
'raven.contrib.django.raven_compat'
]
# Include core apps with a few overrides:
# - a shipping override app to provide some shipping methods
# - an order app to provide order processing logic
from oscar import get_core_apps
INSTALLED_APPS = INSTALLED_APPS + get_core_apps(
['apps.shipping', 'apps.order'])
AUTHENTICATION_BACKENDS = (
'oscar.apps.customer.auth_backends.Emailbackend',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_REDIRECT_URL = '/'
APPEND_SLASH = True
# Haystack settings - we use a local Solr instance running on the default port
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
'URL': 'http://127.0.0.1:8983/solr',
},
}
AUTH_PROFILE_MODULE = 'user.Profile'
# Oscar settings
from oscar.defaults import *
OSCAR_RECENTLY_VIEWED_PRODUCTS = 20
OSCAR_ALLOW_ANON_CHECKOUT = True
OSCAR_SHOP_NAME = 'Oscar'
OSCAR_SHOP_TAGLINE = 'Demo'
COMPRESS_ENABLED = False
COMPRESS_PRECOMPILERS = (
('text/less', 'lessc {infile} {outfile}'),
)
THUMBNAIL_KEY_PREFIX = 'oscar-demo'
LOG_ROOT = location('logs')
# Ensure log root exists
if not os.path.exists(LOG_ROOT):
os.mkdir(LOG_ROOT)
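# (Note: os.mkdir assumes LOG_ROOT's parent directory already exists;
# os.makedirs would also create any missing parent directories.)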
DISPLAY_VERSION = False
USE_TZ = True
# Must be within MEDIA_ROOT for sorl to work
OSCAR_MISSING_IMAGE_URL = 'image_not_found.jpg'
GOOGLE_ANALYTICS_ID = 'UA-45363517-4'
# Add stores node to navigation
new_nav = OSCAR_DASHBOARD_NAVIGATION
new_nav.append(
{
'label': 'Stores',
'icon': 'icon-shopping-cart',
'children': [
{
'label': 'Stores',
'url_name': 'stores-dashboard:store-list',
},
{
'label': 'Store groups',
'url_name': 'stores-dashboard:store-group-list',
},
]
})
new_nav.append(
{
'label': 'Datacash',
'icon': 'icon-globe',
'children': [
{
'label': 'Transactions',
'url_name': 'datacash-transaction-list',
},
]
})
OSCAR_DASHBOARD_NAVIGATION = new_nav
GEOIP_PATH = os.path.join(os.path.dirname(__file__), 'geoip')
# Default currency for django-oscar-datacash
DATACASH_CURRENCY = "GBP"
try:
from settings_local import *
except ImportError:
pass
|