| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, ⌀ = null) |
|---|---|---|---|---|
MichSchli/QuestionAnsweringGCN
|
refs/heads/master
|
models/tensorflow_components/loss_functions/__init__.py
|
12133432
| |
vladmm/intellij-community
|
refs/heads/master
|
python/testData/addImport/newFirstImportInProjectGroupWithExistingBlankLineAbove/a.py
|
12133432
| |
ximion/dak
|
refs/heads/master
|
daklib/architecture.py
|
5
|
"""architecture matching
@copyright: 2014, Ansgar Burchardt <ansgar@debian.org>
@license: GPL-2+
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
def _load_table(path):
table = []
with open(path, 'r') as fh:
for line in fh:
if not line.strip() or line.startswith('#'):
continue
table.append(line.split())
return table
_cached_cputable = None
def _cputable():
global _cached_cputable
if _cached_cputable is None:
_cached_cputable = _load_table('/usr/share/dpkg/cputable')
return _cached_cputable
_cached_arch2triplet = None
_cached_triplet2arch = None
def _triplettable():
global _cached_arch2triplet, _cached_triplet2arch
if _cached_arch2triplet is None or _cached_triplet2arch is None:
table = _load_table('/usr/share/dpkg/triplettable')
arch2triplet = {}
triplet2arch = {}
for row in table:
if '<cpu>' in row[0] or '<cpu>' in row[1]:
for cpu in _cputable():
replaced_row = [ column.replace('<cpu>', cpu[0]) for column in row ]
arch2triplet[replaced_row[1]] = replaced_row[0]
triplet2arch[replaced_row[0]] = replaced_row[1]
else:
arch2triplet[row[1]] = row[0]
triplet2arch[row[0]] = row[1]
_cached_arch2triplet = arch2triplet
_cached_triplet2arch = triplet2arch
return _cached_triplet2arch, _cached_arch2triplet
class InvalidArchitecture(Exception):
pass
def Debian_arch_to_Debian_triplet(arch):
parts = arch.split('-')
# Handle architecture wildcards
if 'any' in parts:
if len(parts) == 3:
return parts
elif len(parts) == 2:
return 'any', parts[0], parts[1]
else:
return 'any', 'any', 'any'
if len(parts) == 2 and parts[0] == 'linux':
arch = parts[1]
triplet = _triplettable()[1].get(arch, None)
if triplet is None:
return None
return triplet.split('-', 2)
def match_architecture(arch, wildcard):
# 'all' has no valid triplet
if arch == 'all' or wildcard == 'all':
return arch == wildcard
if wildcard == 'any' or arch == wildcard:
return True
triplet_arch = Debian_arch_to_Debian_triplet(arch)
triplet_wildcard = Debian_arch_to_Debian_triplet(wildcard)
if triplet_arch is None or len(triplet_arch) != 3:
raise InvalidArchitecture('{0} is not a valid architecture name'.format(arch))
if triplet_wildcard is None or len(triplet_wildcard) != 3:
raise InvalidArchitecture('{0} is not a valid architecture name or wildcard'.format(wildcard))
for i in range(0,3):
if triplet_arch[i] != triplet_wildcard[i] and triplet_wildcard[i] != 'any':
return False
return True
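# Illustrative usage (editor's sketch, not part of the original module; the
# results assume dpkg's /usr/share/dpkg/cputable and triplettable are installed):
#
#     >>> match_architecture('amd64', 'any')
#     True
#     >>> match_architecture('amd64', 'linux-any')
#     True
#     >>> match_architecture('kfreebsd-amd64', 'linux-any')
#     False
#     >>> match_architecture('all', 'any')
#     False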
|
shead-custom-design/pipecat
|
refs/heads/master
|
features/steps/common.py
|
1
|
# Copyright 2016 Timothy M. Shead
#
# This file is part of Pipecat.
#
# Pipecat is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pipecat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pipecat. If not, see <http://www.gnu.org/licenses/>.
# Copyright 2016 Timothy M. Shead
import io
import os
from behave import *
import arrow
import nose.tools
import six
import pipecat.store
data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "data"))
reference_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "reference"))
@given(u'a file named {filename}.')
def step_impl(context, filename):
context.pipe = open(os.path.join(data_dir, filename), "rb")
@given(u'an instance of pipecat.store.cache.')
def step_impl(context):
context.pipe = pipecat.store.cache(context.pipe)
@then(u'the pipe can be iterated to completion.')
def step_impl(context):
for record in context.pipe:
pass
@when(u'iterating through the pipe contents.')
def step_impl(context):
context.records = []
for record in context.pipe:
context.records.append(record)
@then(u'{count} records will be returned.')
def step_impl(context, count):
nose.tools.assert_equal(len(context.records), int(count))
@then(u'every record will contain a {key} key with a string value.')
def step_impl(context, key):
key = eval(key)
for record in context.records:
nose.tools.assert_in(key, record)
nose.tools.assert_is_instance(record[key], six.string_types)
@then(u'every record will contain a {key} key with a bytes value.')
def step_impl(context, key):
key = eval(key)
for record in context.records:
nose.tools.assert_in(key, record)
nose.tools.assert_is_instance(record[key], six.binary_type)
@then(u'every record will contain a {key} key with an arrow value.')
def step_impl(context, key):
key = eval(key)
for record in context.records:
nose.tools.assert_in(key, record)
nose.tools.assert_is_instance(record[key], arrow.arrow.Arrow)
@then(u'every record will contain a {key} key with an address value.')
def step_impl(context, key):
key = eval(key)
for record in context.records:
nose.tools.assert_in(key, record)
nose.tools.assert_is_instance(record[key], tuple)
nose.tools.assert_equal(len(record[key]), 2)
nose.tools.assert_is_instance(record[key][0], six.string_types)
nose.tools.assert_is_instance(record[key][1], int)
@given(u'a pyserial connection.')
def step_impl(context):
import serial
context.pipe = serial.serial_for_url("/dev/cu.SLAB_USBtoUART", baudrate=128000)
@given(u'a string stream.')
def step_impl(context):
context.stream = io.StringIO()
@then(u'the stream contents will match {filename}')
def step_impl(context, filename):
with open(os.path.join(reference_dir, filename), "r") as reference:
nose.tools.assert_equal(reference.read(), context.stream.getvalue())
|
starlightme/python
|
refs/heads/master
|
renzongxian/0024/mysite/todolist/__init__.py
|
12133432
| |
takesxi-shimada/livehousemap
|
refs/heads/master
|
livehousemap/livehouse/models.py
|
1
|
from django.db.models import (
Model,
ManyToManyField,
)
from django.db.models.fields import (
CharField,
FloatField,
)
# Create your models here.
class House(Model):
name = CharField(max_length=1024)
lat = FloatField()
lon = FloatField()
def __unicode__(self):
return self.name
class Landmark(Model):
name = CharField(max_length=1024)
lat = FloatField()
lon = FloatField()
houses = ManyToManyField(House, null=False, blank=True)
def __unicode__(self):
return self.name
|
Donkyhotay/MoonPy
|
refs/heads/master
|
twisted/names/test/test_srvconnect.py
|
53
|
# Copyright (c) 2007-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.names.srvconnect}.
"""
from twisted.internet import defer, protocol
from twisted.names import client, dns, srvconnect
from twisted.names.common import ResolverBase
from twisted.names.error import DNSNameError
from twisted.internet.error import DNSLookupError
from twisted.trial import unittest
from twisted.test.proto_helpers import MemoryReactor
class FakeResolver(ResolverBase):
"""
Resolver that only gives out one given result.
Either L{results} or L{failure} must be set and will be used for
the return value of L{_lookup}
@ivar results: List of L{dns.RRHeader} for the desired result.
@type results: C{list}
@ivar failure: Failure with an exception from L{twisted.names.error}.
@type failure: L{Failure<twisted.python.failure.Failure>}
"""
def __init__(self, results=None, failure=None):
self.results = results
self.failure = failure
def _lookup(self, name, cls, qtype, timeout):
"""
Return the result or failure on lookup.
"""
if self.results is not None:
return defer.succeed((self.results, [], []))
else:
return defer.fail(self.failure)
class DummyFactory(protocol.ClientFactory):
"""
Dummy client factory that stores the reason of connection failure.
"""
def __init__(self):
self.reason = None
def clientConnectionFailed(self, connector, reason):
self.reason = reason
class SRVConnectorTest(unittest.TestCase):
def setUp(self):
self.patch(client, 'theResolver', FakeResolver())
self.reactor = MemoryReactor()
self.factory = DummyFactory()
self.connector = srvconnect.SRVConnector(self.reactor, 'xmpp-server',
'example.org', self.factory)
def test_SRVPresent(self):
"""
Test connectTCP gets called with the address from the SRV record.
"""
payload = dns.Record_SRV(port=6269, target='host.example.org', ttl=60)
client.theResolver.results = [dns.RRHeader(name='example.org',
type=dns.SRV,
cls=dns.IN, ttl=60,
payload=payload)]
self.connector.connect()
self.assertIdentical(None, self.factory.reason)
self.assertEquals(
self.reactor.tcpClients.pop()[:2], ('host.example.org', 6269))
def test_SRVNotPresent(self):
"""
Test connectTCP gets called with fallback parameters on NXDOMAIN.
"""
client.theResolver.failure = DNSNameError('example.org')
self.connector.connect()
self.assertIdentical(None, self.factory.reason)
self.assertEquals(
self.reactor.tcpClients.pop()[:2], ('example.org', 'xmpp-server'))
def test_SRVNoResult(self):
"""
Test connectTCP gets called with fallback parameters on empty result.
"""
client.theResolver.results = []
self.connector.connect()
self.assertIdentical(None, self.factory.reason)
self.assertEquals(
self.reactor.tcpClients.pop()[:2], ('example.org', 'xmpp-server'))
def test_SRVBadResult(self):
"""
Test connectTCP gets called with fallback parameters on bad result.
"""
client.theResolver.results = [dns.RRHeader(name='example.org',
type=dns.CNAME,
cls=dns.IN, ttl=60,
payload=None)]
self.connector.connect()
self.assertIdentical(None, self.factory.reason)
self.assertEquals(
self.reactor.tcpClients.pop()[:2], ('example.org', 'xmpp-server'))
def test_SRVNoService(self):
"""
Test that connecting fails when no service is present.
"""
payload = dns.Record_SRV(port=5269, target='.', ttl=60)
client.theResolver.results = [dns.RRHeader(name='example.org',
type=dns.SRV,
cls=dns.IN, ttl=60,
payload=payload)]
self.connector.connect()
self.assertNotIdentical(None, self.factory.reason)
self.factory.reason.trap(DNSLookupError)
self.assertEquals(self.reactor.tcpClients, [])
|
blrm/openshift-tools
|
refs/heads/stg
|
openshift_tools/monitoring/hawk_sender.py
|
13
|
#!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
"""
Collect metrics and send them to Hawk. The data is sent to Hawk over its
REST API using the HawkClient module.
Examples:
from openshift_tools.monitoring.hawk_common import HawkConnection, HawkHeartbeat
from openshift_tools.monitoring.hawk_sender import HawkSender
HOSTNAME = 'host.example.com'
ZAGGCONN = HawkConnection(url='https://172.17.0.151', user='admin', password='pass')
ZAGGHEARTBEAT = HawkHeartbeat(templates=['template1', 'template2'], hostgroups=['hostgroup1', 'hostgroup2'])
zs = HawkSender(host=HOSTNAME, hawk_connection=ZAGGCONN)
zs.add_heartbeat(ZAGGHEARTBEAT)
zs.add_zabbix_keys({ 'test.key' : '1' })
zs.send_metrics()
"""
import re
from openshift_tools.monitoring.metricmanager import UniqueMetric
from openshift_tools.monitoring.hawk_client import HawkClient
from openshift_tools.monitoring.hawk_common import HawkConnection
from openshift_tools.monitoring.generic_metric_sender import GenericMetricSender
class HawkSender(GenericMetricSender):
"""
collect and create UniqueMetrics and send them to Hawk
"""
# Allow for 6 arguments (including 'self')
# pylint: disable=too-many-arguments
def __init__(self, host=None, hawk_connection=None, verbose=False, debug=False, config_file=None):
"""
set up the hawk client and unique_metrics
"""
super(HawkSender, self).__init__()
if not config_file:
config_file = '/etc/openshift_tools/metric_sender.yaml'
self.config_file = config_file
self.unique_metrics = []
self.verbose = verbose
self.debug = debug
if not host:
host = self.get_default_host()
if not hawk_connection:
hawk_connection = self._get_default_hawk_connection()
self.host = host
self.hawkclient = HawkClient(hawk_connection=hawk_connection)
def get_default_host(self):
""" get the 'host' value from the config file """
self.parse_config()
return self.config['host']['name']
def _get_default_hawk_connection(self):
""" get the values and create a hawk_connection """
self.parse_config()
hawk_server = self.config['hawk']['url']
hawk_user = self.config['hawk']['user']
hawk_password = self.config['hawk']['pass']
hawk_ssl_verify = self.config['hawk'].get('ssl_verify', False)
hawk_debug = self.config['hawk'].get('debug', False)
hawk_active = self.config['hawk'].get('active', True)
if isinstance(hawk_ssl_verify, str):
hawk_ssl_verify = (hawk_ssl_verify == 'True')
if self.debug:
hawk_debug = self.debug
elif isinstance(hawk_debug, str):
hawk_debug = (hawk_debug == 'True')
hawk_connection = HawkConnection(url=hawk_server,
user=hawk_user,
password=hawk_password,
ssl_verify=hawk_ssl_verify,
debug=hawk_debug,
active=hawk_active,
)
return hawk_connection
def add_metric(self, metrics, host=None, synthetic=False, key_tags=None):
""" create unique metric from key value pair """
if not key_tags:
key_tags = {}
if synthetic and not host:
host = self.config['synthetic_clusterwide']['host']['name']
elif not host:
host = self.host
hawk_metrics = []
config_rules = self.config.get('metadata_rules') or []
metric_tags = {}
for key, value in metrics.iteritems():
#check config rules - add tags that match this key
for rule in config_rules:
compiled_rule = re.compile(rule.get('regex'))
if compiled_rule.match(key):
metric_tags.update(rule.get('tags') or {})
#override configuration with runtime parameters
metric_tags.update(key_tags)
hawk_metric = UniqueMetric(host, key, value, tags=metric_tags)
hawk_metrics.append(hawk_metric)
self.unique_metrics += hawk_metrics
def send_metrics(self):
"""
Send list of Unique Metrics to Hawk
clear self.unique_metrics
"""
if self.verbose:
self.print_unique_metrics_key_value()
if self.debug:
self.print_unique_metrics()
self.hawkclient.push_metrics(self.unique_metrics)
self.unique_metrics = []
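# Illustrative sketch (editor's addition, not part of the original module):
# add_metric() tags every metric whose key matches one of the configured
# 'metadata_rules' regexes. A hypothetical metric_sender.yaml fragment:
#
#     metadata_rules:
#     - regex: '^openshift\.master\..*'
#       tags:
#         component: master
#
# With that in place, something like
#
#     zs = HawkSender(host='host.example.com', hawk_connection=ZAGGCONN)
#     zs.add_metric({'openshift.master.api.ping': '1'})
#     zs.send_metrics()
#
# would attach the tag {'component': 'master'} to the pushed metric. The rule
# regex and metric key above are made-up examples.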
|
Gimpneek/exclusive-raid-gym-tracker
|
refs/heads/master
|
app/tests/api/personalised/profile_resource/profile_common.py
|
1
|
from app.tests.api.api_common import APICommonCase
class ProfileCommonCase(APICommonCase):
"""
Set up the tests for the Profile endpoint
"""
def setUp(self):
"""
Set up the tests
"""
super(ProfileCommonCase, self).setUp()
self.url = '/api/v1/me/'
|
volodymyrss/3ML
|
refs/heads/master
|
threeML/io/plotting/data_residual_plot.py
|
1
|
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import numpy as np
from threeML.io.plotting.step_plot import step_plot
from threeML.config.config import threeML_config
from threeML.exceptions.custom_exceptions import custom_warnings
class ResidualPlot(object):
def __init__(self,**kwargs):
"""
:param kwargs:
"""
self._ratio_residuals = False
if 'ratio_residuals' in kwargs:
self._ratio_residuals = bool(kwargs.pop('ratio_residuals'))
self._fig, (self._ax, self._ax1) = plt.subplots(2, 1, sharex=True,
gridspec_kw={'height_ratios': [2, 1]}, **kwargs)
@property
def ratio_residuals(self):
return self._ratio_residuals
def add_model_step(self, xmin, xmax, xwidth, y, label, color='r'):
"""
:param xmin:
:param xmax:
:param xwidth:
:param y:
:param residuals:
:param label:
:param color:
:return:
"""
step_plot(np.asarray(zip(xmin, xmax)),
y / xwidth,
self._ax, alpha=.8,
label=label, color=color)
def add_model(self,x,y,label,color):
"""
:param x:
:param y:
:param label:
:param color:
:return:
"""
self._ax.plot(x,y,label=label,color=color,alpha=.8)
def add_data(self, x, y, residuals, label, xerr=None, yerr=None, residual_yerr=None, color='r'):
"""
:param x:
:param y:
:param residuals:
:param label:
:param xerr:
:param yerr:
:param color:
:return:
"""
self._ax.errorbar(x,
y,
yerr=yerr,
xerr=xerr,
fmt=threeML_config['residual plot']['error marker'],
markersize=threeML_config['residual plot']['error marker size'],
linestyle='',
elinewidth=threeML_config['residual plot']['error line width'],
alpha=.9,
capsize=0,
label=label,
color=color)
#ax.plot(x, expected_model_magnitudes, label='%s Model' % data._name, color=model_color)
#residuals = (expected_model_magnitudes - mag_errors) / mag_errors
if not self.ratio_residuals:
residual_yerr=np.ones_like(residuals)
self._ax1.axhline(0, linestyle='--', color='k')
self._ax1.errorbar(x,
residuals,
xerr=xerr,
yerr=residual_yerr,
capsize=0,
fmt=threeML_config['residual plot']['error marker'],
elinewidth=threeML_config['residual plot']['error line width'],
markersize=threeML_config['residual plot']['error marker size'],
color=color)
def finalize(self, xlabel='x', ylabel='y',xscale='log',yscale='log', show_legend=True,invert_y=False):
"""
:param xlabel:
:param ylabel:
:param xscale:
:param yscale:
:param show_legend:
:return:
"""
if show_legend:
self._ax.legend(fontsize='x-small', loc=0)
self._ax.set_ylabel(ylabel)
self._ax.set_xscale(xscale)
if yscale == 'log':
self._ax.set_yscale(yscale, nonposy='clip')
else:
self._ax.set_yscale(yscale)
self._ax1.set_xscale(xscale)
locator = MaxNLocator(prune='upper', nbins=5)
self._ax1.yaxis.set_major_locator(locator)
self._ax1.set_xlabel(xlabel)
if self.ratio_residuals:
custom_warnings.warn("Residuals plotted as ratios: beware that they are not statistical quantities, and cannot be used to assess fit quality")
self._ax1.set_ylabel("Residuals\n(fraction of model)")
else:
self._ax1.set_ylabel("Residuals\n($\sigma$)")
# This takes care of making space for all labels around the figure
self._fig.tight_layout()
# Now remove the space between the two subplots
# NOTE: this must be placed *after* tight_layout, otherwise it will be ineffective
self._fig.subplots_adjust(hspace=0)
if invert_y:
self._ax.set_ylim(self._ax.get_ylim()[::-1])
return self._fig
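# Illustrative usage (editor's sketch, not part of the original module; the
# arrays below are made-up data):
#
#     rp = ResidualPlot()
#     x = np.linspace(1.0, 10.0, 20)
#     y = x ** -2
#     rp.add_model(x, y, label='model', color='b')
#     rp.add_data(x, y, residuals=np.zeros_like(x), label='data',
#                 yerr=0.1 * y, residual_yerr=np.ones_like(x))
#     fig = rp.finalize(xlabel='Energy', ylabel='Flux')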
|
jfpla/odoo
|
refs/heads/8.0
|
addons/warning/__openerp__.py
|
261
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Warning Messages and Alerts',
'version': '1.0',
'category': 'Tools',
'description': """
Module to trigger warnings in OpenERP objects.
==============================================
Warning messages can be displayed for objects like sale order, purchase order,
picking and invoice. The message is triggered by the form's onchange event.
""",
'author': 'OpenERP SA',
'depends': ['base', 'sale_stock', 'purchase'],
'data': ['warning_view.xml'],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
AlecAivazis/nautilus
|
refs/heads/master
|
nautilus/management/util/__init__.py
|
2
|
from .render_template import render_template
|
lucafavatella/intellij-community
|
refs/heads/cli-wip
|
python/lib/Lib/site-packages/django/contrib/auth/models.py
|
71
|
import datetime
import urllib
from django.contrib import auth
from django.contrib.auth.signals import user_logged_in
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.manager import EmptyManager
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_str
from django.utils.hashcompat import md5_constructor, sha_constructor
from django.utils.translation import ugettext_lazy as _
UNUSABLE_PASSWORD = '!' # This will never be a valid hash
def get_hexdigest(algorithm, salt, raw_password):
"""
Returns a string of the hexdigest of the given plaintext password and salt
using the given algorithm ('md5', 'sha1' or 'crypt').
"""
raw_password, salt = smart_str(raw_password), smart_str(salt)
if algorithm == 'crypt':
try:
import crypt
except ImportError:
raise ValueError('"crypt" password algorithm not supported in this environment')
return crypt.crypt(raw_password, salt)
if algorithm == 'md5':
return md5_constructor(salt + raw_password).hexdigest()
elif algorithm == 'sha1':
return sha_constructor(salt + raw_password).hexdigest()
raise ValueError("Got unknown password algorithm type in password.")
def check_password(raw_password, enc_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes.
"""
algo, salt, hsh = enc_password.split('$')
return hsh == get_hexdigest(algo, salt, raw_password)
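# Illustrative note (editor's addition, not part of the original module):
# encoded passwords have the form 'algo$salt$hexdigest' (see set_password
# further down), so, with made-up salt and password values:
#
#     enc = 'sha1$a1b2c$' + get_hexdigest('sha1', 'a1b2c', 'secret')
#     check_password('secret', enc)   # -> True
#     check_password('wrong', enc)    # -> False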
def update_last_login(sender, user, **kwargs):
"""
A signal receiver which updates the last_login date for
the user logging in.
"""
user.last_login = datetime.datetime.now()
user.save()
user_logged_in.connect(update_last_login)
class SiteProfileNotAvailable(Exception):
pass
class PermissionManager(models.Manager):
def get_by_natural_key(self, codename, app_label, model):
return self.get(
codename=codename,
content_type=ContentType.objects.get_by_natural_key(app_label, model)
)
class Permission(models.Model):
"""The permissions system provides a way to assign permissions to specific users and groups of users.
The permission system is used by the Django admin site, but may also be useful in your own code. The Django admin site uses permissions as follows:
- The "add" permission limits the user's ability to view the "add" form and add an object.
- The "change" permission limits a user's ability to view the change list, view the "change" form and change an object.
- The "delete" permission limits the ability to delete an object.
Permissions are set globally per type of object, not per specific object instance. It is possible to say "Mary may change news stories," but it's not currently possible to say "Mary may change news stories, but only the ones she created herself" or "Mary may only change news stories that have a certain status or publication date."
Three basic permissions -- add, change and delete -- are automatically created for each Django model.
"""
name = models.CharField(_('name'), max_length=50)
content_type = models.ForeignKey(ContentType)
codename = models.CharField(_('codename'), max_length=100)
objects = PermissionManager()
class Meta:
verbose_name = _('permission')
verbose_name_plural = _('permissions')
unique_together = (('content_type', 'codename'),)
ordering = ('content_type__app_label', 'content_type__model', 'codename')
def __unicode__(self):
return u"%s | %s | %s" % (
unicode(self.content_type.app_label),
unicode(self.content_type),
unicode(self.name))
def natural_key(self):
return (self.codename,) + self.content_type.natural_key()
natural_key.dependencies = ['contenttypes.contenttype']
class Group(models.Model):
"""Groups are a generic way of categorizing users to apply permissions, or some other label, to those users. A user can belong to any number of groups.
A user in a group automatically has all the permissions granted to that group. For example, if the group Site editors has the permission can_edit_home_page, any user in that group will have that permission.
Beyond permissions, groups are a convenient way to categorize users to apply some label, or extended functionality, to them. For example, you could create a group 'Special users', and you could write code that would do special things to those users -- such as giving them access to a members-only portion of your site, or sending them members-only e-mail messages.
"""
name = models.CharField(_('name'), max_length=80, unique=True)
permissions = models.ManyToManyField(Permission, verbose_name=_('permissions'), blank=True)
class Meta:
verbose_name = _('group')
verbose_name_plural = _('groups')
def __unicode__(self):
return self.name
class UserManager(models.Manager):
def create_user(self, username, email, password=None):
"""
Creates and saves a User with the given username, e-mail and password.
"""
now = datetime.datetime.now()
# Normalize the address by lowercasing the domain part of the email
# address.
try:
email_name, domain_part = email.strip().split('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
user = self.model(username=username, email=email, is_staff=False,
is_active=True, is_superuser=False, last_login=now,
date_joined=now)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, email, password):
u = self.create_user(username, email, password)
u.is_staff = True
u.is_active = True
u.is_superuser = True
u.save(using=self._db)
return u
def make_random_password(self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'):
"Generates a random password with the given length and given allowed_chars"
# Note that default value of allowed_chars does not have "I" or letters
# that look like it -- just to avoid confusion.
from random import choice
return ''.join([choice(allowed_chars) for i in range(length)])
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
permissions = set()
anon = user.is_anonymous()
for backend in auth.get_backends():
if not anon or backend.supports_anonymous_user:
if hasattr(backend, "get_all_permissions"):
if obj is not None:
if backend.supports_object_permissions:
permissions.update(
backend.get_all_permissions(user, obj)
)
else:
permissions.update(backend.get_all_permissions(user))
return permissions
def _user_has_perm(user, perm, obj):
anon = user.is_anonymous()
active = user.is_active
for backend in auth.get_backends():
if (not active and not anon and backend.supports_inactive_user) or \
(not anon or backend.supports_anonymous_user):
if hasattr(backend, "has_perm"):
if obj is not None:
if (backend.supports_object_permissions and
backend.has_perm(user, perm, obj)):
return True
else:
if backend.has_perm(user, perm):
return True
return False
def _user_has_module_perms(user, app_label):
anon = user.is_anonymous()
active = user.is_active
for backend in auth.get_backends():
if (not active and not anon and backend.supports_inactive_user) or \
(not anon or backend.supports_anonymous_user):
if hasattr(backend, "has_module_perms"):
if backend.has_module_perms(user, app_label):
return True
return False
class User(models.Model):
"""
Users within the Django authentication system are represented by this model.
Username and password are required. Other fields are optional.
"""
username = models.CharField(_('username'), max_length=30, unique=True, help_text=_("Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters"))
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('e-mail address'), blank=True)
password = models.CharField(_('password'), max_length=128, help_text=_("Use '[algo]$[salt]$[hexdigest]' or use the <a href=\"password/\">change password form</a>."))
is_staff = models.BooleanField(_('staff status'), default=False, help_text=_("Designates whether the user can log into this admin site."))
is_active = models.BooleanField(_('active'), default=True, help_text=_("Designates whether this user should be treated as active. Unselect this instead of deleting accounts."))
is_superuser = models.BooleanField(_('superuser status'), default=False, help_text=_("Designates that this user has all permissions without explicitly assigning them."))
last_login = models.DateTimeField(_('last login'), default=datetime.datetime.now)
date_joined = models.DateTimeField(_('date joined'), default=datetime.datetime.now)
groups = models.ManyToManyField(Group, verbose_name=_('groups'), blank=True,
help_text=_("In addition to the permissions manually assigned, this user will also get all permissions granted to each group he/she is in."))
user_permissions = models.ManyToManyField(Permission, verbose_name=_('user permissions'), blank=True)
objects = UserManager()
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def __unicode__(self):
return self.username
def get_absolute_url(self):
return "/users/%s/" % urllib.quote(smart_str(self.username))
def is_anonymous(self):
"""
Always returns False. This is a way of comparing User objects to
anonymous users.
"""
return False
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return True
def get_full_name(self):
"Returns the first_name plus the last_name, with a space in between."
full_name = u'%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def set_password(self, raw_password):
if raw_password is None:
self.set_unusable_password()
else:
import random
algo = 'sha1'
salt = get_hexdigest(algo, str(random.random()), str(random.random()))[:5]
hsh = get_hexdigest(algo, salt, raw_password)
self.password = '%s$%s$%s' % (algo, salt, hsh)
def check_password(self, raw_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes.
"""
# Backwards-compatibility check. Older passwords won't include the
# algorithm or salt.
if '$' not in self.password:
is_correct = (self.password == get_hexdigest('md5', '', raw_password))
if is_correct:
# Convert the password to the new, more secure format.
self.set_password(raw_password)
self.save()
return is_correct
return check_password(raw_password, self.password)
def set_unusable_password(self):
# Sets a value that will never be a valid hash
self.password = UNUSABLE_PASSWORD
def has_usable_password(self):
if self.password is None \
or self.password == UNUSABLE_PASSWORD:
return False
else:
return True
def get_group_permissions(self, obj=None):
"""
Returns a list of permission strings that this user has through
his/her groups. This method queries all available auth backends.
If an object is passed in, only permissions matching this object
are returned.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
if obj is not None:
if backend.supports_object_permissions:
permissions.update(
backend.get_group_permissions(self, obj)
)
else:
permissions.update(backend.get_group_permissions(self))
return permissions
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj)
def has_perm(self, perm, obj=None):
"""
Returns True if the user has the specified permission. This method
queries all available auth backends, but returns immediately if any
backend returns True. Thus, a user who has permission from a single
auth backend is assumed to have permission in general. If an object
is provided, permissions for this specific object are checked.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
def has_perms(self, perm_list, obj=None):
"""
Returns True if the user has each of the specified permissions.
If object is passed, it checks if the user has all required perms
for this object.
"""
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, app_label):
"""
Returns True if the user has any permissions in the given app
label. Uses pretty much the same logic as has_perm, above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
def get_and_delete_messages(self):
messages = []
for m in self.message_set.all():
messages.append(m.message)
m.delete()
return messages
def email_user(self, subject, message, from_email=None):
"Sends an e-mail to this User."
from django.core.mail import send_mail
send_mail(subject, message, from_email, [self.email])
def get_profile(self):
"""
Returns site-specific profile for this user. Raises
SiteProfileNotAvailable if this site does not allow profiles.
"""
if not hasattr(self, '_profile_cache'):
from django.conf import settings
if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
raise SiteProfileNotAvailable('You need to set AUTH_PROFILE_MO'
'DULE in your project settings')
try:
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
except ValueError:
raise SiteProfileNotAvailable('app_label and model_name should'
' be separated by a dot in the AUTH_PROFILE_MODULE set'
'ting')
try:
model = models.get_model(app_label, model_name)
if model is None:
raise SiteProfileNotAvailable('Unable to load the profile '
'model, check AUTH_PROFILE_MODULE in your project sett'
'ings')
self._profile_cache = model._default_manager.using(self._state.db).get(user__id__exact=self.id)
self._profile_cache.user = self
except (ImportError, ImproperlyConfigured):
raise SiteProfileNotAvailable
return self._profile_cache
def _get_message_set(self):
import warnings
warnings.warn('The user messaging API is deprecated. Please update'
' your code to use the new messages framework.',
category=DeprecationWarning)
return self._message_set
message_set = property(_get_message_set)
class Message(models.Model):
"""
The message system is a lightweight way to queue messages for given
users. A message is associated with a User instance (so it is only
applicable for registered users). There's no concept of expiration or
timestamps. Messages are created by the Django admin after successful
actions. For example, "The poll Foo was created successfully." is a
message.
"""
user = models.ForeignKey(User, related_name='_message_set')
message = models.TextField(_('message'))
def __unicode__(self):
return self.message
class AnonymousUser(object):
id = None
username = ''
is_staff = False
is_active = False
is_superuser = False
_groups = EmptyManager()
_user_permissions = EmptyManager()
def __init__(self):
pass
def __unicode__(self):
return 'AnonymousUser'
def __str__(self):
return unicode(self).encode('utf-8')
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return 1 # instances always return the same hash value
def save(self):
raise NotImplementedError
def delete(self):
raise NotImplementedError
def set_password(self, raw_password):
raise NotImplementedError
def check_password(self, raw_password):
raise NotImplementedError
def _get_groups(self):
return self._groups
groups = property(_get_groups)
def _get_user_permissions(self):
return self._user_permissions
user_permissions = property(_get_user_permissions)
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj=obj)
def has_perm(self, perm, obj=None):
return _user_has_perm(self, perm, obj=obj)
def has_perms(self, perm_list, obj=None):
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, module):
return _user_has_module_perms(self, module)
def get_and_delete_messages(self):
return []
def is_anonymous(self):
return True
def is_authenticated(self):
return False
|
Xion/taipan
|
refs/heads/master
|
tests/test_api/test_properties.py
|
1
|
"""
Tests for .api.properties module.
"""
from taipan.testing import TestCase
import taipan.api.properties as __unit__
class _Property(TestCase):
VALUE = 42
class ObjectProperty(_Property):
def test_none(self):
with self.assertRaises(TypeError):
__unit__.objectproperty(None)
def test_some_object(self):
with self.assertRaises(TypeError):
__unit__.objectproperty(object())
def test_class(self):
with self.assertRaises(TypeError):
@__unit__.objectproperty
class Foo(object):
pass
def test_method__incorrectly_with_args(self):
with self.assertRaises(TypeError):
class Foo(object):
@__unit__.objectproperty
def foo(self):
pass
def test_method__locals__just_get(self):
class Foo(object):
@__unit__.objectproperty
def foo():
def get(self):
return ObjectProperty.VALUE
self.assertEquals(ObjectProperty.VALUE, Foo().foo)
def test_method__locals__get_and_set(self):
class Foo(object):
@__unit__.objectproperty
def foo():
def get(self):
return self._foo
def set(self, value):
self._foo = value
obj = Foo()
obj.foo = ObjectProperty.VALUE
self.assertEquals(ObjectProperty.VALUE, obj.foo)
def test_method__locals__get_and_del(self):
class Foo(object):
@__unit__.objectproperty
def foo():
def get(self):
if getattr(self, '_deleted', False):
raise AttributeError('foo')
return ObjectProperty.VALUE
def del_(self):
if getattr(self, '_deleted', False):
raise AttributeError('foo')
self._deleted = True
obj = Foo()
self.assertEquals(ObjectProperty.VALUE, obj.foo)
del obj.foo
with self.assertRaises(AttributeError):
obj.foo
with self.assertRaises(AttributeError):
del obj.foo
def test_method__locals__all(self):
class Foo(object):
@__unit__.objectproperty
def foo():
def get(self):
return self._foo
def set(self, value):
self._foo = value
def del_(self):
del self._foo
obj = Foo()
obj.foo = ObjectProperty.VALUE
self.assertEquals(ObjectProperty.VALUE, obj.foo)
del obj.foo
with self.assertRaises(AttributeError):
obj.foo
with self.assertRaises(AttributeError):
del obj.foo
def test_method__retval__same_as_locals(self):
class Foo(object):
@__unit__.objectproperty
def foo():
def get(self):
return self._foo
def set(self, value):
self._foo = value
return locals()
obj = Foo()
obj.foo = ObjectProperty.VALUE
self.assertEquals(ObjectProperty.VALUE, obj.foo)
def test_method__retval__different_than_locals(self):
class Foo(object):
def __init__(self):
self._foo = 0
@__unit__.objectproperty
def foo():
def get(self):
return self._foo
def set(self, value):
self._foo = value
return {'get': get} # should take precedence over ``locals()``
obj = Foo()
self.assertZero(obj.foo)
with self.assertRaises(AttributeError):
obj.foo = ObjectProperty.VALUE
class ClassProperty(_Property):
def test_none(self):
with self.assertRaises(TypeError):
__unit__.classproperty(None)
def test_some_object(self):
with self.assertRaises(TypeError):
__unit__.classproperty(object())
def test_method__no_args(self):
class Foo(object):
@__unit__.classproperty
def FOO():
return ClassProperty.VALUE
with self.assertRaises(TypeError):
Foo.FOO
def test_method__just_cls_arg(self):
class Foo(object):
@__unit__.classproperty
def FOO(cls):
return ClassProperty.VALUE
self.assertEquals(ClassProperty.VALUE, Foo.FOO)
def test_method__too_many_args(self):
class Foo(object):
@__unit__.classproperty
def FOO(cls, a):
return ClassProperty.VALUE
with self.assertRaises(TypeError):
Foo.FOO
|
openpathsampling/openpathsampling
|
refs/heads/master
|
openpathsampling/tests/test_storage.py
|
3
|
"""
@author David W.H. Swenson
@author Jan-Hendrik Prinz
"""
from __future__ import absolute_import
from builtins import zip
from builtins import range
from builtins import object
import os
import pytest
from nose.tools import (assert_equal)
import openpathsampling as paths
import openpathsampling.engines.openmm as peng
import openpathsampling.engines.toy as toys
from openpathsampling.netcdfplus import ObjectJSON
from openpathsampling.storage import Storage
from .test_helpers import (data_filename, md, compare_snapshot)
import numpy as np
from nose.plugins.skip import SkipTest
class TestStorage(object):
def setup(self):
if not md:
raise SkipTest("mdtraj not installed")
self.mdtraj = md.load(data_filename("ala_small_traj.pdb"))
_ = pytest.importorskip('simtk.unit')
self.traj = peng.trajectory_from_mdtraj(
self.mdtraj, simple_topology=True)
self.filename = data_filename("storage_test.nc")
self.filename_clone = data_filename("storage_test_clone.nc")
self.simplifier = ObjectJSON()
self.template_snapshot = self.traj[0]
self.solute_indices = list(range(22))
self.toy_topology = toys.Topology(
n_spatial=2,
masses=[1.0, 1.0],
pes=None
)
self.engine = toys.Engine({}, self.toy_topology)
self.toy_template = toys.Snapshot(
coordinates=np.array([[-0.5, -0.5]]),
velocities=np.array([[0.0,0.0]]),
engine=self.engine
)
def teardown(self):
if os.path.isfile(self.filename):
os.remove(self.filename)
if os.path.isfile(self.filename_clone):
os.remove(self.filename_clone)
def test_create_storage(self):
store = Storage(filename=self.filename, mode='w')
assert(os.path.isfile(data_filename("storage_test.nc")))
store.close()
def test_stored_topology(self):
raise SkipTest
store = Storage(
filename=self.filename,
mode='w')
assert(os.path.isfile(self.filename))
store.close()
store = Storage(filename=self.filename, mode='a')
loaded_topology = store.template.topology
# check if path topologies have the same JSON string
# this also tests the simplifier for topologies
assert_equal(
self.simplifier.to_json(self.template_snapshot.topology),
self.simplifier.to_json(loaded_topology)
)
store.close()
def test_safemode(self):
fname = data_filename("cv_storage_safemode_test.nc")
if os.path.isfile(fname):
os.remove(fname)
cv = paths.CoordinateFunctionCV('cv', lambda x: x)
traj = paths.Trajectory(list(self.traj))
template = traj[0]
storage_w = paths.Storage(fname, "w")
storage_w.snapshots.save(template)
storage_w.cvs.save(cv)
storage_w.close()
storage_r = paths.Storage(fname, 'r')
# default safemode = False
assert(storage_r.simplifier.safemode is False)
cv_r = storage_r.cvs[0]
assert(cv_r == cv)
assert(cv.cv_callable is not None)
storage_r.close()
storage_r = paths.Storage(fname, 'r')
storage_r.simplifier.safemode = True
cv_r = storage_r.cvs[0]
assert(cv_r == cv)
assert(cv_r.cv_callable is None)
storage_r.close()
def test_store_snapshots(self):
fname = data_filename("cv_storage_test.nc")
if os.path.isfile(fname):
os.remove(fname)
traj = paths.Trajectory(list(self.traj))
template = traj[0]
for use_cache in (False, True):
# print '=========================================================='
# print 'UUID', use_uuid, 'CACHE', use_cache
# print '=========================================================='
storage_w = paths.Storage(fname, "w")
storage_w.snapshots.save(template)
# let's mess up the order in which we save and include
# reversed ones as well
assert(len(storage_w.snapshots) == 2)
assert(len(storage_w.trajectories) == 0)
assert(len(storage_w.stores['snapshot0']) == 2)
storage_w.snapshots.save(traj[8].reversed)
assert(len(storage_w.snapshots) == 4)
assert(len(storage_w.trajectories) == 0)
assert(len(storage_w.stores['snapshot0']) == 4)
# this will store traj[6:] under pos IDX #0
storage_w.trajectories.save(traj[6:])
assert(len(storage_w.snapshots) == 10)
assert(len(storage_w.trajectories) == 1)
assert(len(storage_w.stores['snapshot0']) == 10)
traj_rev = traj.reversed
# this will store traj_rev under pos IDX #1
storage_w.trajectories.mention(traj_rev)
assert(len(storage_w.snapshots) == 20)
assert(len(storage_w.trajectories) == 2)
assert(len(storage_w.stores['snapshot0']) == 10)
# this will not do anything since traj is already saved
storage_w.trajectories.save(traj_rev)
assert(len(storage_w.snapshots) == 20)
assert(len(storage_w.trajectories) == 2)
assert(len(storage_w.stores['snapshot0']) == 10)
# this will store traj under pos IDX #2
storage_w.trajectories.save(traj)
assert(len(storage_w.snapshots) == 20)
assert(len(storage_w.trajectories) == 3)
assert(len(storage_w.stores['snapshot0']) == 20)
# this will not store since traj is already stored
storage_w.trajectories.save(traj)
assert(len(storage_w.snapshots) == 20)
assert(len(storage_w.trajectories) == 3)
assert(len(storage_w.stores['snapshot0']) == 20)
# we saved in this order [0f, 8r, 6f, 7f, 9f, 5r, 4r, 3r, 2r, 1r ]
# these are indices [ 0, 17, 12, 14, 18, 3, 5, 7, 9, 11 ]
storage_w.close()
if use_cache:
storage_r = paths.AnalysisStorage(fname)
else:
storage_r = paths.Storage(fname, 'r')
storage_r.snapshots.set_caching(False)
storage_r.stores['snapshot0'].set_caching(False)
# check if the loaded trajectory is reproduced
for s1, s2 in zip(traj, storage_r.trajectories[2]):
compare_snapshot(s1, s2, True)
# this is the expected order in which it is saved
eff_traj = [
traj[0],
traj[8].reversed,
traj[6],
traj[7],
traj[9],
traj[5].reversed,
traj[4].reversed,
traj[3].reversed,
traj[2].reversed,
traj[1].reversed,
]
# load from hidden and see, if the hidden store looks as expected
# we open every second snapshot from the hidden store because the
# ones in between correspond to the reversed ones
hidden_snapshots = storage_r.stores['snapshot0'][:]
for idx in range(10):
s1 = eff_traj[idx]
s1r = s1.reversed
s2 = hidden_snapshots[2 * idx]
s2r = hidden_snapshots[2 * idx + 1]
compare_snapshot(s1, s2, True)
compare_snapshot(s1r, s2r, True)
storage_r.close()
def test_load_save(self):
for use_uuid in [True]:
store = Storage(filename=self.filename, mode='w')
assert(os.path.isfile(self.filename))
store.save(self.template_snapshot)
store.close()
store = Storage(filename=self.filename, mode='a')
loaded_template = store.snapshots[0]
loaded_r = store.snapshots[1]
compare_snapshot(loaded_template, self.template_snapshot, True)
compare_snapshot(
loaded_template.reversed,
self.template_snapshot.reversed, True)
compare_snapshot(loaded_r, self.template_snapshot.reversed)
store.close()
def test_proxy(self):
for use_uuid in [True]:
store = Storage(filename=self.filename, mode='w')
assert(os.path.isfile(self.filename))
tm = self.template_snapshot
store.save(tm)
px = store.snapshots.proxy(store.snapshots.index.list[0])
# make sure that the proxy and the underlying snapshot compare (and hash) equal
assert(hash(px) == hash(tm))
assert(px == tm)
store.snapshots.cache.clear()
s0 = store.snapshots[0]
assert(hash(px) == hash(s0))
assert(px == s0)
compare_snapshot(px, tm)
compare_snapshot(s0, tm)
px = store.snapshots.proxy(store.snapshots.index.list[0])
# make sure that after reloading it still works
assert(hash(px) == hash(tm))
assert(px == tm)
store.close()
store = Storage(filename=self.filename, mode='a')
s1 = store.snapshots[0]
store.close()
# when loading only for uuid based storages you get the same id
assert((hash(px) == hash(s1)) is use_uuid)
assert((px == s1) is use_uuid)
def test_mention_only(self):
storage_w = paths.Storage(self.filename, "w")
template = self.template_snapshot
storage_w.snapshots.add_type(template)
test_snap = self.traj[2]
# only touch a new snapshot
storage_w.snapshots.only_mention = True
storage_w.snapshots.save(test_snap)
# check that the snapshot is there
assert(len(storage_w.snapshots) == 4)
# in the memory uuid index
assert(test_snap.__uuid__ in storage_w.snapshots.index)
# and stored
assert(test_snap.__uuid__ == storage_w.snapshots.vars['uuid'][1])
# but no real snapshot has been stored
# print len(storage_w.objects['snapshot0'])
assert(len(storage_w.objects['snapshot0']) == 2)
# switch on normal saving
storage_w.snapshots.only_mention = False
test_snap = self.traj[4]
storage_w.snapshots.mention(test_snap)
# check that the snapshot is there
assert(len(storage_w.snapshots) == 6)
# in the memory uuid index
assert(test_snap.__uuid__ in storage_w.snapshots.index)
# and stored
assert(test_snap.__uuid__ == storage_w.snapshots.vars['uuid'][2])
# but no real snapshot has been stored
assert(len(storage_w.objects['snapshot0']) == 2)
# try to now add it
storage_w.snapshots.save(test_snap)
# check that the snapshot is not stored again (only 3 snapshots)
assert(len(storage_w.snapshots) == 6)
assert(len(storage_w.objects['snapshot0']) == 4)
# print storage_w.objects['snapshot0'][1].coordinates
# print template.coordinates
# print storage_w.objects['snapshot0'][0].coordinates
# print test_snap.coordinates
# print storage_w.objects['snapshot0'].vars['statics'][0].coordinates
# print storage_w.objects['snapshot0'].vars['statics'][1].coordinates
# print storage_w.objects['snapshot0'].index
compare_snapshot(storage_w.objects['snapshot0'][4], test_snap)
storage_w.close()
def test_load_save_uuid(self):
store = Storage(filename=self.filename, mode='w')
assert(os.path.isfile(self.filename))
store.save(self.template_snapshot)
store.close()
store = Storage(filename=self.filename, mode='a')
loaded_template = store.snapshots[self.template_snapshot.__uuid__]
loaded_r = store.snapshots[self.template_snapshot.reversed.__uuid__]
compare_snapshot(loaded_template, self.template_snapshot, True)
compare_snapshot(
loaded_template.reversed,
self.template_snapshot.reversed, True)
compare_snapshot(loaded_r, self.template_snapshot.reversed)
store.close()
def test_load_save_toy(self):
store = Storage(filename=self.filename, mode='w')
assert(os.path.isfile(self.filename))
store.save(self.toy_template)
store.close()
store = Storage(filename=self.filename, mode='a')
loaded_template = store.snapshots[0]
loaded_r = store.snapshots[1]
compare_snapshot(loaded_template, self.toy_template, True)
compare_snapshot(
loaded_template.reversed,
self.toy_template.reversed, True)
compare_snapshot(loaded_r, self.toy_template.reversed)
store.close()
def test_reverse_bug(self):
store = Storage(filename=self.filename,
mode='w')
assert(os.path.isfile(self.filename))
store.snapshots.save(self.template_snapshot)
rev = self.template_snapshot.reversed
# save the reversed one
store.snapshots.save(rev)
# check that the reversed one has index 1 and not 3!
assert(store.idx(rev) == 1)
# and we have exactly one snapshot
assert(len(store.snapshots) == 2)
assert(len(store.dimensions['snapshots']) == 1)
store.close()
def test_version(self):
store = Storage(
filename=self.filename, mode='w')
assert(os.path.isfile(self.filename))
assert(store.storage_version == paths.version.version)
|
jaysuk/Printrun
|
refs/heads/master
|
printrun/gui/bufferedcanvas.py
|
4
|
"""
BufferedCanvas -- flicker-free canvas widget
Copyright (C) 2005, 2006 Daniel Keep, 2011 Duane Johnson
To use this widget, just override or replace the draw method.
This will be called whenever the widget size changes, or when
the update method is explicitly called.
Please submit any improvements/bugfixes/ideas to the following
url:
http://wiki.wxpython.org/index.cgi/BufferedCanvas
2006-04-29: Added bugfix for a crash on Mac provided by Marc Jans.
"""
# Hint: try removing '.sp4msux0rz'
__author__ = 'Daniel Keep <daniel.keep.sp4msux0rz@gmail.com>'
__license__ = """
This file is part of the Printrun suite.
Printrun is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Printrun is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Printrun. If not, see <http://www.gnu.org/licenses/>.
"""
__all__ = ['BufferedCanvas']
import wx
class BufferedCanvas(wx.Panel):
"""
Implements a flicker-free canvas widget.
Standard usage is to subclass this class, and override the
draw method. The draw method is passed a device context, which
should be used to do your drawing.
If you want to force a redraw (for whatever reason), you should
call the update method. This is because the draw method is never
called as a result of an EVT_PAINT event.
"""
# These are our two buffers. Just be aware that when the buffers
# are flipped, the REFERENCES are swapped. So I wouldn't want to
# try holding onto explicit references to one or the other ;)
buffer = None
backbuffer = None
def __init__(self,
parent,
ID=-1,
pos = wx.DefaultPosition,
size = wx.DefaultSize,
style = wx.NO_FULL_REPAINT_ON_RESIZE | wx.WANTS_CHARS):
wx.Panel.__init__(self, parent, ID, pos, size, style)
# Bind events
self.Bind(wx.EVT_PAINT, self.onPaint)
# Disable background erasing (flicker-licious)
def disable_event(*pargs, **kwargs):
pass # the sauce, please
self.Bind(wx.EVT_ERASE_BACKGROUND, disable_event)
#
# General methods
#
def draw(self, dc, w, h):
"""
Stub: called when the canvas needs to be re-drawn.
"""
pass
def update(self):
"""
Causes the canvas to be updated.
"""
self.Refresh()
def getWidthHeight(self):
width, height = self.GetClientSize()
if width == 0:
width = 1
if height == 0:
height = 1
return (width, height)
#
# Event handlers
#
def onPaint(self, event):
# Blit the front buffer to the screen
w, h = self.GetClientSize()
if not w or not h:
return
else:
dc = wx.BufferedPaintDC(self)
self.draw(dc, w, h)
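# Illustrative subclass (editor's sketch, not part of the original module):
#
#     class CrossCanvas(BufferedCanvas):
#         def draw(self, dc, w, h):
#             dc.Clear()
#             dc.DrawLine(0, 0, w, h)
#             dc.DrawLine(0, h, w, 0)
#
# Calling update() on an instance simply calls Refresh(); the next EVT_PAINT
# event then re-runs draw() through a wx.BufferedPaintDC.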
|
ProgVal/Limnoria-test
|
refs/heads/debug-pypy-sqlite
|
plugins/Alias/config.py
|
6
|
###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Alias')
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified themself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Alias', True)
Alias = conf.registerPlugin('Alias')
conf.registerGroup(Alias, 'aliases')
conf.registerGroup(Alias, 'escapedaliases')
conf.registerGlobalValue(Alias, 'validName',
registry.String(r'^[^\x00-\x20]+$', _("""Regex which alias names must match in order to be valid""")))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
OmgOhnoes/Flexget
|
refs/heads/develop
|
flexget/plugins/sites/rlsbb.py
|
5
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import re
from flexget import plugin
from flexget.event import event
from flexget.plugins.internal.urlrewriting import UrlRewritingError
from flexget.utils.soup import get_soup
from flexget.utils.search import normalize_unicode
from requests.exceptions import RequestException
log = logging.getLogger('rlsbb')
class UrlRewriteRlsbb(object):
"""
rlsbb.ru urlrewriter
Version 0.1
Configuration
rlsbb:
filehosters_re:
- domain\.com
- domain2\.org
link_text_re:
- UPLOADGiG
- NiTROFLARE
- RAPiDGATOR
parse_comments: no
filehosters_re: Only add links that match any of the regular expressions
listed under filehosters_re.
link_text_re: search for <a> tags where the text (not the url) matches
one of the given regular expressions. The href property of these <a> tags
will be used as the url (or urls).
parse_comments: whether the plugin should also parse the comments or only
the main post. Note that it is highly recommended to use filehosters_re
if you enable parse_comments. Otherwise, the plugin may return too
many and even some potentially dangerous links.
If more than one valid link is found, the url of the entry is rewritten to
the first link found. The complete list of valid links is placed in the
'urls' field of the entry.
Therefore, it is recommended, that you configure your output to use the
'urls' field instead of the 'url' field.
For example, to use jdownloader 2 as output, you would use the exec plugin:
exec:
- echo "text={{urls}}" >> "/path/to/jd2/folderwatch/{{title}}.crawljob"
"""
DEFAULT_DOWNLOAD_TEXT = ['UPLOADGiG', 'NiTROFLARE', 'RAPiDGATOR']
schema = {
'type': 'object',
'properties': {
'filehosters_re': {'type': 'array', 'items': {'format': 'regexp'}, 'default': []},
'link_text_re': {'type': 'array', 'items': {'format': 'regexp'}, 'default': DEFAULT_DOWNLOAD_TEXT},
'parse_comments': {'type': 'boolean', 'default': False}
},
'additionalProperties': False
}
# Since the urlrewriter relies on a config, we need to create a default one
config = {
'filehosters_re': [],
'link_text_re': DEFAULT_DOWNLOAD_TEXT,
'parse_comments': False
}
# grab config
def on_task_start(self, task, config):
self.config = config
# urlrewriter API
def url_rewritable(self, task, entry):
url = entry['url']
        rewritable_regex = r'^https?://(www\.)?rlsbb\.(ru|com)/.*'
return re.match(rewritable_regex, url) is not None
def _get_soup(self, task, url):
try:
page = task.requests.get(url)
except RequestException as e:
raise UrlRewritingError(str(e))
try:
return get_soup(page.text)
except Exception as e:
raise UrlRewritingError(str(e))
# if the file is split into several parts, rlsbb links to a separate page
# which we parse here
def _grab_multilinks(self, task, url):
soup = self._get_soup(task, url)
link_list = soup.find('ol')
link_divs = link_list.find_all('div')
links = []
for link in link_divs:
            links.append(link.string)  # .string is a property on bs4 elements, not a method
return links
@plugin.internet(log)
# urlrewriter API
def url_rewrite(self, task, entry):
soup = self._get_soup(task, entry['url'])
# grab links from the main post:
link_elements = []
log.debug('Searching %s for a tags where the text matches one of: %s',
entry['url'], str(self.config.get('link_text_re')))
for regexp in self.config.get('link_text_re'):
link_elements.extend(soup.find_all('a', string=re.compile(regexp)))
        if 'urls' in entry:
            urls = list(entry['urls'])
            log.debug('Original urls: %s', str(entry['urls']))
else:
urls = []
log.debug('Found link elements: %s', str(link_elements))
for element in link_elements:
if re.search('nfo1.rlsbb.(ru|com)', element['href']):
# grab multipart links
                urls.extend(self._grab_multilinks(task, element['href']))
else:
urls.append(element['href'])
# grab links from comments
regexps = self.config.get('filehosters_re', [])
if self.config.get('parse_comments'):
comments = soup.find_all('div', id=re.compile("commentbody"))
log.debug('Comment parsing enabled: found %d comments.', len(comments))
if comments and not regexps:
log.warn('You have enabled comment parsing but you did not define any filehoster_re filter. You may get a lot of unwanted and potentially dangerous links from the comments.')
for comment in comments:
links = comment.find_all('a')
for link in links:
urls.append(link['href'])
# filter urls:
filtered_urls = []
for i, url in enumerate(urls):
urls[i] = normalize_unicode(url)
for regexp in regexps:
if re.search(regexp, urls[i]):
filtered_urls.append(urls[i])
log.debug('Url: "%s" matched filehoster filter: %s', urls[i], regexp)
break
else:
if regexps:
log.debug(
'Url: "%s" was discarded because it does not match any of the given filehoster filters: %s', urls[i], str(regexps))
if regexps:
log.debug('Using filehosters_re filters: %s', str(regexps))
urls = filtered_urls
else:
log.debug('No filehoster filters configured, using all found links.')
num_links = len(urls)
log.verbose('Found %d links at %s.', num_links, entry['url'])
if num_links:
entry['urls'] = urls
entry['url'] = urls[0]
else:
            raise UrlRewritingError('No usable links found at %s' % entry['url'])
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteRlsbb, 'rlsbb', interfaces=['urlrewriter', 'task'], api_ver=2)
|
glorion13/TextEngine
|
refs/heads/master
|
src/core/components/customisable/effects.py
|
5
|
"""
Copyright (c) 2013 ICRL
See the file license.txt for copying permission.
"""
class EffectFunctions:
"""
    This class contains all the customisable functions which are used to represent the output of an action, e.g. going to another scene or giving information to the player.
    The `effectDict` container is a dictionary mapping the human-readable name of each effect (e.g. 'Go to scene'), for use in an editor or elsewhere, to the corresponding function (e.g. :func:`goToScene`).
"""
def cmdOutputText(self, text):
self.narrative += [text]
def goToScene(self, scene):
self.currentScene = scene
self.cmdOutputText(scene.description)
def outputVisibleSceneActions(self, scene):
possibleActions = [ action.name for action in scene.actions if action.visible ]
if not possibleActions == []:
self.cmdOutputText(str(possibleActions))
def addGlobalResource(self, text, primitive):
self.addResource(text, primitive)
def editGlobalResource(self, gresource, primitive):
gresource.value = primitive
def deleteGlobalResource(self, gresource):
self.removeResource(gresource)
def addLocalResource(self, text, primitive):
self.currentScene.addResource(text, primitive)
def editLocalResource(self, lresource, primitive):
lresource.value = primitive
def deleteLocalResource(self, lresource):
self.currentScene.removeResource(lresource)
#def enableGlobalAction(self, gaction):
# return 0
#def disableGlobalAction(self, gaction):
# return 0
#def makeGlobalActionVisible(self, gaction):
# return 0
#def makeGlobalActionInvisible(self, gaction):
# return 0
#def enableLocalAction(self, laction):
# return 0
#def disableLocalAction(self, laction):
# return 0
#def makeLocalActionVisible(self, laction):
# return 0
#def makeLocalActionInvisible(self, laction):
# return 0
effectDict = {
'Tell player' : cmdOutputText,
'Add global resource' : addGlobalResource,
'Edit global resource' : editGlobalResource,
'Delete global resource' : deleteGlobalResource,
'Add local resource' : addLocalResource,
'Edit local resource' : editLocalResource,
'Delete local resource' : deleteLocalResource,
#'Enable global action' : enableGlobalAction,
#'Disable global action' : disableGlobalAction,
#'Make global action visible' : makeGlobalActionVisible,
#'Make global action invisible' : makeGlobalActionInvisible,
#'Enable local action' : enableLocalAction,
#'Disable local action' : disableLocalAction,
#'Make local action visible' : makeLocalActionVisible,
#'Make local action invisible' : makeLocalActionInvisible,
'Go to scene' : goToScene
}
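# Illustrative sketch (added; not part of the original module): effectDict maps
# human-readable labels to the functions above, so an engine or editor can
# dispatch an effect by name. `game` below is a hypothetical object that mixes
# in EffectFunctions.
# effect = EffectFunctions.effectDict['Tell player']
# effect(game, 'You enter a dark room.')  # same as game.cmdOutputText(...)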
|
zieckey/sdhash
|
refs/heads/master
|
external/tools/build/v2/util/utility.py
|
49
|
# (C) Copyright David Abrahams 2001. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
""" Utility functions to add/remove/get grists.
    Grists are strings enclosed in angle brackets (<>) that are used as prefixes. See Jam for more information.
"""
import re
import os
import bjam
from b2.exceptions import *
__re_grist_and_value = re.compile (r'(<[^>]*>)(.*)')
__re_grist_content = re.compile ('^<(.*)>$')
__re_backslash = re.compile (r'\\')
def to_seq (value):
""" If value is a sequence, returns it.
If it is a string, returns a sequence with value as its sole element.
"""
if not value:
return []
if isinstance (value, str):
return [value]
else:
return value
def replace_references_by_objects (manager, refs):
objs = []
for r in refs:
objs.append (manager.get_object (r))
return objs
def add_grist (features):
""" Transform a string by bracketing it with "<>". If already bracketed, does nothing.
features: one string or a sequence of strings
return: the gristed string, if features is a string, or a sequence of gristed strings, if features is a sequence
"""
def grist_one (feature):
if feature [0] != '<' and feature [len (feature) - 1] != '>':
return '<' + feature + '>'
else:
return feature
if isinstance (features, str):
return grist_one (features)
else:
return [ grist_one (feature) for feature in features ]
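# Illustrative examples (added; not part of the original module):
# add_grist('toolset') -> '<toolset>'
# add_grist(['toolset', '<os>']) -> ['<toolset>', '<os>']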
def replace_grist (features, new_grist):
""" Replaces the grist of a string by a new one.
Returns the string with the new grist.
"""
def replace_grist_one (name, new_grist):
split = __re_grist_and_value.match (name)
if not split:
return new_grist + name
else:
return new_grist + split.group (2)
if isinstance (features, str):
return replace_grist_one (features, new_grist)
else:
return [ replace_grist_one (feature, new_grist) for feature in features ]
def get_value (property):
""" Gets the value of a property, that is, the part following the grist, if any.
"""
return replace_grist (property, '')
def get_grist (value):
""" Returns the grist of a string.
If value is a sequence, does it for every value and returns the result as a sequence.
"""
def get_grist_one (name):
split = __re_grist_and_value.match (name)
if not split:
return ''
else:
return split.group (1)
if isinstance (value, str):
return get_grist_one (value)
else:
return [ get_grist_one (v) for v in value ]
def ungrist (value):
""" Returns the value without grist.
If value is a sequence, does it for every value and returns the result as a sequence.
"""
def ungrist_one (value):
stripped = __re_grist_content.match (value)
if not stripped:
raise BaseException ("in ungrist: '%s' is not of the form <.*>" % value)
return stripped.group (1)
if isinstance (value, str):
return ungrist_one (value)
else:
return [ ungrist_one (v) for v in value ]
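# Further illustrative examples (added; not part of the original module):
# replace_grist('<toolset>gcc', '<compiler>') -> '<compiler>gcc'
# get_grist('<toolset>gcc') -> '<toolset>'
# ungrist('<toolset>') -> 'toolset'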
def replace_suffix (name, new_suffix):
""" Replaces the suffix of name by new_suffix.
If no suffix exists, the new one is added.
"""
split = os.path.splitext (name)
return split [0] + new_suffix
def forward_slashes (s):
""" Converts all backslashes to forward slashes.
"""
return __re_backslash.sub ('/', s)
def split_action_id (id):
""" Splits an id in the toolset and specific rule parts. E.g.
'gcc.compile.c++' returns ('gcc', 'compile.c++')
"""
split = id.split ('.', 1)
toolset = split [0]
name = ''
if len (split) > 1:
name = split [1]
return (toolset, name)
def os_name ():
result = bjam.variable("OS")
assert(len(result) == 1)
return result[0]
def platform ():
return bjam.variable("OSPLAT")
def os_version ():
return bjam.variable("OSVER")
def on_windows ():
""" Returns true if running on windows, whether in cygwin or not.
"""
if bjam.variable("NT"):
return True
elif bjam.variable("UNIX"):
uname = bjam.variable("JAMUNAME")
if uname and uname[0].startswith("CYGWIN"):
return True
return False
|
meh/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/webdriver/webdriver/wait.py
|
263
|
# Copyright 2011 Software Freedom Conservancy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Waiting functionality."""
import time
from exceptions import NoSuchElementException
from exceptions import TimeoutException
POLL_FREQUENCY = 0.5  # How long to sleep in between calls to the method
IGNORED_EXCEPTIONS = [NoSuchElementException] # list of exceptions ignored during calls to the method
class WebDriverWait(object):
def __init__(self, driver, timeout, poll_frequency=POLL_FREQUENCY, ignored_exceptions=None):
"""Constructor, takes a WebDriver instance and timeout in seconds.
:Args:
- driver - Instance of WebDriver (Ie, Firefox, Chrome or Remote)
- timeout - Number of seconds before timing out
- poll_frequency - sleep interval between calls
By default, it is 0.5 second.
- ignored_exceptions - iterable structure of exception classes ignored during calls.
By default, it contains NoSuchElementException only.
Example:
from selenium.webdriver.support.ui import WebDriverWait \n
element = WebDriverWait(driver, 10).until(lambda x: x.find_element_by_id("someId")) \n
is_disappeared = WebDriverWait(driver, 30, 1, (ElementNotVisibleException)).\ \n
until_not(lambda x: x.find_element_by_id("someId").is_displayed())
"""
self._driver = driver
self._timeout = timeout
self._poll = poll_frequency
# avoid the divide by zero
if self._poll == 0:
self._poll = POLL_FREQUENCY
        exceptions = list(IGNORED_EXCEPTIONS)  # copy so the module-level default is not mutated
if ignored_exceptions is not None:
try:
exceptions.extend(iter(ignored_exceptions))
except TypeError: # ignored_exceptions is not iterable
exceptions.append(ignored_exceptions)
self._ignored_exceptions = tuple(exceptions)
def until(self, method, message=''):
"""Calls the method provided with the driver as an argument until the \
return value is not False."""
end_time = time.time() + self._timeout
while(True):
try:
value = method(self._driver)
if value:
return value
except self._ignored_exceptions:
pass
time.sleep(self._poll)
if(time.time() > end_time):
break
raise TimeoutException(message)
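    # Illustrative usage (added; not part of the original module): poll until a
    # condition built from the driver returns a truthy value, e.g.
    #   WebDriverWait(driver, 10).until(lambda d: d.find_element_by_id("someId"))
    # raises TimeoutException if nothing truthy is returned within 10 seconds.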
def until_not(self, method, message=''):
"""Calls the method provided with the driver as an argument until the \
return value is False."""
end_time = time.time() + self._timeout
while(True):
try:
value = method(self._driver)
if not value:
return value
except self._ignored_exceptions:
return True
time.sleep(self._poll)
if(time.time() > end_time):
break
raise TimeoutException(message)
|
dancingdan/tensorflow
|
refs/heads/master
|
tensorflow/python/autograph/pyct/static_analysis/live_values_test.py
|
15
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for live_values module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import live_values
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.autograph.pyct.static_analysis import type_info
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
class LiveValuesResolverTest(test.TestCase):
def _parse_and_analyze(self,
test_fn,
namespace,
literals=None,
arg_types=None):
literals = literals or {}
node, source = parser.parse_entity(test_fn)
entity_info = transformer.EntityInfo(
source_code=source,
source_file=None,
namespace=namespace,
arg_values=None,
arg_types=arg_types,
owner_type=None)
node = qual_names.resolve(node)
graphs = cfg.build(node)
node = activity.resolve(node, entity_info)
node = reaching_definitions.resolve(node, entity_info, graphs,
reaching_definitions.Definition)
node = live_values.resolve(node, entity_info, literals)
node = type_info.resolve(node, entity_info)
node = live_values.resolve(node, entity_info, literals)
return node
def test_literals(self):
a = None
def test_fn():
return a
node = self._parse_and_analyze(test_fn, {}, literals={'a': 'bar'})
retval_node = node.body[0].body[0].value
self.assertEquals('bar', anno.getanno(retval_node, 'live_val'))
def test_primitive_values(self):
a = None
def test_fn():
return a
node = self._parse_and_analyze(test_fn, {'a': True})
retval_node = node.body[0].body[0].value
if six.PY2:
self.assertEqual(
anno.getanno(retval_node, 'fqn'), ('__builtin__', 'bool'))
else:
self.assertEqual(anno.getanno(retval_node, 'fqn'), ('builtins', 'bool'))
def test_namespace(self):
def foo():
return 'bar'
def test_fn():
return foo()
node = self._parse_and_analyze(test_fn, {'foo': foo})
func_node = node.body[0].body[0].value.func
self.assertEquals(foo, anno.getanno(func_node, 'live_val'))
self.assertEquals(('foo',), anno.getanno(func_node, 'fqn'))
def test_attribute_names(self):
def test_fn():
return constant_op.constant(0)
node = self._parse_and_analyze(test_fn, {'constant_op': constant_op})
func_node = node.body[0].body[0].value.func
self.assertEquals(constant_op.constant, anno.getanno(func_node, 'live_val'))
self.assertEquals((constant_op.__name__, 'constant'),
anno.getanno(func_node, 'fqn'))
def test_attributes_with_type_hints(self):
class TestClass(object):
def member(self):
pass
def test_fn(self):
return self.member()
node = self._parse_and_analyze(
TestClass.test_fn, {'constant_op': constant_op},
arg_types={'self': (TestClass.__name__, TestClass)})
func_node = node.body[0].body[0].value.func
self.assertEquals(TestClass.member, anno.getanno(func_node, 'live_val'))
self.assertEquals(TestClass, anno.getanno(func_node, 'parent_type'))
self.assertEquals(('TestClass', 'member'), anno.getanno(func_node, 'fqn'))
if __name__ == '__main__':
test.main()
|
deveninfotech/deven-frappe
|
refs/heads/develop
|
frappe/model/__init__.py
|
28
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# model __init__.py
from __future__ import unicode_literals
import frappe
import json
no_value_fields = ['Section Break', 'Column Break', 'HTML', 'Table', 'Button', 'Image', 'Fold']
default_fields = ['doctype','name','owner','creation','modified','modified_by','parent','parentfield','parenttype','idx','docstatus']
integer_docfield_properties = ["reqd", "search_index", "in_list_view", "permlevel", "hidden", "read_only", "ignore_user_permissions", "allow_on_submit", "report_hide", "in_filter", "no_copy", "print_hide"]
def insert(doclist):
if not isinstance(doclist, list):
doclist = [doclist]
for d in doclist:
if isinstance(d, dict):
d["__islocal"] = 1
else:
d.set("__islocal", 1)
wrapper = frappe.get_doc(doclist)
wrapper.save()
return wrapper
def rename(doctype, old, new, debug=False):
import frappe.model.rename_doc
frappe.model.rename_doc.rename_doc(doctype, old, new, debug)
def copytables(srctype, src, srcfield, tartype, tar, tarfield, srcfields, tarfields=[]):
if not tarfields:
tarfields = srcfields
l = []
data = src.get(srcfield)
for d in data:
newrow = tar.append(tarfield)
newrow.idx = d.idx
for i in range(len(srcfields)):
newrow.set(tarfields[i], d.get(srcfields[i]))
l.append(newrow)
return l
def db_exists(dt, dn):
import frappe
return frappe.db.exists(dt, dn)
def delete_fields(args_dict, delete=0):
"""
Delete a field.
* Deletes record from `tabDocField`
* If not single doctype: Drops column from table
* If single, deletes record from `tabSingles`
args_dict = { dt: [field names] }
"""
import frappe.utils
for dt in args_dict.keys():
fields = args_dict[dt]
if not fields: continue
frappe.db.sql("""\
DELETE FROM `tabDocField`
WHERE parent=%s AND fieldname IN (%s)
""" % ('%s', ", ".join(['"' + f + '"' for f in fields])), dt)
# Delete the data / column only if delete is specified
if not delete: continue
if frappe.db.get_value("DocType", dt, "issingle"):
frappe.db.sql("""\
DELETE FROM `tabSingles`
WHERE doctype=%s AND field IN (%s)
""" % ('%s', ", ".join(['"' + f + '"' for f in fields])), dt)
else:
existing_fields = frappe.db.sql("desc `tab%s`" % dt)
existing_fields = existing_fields and [e[0] for e in existing_fields] or []
query = "ALTER TABLE `tab%s` " % dt + \
", ".join(["DROP COLUMN `%s`" % f for f in fields if f in existing_fields])
frappe.db.commit()
frappe.db.sql(query)
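# Illustrative usage (added; not part of the original module); the doctype and
# field names below are hypothetical.
# delete_fields({"Task": ["old_field"]}, delete=1)
# removes the DocField rows and, because delete is truthy, also drops the column
# (or the `tabSingles` rows for a single doctype).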
def rename_field(doctype, old_fieldname, new_fieldname):
"""This functions assumes that doctype is already synced"""
meta = frappe.get_meta(doctype)
new_field = meta.get_field(new_fieldname)
if not new_field:
print "rename_field: " + (new_fieldname) + " not found in " + doctype
return
if new_field.fieldtype == "Table":
# change parentfield of table mentioned in options
frappe.db.sql("""update `tab%s` set parentfield=%s
where parentfield=%s""" % (new_field.options.split("\n")[0], "%s", "%s"),
(new_fieldname, old_fieldname))
elif new_field.fieldtype not in no_value_fields:
if meta.issingle:
frappe.db.sql("""update `tabSingles` set field=%s
where doctype=%s and field=%s""",
(new_fieldname, doctype, old_fieldname))
else:
# copy field value
frappe.db.sql("""update `tab%s` set `%s`=`%s`""" % \
(doctype, new_fieldname, old_fieldname))
# update in property setter
frappe.db.sql("""update `tabProperty Setter` set field_name = %s
where doc_type=%s and field_name=%s""", (new_fieldname, doctype, old_fieldname))
update_reports(doctype, old_fieldname, new_fieldname)
update_users_report_view_settings(doctype, old_fieldname, new_fieldname)
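# Illustrative usage (added; not part of the original module); the doctype and
# field names below are hypothetical.
# rename_field("Task", "subject", "title")
# copies values from `subject` to `title` and updates property setters,
# reports and saved report view settings accordingly.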
def update_reports(doctype, old_fieldname, new_fieldname):
def _get_new_sort_by(report_dict, report, key):
sort_by = report_dict.get(key) or ""
if sort_by:
sort_by = sort_by.split(".")
if len(sort_by) > 1:
if sort_by[0]==doctype and sort_by[1]==old_fieldname:
sort_by = doctype + "." + new_fieldname
report_dict["updated"] = True
elif report.ref_doctype == doctype and sort_by[0]==old_fieldname:
sort_by = doctype + "." + new_fieldname
report_dict["updated"] = True
if isinstance(sort_by, list):
sort_by = '.'.join(sort_by)
return sort_by
reports = frappe.db.sql("""select name, ref_doctype, json from tabReport
where report_type = 'Report Builder' and ifnull(is_standard, 'No') = 'No'
and json like %s and json like %s""",
('%%%s%%' % old_fieldname , '%%%s%%' % doctype), as_dict=True)
for r in reports:
report_dict = json.loads(r.json)
# update filters
new_filters = []
for f in report_dict.get("filters"):
if f[0] == doctype and f[1] == old_fieldname:
new_filters.append([doctype, new_fieldname, f[2], f[3]])
report_dict["updated"] = True
else:
new_filters.append(f)
# update columns
new_columns = []
for c in report_dict.get("columns"):
if c[0] == old_fieldname and c[1] == doctype:
new_columns.append([new_fieldname, doctype])
report_dict["updated"] = True
else:
new_columns.append(c)
# update sort by
new_sort_by = _get_new_sort_by(report_dict, r, "sort_by")
new_sort_by_next = _get_new_sort_by(report_dict, r, "sort_by_next")
if report_dict.get("updated"):
new_val = json.dumps({
"filters": new_filters,
"columns": new_columns,
"sort_by": new_sort_by,
"sort_order": report_dict.get("sort_order"),
"sort_by_next": new_sort_by_next,
"sort_order_next": report_dict.get("sort_order_next")
})
frappe.db.sql("""update `tabReport` set `json`=%s where name=%s""", (new_val, r.name))
def update_users_report_view_settings(doctype, ref_fieldname, new_fieldname):
user_report_cols = frappe.db.sql("""select defkey, defvalue from `tabDefaultValue` where
defkey like '_list_settings:%'""")
for key, value in user_report_cols:
new_columns = []
columns_modified = False
for field, field_doctype in json.loads(value):
if field == ref_fieldname and field_doctype == doctype:
new_columns.append([new_fieldname, field_doctype])
columns_modified=True
else:
new_columns.append([field, field_doctype])
if columns_modified:
frappe.db.sql("""update `tabDefaultValue` set defvalue=%s
where defkey=%s""" % ('%s', '%s'), (json.dumps(new_columns), key))
|
jymannob/CouchPotatoServer
|
refs/heads/develop
|
libs/bs4/element.py
|
438
|
import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
DEFAULT_OUTPUT_ENCODING = "utf-8"
PY3K = (sys.version_info[0] > 2)
whitespace_re = re.compile("\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
    def alias(self, value):
        return setattr(self, attr, value)
return alias
class NamespacedAttribute(unicode):
def __new__(cls, prefix, name, namespace=None):
if name is None:
obj = unicode.__new__(cls, prefix)
elif prefix is None:
# Not really namespaced.
obj = unicode.__new__(cls, name)
else:
obj = unicode.__new__(cls, prefix + ":" + name)
obj.prefix = prefix
obj.name = name
obj.namespace = namespace
return obj
class AttributeValueWithCharsetSubstitution(unicode):
"""A stand-in object for a character encoding specified in HTML."""
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'charset' attribute.
When Beautiful Soup parses the markup '<meta charset="utf8">', the
value of the 'charset' attribute will be one of these objects.
"""
def __new__(cls, original_value):
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'content' attribute.
When Beautiful Soup parses the markup:
<meta http-equiv="content-type" content="text/html; charset=utf8">
The value of the 'content' attribute will be one of these objects.
"""
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
def __new__(cls, original_value):
match = cls.CHARSET_RE.search(original_value)
if match is None:
# No substitution necessary.
return unicode.__new__(unicode, original_value)
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
def rewrite(match):
return match.group(1) + encoding
return self.CHARSET_RE.sub(rewrite, self.original_value)
class HTMLAwareEntitySubstitution(EntitySubstitution):
"""Entity substitution rules that are aware of some HTML quirks.
Specifically, the contents of <script> and <style> tags should not
undergo entity substitution.
Incoming NavigableString objects are checked to see if they're the
direct children of a <script> or <style> tag.
"""
cdata_containing_tags = set(["script", "style"])
preformatted_tags = set(["pre"])
@classmethod
def _substitute_if_appropriate(cls, ns, f):
if (isinstance(ns, NavigableString)
and ns.parent is not None
and ns.parent.name in cls.cdata_containing_tags):
# Do nothing.
return ns
# Substitute.
return f(ns)
@classmethod
def substitute_html(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_html)
@classmethod
def substitute_xml(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_xml)
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
# There are five possible values for the "formatter" argument passed in
# to methods like encode() and prettify():
#
# "html" - All Unicode characters with corresponding HTML entities
# are converted to those entities on output.
# "minimal" - Bare ampersands and angle brackets are converted to
# XML entities: & < >
# None - The null formatter. Unicode characters are never
# converted to entities. This is not recommended, but it's
# faster than "minimal".
# A function - This function will be called on every string that
# needs to undergo entity substitution.
#
# In an HTML document, the default "html" and "minimal" functions
# will leave the contents of <script> and <style> tags alone. For
# an XML document, all tags will be given the same treatment.
HTML_FORMATTERS = {
"html" : HTMLAwareEntitySubstitution.substitute_html,
"minimal" : HTMLAwareEntitySubstitution.substitute_xml,
None : None
}
XML_FORMATTERS = {
"html" : EntitySubstitution.substitute_html,
"minimal" : EntitySubstitution.substitute_xml,
None : None
}
def format_string(self, s, formatter='minimal'):
"""Format the given string using the given formatter."""
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
if formatter is None:
output = s
else:
output = formatter(s)
return output
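    # Illustrative example (added; not part of the original module): with the
    # "minimal" formatter, bare ampersands and angle brackets are escaped:
    #   tag.format_string(u'AT&T <3', 'minimal') -> u'AT&amp;T &lt;3'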
@property
def _is_xml(self):
"""Is this element part of an XML tree or an HTML tree?
This is used when mapping a formatter name ("minimal") to an
appropriate function (one that performs entity-substitution on
the contents of <script> and <style> tags, or not). It's
inefficient, but it should be called very rarely.
"""
if self.parent is None:
# This is the top-level object. It should have .is_xml set
# from tree creation. If not, take a guess--BS is usually
# used on HTML markup.
return getattr(self, 'is_xml', False)
return self.parent._is_xml
def _formatter_for_name(self, name):
"Look up a formatter function based on its name and the tree."
if self._is_xml:
return self.XML_FORMATTERS.get(
name, EntitySubstitution.substitute_xml)
else:
return self.HTML_FORMATTERS.get(
name, HTMLAwareEntitySubstitution.substitute_xml)
def setup(self, parent=None, previous_element=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous_element = previous_element
if previous_element is not None:
self.previous_element.next_element = self
self.next_element = None
self.previous_sibling = None
self.next_sibling = None
if self.parent is not None and self.parent.contents:
self.previous_sibling = self.parent.contents[-1]
self.previous_sibling.next_sibling = self
nextSibling = _alias("next_sibling") # BS3
previousSibling = _alias("previous_sibling") # BS3
def replace_with(self, replace_with):
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError("Cannot replace a Tag with its parent.")
old_parent = self.parent
my_index = self.parent.index(self)
self.extract()
old_parent.insert(my_index, replace_with)
return self
replaceWith = replace_with # BS3
def unwrap(self):
my_parent = self.parent
my_index = self.parent.index(self)
self.extract()
for child in reversed(self.contents[:]):
my_parent.insert(my_index, child)
return self
replace_with_children = unwrap
replaceWithChildren = unwrap # BS3
def wrap(self, wrap_inside):
me = self.replace_with(wrap_inside)
wrap_inside.append(me)
return wrap_inside
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent is not None:
del self.parent.contents[self.parent.index(self)]
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
last_child = self._last_descendant()
next_element = last_child.next_element
if self.previous_element is not None:
self.previous_element.next_element = next_element
if next_element is not None:
next_element.previous_element = self.previous_element
self.previous_element = None
last_child.next_element = None
self.parent = None
if self.previous_sibling is not None:
self.previous_sibling.next_sibling = self.next_sibling
if self.next_sibling is not None:
self.next_sibling.previous_sibling = self.previous_sibling
self.previous_sibling = self.next_sibling = None
return self
def _last_descendant(self, is_initialized=True, accept_self=True):
"Finds the last element beneath this object to be parsed."
if is_initialized and self.next_sibling:
last_child = self.next_sibling.previous_element
else:
last_child = self
while isinstance(last_child, Tag) and last_child.contents:
last_child = last_child.contents[-1]
if not accept_self and last_child == self:
last_child = None
return last_child
# BS3: Not part of the API!
_lastRecursiveChild = _last_descendant
def insert(self, position, new_child):
if new_child is self:
raise ValueError("Cannot insert a tag into itself.")
if (isinstance(new_child, basestring)
and not isinstance(new_child, NavigableString)):
new_child = NavigableString(new_child)
position = min(position, len(self.contents))
if hasattr(new_child, 'parent') and new_child.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if new_child.parent is self:
current_index = self.index(new_child)
if current_index < position:
# We're moving this element further down the list
# of this object's children. That means that when
# we extract this element, our target index will
# jump down one.
position -= 1
new_child.extract()
new_child.parent = self
previous_child = None
if position == 0:
new_child.previous_sibling = None
new_child.previous_element = self
else:
previous_child = self.contents[position - 1]
new_child.previous_sibling = previous_child
new_child.previous_sibling.next_sibling = new_child
new_child.previous_element = previous_child._last_descendant(False)
if new_child.previous_element is not None:
new_child.previous_element.next_element = new_child
new_childs_last_element = new_child._last_descendant(False)
if position >= len(self.contents):
new_child.next_sibling = None
parent = self
parents_next_sibling = None
while parents_next_sibling is None and parent is not None:
parents_next_sibling = parent.next_sibling
parent = parent.parent
if parents_next_sibling is not None:
# We found the element that comes next in the document.
break
if parents_next_sibling is not None:
new_childs_last_element.next_element = parents_next_sibling
else:
# The last element of this tag is the last element in
# the document.
new_childs_last_element.next_element = None
else:
next_child = self.contents[position]
new_child.next_sibling = next_child
if new_child.next_sibling is not None:
new_child.next_sibling.previous_sibling = new_child
new_childs_last_element.next_element = next_child
if new_childs_last_element.next_element is not None:
new_childs_last_element.next_element.previous_element = new_childs_last_element
self.contents.insert(position, new_child)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def insert_before(self, predecessor):
"""Makes the given element the immediate predecessor of this one.
The two elements will have the same parent, and the given element
will be immediately before this one.
"""
if self is predecessor:
raise ValueError("Can't insert an element before itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'before' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(predecessor, PageElement):
predecessor.extract()
index = parent.index(self)
parent.insert(index, predecessor)
def insert_after(self, successor):
"""Makes the given element the immediate successor of this one.
The two elements will have the same parent, and the given element
will be immediately after this one.
"""
if self is successor:
raise ValueError("Can't insert an element after itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'after' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(successor, PageElement):
successor.extract()
index = parent.index(self)
parent.insert(index+1, successor)
def find_next(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
findNext = find_next # BS3
def find_all_next(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.next_elements,
**kwargs)
findAllNext = find_all_next # BS3
def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._find_one(self.find_next_siblings, name, attrs, text,
**kwargs)
findNextSibling = find_next_sibling # BS3
def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.next_siblings, **kwargs)
findNextSiblings = find_next_siblings # BS3
fetchNextSiblings = find_next_siblings # BS2
def find_previous(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._find_one(
self.find_all_previous, name, attrs, text, **kwargs)
findPrevious = find_previous # BS3
def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.previous_elements,
**kwargs)
findAllPrevious = find_all_previous # BS3
fetchPrevious = find_all_previous # BS2
def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._find_one(self.find_previous_siblings, name, attrs, text,
**kwargs)
findPreviousSibling = find_previous_sibling # BS3
def find_previous_siblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.previous_siblings, **kwargs)
findPreviousSiblings = find_previous_siblings # BS3
fetchPreviousSiblings = find_previous_siblings # BS2
def find_parent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _find_one because findParents takes a different
# set of arguments.
r = None
l = self.find_parents(name, attrs, 1, **kwargs)
if l:
r = l[0]
return r
findParent = find_parent # BS3
def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._find_all(name, attrs, None, limit, self.parents,
**kwargs)
findParents = find_parents # BS3
fetchParents = find_parents # BS2
@property
def next(self):
return self.next_element
@property
def previous(self):
return self.previous_element
#These methods do the real heavy lifting.
def _find_one(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _find_all(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
if text is None and not limit and not attrs and not kwargs:
if name is True or name is None:
# Optimization to find all tags.
result = (element for element in generator
if isinstance(element, Tag))
return ResultSet(strainer, result)
elif isinstance(name, basestring):
# Optimization to find all tags with a given name.
result = (element for element in generator
if isinstance(element, Tag)
and element.name == name)
return ResultSet(strainer, result)
results = ResultSet(strainer)
while True:
try:
i = next(generator)
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These generators can be used to navigate starting from both
#NavigableStrings and Tags.
@property
def next_elements(self):
i = self.next_element
while i is not None:
yield i
i = i.next_element
@property
def next_siblings(self):
i = self.next_sibling
while i is not None:
yield i
i = i.next_sibling
@property
def previous_elements(self):
i = self.previous_element
while i is not None:
yield i
i = i.previous_element
@property
def previous_siblings(self):
i = self.previous_sibling
while i is not None:
yield i
i = i.previous_sibling
@property
def parents(self):
i = self.parent
while i is not None:
yield i
i = i.parent
# Methods for supporting CSS selectors.
tag_name_re = re.compile('^[a-z0-9]+$')
# /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
# \---/ \---/\-------------/ \-------/
# | | | |
# | | | The value
# | | ~,|,^,$,* or =
# | Attribute
# Tag
attribselect_re = re.compile(
r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
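    # Illustrative match (added; not part of the original module):
    #   attribselect_re.match('a[href^="http"]').groups()
    #   -> ('a', 'href', '^', 'http')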
def _attr_value_as_string(self, value, default=None):
"""Force an attribute value into a string representation.
A multi-valued attribute will be converted into a
        space-separated string.
"""
value = self.get(value, default)
if isinstance(value, list) or isinstance(value, tuple):
value =" ".join(value)
return value
def _tag_name_matches_and(self, function, tag_name):
if not tag_name:
return function
else:
def _match(tag):
return tag.name == tag_name and function(tag)
return _match
def _attribute_checker(self, operator, attribute, value=''):
"""Create a function that performs a CSS selector operation.
Takes an operator, attribute and optional value. Returns a
function that will return True for elements that match that
combination.
"""
if operator == '=':
# string representation of `attribute` is equal to `value`
return lambda el: el._attr_value_as_string(attribute) == value
elif operator == '~':
# space-separated list representation of `attribute`
# contains `value`
def _includes_value(element):
attribute_value = element.get(attribute, [])
if not isinstance(attribute_value, list):
attribute_value = attribute_value.split()
return value in attribute_value
return _includes_value
elif operator == '^':
# string representation of `attribute` starts with `value`
return lambda el: el._attr_value_as_string(
attribute, '').startswith(value)
elif operator == '$':
            # string representation of `attribute` ends with `value`
return lambda el: el._attr_value_as_string(
attribute, '').endswith(value)
elif operator == '*':
# string representation of `attribute` contains `value`
return lambda el: value in el._attr_value_as_string(attribute, '')
elif operator == '|':
# string representation of `attribute` is either exactly
# `value` or starts with `value` and then a dash.
def _is_or_starts_with_dash(element):
attribute_value = element._attr_value_as_string(attribute, '')
return (attribute_value == value or attribute_value.startswith(
value + '-'))
return _is_or_starts_with_dash
else:
return lambda el: el.has_attr(attribute)
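    # Illustrative example (added; not part of the original module):
    #   checker = tag._attribute_checker('^', 'href', 'http')
    #   checker(el) is True when el's href attribute starts with 'http'.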
# Old non-property versions of the generators, for backwards
# compatibility with BS3.
def nextGenerator(self):
return self.next_elements
def nextSiblingGenerator(self):
return self.next_siblings
def previousGenerator(self):
return self.previous_elements
def previousSiblingGenerator(self):
return self.previous_siblings
def parentGenerator(self):
return self.parents
class NavigableString(unicode, PageElement):
PREFIX = ''
SUFFIX = ''
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, unicode):
return unicode.__new__(cls, value)
return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
def __copy__(self):
return self
def __getnewargs__(self):
return (unicode(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
def output_ready(self, formatter="minimal"):
output = self.format_string(self, formatter)
return self.PREFIX + output + self.SUFFIX
@property
def name(self):
return None
@name.setter
def name(self, name):
raise AttributeError("A NavigableString cannot be given a name.")
class PreformattedString(NavigableString):
"""A NavigableString not subject to the normal formatting rules.
The string will be passed into the formatter (to trigger side effects),
but the return value will be ignored.
"""
def output_ready(self, formatter="minimal"):
"""CData strings are passed into the formatter.
But the return value is ignored."""
self.format_string(self, formatter)
return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
PREFIX = u'<![CDATA['
SUFFIX = u']]>'
class ProcessingInstruction(PreformattedString):
PREFIX = u'<?'
SUFFIX = u'?>'
class Comment(PreformattedString):
PREFIX = u'<!--'
SUFFIX = u'-->'
class Declaration(PreformattedString):
PREFIX = u'<!'
SUFFIX = u'!>'
class Doctype(PreformattedString):
@classmethod
def for_name_and_ids(cls, name, pub_id, system_id):
value = name or ''
if pub_id is not None:
value += ' PUBLIC "%s"' % pub_id
if system_id is not None:
value += ' "%s"' % system_id
elif system_id is not None:
value += ' SYSTEM "%s"' % system_id
return Doctype(value)
PREFIX = u'<!DOCTYPE '
SUFFIX = u'>\n'
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, parser=None, builder=None, name=None, namespace=None,
prefix=None, attrs=None, parent=None, previous=None):
"Basic constructor."
if parser is None:
self.parser_class = None
else:
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected.
self.parser_class = parser.__class__
if name is None:
raise ValueError("No value provided for new tag's name.")
self.name = name
self.namespace = namespace
self.prefix = prefix
if attrs is None:
attrs = {}
elif attrs and builder.cdata_list_attributes:
attrs = builder._replace_cdata_list_attribute_values(
self.name, attrs)
else:
attrs = dict(attrs)
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
# Set up any substitutions, such as the charset in a META tag.
if builder is not None:
builder.set_up_substitutions(self)
self.can_be_empty_element = builder.can_be_empty_element(name)
else:
self.can_be_empty_element = False
parserClass = _alias("parser_class") # BS3
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
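    # Illustrative behaviour (added; not part of the original module):
    #   BeautifulSoup('<br/>').br.is_empty_element -> True
    #   BeautifulSoup('<p>hi</p>').p.is_empty_element -> False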
@property
def string(self):
"""Convenience property to get the single string within this tag.
:Return: If this tag has a single string child, return value
is that string. If this tag has no children, or more than one
child, return value is None. If this tag has one child tag,
return value is the 'string' attribute of the child tag,
recursively.
"""
if len(self.contents) != 1:
return None
child = self.contents[0]
if isinstance(child, NavigableString):
return child
return child.string
@string.setter
def string(self, string):
self.clear()
self.append(string.__class__(string))
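    # Illustrative behaviour (added; not part of the original module):
    #   BeautifulSoup('<b>one</b>').b.string -> u'one'
    #   BeautifulSoup('<b>one<i>two</i></b>').b.string -> None (more than one child)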
def _all_strings(self, strip=False, types=(NavigableString, CData)):
"""Yield all strings of certain classes, possibly stripping them.
By default, yields only NavigableString and CData objects. So
no comments, processing instructions, etc.
"""
for descendant in self.descendants:
if (
(types is None and not isinstance(descendant, NavigableString))
or
(types is not None and type(descendant) not in types)):
continue
if strip:
descendant = descendant.strip()
if len(descendant) == 0:
continue
yield descendant
strings = property(_all_strings)
@property
def stripped_strings(self):
for string in self._all_strings(True):
yield string
def get_text(self, separator=u"", strip=False,
types=(NavigableString, CData)):
"""
Get all child strings, concatenated using the given separator.
"""
return separator.join([s for s in self._all_strings(
strip, types=types)])
getText = get_text
text = property(get_text)
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
i = self
while i is not None:
next = i.next_element
i.__dict__.clear()
i.contents = []
i = next
def clear(self, decompose=False):
"""
Extract all children. If decompose is True, decompose instead.
"""
if decompose:
for element in self.contents[:]:
if isinstance(element, Tag):
element.decompose()
else:
element.extract()
else:
for element in self.contents[:]:
element.extract()
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self.attrs.get(key, default)
def has_attr(self, key):
return key in self.attrs
def __hash__(self):
return str(self).__hash__()
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self.attrs[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self.attrs[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
self.attrs.pop(key, None)
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
find_all() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return self.find_all(*args, **kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.endswith('Tag'):
# BS3: soup.aTag -> "soup.find("a")
tag_name = tag[:-3]
warnings.warn(
'.%sTag is deprecated, use .find("%s") instead.' % (
tag_name, tag_name))
return self.find(tag_name)
# We special case contents to avoid recursion.
elif not tag.startswith("__") and not tag=="contents":
return self.find(tag)
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag."""
if self is other:
return True
if (not hasattr(other, 'name') or
not hasattr(other, 'attrs') or
not hasattr(other, 'contents') or
self.name != other.name or
self.attrs != other.attrs or
len(self) != len(other)):
return False
for i, my_child in enumerate(self.contents):
if my_child != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.encode(encoding)
def __unicode__(self):
return self.decode()
def __str__(self):
return self.encode()
if PY3K:
__str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
indent_level=None, formatter="minimal",
errors="xmlcharrefreplace"):
# Turn the data structure into Unicode, then encode the
# Unicode.
u = self.decode(indent_level, encoding, formatter)
return u.encode(encoding, errors)
def _should_pretty_print(self, indent_level):
"""Should this tag be pretty-printed?"""
return (
indent_level is not None and
(self.name not in HTMLAwareEntitySubstitution.preformatted_tags
or self._is_xml))
def decode(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a Unicode representation of this tag and its contents.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
attrs = []
if self.attrs:
for key, val in sorted(self.attrs.items()):
if val is None:
decoded = key
else:
if isinstance(val, list) or isinstance(val, tuple):
val = ' '.join(val)
elif not isinstance(val, basestring):
val = unicode(val)
elif (
isinstance(val, AttributeValueWithCharsetSubstitution)
and eventual_encoding is not None):
val = val.encode(eventual_encoding)
text = self.format_string(val, formatter)
decoded = (
unicode(key) + '='
+ EntitySubstitution.quoted_attribute_value(text))
attrs.append(decoded)
close = ''
closeTag = ''
prefix = ''
if self.prefix:
prefix = self.prefix + ":"
if self.is_empty_element:
close = '/'
else:
closeTag = '</%s%s>' % (prefix, self.name)
pretty_print = self._should_pretty_print(indent_level)
space = ''
indent_space = ''
if indent_level is not None:
indent_space = (' ' * (indent_level - 1))
if pretty_print:
space = indent_space
indent_contents = indent_level + 1
else:
indent_contents = None
contents = self.decode_contents(
indent_contents, eventual_encoding, formatter)
if self.hidden:
# This is the 'document root' object.
s = contents
else:
s = []
attribute_string = ''
if attrs:
attribute_string = ' ' + ' '.join(attrs)
if indent_level is not None:
# Even if this particular tag is not pretty-printed,
# we should indent up to the start of the tag.
s.append(indent_space)
s.append('<%s%s%s%s>' % (
prefix, self.name, attribute_string, close))
if pretty_print:
s.append("\n")
s.append(contents)
if pretty_print and contents and contents[-1] != "\n":
s.append("\n")
if pretty_print and closeTag:
s.append(space)
s.append(closeTag)
if indent_level is not None and closeTag and self.next_sibling:
# Even if this particular tag is not pretty-printed,
# we're now done with the tag, and we should add a
# newline if appropriate.
s.append("\n")
s = ''.join(s)
return s
def prettify(self, encoding=None, formatter="minimal"):
if encoding is None:
return self.decode(True, formatter=formatter)
else:
return self.encode(encoding, True, formatter=formatter)
def decode_contents(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a Unicode string.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
pretty_print = (indent_level is not None)
s = []
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.output_ready(formatter)
elif isinstance(c, Tag):
s.append(c.decode(indent_level, eventual_encoding,
formatter))
if text and indent_level and not self.name == 'pre':
text = text.strip()
if text:
if pretty_print and not self.name == 'pre':
s.append(" " * (indent_level - 1))
s.append(text)
if pretty_print and not self.name == 'pre':
s.append("\n")
return ''.join(s)
def encode_contents(
self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a bytestring."""
contents = self.decode_contents(indent_level, encoding, formatter)
return contents.encode(encoding)
# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
if not prettyPrint:
indentLevel = None
return self.encode_contents(
indent_level=indentLevel, encoding=encoding)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.descendants
if not recursive:
generator = self.children
return self._find_all(name, attrs, text, limit, generator, **kwargs)
findAll = find_all # BS3
findChildren = find_all # BS2
#Generator methods
@property
def children(self):
# return iter() to make the purpose of the method clear
return iter(self.contents) # XXX This seems to be untested.
@property
def descendants(self):
if not len(self.contents):
return
stopNode = self._last_descendant().next_element
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next_element
# CSS selector code
_selector_combinators = ['>', '+', '~']
_select_debug = False
def select(self, selector, _candidate_generator=None):
"""Perform a CSS selection operation on the current element."""
tokens = selector.split()
current_context = [self]
if tokens[-1] in self._selector_combinators:
raise ValueError(
'Final combinator "%s" is missing an argument.' % tokens[-1])
if self._select_debug:
print 'Running CSS selector "%s"' % selector
for index, token in enumerate(tokens):
if self._select_debug:
print ' Considering token "%s"' % token
recursive_candidate_generator = None
tag_name = None
if tokens[index-1] in self._selector_combinators:
# This token was consumed by the previous combinator. Skip it.
if self._select_debug:
print ' Token was consumed by the previous combinator.'
continue
# Each operation corresponds to a checker function, a rule
# for determining whether a candidate matches the
# selector. Candidates are generated by the active
# iterator.
checker = None
m = self.attribselect_re.match(token)
if m is not None:
# Attribute selector
tag_name, attribute, operator, value = m.groups()
checker = self._attribute_checker(operator, attribute, value)
elif '#' in token:
# ID selector
tag_name, tag_id = token.split('#', 1)
def id_matches(tag):
return tag.get('id', None) == tag_id
checker = id_matches
elif '.' in token:
# Class selector
tag_name, klass = token.split('.', 1)
classes = set(klass.split('.'))
def classes_match(candidate):
return classes.issubset(candidate.get('class', []))
checker = classes_match
elif ':' in token:
# Pseudo-class
tag_name, pseudo = token.split(':', 1)
if tag_name == '':
raise ValueError(
"A pseudo-class must be prefixed with a tag name.")
pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo)
found = []
if pseudo_attributes is not None:
pseudo_type, pseudo_value = pseudo_attributes.groups()
if pseudo_type == 'nth-of-type':
try:
pseudo_value = int(pseudo_value)
except:
raise NotImplementedError(
'Only numeric values are currently supported for the nth-of-type pseudo-class.')
if pseudo_value < 1:
raise ValueError(
'nth-of-type pseudo-class value must be at least 1.')
class Counter(object):
def __init__(self, destination):
self.count = 0
self.destination = destination
def nth_child_of_type(self, tag):
self.count += 1
if self.count == self.destination:
return True
if self.count > self.destination:
# Stop the generator that's sending us
# these things.
raise StopIteration()
return False
checker = Counter(pseudo_value).nth_child_of_type
else:
raise NotImplementedError(
'Only the following pseudo-classes are implemented: nth-of-type.')
elif token == '*':
# Star selector -- matches everything
pass
elif token == '>':
# Run the next token as a CSS selector against the
# direct children of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.children
elif token == '~':
# Run the next token as a CSS selector against the
# siblings of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.next_siblings
elif token == '+':
# For each tag in the current context, run the next
# token as a CSS selector against the tag's next
# sibling that's a tag.
def next_tag_sibling(tag):
yield tag.find_next_sibling(True)
recursive_candidate_generator = next_tag_sibling
elif self.tag_name_re.match(token):
# Just a tag name.
tag_name = token
else:
raise ValueError(
'Unsupported or invalid CSS selector: "%s"' % token)
if recursive_candidate_generator:
# This happens when the selector looks like "> foo".
#
# The generator calls select() recursively on every
# member of the current context, passing in a different
# candidate generator and a different selector.
#
# In the case of "> foo", the candidate generator is
# one that yields a tag's direct children (">"), and
# the selector is "foo".
next_token = tokens[index+1]
def recursive_select(tag):
if self._select_debug:
print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs)
print '-' * 40
for i in tag.select(next_token, recursive_candidate_generator):
if self._select_debug:
print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs)
yield i
if self._select_debug:
print '-' * 40
_use_candidate_generator = recursive_select
elif _candidate_generator is None:
# By default, a tag's candidates are all of its
# children. If tag_name is defined, only yield tags
# with that name.
if self._select_debug:
if tag_name:
check = tag_name
else:
check = "[any]"
print ' Default candidate generator, tag name="%s"' % check
if self._select_debug:
# This is redundant with later code, but it stops
# a bunch of bogus tags from cluttering up the
# debug log.
def default_candidate_generator(tag):
for child in tag.descendants:
if not isinstance(child, Tag):
continue
if tag_name and not child.name == tag_name:
continue
yield child
_use_candidate_generator = default_candidate_generator
else:
_use_candidate_generator = lambda tag: tag.descendants
else:
_use_candidate_generator = _candidate_generator
new_context = []
new_context_ids = set([])
for tag in current_context:
if self._select_debug:
print " Running candidate generator on %s %s" % (
tag.name, repr(tag.attrs))
for candidate in _use_candidate_generator(tag):
if not isinstance(candidate, Tag):
continue
if tag_name and candidate.name != tag_name:
continue
if checker is not None:
try:
result = checker(candidate)
except StopIteration:
# The checker has decided we should no longer
# run the generator.
break
if checker is None or result:
if self._select_debug:
print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs))
if id(candidate) not in new_context_ids:
# If a tag matches a selector more than once,
# don't include it in the context more than once.
new_context.append(candidate)
new_context_ids.add(id(candidate))
elif self._select_debug:
print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs))
current_context = new_context
if self._select_debug:
print "Final verdict:"
for i in current_context:
print " %s %s" % (i.name, i.attrs)
return current_context
# Old names for backwards compatibility
def childGenerator(self):
return self.children
def recursiveChildGenerator(self):
return self.descendants
def has_key(self, key):
"""This was kind of misleading because has_key() (attributes)
was different from __contains__ (contents). has_key() is gone in
Python 3, anyway."""
warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % (
key))
return self.has_attr(key)
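# A minimal usage sketch for the CSS selector support implemented in
# Tag.select() above. The markup and variable names are hypothetical and not
# part of the original module:
#
#     from bs4 import BeautifulSoup
#     soup = BeautifulSoup('<div id="main"><p class="intro">Hi</p><p>Bye</p></div>')
#     soup.select('div#main p.intro')   # descendant match on id + class
#     soup.select('div > p')            # '>' combinator: direct children only
#     soup.select('p:nth-of-type(2)')   # only numeric nth-of-type is implemented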
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = self._normalize_search_value(name)
if not isinstance(attrs, dict):
# Treat a non-dict value for attrs as a search for the 'class'
# attribute.
kwargs['class'] = attrs
attrs = None
if 'class_' in kwargs:
# Treat class_="foo" as a search for the 'class'
# attribute, overriding any non-dict value for attrs.
kwargs['class'] = kwargs['class_']
del kwargs['class_']
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
normalized_attrs = {}
for key, value in attrs.items():
normalized_attrs[key] = self._normalize_search_value(value)
self.attrs = normalized_attrs
self.text = self._normalize_search_value(text)
def _normalize_search_value(self, value):
# Leave it alone if it's a Unicode string, a callable, a
# regular expression, a boolean, or None.
if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match')
or isinstance(value, bool) or value is None):
return value
# If it's a bytestring, convert it to Unicode, treating it as UTF-8.
if isinstance(value, bytes):
return value.decode("utf8")
# If it's listlike, convert it into a list of strings.
if hasattr(value, '__iter__'):
new_value = []
for v in value:
if (hasattr(v, '__iter__') and not isinstance(v, bytes)
and not isinstance(v, unicode)):
# This is almost certainly the user's mistake. In the
# interests of avoiding infinite loops, we'll let
# it through as-is rather than doing a recursive call.
new_value.append(v)
else:
new_value.append(self._normalize_search_value(v))
return new_value
# Otherwise, convert it into a Unicode string.
# The unicode(str()) thing is so this will do the same thing on Python 2
# and Python 3.
return unicode(str(value))
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def search_tag(self, markup_name=None, markup_attrs={}):
found = None
markup = None
if isinstance(markup_name, Tag):
markup = markup_name
markup_attrs = markup
call_function_with_tag_data = (
isinstance(self.name, collections.Callable)
and not isinstance(markup_name, Tag))
if ((not self.name)
or call_function_with_tag_data
or (markup and self._matches(markup, self.name))
or (not markup and self._matches(markup_name, self.name))):
if call_function_with_tag_data:
match = self.name(markup_name, markup_attrs)
else:
match = True
markup_attr_map = None
for attr, match_against in list(self.attrs.items()):
if not markup_attr_map:
if hasattr(markup_attrs, 'get'):
markup_attr_map = markup_attrs
else:
markup_attr_map = {}
for k, v in markup_attrs:
markup_attr_map[k] = v
attr_value = markup_attr_map.get(attr)
if not self._matches(attr_value, match_against):
match = False
break
if match:
if markup:
found = markup
else:
found = markup_name
if found and self.text and not self._matches(found.string, self.text):
found = None
return found
searchTag = search_tag
def search(self, markup):
# print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text or self.name or self.attrs:
found = self.search_tag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, basestring):
if not self.name and not self.attrs and self._matches(markup, self.text):
found = markup
else:
raise Exception(
"I don't know how to match against a %s" % markup.__class__)
return found
def _matches(self, markup, match_against):
# print u"Matching %s against %s" % (markup, match_against)
result = False
if isinstance(markup, list) or isinstance(markup, tuple):
# This should only happen when searching a multi-valued attribute
# like 'class'.
if (isinstance(match_against, unicode)
and ' ' in match_against):
# A bit of a special case. If they try to match "foo
# bar" on a multivalue attribute's value, only accept
# the literal value "foo bar"
#
# XXX This is going to be pretty slow because we keep
# splitting match_against. But it shouldn't come up
# too often.
return (whitespace_re.split(match_against) == markup)
else:
for item in markup:
if self._matches(item, match_against):
return True
return False
if match_against is True:
# True matches any non-None value.
return markup is not None
if isinstance(match_against, collections.Callable):
return match_against(markup)
# Custom callables take the tag as an argument, but all
# other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
# Ensure that `markup` is either a Unicode string, or None.
markup = self._normalize_search_value(markup)
if markup is None:
# None matches None, False, an empty string, an empty list, and so on.
return not match_against
if isinstance(match_against, unicode):
# Exact string match
return markup == match_against
if hasattr(match_against, 'match'):
# Regexp match
return match_against.search(markup)
if hasattr(match_against, '__iter__'):
# The markup must be an exact match against something
# in the iterable.
return markup in match_against
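# A small illustrative sketch of how SoupStrainer is typically driven; the
# markup values below are hypothetical, and only APIs defined in this module
# are used:
#
#     strainer = SoupStrainer('a', class_='external')
#     strainer.search_tag('a', {'class': ['external']})    # -> 'a' (match)
#     strainer.search_tag('div', {'class': ['external']})  # -> None (name differs)
#
# search_tag() first checks self.name, then every entry in self.attrs via
# _matches(); a text-only strainer instead matches strings through search().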
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source, result=()):
super(ResultSet, self).__init__(result)
self.source = source
|
SanchayanMaity/gem5
|
refs/heads/CS570
|
src/arch/x86/isa/insts/simd128/floating_point/data_conversion/__init__.py
|
91
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
categories = ["convert_floating_point_to_floating_point",
"convert_floating_point_to_xmm_integer",
"convert_floating_point_to_mmx_integer",
"convert_floating_point_to_gpr_integer"]
microcode = '''
# SSE instructions
'''
for category in categories:
exec "import %s as cat" % category
microcode += cat.microcode
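# For illustration, the first loop iteration above is equivalent to:
#
#     import convert_floating_point_to_floating_point as cat
#     microcode += cat.microcode
#
# so each category package appends its own microcode string to this module's.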
|
amenonsen/ansible
|
refs/heads/devel
|
test/units/modules/network/fortimanager/__init__.py
|
12133432
| |
djtaylor/lense-client
|
refs/heads/dev
|
usr/share/lense/python/__init__.py
|
12133432
| |
peterhinch/micropython-lcd160cr-gui
|
refs/heads/master
|
font10.py
|
3
|
# Code generated by font-to-py.py.
# Font: FreeSans.ttf
version = '0.1'
def height():
return 17
def max_width():
return 17
def hmap():
return True
def reverse():
return False
def monospaced():
return False
_font =\
b'\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x06\x00\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\x80'\
b'\x00\xc0\x00\x00\x00\x00\x06\x00\x00\xf0\xf0\xf0\xa0\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x19'\
b'\x00\x19\x00\x13\x00\x7f\x80\x12\x00\x32\x00\x32\x00\xff\x80\x26'\
b'\x00\x24\x00\x64\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x10'\
b'\x00\x3c\x00\x56\x00\xd3\x00\xd3\x00\xd0\x00\xd0\x00\x3c\x00\x17'\
b'\x00\x13\x00\xd3\x00\xd6\x00\x7c\x00\x10\x00\x00\x00\x00\x00\x00'\
b'\x00\x0f\x00\x00\x00\x78\x20\xcc\x40\xcc\x80\xcc\x80\xc9\x00\x31'\
b'\x00\x02\x78\x04\xcc\x04\xcc\x08\xcc\x08\xcc\x10\x78\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x1e\x00\x33\x00\x33\x00\x33'\
b'\x00\x1e\x00\x18\x00\x74\xc0\xe6\xc0\xc3\x80\xc1\x80\xe3\x80\x3c'\
b'\x40\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\xc0\xc0\xc0\x80'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x10\x20'\
b'\x20\x60\x40\xc0\xc0\xc0\xc0\xc0\xc0\x40\x60\x20\x30\x10\x00\x06'\
b'\x00\x80\xc0\x40\x60\x20\x30\x30\x30\x30\x30\x30\x20\x60\x40\xc0'\
b'\x80\x00\x07\x00\x20\xa8\x70\x50\x50\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x30\x00\x30\x00\x30\x00\xfc\x00\x30\x00\x30\x00\x30'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xc0\x40\x40\x80\x00\x06\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x04'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x00\x00'\
b'\x00\x00\x05\x00\x08\x08\x10\x10\x10\x20\x20\x20\x40\x40\x40\x80'\
b'\x80\x00\x00\x00\x00\x09\x00\x00\x00\x3c\x00\x66\x00\x42\x00\xc3'\
b'\x00\xc3\x00\xc3\x00\xc3\x00\xc3\x00\xc3\x00\x42\x00\x66\x00\x3c'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x10\x00\x30'\
b'\x00\xf0\x00\x30\x00\x30\x00\x30\x00\x30\x00\x30\x00\x30\x00\x30'\
b'\x00\x30\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00'\
b'\x00\x3c\x00\x66\x00\xc3\x00\xc3\x00\x03\x00\x06\x00\x0c\x00\x38'\
b'\x00\x60\x00\x40\x00\xc0\x00\xff\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x09\x00\x00\x00\x7c\x00\xe7\x00\xc3\x00\x03\x00\x02\x00\x1c'\
b'\x00\x07\x00\x03\x00\x03\x00\xc3\x00\xe6\x00\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x09\x00\x00\x00\x0c\x00\x0c\x00\x1c\x00\x2c'\
b'\x00\x2c\x00\x4c\x00\x8c\x00\x8c\x00\xfe\x00\x0c\x00\x0c\x00\x0c'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x7e\x00\x40'\
b'\x00\x40\x00\x80\x00\xbc\x00\xe6\x00\x03\x00\x03\x00\x03\x00\xc3'\
b'\x00\x66\x00\x3c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00'\
b'\x00\x3c\x00\x66\x00\x43\x00\xc0\x00\xc0\x00\xfc\x00\xe6\x00\xc3'\
b'\x00\xc3\x00\xc3\x00\x66\x00\x3c\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x09\x00\x00\x00\xff\x00\x03\x00\x02\x00\x06\x00\x04\x00\x0c'\
b'\x00\x08\x00\x18\x00\x18\x00\x10\x00\x30\x00\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x09\x00\x00\x00\x3c\x00\x66\x00\xc3\x00\xc3'\
b'\x00\x66\x00\x3c\x00\x66\x00\xc3\x00\xc3\x00\xc3\x00\x66\x00\x3c'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x3c\x00\x66'\
b'\x00\xc3\x00\xc3\x00\xc3\x00\x67\x00\x3b\x00\x03\x00\x03\x00\xc2'\
b'\x00\x66\x00\x3c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00'\
b'\x00\x00\x00\xc0\x00\x00\x00\x00\x00\x00\x00\xc0\x00\x00\x00\x00'\
b'\x04\x00\x00\x00\x00\x00\x00\xc0\x00\x00\x00\x00\x00\x00\xc0\x40'\
b'\x40\x80\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03'\
b'\x00\x0e\x00\x38\x00\xc0\x00\xe0\x00\x38\x00\x07\x00\x01\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00\x00\x00\x00\xff\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xc0\x00\x70\x00\x1c\x00\x03\x00\x07'\
b'\x00\x1c\x00\xe0\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09'\
b'\x00\x3c\x00\xc7\x00\xc3\x00\x03\x00\x03\x00\x06\x00\x0c\x00\x08'\
b'\x00\x18\x00\x18\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x11\x00\x07\xe0\x00\x0c\x38\x00\x30\x0c\x00\x20\x06'\
b'\x00\x63\xb7\x00\x4c\x73\x00\xcc\x63\x00\xd8\x63\x00\xd8\x63\x00'\
b'\xd8\x46\x00\xdc\xce\x00\x6f\x78\x00\x30\x00\x00\x18\x00\x00\x0f'\
b'\xe0\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x06\x00\x0e\x00\x0b\x00'\
b'\x1b\x00\x1b\x00\x11\x80\x31\x80\x31\x80\x3f\xc0\x60\xc0\x60\x40'\
b'\x40\x60\xc0\x60\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\xfe\x00'\
b'\xc3\x80\xc1\x80\xc1\x80\xc1\x80\xc3\x00\xfe\x00\xc1\x80\xc0\xc0'\
b'\xc0\xc0\xc0\xc0\xc1\x80\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x0c\x00\x1f\x80\x30\xc0\x60\x60\x40\x60\xc0\x00\xc0\x00\xc0\x00'\
b'\xc0\x00\xc0\x00\x40\x60\x60\x60\x30\xc0\x1f\x80\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x0c\x00\xff\x00\xc1\x80\xc0\xc0\xc0\x60\xc0\x60'\
b'\xc0\x60\xc0\x60\xc0\x60\xc0\x60\xc0\x60\xc0\xc0\xc1\x80\xff\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\xff\x00\xc0\x00\xc0\x00'\
b'\xc0\x00\xc0\x00\xc0\x00\xff\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00'\
b'\xc0\x00\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\xff\x00'\
b'\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xfe\x00\xc0\x00\xc0\x00'\
b'\xc0\x00\xc0\x00\xc0\x00\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x0d\x00\x0f\xc0\x30\x60\x60\x30\x60\x00\xc0\x00\xc0\x00\xc1\xf0'\
b'\xc0\x30\xc0\x30\x60\x30\x60\x70\x30\xf0\x0f\x10\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x0c\x00\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0'\
b'\xc0\xc0\xff\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\xc0\xc0\xc0\xc0\xc0\xc0'\
b'\xc0\xc0\xc0\xc0\xc0\xc0\xc0\x00\x00\x00\x00\x09\x00\x06\x00\x06'\
b'\x00\x06\x00\x06\x00\x06\x00\x06\x00\x06\x00\x06\x00\x06\x00\xc6'\
b'\x00\xc6\x00\xc4\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b'\
b'\x00\xc0\xc0\xc1\x80\xc3\x00\xc6\x00\xcc\x00\xd8\x00\xfc\x00\xe6'\
b'\x00\xc6\x00\xc3\x00\xc1\x80\xc1\x80\xc0\xc0\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x0a\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0'\
b'\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xfe\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x0e\x00\xe0\x38\xe0\x38\xf0\x78\xf0'\
b'\x78\xd0\x58\xd8\xd8\xd8\xd8\xc8\x98\xcd\x98\xcd\x98\xc5\x18\xc7'\
b'\x18\xc7\x18\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x00\xe0\x60\xe0'\
b'\x60\xf0\x60\xd0\x60\xd8\x60\xcc\x60\xc4\x60\xc6\x60\xc3\x60\xc3'\
b'\x60\xc1\xe0\xc0\xe0\xc0\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x0d'\
b'\x00\x1f\x80\x30\xc0\x60\x60\xe0\x60\xc0\x30\xc0\x30\xc0\x30\xc0'\
b'\x30\xc0\x30\xe0\x60\x60\x60\x30\xc0\x1f\x80\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x0b\x00\xff\x00\xc1\x80\xc0\xc0\xc0\xc0\xc0\xc0\xc1'\
b'\x80\xff\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x1f\x80\x30\xc0\x60\x60\xe0'\
b'\x60\xc0\x30\xc0\x30\xc0\x30\xc0\x30\xc0\x30\xe1\x60\x61\xe0\x30'\
b'\xc0\x1f\xe0\x00\x20\x00\x00\x00\x00\x00\x00\x0c\x00\xff\x00\xc1'\
b'\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc1\x80\xff\x00\xc1\xc0\xc0\xc0\xc0'\
b'\xc0\xc0\xc0\xc0\xc0\xc0\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x0b'\
b'\x00\x3f\x00\x61\x80\xc0\xc0\xc0\x00\xc0\x00\x60\x00\x3e\x00\x07'\
b'\x80\x01\xc0\xc0\xc0\xc0\xc0\x61\x80\x3f\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x0b\x00\xff\x00\x18\x00\x18\x00\x18\x00\x18\x00\x18'\
b'\x00\x18\x00\x18\x00\x18\x00\x18\x00\x18\x00\x18\x00\x18\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x0c\x00\xc0\xc0\xc0\xc0\xc0\xc0\xc0'\
b'\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\x61'\
b'\x80\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\xc0\x60\x40'\
b'\x40\x60\xc0\x60\xc0\x20\x80\x31\x80\x31\x80\x11\x00\x1b\x00\x0b'\
b'\x00\x0a\x00\x0e\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10'\
b'\x00\xc1\x83\xc1\x82\x42\x86\x62\xc6\x62\xc6\x62\x44\x24\x44\x24'\
b'\x6c\x34\x2c\x3c\x28\x18\x38\x18\x38\x18\x18\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x0b\x00\x60\x40\x20\xc0\x31\x80\x19\x00\x1b\x00\x0e'\
b'\x00\x06\x00\x0e\x00\x1b\x00\x11\x80\x31\x80\x60\xc0\x40\x60\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x40\x60\x60\x60\x30\xc0\x30'\
b'\xc0\x19\x80\x0d\x00\x0f\x00\x06\x00\x06\x00\x06\x00\x06\x00\x06'\
b'\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\xff\x80\x01'\
b'\x80\x03\x00\x06\x00\x06\x00\x0c\x00\x18\x00\x18\x00\x30\x00\x60'\
b'\x00\x60\x00\xc0\x00\xff\x80\x00\x00\x00\x00\x00\x00\x00\x00\x05'\
b'\x00\xe0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0'\
b'\xc0\xe0\x05\x00\x80\x80\x40\x40\x40\x20\x20\x20\x10\x10\x10\x08'\
b'\x08\x00\x00\x00\x00\x05\x00\xe0\x60\x60\x60\x60\x60\x60\x60\x60'\
b'\x60\x60\x60\x60\x60\x60\x60\xe0\x08\x00\x00\x30\x30\x78\x48\x48'\
b'\x84\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xff\xc0\x00\x00\x00\x00\x00\x00\x04'\
b'\x00\xc0\x40\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7c\x00\xc6\x00'\
b'\x06\x00\x06\x00\x7e\x00\xc6\x00\xc6\x00\xce\x00\x77\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x09\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00'\
b'\xde\x00\xe3\x00\xc1\x80\xc1\x80\xc1\x80\xc1\x80\xc1\x80\xe3\x00'\
b'\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x3c\x00\x66\x00\xc3\x00\xc0\x00\xc0\x00\xc0\x00'\
b'\xc3\x00\x66\x00\x3c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00'\
b'\x03\x00\x03\x00\x03\x00\x03\x00\x3b\x00\x67\x00\xc3\x00\xc3\x00'\
b'\xc3\x00\xc3\x00\xc3\x00\x67\x00\x3b\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3c\x00\x66\x00'\
b'\xc3\x00\xc3\x00\xff\x00\xc0\x00\xc3\x00\x66\x00\x3c\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x05\x00\x30\x60\x60\x60\xf0\x60\x60\x60'\
b'\x60\x60\x60\x60\x60\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x3b\x00\x67\x00\xc3\x00\xc3\x00\xc3\x00\xc3\x00\xc3'\
b'\x00\x67\x00\x3b\x00\x03\x00\x03\x00\xc6\x00\x7c\x00\x09\x00\xc0'\
b'\x00\xc0\x00\xc0\x00\xc0\x00\xde\x00\xe3\x00\xc3\x00\xc3\x00\xc3'\
b'\x00\xc3\x00\xc3\x00\xc3\x00\xc3\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x04\x00\xc0\x00\x00\x00\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0'\
b'\x00\x00\x00\x00\x04\x00\x60\x00\x00\x00\x60\x60\x60\x60\x60\x60'\
b'\x60\x60\x60\x60\x60\x60\xc0\x09\x00\xc0\x00\xc0\x00\xc0\x00\xc0'\
b'\x00\xc6\x00\xcc\x00\xd8\x00\xf8\x00\xe8\x00\xcc\x00\xc6\x00\xc6'\
b'\x00\xc3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\xc0\xc0\xc0'\
b'\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\x00\x00\x00\x00\x0e\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\xdd\xe0\xe7\x30\xc6\x30\xc6\x30'\
b'\xc6\x30\xc6\x30\xc6\x30\xc6\x30\xc6\x30\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\x00\xe3\x00'\
b'\xc3\x00\xc3\x00\xc3\x00\xc3\x00\xc3\x00\xc3\x00\xc3\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x3c\x00\x66\x00\xc3\x00\xc3\x00\xc3\x00\xc3\x00\xc3\x00\x66\x00'\
b'\x3c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xde\x00\xe3\x00\xc1\x80\xc1\x80\xc1\x80\xc1\x80'\
b'\xc1\x80\xe3\x00\xde\x00\xc0\x00\xc0\x00\xc0\x00\x00\x00\x0a\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x3b\x00\x67\x00\xc3\x00\xc3\x00'\
b'\xc3\x00\xc3\x00\xc3\x00\x67\x00\x3b\x00\x03\x00\x03\x00\x03\x00'\
b'\x00\x00\x06\x00\x00\x00\x00\x00\xd8\xe0\xc0\xc0\xc0\xc0\xc0\xc0'\
b'\xc0\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x7c\xc6\xc0\xc0\x70'\
b'\x0e\xc6\xc6\x7c\x00\x00\x00\x00\x05\x00\x00\x00\x60\x60\xf0\x60'\
b'\x60\x60\x60\x60\x60\x60\x70\x00\x00\x00\x00\x09\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc3\x00\xc3\x00\xc3\x00\xc3\x00\xc3\x00\xc3'\
b'\x00\xc3\x00\xc7\x00\x7b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08'\
b'\x00\x00\x00\x00\x00\xc3\x43\x62\x66\x26\x34\x3c\x18\x18\x00\x00'\
b'\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc6\x30\x46\x30'\
b'\x47\x20\x6f\x20\x69\x60\x29\x60\x29\xc0\x39\xc0\x10\xc0\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x42\x66\x34\x18'\
b'\x18\x1c\x24\x66\x43\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\xc3'\
b'\x42\x42\x66\x24\x24\x3c\x18\x18\x18\x10\x30\x60\x08\x00\x00\x00'\
b'\x00\x00\xfe\x0c\x08\x18\x30\x60\x40\xc0\xfe\x00\x00\x00\x00\x06'\
b'\x00\x30\x60\x60\x60\x60\x60\x60\xe0\xc0\xe0\x60\x60\x60\x60\x60'\
b'\x60\x30\x04\x00\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0'\
b'\xc0\xc0\xc0\xc0\x00\x06\x00\xc0\x60\x60\x60\x60\x60\x60\x70\x30'\
b'\x70\x60\x60\x60\x60\x60\x60\xc0\x09\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x62\x00\x9e\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
_index =\
b'\x00\x00\x13\x00\x26\x00\x39\x00\x5d\x00\x81\x00\xa5\x00\xc9\x00'\
b'\xdc\x00\xef\x00\x02\x01\x15\x01\x39\x01\x4c\x01\x5f\x01\x72\x01'\
b'\x85\x01\xa9\x01\xcd\x01\xf1\x01\x15\x02\x39\x02\x5d\x02\x81\x02'\
b'\xa5\x02\xc9\x02\xed\x02\x00\x03\x13\x03\x37\x03\x5b\x03\x7f\x03'\
b'\xa3\x03\xd8\x03\xfc\x03\x20\x04\x44\x04\x68\x04\x8c\x04\xb0\x04'\
b'\xd4\x04\xf8\x04\x0b\x05\x2f\x05\x53\x05\x77\x05\x9b\x05\xbf\x05'\
b'\xe3\x05\x07\x06\x2b\x06\x4f\x06\x73\x06\x97\x06\xbb\x06\xdf\x06'\
b'\x03\x07\x27\x07\x4b\x07\x6f\x07\x82\x07\x95\x07\xa8\x07\xbb\x07'\
b'\xdf\x07\xf2\x07\x16\x08\x3a\x08\x5e\x08\x82\x08\xa6\x08\xb9\x08'\
b'\xdd\x08\x01\x09\x14\x09\x27\x09\x4b\x09\x5e\x09\x82\x09\xa6\x09'\
b'\xca\x09\xee\x09\x12\x0a\x25\x0a\x38\x0a\x4b\x0a\x6f\x0a\x82\x0a'\
b'\xa6\x0a\xb9\x0a\xcc\x0a\xdf\x0a\xf2\x0a\x05\x0b\x18\x0b\x3c\x0b'\
_mvfont = memoryview(_font)
def _chr_addr(ordch):
offset = 2 * (ordch - 32)
return int.from_bytes(_index[offset:offset + 2], 'little')
def get_ch(ch):
ordch = ord(ch)
ordch = ordch if ordch >= 32 and ordch <= 126 else ord('?')
offset = _chr_addr(ordch)
width = int.from_bytes(_font[offset:offset + 2], 'little')
next_offs = _chr_addr(ordch +1)
return _mvfont[offset + 2:next_offs], 17, width
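# Hypothetical rendering sketch (not part of the generated file): get_ch()
# returns a horizontally mapped glyph bitmap, the fixed height (17) and the
# glyph width, so each byte-aligned row can be printed in turn.
#
#     glyph, height, width = get_ch('A')
#     bytes_per_row = len(glyph) // height
#     for row in range(height):
#         chunk = glyph[row * bytes_per_row:(row + 1) * bytes_per_row]
#         bits = ''.join('{:08b}'.format(b) for b in chunk)[:width]
#         print(bits.replace('0', ' ').replace('1', '#'))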
|
ran5515/DeepDecision
|
refs/heads/master
|
tensorflow/python/kernel_tests/weights_broadcast_test.py
|
130
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for broadcast rules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.platform import test
def _test_values(shape):
return np.reshape(np.cumsum(np.ones(shape), dtype=np.int32), newshape=shape)
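# For example, _test_values((3, 2, 4)) is the integers 1..24 laid out in a
# 3x2x4 array; it serves as a fixed values tensor for every weights shape
# exercised below.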
class AssertBroadcastableTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def _test_valid(self, weights, values):
static_op = weights_broadcast_ops.assert_broadcastable(
weights=weights, values=values)
weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
values_placeholder = array_ops.placeholder(dtypes_lib.float32)
dynamic_op = weights_broadcast_ops.assert_broadcastable(
weights=weights_placeholder, values=values_placeholder)
with self.test_session():
static_op.run()
dynamic_op.run(feed_dict={
weights_placeholder: weights,
values_placeholder: values,
})
def testScalar(self):
self._test_valid(weights=5, values=_test_values((3, 2, 4)))
def test1x1x1(self):
self._test_valid(
weights=np.asarray((5,)).reshape((1, 1, 1)),
values=_test_values((3, 2, 4)))
def test1x1xN(self):
self._test_valid(
weights=np.asarray((5, 7, 11, 3)).reshape((1, 1, 4)),
values=_test_values((3, 2, 4)))
def test1xNx1(self):
self._test_valid(
weights=np.asarray((5, 11)).reshape((1, 2, 1)),
values=_test_values((3, 2, 4)))
def test1xNxN(self):
self._test_valid(
weights=np.asarray((5, 7, 11, 3, 2, 13, 7, 5)).reshape((1, 2, 4)),
values=_test_values((3, 2, 4)))
def testNx1x1(self):
self._test_valid(
weights=np.asarray((5, 7, 11)).reshape((3, 1, 1)),
values=_test_values((3, 2, 4)))
def testNx1xN(self):
self._test_valid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3)).reshape((3, 1, 4)),
values=_test_values((3, 2, 4)))
def testNxNxN(self):
self._test_valid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4)),
values=_test_values((3, 2, 4)))
def _test_invalid(self, weights, values):
error_msg = 'weights can not be broadcast to values'
with self.assertRaisesRegexp(ValueError, error_msg):
weights_broadcast_ops.assert_broadcastable(weights=weights, values=values)
weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
values_placeholder = array_ops.placeholder(dtypes_lib.float32)
dynamic_op = weights_broadcast_ops.assert_broadcastable(
weights=weights_placeholder, values=values_placeholder)
with self.test_session():
with self.assertRaisesRegexp(errors_impl.OpError, error_msg):
dynamic_op.run(feed_dict={
weights_placeholder: weights,
values_placeholder: values,
})
def testInvalid1(self):
self._test_invalid(weights=np.asarray((5,)), values=_test_values((3, 2, 4)))
def testInvalid1x1(self):
self._test_invalid(
weights=np.asarray((5,)).reshape((1, 1)),
values=_test_values((3, 2, 4)))
def testInvalidPrefixMatch(self):
self._test_invalid(
weights=np.asarray((5, 7, 11, 3, 2, 12)).reshape((3, 2)),
values=_test_values((3, 2, 4)))
def testInvalidSuffixMatch(self):
self._test_invalid(
weights=np.asarray((5, 7, 11, 3, 2, 12, 7, 5)).reshape((2, 4)),
values=_test_values((3, 2, 4)))
def testInvalidOnesExtraDim(self):
self._test_invalid(
weights=np.asarray((5,)).reshape((1, 1, 1, 1)),
values=_test_values((3, 2, 4)))
def testInvalidPrefixMatchExtraDim(self):
self._test_invalid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4, 1)),
values=_test_values((3, 2, 4)))
def testInvalidSuffixMatchExtraDim(self):
self._test_invalid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((1, 3, 2, 4)),
values=_test_values((3, 2, 4)))
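# Taken together, the cases above encode the broadcast rule under test:
# weights must be a scalar or have the same rank as values, with every
# weights dimension equal to the matching values dimension or to 1. Prefix or
# suffix matches and extra dimensions are rejected, statically for constant
# shapes and at run time for placeholders.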
class BroadcastWeightsTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def _test_valid(self, weights, values, expected):
static_op = weights_broadcast_ops.broadcast_weights(
weights=weights, values=values)
weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
values_placeholder = array_ops.placeholder(dtypes_lib.float32)
dynamic_op = weights_broadcast_ops.broadcast_weights(
weights=weights_placeholder, values=values_placeholder)
with self.test_session():
self.assertAllEqual(expected, static_op.eval())
self.assertAllEqual(expected, dynamic_op.eval(feed_dict={
weights_placeholder: weights,
values_placeholder: values,
}))
def testScalar(self):
self._test_valid(
weights=5,
values=_test_values((3, 2, 4)),
expected=5 * np.ones((3, 2, 4)))
def test1x1x1(self):
self._test_valid(
weights=np.asarray((5,)).reshape((1, 1, 1)),
values=_test_values((3, 2, 4)),
expected=5 * np.ones((3, 2, 4)))
def test1x1xN(self):
weights = np.asarray((5, 7, 11, 3)).reshape((1, 1, 4))
self._test_valid(
weights=weights,
values=_test_values((3, 2, 4)),
expected=np.tile(weights, reps=(3, 2, 1)))
def test1xNx1(self):
weights = np.asarray((5, 11)).reshape((1, 2, 1))
self._test_valid(
weights=weights,
values=_test_values((3, 2, 4)),
expected=np.tile(weights, reps=(3, 1, 4)))
def test1xNxN(self):
weights = np.asarray((5, 7, 11, 3, 2, 13, 7, 5)).reshape((1, 2, 4))
self._test_valid(
weights=weights,
values=_test_values((3, 2, 4)),
expected=np.tile(weights, reps=(3, 1, 1)))
def testNx1x1(self):
weights = np.asarray((5, 7, 11)).reshape((3, 1, 1))
self._test_valid(
weights=weights,
values=_test_values((3, 2, 4)),
expected=np.tile(weights, reps=(1, 2, 4)))
def testNx1xN(self):
weights = np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3)).reshape((3, 1, 4))
self._test_valid(
weights=weights,
values=_test_values((3, 2, 4)),
expected=np.tile(weights, reps=(1, 2, 1)))
def testNxNxN(self):
weights = np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4))
self._test_valid(
weights=weights, values=_test_values((3, 2, 4)), expected=weights)
def _test_invalid(self, weights, values):
error_msg = 'weights can not be broadcast to values'
with self.assertRaisesRegexp(ValueError, error_msg):
weights_broadcast_ops.broadcast_weights(weights=weights, values=values)
weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
values_placeholder = array_ops.placeholder(dtypes_lib.float32)
dynamic_op = weights_broadcast_ops.broadcast_weights(
weights=weights_placeholder, values=values_placeholder)
with self.test_session():
with self.assertRaisesRegexp(errors_impl.OpError, error_msg):
dynamic_op.eval(feed_dict={
weights_placeholder: weights,
values_placeholder: values,
})
def testInvalid1(self):
self._test_invalid(weights=np.asarray((5,)), values=_test_values((3, 2, 4)))
def testInvalid1x1(self):
self._test_invalid(
weights=np.asarray((5,)).reshape((1, 1)),
values=_test_values((3, 2, 4)))
def testInvalidPrefixMatch(self):
self._test_invalid(
weights=np.asarray((5, 7, 11, 3, 2, 12)).reshape((3, 2)),
values=_test_values((3, 2, 4)))
def testInvalidSuffixMatch(self):
self._test_invalid(
weights=np.asarray((5, 7, 11, 3, 2, 12, 7, 5)).reshape((2, 4)),
values=_test_values((3, 2, 4)))
def testInvalidOnesExtraDim(self):
self._test_invalid(
weights=np.asarray((5,)).reshape((1, 1, 1, 1)),
values=_test_values((3, 2, 4)))
def testInvalidPrefixMatchExtraDim(self):
self._test_invalid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4, 1)),
values=_test_values((3, 2, 4)))
def testInvalidSuffixMatchExtraDim(self):
self._test_invalid(
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((1, 3, 2, 4)),
values=_test_values((3, 2, 4)))
if __name__ == '__main__':
test.main()
|
seann1/portfolio5
|
refs/heads/master
|
.meteor/dev_bundle/python/Lib/test/test_pkgimport.py
|
127
|
import os, sys, string, random, tempfile, unittest
from test.test_support import run_unittest
class TestImport(unittest.TestCase):
def __init__(self, *args, **kw):
self.package_name = 'PACKAGE_'
while self.package_name in sys.modules:
self.package_name += random.choice(string.letters)
self.module_name = self.package_name + '.foo'
unittest.TestCase.__init__(self, *args, **kw)
def remove_modules(self):
for module_name in (self.package_name, self.module_name):
if module_name in sys.modules:
del sys.modules[module_name]
def setUp(self):
self.test_dir = tempfile.mkdtemp()
sys.path.append(self.test_dir)
self.package_dir = os.path.join(self.test_dir,
self.package_name)
os.mkdir(self.package_dir)
open(os.path.join(
self.package_dir, '__init__'+os.extsep+'py'), 'w').close()
self.module_path = os.path.join(self.package_dir, 'foo'+os.extsep+'py')
def tearDown(self):
for file in os.listdir(self.package_dir):
os.remove(os.path.join(self.package_dir, file))
os.rmdir(self.package_dir)
os.rmdir(self.test_dir)
self.assertNotEqual(sys.path.count(self.test_dir), 0)
sys.path.remove(self.test_dir)
self.remove_modules()
def rewrite_file(self, contents):
for extension in "co":
compiled_path = self.module_path + extension
if os.path.exists(compiled_path):
os.remove(compiled_path)
f = open(self.module_path, 'w')
f.write(contents)
f.close()
def test_package_import__semantics(self):
# Generate a couple of broken modules to try importing.
# ...try loading the module when there's a SyntaxError
self.rewrite_file('for')
try: __import__(self.module_name)
except SyntaxError: pass
else: raise RuntimeError, 'Failed to induce SyntaxError'
self.assertNotIn(self.module_name, sys.modules)
self.assertFalse(hasattr(sys.modules[self.package_name], 'foo'))
# ...make up a variable name that isn't bound in __builtins__
var = 'a'
while var in dir(__builtins__):
var += random.choice(string.letters)
# ...make a module that just contains that
self.rewrite_file(var)
try: __import__(self.module_name)
except NameError: pass
else: raise RuntimeError, 'Failed to induce NameError.'
# ...now change the module so that the NameError doesn't
# happen
self.rewrite_file('%s = 1' % var)
module = __import__(self.module_name).foo
self.assertEqual(getattr(module, var), 1)
def test_main():
run_unittest(TestImport)
if __name__ == "__main__":
test_main()
|
wengzhilai/family
|
refs/heads/master
|
iSoft/dal/FamilyDal.py
|
1
|
from iSoft.model.AppReturnDTO import AppReturnDTO
from iSoft.entity.model import db, FaUserInfo
from iSoft.model.FamilyRelative.Relative import Relative
from iSoft.model.FamilyRelative.RelativeItem import RelativeItem
from iSoft.model.FamilyRelative.RelativeItem import RelativeItem, HorizonVal, AxisXY
class FamilyDal(object):
def UserInfoRelative(self, userId):
userInfoEnt = FaUserInfo.query.filter(FaUserInfo.ID == userId).first()
if userInfoEnt is None:
return None, AppReturnDTO(False, "用户不存在")
reEnt = Relative()
nowPlace, msg = self.AddSonItem(reEnt.ItemList, userInfoEnt, 1, 7,
AxisXY(0, 0))
item = self.UserInfoToRelativeItem(userInfoEnt, nowPlace.Between(), 0)
msg = self.AddFatherItem(reEnt.ItemList, userInfoEnt, 1, 4,AxisXY(nowPlace.Between(), 0))
reEnt.ItemList.append(item)
# Find the minimum coordinates; the point is that no coordinate may end up negative
minX = min([item.x for item in reEnt.ItemList])
minY = min([item.y for item in reEnt.ItemList])
for item in reEnt.ItemList:
if minX<0 :
item.x=item.x-minX
if minY<0:
item.y=item.y-minY
allUserId=[item.Id for item in reEnt.ItemList]
reEnt.RelativeList = [{
"K": item.Id,
"V": item.FatherId
} for item in reEnt.ItemList if item.FatherId is not None and item.FatherId in allUserId]
reEnt.FormatItemList()
return reEnt, AppReturnDTO(True)
def AddFatherItem(self, mainList, inSon, levelId, maxLevelId, inAxisXY):
if levelId > maxLevelId:
return True
if inSon.FATHER_ID is None:
return True
# Fetch all children of the father (including the current person) and sort them
father=FaUserInfo.query.filter(FaUserInfo.ID == inSon.FATHER_ID).first()
sonList=sorted(father.fa_user_infos, key=lambda x: x.LEVEL_ID)
# Locate the position of the person passed in among those children
myPlace = 0
myPlace=max(index for index in range(len(sonList)) if sonList[index].ID == inSon.ID)
brotherXList=[]
# Record the person's own X coordinate
brotherXList.append(inAxisXY.X)
# Siblings ranked before the person passed in
for i in range(0,myPlace):
nowI= myPlace-i
x = inAxisXY.X-((i+1)*2)
brotherXList.append(x)
item = self.UserInfoToRelativeItem(sonList[nowI-1], x, inAxisXY.Y)
mainList.append(item)
# Add the siblings ranked after the person passed in
for i in range(1,len(sonList)-myPlace):
nowI= myPlace+i
x = inAxisXY.X+(i*2)
brotherXList.append(x)
item = self.UserInfoToRelativeItem(sonList[nowI], x, inAxisXY.Y)
mainList.append(item)
minX=min(brotherXList)
maxX=max(brotherXList)
# Add the father
mainList.append(self.UserInfoToRelativeItem(father, (minX+maxX)/2 , inAxisXY.Y-1))
self.AddFatherItem(mainList, father, levelId+1, maxLevelId,AxisXY((minX+maxX)/2, inAxisXY.Y-1))
return True
def AddSonItem(self, mainList, inFather, levelId, maxLevelId, inAxisXY):
"""
Add child nodes and compute the HorizonVal for the current coordinates.
:param mainList:
:param inFather:
:param levelId:
:param maxLevelId:
:param inAxisXY:
"""
# Initialise the minimum values
reEnt = HorizonVal(inAxisXY.X, inAxisXY.X)
if levelId > maxLevelId: # bail out once the depth limit is exceeded
return reEnt, AppReturnDTO(True)
# likewise bail out if there are no children
if inFather.fa_user_infos is None or len(inFather.fa_user_infos) == 0:
return reEnt, AppReturnDTO(True)
startX = inAxisXY.X
# Loop over all children, ordered from lowest LEVEL_ID to highest
allChildren = sorted(inFather.fa_user_infos, key=lambda x: x.LEVEL_ID)
allChildXList=[]
for index, son in enumerate(allChildren):
# Lay out this child's own subtree first
nowHorizonVal, msg = self.AddSonItem(mainList, son, levelId+1, maxLevelId,
AxisXY(
startX, inAxisXY.Y + 1))
# This value is handed to the next sibling; add 2 because items are spaced 2 apart, so the later division still yields an integer
startX = nowHorizonVal.AllMaxHorizon if nowHorizonVal.AllMaxHorizon > nowHorizonVal.RowMaxHorizon + 2 else nowHorizonVal.RowMaxHorizon + 2
# Midpoint of this child's subtree
thisItemX = nowHorizonVal.Between()
# Record the X coordinate of every child
allChildXList.append(thisItemX)
item = self.UserInfoToRelativeItem(son, thisItemX, inAxisXY.Y + 1)
mainList.append(item)
reEnt.RowMaxHorizon = max(allChildXList)
reEnt.RowMinHorizon = min(allChildXList)
reEnt.AllMaxHorizon = startX if startX > reEnt.RowMaxHorizon else reEnt.RowMaxHorizon
return reEnt, AppReturnDTO(True)
def UserInfoToRelativeItem(self, faUserInfo, x, y):
"""
Convert a FaUserInfo entity into a RelativeItem.
:param self:
:param faUserInfo: the entity to convert
:param x: the x coordinate
:param y: the y coordinate
"""
reEnt = RelativeItem()
reEnt.ElderId = faUserInfo.ELDER_ID
if faUserInfo.fa_elder is not None:
reEnt.ElderName = faUserInfo.fa_elder.NAME
reEnt.FatherId = faUserInfo.FATHER_ID
# reEnt.IcoUrl = reEnt
reEnt.Id = faUserInfo.ID
reEnt.Name = faUserInfo.NAME
reEnt.Sex = faUserInfo.SEX
reEnt.x = x
reEnt.y = y
return reEnt
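# Layout convention used above, summarised from the code itself: descendants
# are placed on a grid in which siblings sit 2 units apart on X, each parent
# sits at the X midpoint of its children one row above them (Y - 1), and the
# finished grid is shifted so that no coordinate is negative. For a person
# with two children the result is, for example:
#
#     (1, 0) person
#     (0, 1) child 1    (2, 1) child 2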
|
oasiswork/odoo
|
refs/heads/8.0
|
addons/website_sale/controllers/__init__.py
|
7372
|
import main
|
grevutiu-gabriel/sympy
|
refs/heads/master
|
sympy/utilities/tests/diagnose_imports.py
|
99
|
#!/usr/bin/env python
"""
Import diagnostics. Run bin/diagnose_imports.py --help for details.
"""
from __future__ import print_function
if __name__ == "__main__":
import sys
import inspect
from sympy.core.compatibility import builtins
import optparse
from os.path import abspath, dirname, join, normpath
this_file = abspath(__file__)
sympy_dir = join(dirname(this_file), '..', '..', '..')
sympy_dir = normpath(sympy_dir)
sys.path.insert(0, sympy_dir)
option_parser = optparse.OptionParser(
usage=
"Usage: %prog option [options]\n"
"\n"
"Import analysis for imports between SymPy modules.")
option_group = optparse.OptionGroup(
option_parser,
'Analysis options',
'Options that define what to do. Exactly one of these must be given.')
option_group.add_option(
'--problems',
help=
'Print all import problems, that is: '
'If an import pulls in a package instead of a module '
'(e.g. sympy.core instead of sympy.core.add); ' # see ##PACKAGE##
'if it imports a symbol that is already present; ' # see ##DUPLICATE##
'if it imports a symbol '
'from somewhere other than the defining module.', # see ##ORIGIN##
action='count')
option_group.add_option(
'--origins',
help=
'For each imported symbol in each module, '
'print the module that defined it. '
'(This is useful for import refactoring.)',
action='count')
option_parser.add_option_group(option_group)
option_group = optparse.OptionGroup(
option_parser,
'Sort options',
'These options define the sort order for output lines. '
'At most one of these options is allowed. '
'Unsorted output will reflect the order in which imports happened.')
option_group.add_option(
'--by-importer',
help='Sort output lines by name of importing module.',
action='count')
option_group.add_option(
'--by-origin',
help='Sort output lines by name of imported module.',
action='count')
option_parser.add_option_group(option_group)
(options, args) = option_parser.parse_args()
if args:
option_parser.error(
'Unexpected arguments %s (try %s --help)' % (args, sys.argv[0]))
if options.problems > 1:
option_parser.error('--problems must not be given more than once.')
if options.origins > 1:
option_parser.error('--origins must not be given more than once.')
if options.by_importer > 1:
option_parser.error('--by-importer must not be given more than once.')
if options.by_origin > 1:
option_parser.error('--by-origin must not be given more than once.')
options.problems = options.problems == 1
options.origins = options.origins == 1
options.by_importer = options.by_importer == 1
options.by_origin = options.by_origin == 1
if not options.problems and not options.origins:
option_parser.error(
'At least one of --problems and --origins is required')
if options.problems and options.origins:
option_parser.error(
'At most one of --problems and --origins is allowed')
if options.by_importer and options.by_origin:
option_parser.error(
'At most one of --by-importer and --by-origin is allowed')
options.by_process = not options.by_importer and not options.by_origin
builtin_import = builtins.__import__
class Definition(object):
"""Information about a symbol's definition."""
def __init__(self, name, value, definer):
self.name = name
self.value = value
self.definer = definer
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return self.name == other.name and self.value == other.value
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return 'Definition(%s, ..., %s)' % (
repr(self.name), repr(self.definer))
symbol_definers = {} # Maps each Definition to the name of the module that defines it
def in_module(a, b):
"""Is a the same module as or a submodule of b?"""
return a == b or a != None and b != None and a.startswith(b + '.')
def relevant(module):
"""Is module relevant for import checking?
Only imports between relevant modules will be checked."""
return in_module(module, 'sympy')
sorted_messages = []
def msg(msg, *args):
global options, sorted_messages
if options.by_process:
print(msg % args)
else:
sorted_messages.append(msg % args)
def tracking_import(module, globals=globals(), locals=[], fromlist=None, level=-1):
"""__import__ wrapper - does not change imports at all, but tracks them.
Default order is implemented by doing output directly.
All other orders are implemented by collecting output information into
a sorted list that will be emitted after all imports are processed.
Indirect imports can only occur after the requested symbol has been
imported directly (because the indirect import would not have a module
to pick the symbol up from).
So this code detects indirect imports by checking whether the symbol in
question was already imported.
Keeps the semantics of __import__ unchanged."""
global options, symbol_definers
caller_frame = inspect.getframeinfo(sys._getframe(1))
importer_filename = caller_frame.filename
importer_module = globals['__name__']
if importer_filename == caller_frame.filename:
importer_reference = '%s line %s' % (
importer_filename, str(caller_frame.lineno))
else:
importer_reference = importer_filename
result = builtin_import(module, globals, locals, fromlist, level)
importee_module = result.__name__
# We're only interested if importer and importee are in SymPy
if relevant(importer_module) and relevant(importee_module):
for symbol in result.__dict__.iterkeys():
definition = Definition(
symbol, result.__dict__[symbol], importer_module)
if not definition in symbol_definers:
symbol_definers[definition] = importee_module
if hasattr(result, '__path__'):
##PACKAGE##
# The existence of __path__ is documented in the tutorial on modules.
# Python 3.3 documents this in http://docs.python.org/3.3/reference/import.html
if options.by_origin:
msg('Error: %s (a package) is imported by %s',
module, importer_reference)
else:
msg('Error: %s contains package import %s',
importer_reference, module)
if fromlist != None:
symbol_list = fromlist
if '*' in symbol_list:
if (importer_filename.endswith('__init__.py')
or importer_filename.endswith('__init__.pyc')
or importer_filename.endswith('__init__.pyo')):
# We do not check starred imports inside __init__
# That's the normal "please copy over its imports to my namespace"
symbol_list = []
else:
symbol_list = result.__dict__.iterkeys()
for symbol in symbol_list:
if not symbol in result.__dict__:
if options.by_origin:
msg('Error: %s.%s is not defined (yet), but %s tries to import it',
importee_module, symbol, importer_reference)
else:
msg('Error: %s tries to import %s.%s, which did not define it (yet)',
importer_reference, importee_module, symbol)
else:
definition = Definition(
symbol, result.__dict__[symbol], importer_module)
symbol_definer = symbol_definers[definition]
if symbol_definer == importee_module:
##DUPLICATE##
if options.by_origin:
msg('Error: %s.%s is imported again into %s',
importee_module, symbol, importer_reference)
else:
msg('Error: %s imports %s.%s again',
importer_reference, importee_module, symbol)
else:
##ORIGIN##
if options.by_origin:
msg('Error: %s.%s is imported by %s, which should import %s.%s instead',
importee_module, symbol, importer_reference, symbol_definer, symbol)
else:
msg('Error: %s imports %s.%s but should import %s.%s instead',
importer_reference, importee_module, symbol, symbol_definer, symbol)
return result
builtins.__import__ = tracking_import
__import__('sympy')
sorted_messages.sort()
for message in sorted_messages:
print(message)
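# Example invocations, using only the options defined above:
#
#     python bin/diagnose_imports.py --problems --by-importer
#     python bin/diagnose_imports.py --origins --by-origin
#
# Exactly one of --problems/--origins is required, at most one sort option is
# allowed, and unsorted output simply follows import order.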
|
ronakkhunt/kuma
|
refs/heads/master
|
vendor/packages/pygments/lexers/diff.py
|
75
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.diff
~~~~~~~~~~~~~~~~~~~~
Lexers for diff/patch formats.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, Generic, \
Literal
__all__ = ['DiffLexer', 'DarcsPatchLexer']
class DiffLexer(RegexLexer):
"""
Lexer for unified or context-style diffs or patches.
"""
name = 'Diff'
aliases = ['diff', 'udiff']
filenames = ['*.diff', '*.patch']
mimetypes = ['text/x-diff', 'text/x-patch']
tokens = {
'root': [
(r' .*\n', Text),
(r'\+.*\n', Generic.Inserted),
(r'-.*\n', Generic.Deleted),
(r'!.*\n', Generic.Strong),
(r'@.*\n', Generic.Subheading),
(r'([Ii]ndex|diff).*\n', Generic.Heading),
(r'=.*\n', Generic.Heading),
(r'.*\n', Text),
]
}
def analyse_text(text):
if text[:7] == 'Index: ':
return True
if text[:5] == 'diff ':
return True
if text[:4] == '--- ':
return 0.9
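# For reference, a unified-diff fragment and the token each line maps to under
# the DiffLexer rules above:
#
#     diff --git a/foo b/foo     -> Generic.Heading
#     @@ -1,2 +1,2 @@            -> Generic.Subheading
#     -old line                  -> Generic.Deleted
#     +new line                  -> Generic.Inserted
#      unchanged line            -> Text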
class DarcsPatchLexer(RegexLexer):
"""
DarcsPatchLexer is a lexer for the various versions of the darcs patch
format. Examples of this format are derived by commands such as
``darcs annotate --patch`` and ``darcs send``.
.. versionadded:: 0.10
"""
name = 'Darcs Patch'
aliases = ['dpatch']
filenames = ['*.dpatch', '*.darcspatch']
DPATCH_KEYWORDS = ('hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move',
'replace')
tokens = {
'root': [
(r'<', Operator),
(r'>', Operator),
(r'\{', Operator),
(r'\}', Operator),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])',
bygroups(Operator, Keyword, Name, Text, Name, Operator,
Literal.Date, Text, Operator)),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)',
bygroups(Operator, Keyword, Name, Text, Name, Operator,
Literal.Date, Text), 'comment'),
(r'New patches:', Generic.Heading),
(r'Context:', Generic.Heading),
(r'Patch bundle hash:', Generic.Heading),
(r'(\s*)(%s)(.*\n)' % '|'.join(DPATCH_KEYWORDS),
bygroups(Text, Keyword, Text)),
(r'\+', Generic.Inserted, "insert"),
(r'-', Generic.Deleted, "delete"),
(r'.*\n', Text),
],
'comment': [
(r'[^\]].*\n', Comment),
(r'\]', Operator, "#pop"),
],
'specialText': [ # darcs add [_CODE_] special operators for clarity
(r'\n', Text, "#pop"), # line-based
(r'\[_[^_]*_]', Operator),
],
'insert': [
include('specialText'),
(r'\[', Generic.Inserted),
(r'[^\n\[]+', Generic.Inserted),
],
'delete': [
include('specialText'),
(r'\[', Generic.Deleted),
(r'[^\n\[]+', Generic.Deleted),
],
}
|
h4ck3rm1k3/ansible
|
refs/heads/devel
|
v2/hacking/module_formatter.py
|
6
|
#!/usr/bin/env python
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# (c) 2012-2014, Michael DeHaan <michael@ansible.com> and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import os
import glob
import sys
import yaml
import codecs
import json
import ast
import re
import optparse
import time
import datetime
import subprocess
import cgi
from jinja2 import Environment, FileSystemLoader
import ansible.utils
import ansible.utils.module_docs as module_docs
#####################################################################################
# constants and paths
# if a module is added in a version of Ansible older than this, don't print the version added information
# in the module documentation because everyone is assumed to be running something newer than this already.
TO_OLD_TO_BE_NOTABLE = 1.0
# Get parent directory of the directory this script lives in
MODULEDIR=os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules'
))
# The name of the DOCUMENTATION template
EXAMPLE_YAML=os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, 'examples', 'DOCUMENTATION.yml'
))
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
DEPRECATED = " (D)"
NOTCORE = " (E)"
#####################################################################################
def rst_ify(text):
''' convert symbols like I(this is in italics) to valid restructured text '''
t = _ITALIC.sub(r'*' + r"\1" + r"*", text)
t = _BOLD.sub(r'**' + r"\1" + r"**", t)
t = _MODULE.sub(r'``' + r"\1" + r"``", t)
t = _URL.sub(r"\1", t)
t = _CONST.sub(r'``' + r"\1" + r"``", t)
return t
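# Illustrative example of the conversion performed above:
#   rst_ify("See I(this), B(that) and C(ansible.cfg)")
#   -> "See *this*, **that** and ``ansible.cfg``"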
#####################################################################################
def html_ify(text):
''' convert symbols like I(this is in italics) to valid HTML '''
t = cgi.escape(text)
t = _ITALIC.sub("<em>" + r"\1" + "</em>", t)
t = _BOLD.sub("<b>" + r"\1" + "</b>", t)
t = _MODULE.sub("<span class='module'>" + r"\1" + "</span>", t)
t = _URL.sub("<a href='" + r"\1" + "'>" + r"\1" + "</a>", t)
t = _CONST.sub("<code>" + r"\1" + "</code>", t)
return t
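# Illustrative example: html_ify("U(http://docs.ansible.com)") yields
#   "<a href='http://docs.ansible.com'>http://docs.ansible.com</a>"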
#####################################################################################
def rst_fmt(text, fmt):
''' helper for Jinja2 to do format strings '''
return fmt % (text)
#####################################################################################
def rst_xline(width, char="="):
''' return a restructured text line of a given length '''
return char * width
#####################################################################################
def write_data(text, options, outputname, module):
''' dumps module output to a file or the screen, as requested '''
if options.output_dir is not None:
fname = os.path.join(options.output_dir, outputname % module)
fname = fname.replace(".py","")
f = open(fname, 'w')
f.write(text.encode('utf-8'))
f.close()
else:
print text
#####################################################################################
def list_modules(module_dir, depth=0):
''' returns a hash of categories, each category being a hash of module names to file paths '''
categories = dict(all=dict(),_aliases=dict())
if depth <= 3: # limit # of subdirs
files = glob.glob("%s/*" % module_dir)
for d in files:
category = os.path.splitext(os.path.basename(d))[0]
if os.path.isdir(d):
res = list_modules(d, depth + 1)
for key in res.keys():
if key in categories:
categories[key] = ansible.utils.merge_hash(categories[key], res[key])
res.pop(key, None)
if depth < 2:
categories.update(res)
else:
category = module_dir.split("/")[-1]
if not category in categories:
categories[category] = res
else:
categories[category].update(res)
else:
module = category
category = os.path.basename(module_dir)
if not d.endswith(".py") or d.endswith('__init__.py'):
# windows powershell modules have documentation stubs in python docstring
# format (they are not executed) so skip the ps1 format files
continue
elif module.startswith("_") and os.path.islink(d):
source = os.path.splitext(os.path.basename(os.path.realpath(d)))[0]
module = module.replace("_","",1)
                    if source not in categories['_aliases']:
                        categories['_aliases'][source] = [module]
                    else:
                        categories['_aliases'][source].append(module)
continue
if not category in categories:
categories[category] = {}
categories[category][module] = d
categories['all'][module] = d
return categories
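# Illustrative shape of the returned structure (module names and paths are
# hypothetical):
#   {'all': {'copy': '.../files/copy.py', ...},
#    '_aliases': {'fetch': ['slurp'], ...},
#    'files': {'copy': '.../files/copy.py', ...}}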
#####################################################################################
def generate_parser():
''' generate an optparse parser '''
p = optparse.OptionParser(
version='%prog 1.0',
usage='usage: %prog [options] arg1 arg2',
description='Generate module documentation from metadata',
)
p.add_option("-A", "--ansible-version", action="store", dest="ansible_version", default="unknown", help="Ansible version number")
p.add_option("-M", "--module-dir", action="store", dest="module_dir", default=MODULEDIR, help="Ansible library path")
p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="hacking/templates", help="directory containing Jinja2 templates")
p.add_option("-t", "--type", action='store', dest='type', choices=['rst'], default='rst', help="Document type")
p.add_option("-v", "--verbose", action='store_true', default=False, help="Verbose")
p.add_option("-o", "--output-dir", action="store", dest="output_dir", default=None, help="Output directory for module files")
p.add_option("-I", "--includes-file", action="store", dest="includes_file", default=None, help="Create a file containing list of processed modules")
p.add_option('-V', action='version', help='Show version number and exit')
return p
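# Hypothetical invocation (paths and version are examples only):
#   hacking/module_formatter.py -M lib/ansible/modules -T hacking/templates \
#       -o /tmp/docsite -t rst -A 2.0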
#####################################################################################
def jinja2_environment(template_dir, typ):
env = Environment(loader=FileSystemLoader(template_dir),
variable_start_string="@{",
variable_end_string="}@",
trim_blocks=True,
)
env.globals['xline'] = rst_xline
if typ == 'rst':
env.filters['convert_symbols_to_format'] = rst_ify
env.filters['html_ify'] = html_ify
env.filters['fmt'] = rst_fmt
env.filters['xline'] = rst_xline
template = env.get_template('rst.j2')
outputname = "%s_module.rst"
else:
raise Exception("unknown module format type: %s" % typ)
return env, template, outputname
#####################################################################################
def process_module(module, options, env, template, outputname, module_map, aliases):
fname = module_map[module]
if isinstance(fname, dict):
return "SKIPPED"
basename = os.path.basename(fname)
deprecated = False
# ignore files with extensions
if not basename.endswith(".py"):
return
elif module.startswith("_"):
if os.path.islink(fname):
return # ignore, its an alias
deprecated = True
module = module.replace("_","",1)
print "rendering: %s" % module
# use ansible core library to parse out doc metadata YAML and plaintext examples
doc, examples = ansible.utils.module_docs.get_docstring(fname, verbose=options.verbose)
# crash if module is missing documentation and not explicitly hidden from docs index
if doc is None:
if module in ansible.utils.module_docs.BLACKLIST_MODULES:
return "SKIPPED"
else:
sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module))
sys.exit(1)
if deprecated and 'deprecated' not in doc:
sys.stderr.write("*** ERROR: DEPRECATED MODULE MISSING 'deprecated' DOCUMENTATION: %s, %s ***\n" % (fname, module))
sys.exit(1)
if "/core/" in fname:
doc['core'] = True
else:
doc['core'] = False
if module in aliases:
doc['aliases'] = aliases[module]
all_keys = []
if not 'version_added' in doc:
sys.stderr.write("*** ERROR: missing version_added in: %s ***\n" % module)
sys.exit(1)
added = 0
if doc['version_added'] == 'historical':
del doc['version_added']
else:
added = doc['version_added']
# don't show version added information if it's too old to be called out
if added:
added_tokens = str(added).split(".")
added = added_tokens[0] + "." + added_tokens[1]
added_float = float(added)
if added and added_float < TO_OLD_TO_BE_NOTABLE:
del doc['version_added']
for (k,v) in doc['options'].iteritems():
all_keys.append(k)
all_keys = sorted(all_keys)
doc['option_keys'] = all_keys
doc['filename'] = fname
doc['docuri'] = doc['module'].replace('_', '-')
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['ansible_version'] = options.ansible_version
doc['plainexamples'] = examples #plain text
# here is where we build the table of contents...
text = template.render(doc)
write_data(text, options, outputname, module)
return doc['short_description']
#####################################################################################
def print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases):
modstring = module
modname = module
if module in deprecated:
modstring = modstring + DEPRECATED
modname = "_" + module
elif module not in core:
modstring = modstring + NOTCORE
result = process_module(modname, options, env, template, outputname, module_map, aliases)
if result != "SKIPPED":
category_file.write(" %s - %s <%s_module>\n" % (modstring, result, module))
def process_category(category, categories, options, env, template, outputname):
module_map = categories[category]
aliases = {}
if '_aliases' in categories:
aliases = categories['_aliases']
category_file_path = os.path.join(options.output_dir, "list_of_%s_modules.rst" % category)
category_file = open(category_file_path, "w")
print "*** recording category %s in %s ***" % (category, category_file_path)
# TODO: start a new category file
category = category.replace("_"," ")
category = category.title()
modules = []
deprecated = []
core = []
for module in module_map.keys():
if isinstance(module_map[module], dict):
for mod in module_map[module].keys():
if mod.startswith("_"):
mod = mod.replace("_","",1)
deprecated.append(mod)
elif '/core/' in module_map[module][mod]:
core.append(mod)
else:
if module.startswith("_"):
module = module.replace("_","",1)
deprecated.append(module)
elif '/core/' in module_map[module]:
core.append(module)
modules.append(module)
modules.sort()
category_header = "%s Modules" % (category.title())
underscores = "`" * len(category_header)
category_file.write("""\
%s
%s
.. toctree:: :maxdepth: 1
""" % (category_header, underscores))
sections = []
for module in modules:
if module in module_map and isinstance(module_map[module], dict):
sections.append(module)
continue
else:
print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases)
sections.sort()
for section in sections:
category_file.write("\n%s\n%s\n\n" % (section.replace("_"," ").title(),'-' * len(section)))
category_file.write(".. toctree:: :maxdepth: 1\n\n")
section_modules = module_map[section].keys()
section_modules.sort()
#for module in module_map[section]:
for module in section_modules:
print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map[section], aliases)
category_file.write("""\n\n
.. note::
    - %s: This marks a module as deprecated, which means the module is kept for backwards compatibility but its usage is discouraged. The module documentation details page may explain more about the rationale.
    - %s: This marks a module as 'extras', which means it ships with Ansible but may be a newer module and is possibly (but not necessarily) less actively maintained than 'core' modules.
    - Tickets filed on modules are filed to different repos than those on the main open source project. Core module tickets should be filed at `ansible/ansible-modules-core on GitHub <http://github.com/ansible/ansible-modules-core>`_, and extras tickets at `ansible/ansible-modules-extras on GitHub <http://github.com/ansible/ansible-modules-extras>`_.
""" % (DEPRECATED, NOTCORE))
category_file.close()
# TODO: end a new category file
#####################################################################################
def validate_options(options):
''' validate option parser options '''
if not options.module_dir:
print >>sys.stderr, "--module-dir is required"
sys.exit(1)
if not os.path.exists(options.module_dir):
print >>sys.stderr, "--module-dir does not exist: %s" % options.module_dir
sys.exit(1)
if not options.template_dir:
print "--template-dir must be specified"
sys.exit(1)
#####################################################################################
def main():
p = generate_parser()
(options, args) = p.parse_args()
validate_options(options)
env, template, outputname = jinja2_environment(options.template_dir, options.type)
categories = list_modules(options.module_dir)
last_category = None
category_names = categories.keys()
category_names.sort()
category_list_path = os.path.join(options.output_dir, "modules_by_category.rst")
category_list_file = open(category_list_path, "w")
category_list_file.write("Module Index\n")
category_list_file.write("============\n")
category_list_file.write("\n\n")
category_list_file.write(".. toctree::\n")
category_list_file.write(" :maxdepth: 1\n\n")
for category in category_names:
if category.startswith("_"):
continue
category_list_file.write(" list_of_%s_modules\n" % category)
process_category(category, categories, options, env, template, outputname)
category_list_file.close()
if __name__ == '__main__':
main()
|
opyate/gnashboard
|
refs/heads/master
|
application/kv_blueprint/__init__.py
|
33
|
from . import views
|
keisuke-umezawa/chainer
|
refs/heads/master
|
chainer/functions/loss/softmax_cross_entropy.py
|
4
|
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.activation import log_softmax
from chainer.utils import type_check
from chainer import variable
import chainerx
def _broadcast_to(array, shape):
if hasattr(numpy, 'broadcast_to'):
return numpy.broadcast_to(array, shape)
dummy = numpy.empty(shape, array.dtype)
return numpy.broadcast_arrays(array, dummy)[0]
def _check_class_weight_option(class_weight):
if class_weight is not None:
if class_weight.ndim != 1:
raise ValueError('class_weight.ndim should be 1')
if class_weight.dtype.kind != 'f':
raise ValueError('The dtype of class_weight should be \'f\'')
if isinstance(class_weight, variable.Variable):
raise ValueError('class_weight should be a numpy.ndarray or '
'cupy.ndarray, not a chainer.Variable')
def _check_reduce_option(reduce):
if reduce not in ('mean', 'no'):
raise ValueError(
'only \'mean\' and \'no\' are valid for \'reduce\', but \'%s\' is '
'given' % reduce)
def _check_input_values(x, t, ignore_label):
# Extract the raw ndarray as Variable.__ge__ is not implemented.
# We assume that t is already an ndarray.
if isinstance(x, variable.Variable):
x = x.data
if not (((0 <= t) &
(t < x.shape[1])) |
(t == ignore_label)).all():
        msg = ('Each label `t` needs to satisfy '
'`0 <= t < x.shape[1] or t == %d`' % ignore_label)
raise ValueError(msg)
def _reduction_dtype(x_dtype):
# Returns the dtype for accumulation and output of reduction.
# For float16 input, float32 is used.
# Otherwise the same dtype as the input is used.
if x_dtype == numpy.float16:
return numpy.float32
return x_dtype
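# For example, a float16 input dtype is promoted to float32 here, while
# float32 and float64 dtypes are returned unchanged.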
class SoftmaxCrossEntropy(function_node.FunctionNode):
"""Softmax activation followed by a cross entropy loss."""
normalize = True
y = None
# Coefficient of normalization. Only used if reduce='mean'.
_coeff = None
def __init__(self, normalize=True, cache_score=True, class_weight=None,
ignore_label=-1, reduce='mean'):
self.normalize = normalize
self.cache_score = cache_score
_check_class_weight_option(class_weight)
self.class_weight = class_weight
self.ignore_label = ignore_label
_check_reduce_option(reduce)
self.reduce = reduce
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 't'))
x_type, t_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
t_type.dtype.kind == 'i',
t_type.ndim == x_type.ndim - 1,
x_type.shape[0] == t_type.shape[0],
x_type.shape[2:] == t_type.shape[1:],
)
def _is_chainerx_supported(self, input_arrays):
# Determines if the specified configuration of inputs and parameters
# are supported in `forward_chainerx` implementation.
# TODO(niboshi): Support these conditions.
if self.class_weight is not None:
return False
if self.ignore_label != -1:
return False
if self.reduce != 'mean':
return False
x, t = input_arrays
if x.ndim != 2:
return False
return True
def forward_chainerx(self, inputs):
# TODO(niboshi): Current implementation is only intended to support
# MNIST example.
x, t = inputs
num_classes = x.shape[1]
score = chainerx.log_softmax(x, axis=1)
mask = (t[:, chainerx.newaxis] == chainerx.arange(
num_classes, dtype=t.dtype, device=x.device)).astype(score.dtype)
# TODO(beam2d): implement mean
y = -(score * mask).sum() * (1 / x.shape[0])
return y,
def forward_cpu(self, inputs):
class_weight = backend.from_chx(self.class_weight)
self.retain_inputs((0, 1))
x, t = inputs
if chainer.is_debug():
_check_input_values(x, t, self.ignore_label)
log_y = log_softmax._log_softmax(x)
if self.cache_score:
self.y = numpy.exp(log_y)
if class_weight is not None:
shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
log_y *= _broadcast_to(class_weight.reshape(shape), x.shape)
log_yd = numpy.rollaxis(log_y, 1)
log_yd = log_yd.reshape(len(log_yd), -1)
t_valid = t != self.ignore_label
t = t * t_valid
log_p = log_yd[t.ravel(), numpy.arange(t.size)]
log_p *= t_valid.ravel()
if self.reduce == 'mean':
# deal with the case where the SoftmaxCrossEntropy is
# unpickled from the old version
if self.normalize:
count = t_valid.sum()
else:
count = len(x)
self._coeff = 1.0 / max(count, 1)
# Perform reduction in a promoted dtype
reduc_dtype = _reduction_dtype(x.dtype)
y = log_p.sum(keepdims=True, dtype=reduc_dtype)
y = y * (-self._coeff)
y = y.astype(x.dtype, copy=False)
return y.reshape(()),
else:
return -log_p.reshape(t.shape),
def forward_gpu(self, inputs):
class_weight = backend.from_chx(self.class_weight)
self.retain_inputs((0, 1))
cupy = cuda.cupy
x, t = inputs
if chainer.is_debug():
_check_input_values(x, t, self.ignore_label)
if x.size == 0:
y = cupy.zeros(t.shape, dtype=x.dtype)
if self.cache_score:
self.y = y
if self.reduce == 'mean':
return y.sum(),
else:
return y,
log_y = log_softmax._log_softmax(x)
if self.cache_score:
self.y = cupy.exp(log_y)
if class_weight is not None:
shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
log_y *= cupy.broadcast_to(class_weight.reshape(shape), x.shape)
log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
if self.reduce == 'mean':
# Reduction is performed in a promoted dtype
reduc_dtype = _reduction_dtype(x.dtype)
if self.normalize:
count = (t != self.ignore_label).sum(dtype=reduc_dtype)
count = cupy.maximum(1, count)
coeff = 1. / count
else:
coeff = cupy.array(1. / max(1, len(t)), dtype=reduc_dtype)
self._coeff = coeff
ret = cuda.reduce(
'S t, raw T log_y, int32 n_channel, raw U coeff, '
'S ignore_label',
'U out',
't == ignore_label ? T(0) : log_y[_j * n_channel + t]',
'a + b', 'out = static_cast<U>(a * -coeff[0])', '0',
'crossent_fwd'
)(t, log_y.reduced_view(), log_y.shape[-1],
self._coeff, self.ignore_label)
ret = ret.astype(log_y.dtype, copy=False)
else:
ret = cuda.elementwise(
'S t, raw T log_y, int32 n_channel, T ignore', 'T out',
'''
if (t == ignore) {
out = 0;
} else {
out = -log_y[i * n_channel + t];
}
''',
'softmax_crossent_no_reduce_fwd'
)(t, log_y.reduced_view(), log_y.shape[-1], self.ignore_label)
ret = ret.reshape(t.shape)
return ret,
def backward(self, input_indexes, grad_outputs):
func_grad = _SoftmaxCrossEntropyGrad_NoDoubleBackprop(
self.ignore_label, self.class_weight, self.y, self._coeff)
inputs = self.get_retained_inputs()
return func_grad.apply(inputs + grad_outputs) + (None,)
class _SoftmaxCrossEntropyGrad_NoDoubleBackprop(function_node.FunctionNode):
# A backward implementation which does not support double-backprop.
def __init__(self, ignore_label, class_weight, y, coeff):
self.ignore_label = ignore_label
self.class_weight = class_weight
self.y = y
self.coeff = coeff
def forward_cpu(self, inputs_and_grad_outputs):
x, t, gloss = inputs_and_grad_outputs
if x.size == 0:
return numpy.zeros(x.shape, dtype=x.dtype), None
if self.y is not None:
y = self.y.copy()
else:
y = log_softmax._log_softmax(x)
numpy.exp(y, out=y)
t_valid = t != self.ignore_label
t = t * t_valid
if y.ndim == 2:
gx = y
gx[numpy.arange(len(t)), t] -= 1
if self.class_weight is not None:
shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
c = _broadcast_to(self.class_weight.reshape(shape), x.shape)
c = c[numpy.arange(len(t)), t]
gx *= _broadcast_to(numpy.expand_dims(c, 1), gx.shape)
gx *= t_valid.reshape((len(t), 1))
else:
# in the case where y.ndim is higher than 2,
# we think that a current implementation is inefficient
# because it yields two provisional arrays for indexing.
n_unit = t.size // len(t)
gx = y.reshape(y.shape[0], y.shape[1], -1)
fst_index = numpy.arange(t.size) // n_unit
trd_index = numpy.arange(t.size) % n_unit
gx[fst_index, t.ravel(), trd_index] -= 1
if self.class_weight is not None:
shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
c = _broadcast_to(self.class_weight.reshape(shape), x.shape)
c = c.reshape(gx.shape)
c = c[fst_index, t.ravel(), trd_index]
c = c.reshape(y.shape[0], 1, -1)
gx *= _broadcast_to(c, gx.shape)
gx *= t_valid.reshape((len(t), 1, -1))
gx = gx.reshape(y.shape)
if self.coeff is not None:
gx *= gloss * self.coeff
else:
gx *= gloss[:, None]
return gx,
def forward_gpu(self, inputs_and_grad_outputs):
class_weight = cuda.to_gpu(self.class_weight)
cupy = cuda.cupy
x, t, gloss = inputs_and_grad_outputs
if x.size == 0:
return cupy.zeros(x.shape, dtype=x.dtype), None
if self.y is not None:
y = self.y
else:
y = log_softmax._log_softmax(x)
cupy.exp(y, out=y)
n_unit = t.size // len(t)
if self.coeff is not None:
coeff = self.coeff
else:
gloss = gloss[:, None, ...]
coeff = cupy.array(1, dtype=gloss.dtype) # dtype does not matter
if self.class_weight is None:
gx = cuda.elementwise(
'T y, S t, T gloss, U coeff, S n_channel, S n_unit, '
'S ignore_label',
'T gx',
'''
const int c = (i / n_unit % n_channel);
if (t == ignore_label) {
gx = T(0);
} else {
gx = static_cast<T>(gloss * coeff * (y - (c == t)));
}
''',
'softmax_crossent_bwd')(
y, cupy.expand_dims(t, 1), gloss, coeff, x.shape[1],
n_unit, self.ignore_label)
else:
gx = cuda.elementwise(
'T y, raw T w, S t, T gloss, U coeff, '
'S n_channel, S n_unit, S ignore_label',
'T gx',
'''
const int c = (i / n_unit % n_channel);
if (t == ignore_label) {
gx = T(0);
} else {
gx = static_cast<T>(
gloss * coeff * (y - (c == t)) * w[t]);
}
''',
'softmax_crossent_weight_bwd')(
y, class_weight, cupy.expand_dims(t, 1), gloss, coeff,
x.shape[1], n_unit, self.ignore_label)
return gx,
def backward(self, input_indexes, grad_outputs):
raise RuntimeError(
'F.softmax_cross_entropy was called with '
'\'enable_double_backprop=False\' argument, but double-backprop '
'is actually being performed. Please specify '
'\'enable_double_backprop=True\' explicitly.')
def _double_backward_softmax_cross_entropy(x, t, normalize, class_weight,
ignore_label, reduce, is_chainerx):
if isinstance(t, variable.Variable):
t = t.data
F = chainer.functions
_check_class_weight_option(class_weight)
_check_reduce_option(reduce)
if chainer.is_debug():
_check_input_values(x, t, ignore_label)
loss = -chainer.functions.log_softmax(x)
if class_weight is not None:
shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
class_weight = F.broadcast_to(class_weight.reshape(shape), x.shape)
# TODO(niboshi): Remove this workaround after ChainerX supports
# type promotion.
if is_chainerx:
class_weight = F.cast(class_weight, x.dtype)
loss = loss * class_weight
in_use = (t != ignore_label).astype(x.dtype)
loss = F.rollaxis(loss, 1, loss.ndim)
loss = F.reshape(loss, (-1, loss.shape[-1]))
# Replace ignore_label value with one valid for F.select_item below.
t = t.clip(0, loss.shape[1] - 1)
loss = F.select_item(loss, t.ravel())
loss = F.reshape(loss, t.shape)
loss = loss * in_use
if reduce == 'mean':
reduc_dtype = _reduction_dtype(x.dtype)
if normalize:
# TODO(niboshi): Use in_use.sum(dtype=reduc_dtype) once chainerx
# supports dtype argument.
count = in_use.astype(reduc_dtype, copy=False).sum()
else:
count = len(x)
count = max(count, 1.)
if reduc_dtype == loss.dtype:
loss = F.sum(loss / count)
else:
# Sum in a promoted dtype
loss = F.cast(loss, reduc_dtype)
loss = F.sum(loss / count)
loss = F.cast(loss, x.dtype)
return loss
def softmax_cross_entropy(
x, t, normalize=True, cache_score=True, class_weight=None,
ignore_label=-1, reduce='mean', enable_double_backprop=False):
"""Computes cross entropy loss for pre-softmax activations.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable holding a multidimensional array whose element indicates
unnormalized log probability: the first axis of the variable
represents the number of samples, and the second axis represents
the number of classes. While this function computes a usual softmax
cross entropy if the number of dimensions is equal to 2, it
computes a cross entropy of the replicated softmax if the number of
dimensions is greater than 2.
t (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable holding a signed integer vector of ground truth
labels. If ``t[i] == ignore_label``, corresponding ``x[i]`` is
ignored.
normalize (bool): If ``True``, this function normalizes the cross
            entropy loss across all instances. If ``False``, it only
            normalizes along the batch size.
        cache_score (bool): When it is ``True``, the function stores the
            result of the forward computation and reuses it in the backward
            computation. It reduces computational cost, though it consumes
            more memory. If the ``enable_double_backprop`` option is
            ``True``, this option is forcibly turned off and the function
            does not cache the intermediate value.
class_weight (:ref:`ndarray`):
An array that contains constant weights that will be multiplied
with the loss values along with the second dimension. The shape of
this array should be ``(x.shape[1],)``. If this is not ``None``,
each class weight ``class_weight[i]`` is actually multiplied to
``y[:, i]`` that is the corresponding log-softmax output of ``x``
and has the same shape as ``x`` before calculating the actual loss
value.
ignore_label (int): Label value you want to ignore. Its default value
is ``-1``. See description of the argument `t`.
reduce (str): A string that determines whether to reduce the loss
values. If it is ``'mean'``, it computes the sum of the individual
cross entropy and normalize it according to ``normalize`` option.
If it is ``'no'``, this function computes cross entropy for each
instance and does not normalize it (``normalize`` option is
ignored). In this case, the loss value of the ignored instance,
which has ``ignore_label`` as its target value, is set to ``0``.
        enable_double_backprop (bool): If ``True``, this function uses an
            implementation that supports higher order differentiation.
            If ``False``, it uses a single-backprop implementation.
            This function uses the single-backprop version by default
            because we expect it to be faster. So, if you need second or
            higher derivatives, you need to turn this option on explicitly.
Returns:
~chainer.Variable: A variable holding a scalar array of the cross
entropy loss. If ``reduce`` is ``'mean'``, it is a scalar array.
        If ``reduce`` is ``'no'``, the shape is the same as that of ``t``.
.. note::
This function is differentiable only by ``x``.
.. admonition:: Example
>>> x = np.array([[-1, 0, 1, 2], [2, 0, 1, -1]]).astype(np.float32)
>>> x
array([[-1., 0., 1., 2.],
[ 2., 0., 1., -1.]], dtype=float32)
>>> t = np.array([3, 0]).astype(np.int32)
>>> t
array([3, 0], dtype=int32)
>>> y = F.softmax_cross_entropy(x, t)
>>> y
variable(0.44018972)
>>> log_softmax = -F.log_softmax(x)
>>> expected_loss = np.mean([log_softmax[row, column].data \
for row, column in enumerate(t)])
>>> y.array == expected_loss
True
"""
is_chainerx = (
chainerx.is_available() and backend.get_array_module(x) is chainerx)
if is_chainerx or not enable_double_backprop:
# Optimized implementation.
# For non-ChainerX, forward and backward are supported but
# double-backprop is not supported.
# For ChainerX, even forward is supported for only specific
# configuration of inputs and parameters, which is tested with
# `SoftmaxCrossEntropy._is_chainerx_supported()`.
func = SoftmaxCrossEntropy(
normalize, cache_score, class_weight, ignore_label, reduce)
if not is_chainerx or func._is_chainerx_supported((x, t)):
loss, = func.apply((x, t))
return loss
# Generic double-backprop-enabled but unoptimized implementation
return _double_backward_softmax_cross_entropy(
x, t, normalize, class_weight, ignore_label, reduce, is_chainerx)
|
tntnatbry/tensorflow
|
refs/heads/master
|
tensorflow/compiler/tests/nullary_ops_test.py
|
122
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for operators with no arguments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import googletest
class NullaryOpsTest(XLATestCase):
def _testNullary(self, op, expected):
with self.test_session() as session:
with self.test_scope():
output = op()
result = session.run(output)
self.assertAllClose(result, expected, rtol=1e-3)
def testNoOp(self):
with self.test_session():
with self.test_scope():
output = control_flow_ops.no_op()
# This should not crash.
output.run()
def testConstants(self):
constants = [
np.float32(42),
np.array([], dtype=np.float32),
np.array([1, 2], dtype=np.float32),
np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32),
np.array([[[1, 2], [3, 4], [5, 6]], [[10, 20], [30, 40], [50, 60]]],
dtype=np.float32),
np.array([[[]], [[]]], dtype=np.float32),
np.array([[[[1]]]], dtype=np.float32),
]
for c in constants:
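      # Bind c as a default argument so each lambda captures the current
      # constant rather than the final value of the loop variable.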
self._testNullary(lambda c=c: constant_op.constant(c), expected=c)
if __name__ == "__main__":
googletest.main()
|
EArmour/pyfibot
|
refs/heads/master
|
pyfibot/modules/available/module_dhl.py
|
1
|
# -*- encoding: utf-8 -*-
"""
Get status of a shipment from DHL Track & Trace service. Experimental.
"""
from __future__ import unicode_literals
from datetime import datetime, timedelta
from bs4 import BeautifulSoup
def command_dhl(bot, user, channel, args):
"""Get latest status of a shipment by DHL shipment number"""
payload = {'lang': 'en', 'idc': args}
url = 'https://nolp.dhl.de/nextt-online-public/set_identcodes.do'
r = bot.get_url(url, params=payload)
bs = BeautifulSoup(r.content)
status_div = bs.find('div', {'class': 'accordion-inner'})
if status_div:
status_table = status_div.find('tbody')
else:
return bot.say(channel, "Shipment with that number does not exist or an error occurred.")
status_row = None
for row in status_table.find_all('tr'):
try:
status_row = row.find_all('td')
except:
continue
date_str = status_row[0].text.strip()
place = status_row[1].text.strip()
status = status_row[2].text.strip()
dt = datetime.now() - datetime.strptime(date_str[5:], '%d.%m.%Y %H:%M h')
next_step = bs.find('td', text='Next step')
if next_step:
status += ' - Next step: %s' % next_step.next.next.next.next.strip()
agestr = []
if dt.days > 0:
agestr.append('%dd' % dt.days)
secs = dt.seconds
hours, minutes, seconds = secs // 3600, secs // 60 % 60, secs % 60
if hours > 0:
agestr.append('%dh' % hours)
if minutes > 0:
agestr.append('%dm' % minutes)
return bot.say(channel, '%s - %s - %s' % (' '.join(agestr) + ' ago', place, status))
|
zmeda/web-summit-2015-recap-zalando
|
refs/heads/master
|
node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSToolFile.py
|
2736
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
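# A minimal usage sketch (file name, rule name and command line are
# illustrative values, not taken from any real project):
#   writer = Writer('my_tool.rules', 'my_tool')
#   writer.AddCustomBuildRule(name='lexer', cmd='lex.exe $(InputPath)',
#                             description='Running lexer',
#                             additional_dependencies=[],
#                             outputs=['$(InputName).cc'],
#                             extensions=['l'])
#   writer.WriteIfChanged()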
|
niecw/stock-logistics-workflow
|
refs/heads/8.0
|
__unported__/product_serial/stock.py
|
10
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Product serial module for OpenERP
# Copyright (C) 2008 Raphaël Valyi
# Copyright (C) 2011 Anevia S.A. - Ability to group invoice lines
# written by Alexis Demeaulte <alexis.demeaulte@anevia.com>
# Copyright (C) 2011-2013 Akretion - Ability to split lines on logistical units
# written by Emmanuel Samyn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
import hashlib
from openerp.tools.translate import _
class stock_move(orm.Model):
_inherit = "stock.move"
# We order by product name because otherwise, after the split,
# the products are "mixed" and not grouped by product name any more
_order = "picking_id, name, id"
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default['new_prodlot_code'] = False
return super(stock_move, self).copy(cr, uid, id, default,
context=context)
def _get_prodlot_code(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for move in self.browse(cr, uid, ids):
res[move.id] = move.prodlot_id and move.prodlot_id.name or False
return res
def _set_prodlot_code(self, cr, uid, ids, name, value, arg, context=None):
if not value:
return False
if isinstance(ids, (int, long)):
ids = [ids]
for move in self.browse(cr, uid, ids, context=context):
product_id = move.product_id.id
existing_prodlot = move.prodlot_id
if existing_prodlot: # avoid creating a prodlot twice
self.pool.get('stock.production.lot').write(
cr, uid, existing_prodlot.id, {'name': value})
else:
lot_obj = self.pool['stock.production.lot']
prodlot_id = lot_obj.create(cr, uid, {
'name': value,
'product_id': product_id,
})
move.write({'prodlot_id': prodlot_id})
def _get_tracking_code(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for move in self.browse(cr, uid, ids):
res[move.id] = move.tracking_id and move.tracking_id.name or False
return res
def _set_tracking_code(self, cr, uid, ids, name, value, arg, context=None):
if not value:
return False
if isinstance(ids, (int, long)):
ids = [ids]
for move in self.browse(cr, uid, ids, context=context):
existing_tracking = move.tracking_id
if existing_tracking: # avoid creating a tracking twice
self.pool.get('stock.tracking').write(
cr, uid, existing_tracking.id, {'name': value})
else:
tracking_id = self.pool.get('stock.tracking').create(cr, uid, {
'name': value,
})
move.write({'tracking_id': tracking_id})
_columns = {
'new_prodlot_code': fields.function(
_get_prodlot_code,
fnct_inv=_set_prodlot_code,
method=True, type='char', size=64,
string='Create Serial Number', select=1
),
'new_tracking_code': fields.function(
_get_tracking_code, fnct_inv=_set_tracking_code,
method=True, type='char', size=64,
string='Create Tracking', select=1
),
}
def action_done(self, cr, uid, ids, context=None):
"""
        If we autosplit moves without reconnecting them one by one (at least
        when a move which has descendants is split), the following situation
        would happen (alphabetical order is the order of creation; initially
        b and a pre-exist, then a is split, so a might get assigned and then
        split too):
        Incoming moves b, c, d
        Outgoing moves a, e, f
        Then we have those links: b->a, c->a, d->a
        and: b->, b->e, b->f
        The following code detects this situation and properly reconnects
        the moves into only: b->a, c->e and d->f.
"""
result = super(stock_move, self).action_done(cr, uid, ids, context)
for move in self.browse(cr, uid, ids):
if (
move.product_id.lot_split_type and
move.move_dest_id and
move.move_dest_id.id
):
cr.execute(
"select stock_move.id from stock_move_history_ids "
"left join stock_move "
"on stock_move.id = stock_move_history_ids.child_id "
"where parent_id=%s and stock_move.product_qty=1",
(move.id,))
unitary_out_moves = cr.fetchall()
if unitary_out_moves and len(unitary_out_moves) > 1:
unitary_in_moves = []
out_node = False
counter = 0
while (len(unitary_in_moves) != len(unitary_out_moves) and
counter < len(unitary_out_moves)):
out_node = unitary_out_moves[counter][0]
cr.execute(
"select stock_move.id from stock_move_history_ids "
"left join stock_move "
"on stock_move.id = "
"stock_move_history_ids.parent_id "
"where child_id=%s and stock_move.product_qty=1",
(out_node,))
unitary_in_moves = cr.fetchall()
counter += 1
if len(unitary_in_moves) == len(unitary_out_moves):
unitary_out_moves.reverse()
unitary_out_moves.pop()
unitary_in_moves.reverse()
unitary_in_moves.pop()
counter = 0
for unitary_in_move in unitary_in_moves:
cr.execute(
"delete from stock_move_history_ids "
"where parent_id=%s and child_id=%s", (
unitary_in_moves[counter][0], out_node))
cr.execute(
"update stock_move_history_ids "
"set parent_id=%s where parent_id=%s and "
"child_id=%s", (
unitary_in_moves[counter][0],
move.id,
unitary_out_moves[counter][0]))
counter += 1
return result
def split_move(self, cr, uid, ids, context=None):
all_ids = list(ids)
for move in self.browse(cr, uid, ids, context=context):
qty = move.product_qty
lu_qty = False
if move.product_id.lot_split_type == 'lu':
if not move.product_id.packaging:
raise orm.except_orm(_('Error :'), _(
"Product '%s' has 'Lot split type' = "
"'Logistical Unit' but is missing packaging "
"information.") % (move.product_id.name))
lu_qty = move.product_id.packaging[0].qty
elif move.product_id.lot_split_type == 'single':
lu_qty = 1
if lu_qty and qty > 1:
# Set existing move to LU quantity
self.write(cr, uid, move.id, {
'product_qty': lu_qty,
'product_uos_qty': move.product_id.uos_coeff
})
qty -= lu_qty
# While still enough qty to create a new move, create it
while qty >= lu_qty:
all_ids.append(
self.copy(cr, uid, move.id, {
'state': move.state, 'prodlot_id': None
}))
qty -= lu_qty
# Create a last move for the remainder qty
if qty > 0:
all_ids.append(self.copy(
cr, uid, move.id, {
'state': move.state,
'prodlot_id': None,
'product_qty': qty
}))
return all_ids
class stock_picking(orm.Model):
_inherit = "stock.picking"
def _check_split(self, move):
track_production = move.product_id.track_production
track_incoming = move.product_id.track_incoming
track_outgoing = move.product_id.track_outgoing
track_internal = move.product_id.track_internal
from_loc_usage = move.location_id.usage
dest_loc_usage = move.location_dest_id.usage
if track_production and (from_loc_usage == 'production' or
dest_loc_usage == 'production'):
return True
if track_incoming and from_loc_usage == 'supplier':
return True
if track_outgoing and dest_loc_usage == 'customer':
return True
if track_internal and dest_loc_usage == 'internal':
return True
return False
def action_assign_wkf(self, cr, uid, ids, context=None):
result = super(stock_picking, self).action_assign_wkf(
cr, uid, ids, context=context)
for picking in self.browse(cr, uid, ids):
if picking.company_id.autosplit_is_active:
for move in picking.move_lines:
# Auto split
if self._check_split(move):
self.pool.get('stock.move').split_move(
cr, uid, [move.id])
return result
    # Because stock move lines can be split by this module, we merge
    # invoice lines (if the option 'is_group_invoice_line' is activated
    # for the company) when all of the following conditions hold:
    # - the product is the same
    # - the discount is the same
    # - the unit price is the same
    # - the description is the same
    # - the taxes are the same
    # - they come from the same sale order lines (requires extra code)
    # In that case we merge the invoice lines together and sum their
    # quantity and subtotal.
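    # Illustrative grouping key built below (made-up values), before hashing:
    #   "42;0.0;10.5;Widget XL;7;13;"
    # i.e. product_id;discount;price_unit;name; followed by the sorted tax
    # ids and, if present, the sorted sale order line ids, each suffixed
    # with ';'. The sha224 hex digest of this string is used as the key.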
def action_invoice_create(self, cursor, user, ids, journal_id=False,
group=False, type='out_invoice', context=None):
invoice_dict = super(stock_picking, self).action_invoice_create(
cursor, user,
ids, journal_id, group, type, context=context)
for picking_key in invoice_dict:
invoice = self.pool.get('account.invoice').browse(
cursor, user, invoice_dict[picking_key], context=context)
if not invoice.company_id.is_group_invoice_line:
continue
new_line_list = {}
for line in invoice.invoice_line:
# Build a key
key = unicode(line.product_id.id) + ";" \
+ unicode(line.discount) + ";" \
+ unicode(line.price_unit) + ";" \
+ line.name + ";"
# Add the tax key part
tax_tab = []
for tax in line.invoice_line_tax_id:
tax_tab.append(tax.id)
tax_tab.sort()
for tax in tax_tab:
key = key + unicode(tax) + ";"
                # Add the sale order line part, but check whether the field
                # exists because it is installed by a specific module (not
                # from addons)
if self.pool.get('ir.model.fields').search(cursor, user, [
('name', '=', 'sale_order_lines'),
('model', '=', 'account.invoice.line')
], context=context) != []:
order_line_tab = []
for order_line in line.sale_order_lines:
order_line_tab.append(order_line.id)
order_line_tab.sort()
for order_line in order_line_tab:
key = key + unicode(order_line) + ";"
# Get the hash of the key
hash_key = hashlib.sha224(key.encode('utf8')).hexdigest()
# if the key doesn't already exist, we keep the invoice line
# and we add the key to new_line_list
if hash_key not in new_line_list:
new_line_list[hash_key] = {
'id': line.id,
'quantity': line.quantity,
'price_subtotal': line.price_subtotal,
}
# if the key already exist, we update new_line_list and
# we delete the invoice line
else:
new_line_list[hash_key]['quantity'] = new_line_list[
hash_key]['quantity'] + line.quantity
new_line_list[hash_key]['price_subtotal'] = new_line_list[hash_key]['price_subtotal'] \
+ line.price_subtotal
self.pool.get('account.invoice.line').unlink(
cursor, user, line.id, context=context)
# Write modifications made on invoice lines
for hash_key in new_line_list:
line_id = new_line_list[hash_key]['id']
del new_line_list[hash_key]['id']
self.pool.get('account.invoice.line').write(
cursor, user, line_id, new_line_list[hash_key],
context=context)
return invoice_dict
class stock_production_lot(orm.Model):
_inherit = "stock.production.lot"
def _last_location_id(self, cr, uid, ids, field_name, arg, context=None):
"""Retrieves the last location where the product with given serial is.
Instead of using dates we assume the product is in the location having
the highest number of products with the given serial (should be 1 if no
mistake). This is better than using move dates because moves can easily
        be encoded with wrong dates."""
res = {}
for prodlot_id in ids:
cr.execute(
"select location_dest_id "
"from stock_move inner "
"join stock_report_prodlots "
"on stock_report_prodlots.location_id = location_dest_id "
"and stock_report_prodlots.prodlot_id = %s "
"where stock_move.prodlot_id = %s and stock_move.state=%s "
"order by stock_report_prodlots.qty DESC ",
(prodlot_id, prodlot_id, 'done'))
results = cr.fetchone()
res[prodlot_id] = results and results[0] or False
return res
def _last_location_id_search(self, cr, uid, obj, name, args, context=None):
ops = ['=', ]
prodlot_ids = ()
if not len(args):
return []
prodlot_ids = []
for a in args:
operator = a[1]
value = a[2]
if operator not in ops:
raise orm.except_orm(
_('Error !'),
_('Operator %s not supported in searches for '
'last_location_id (product.product).' % operator))
if operator == '=':
cr.execute(
"select distinct prodlot_id "
"from stock_report_prodlots "
"where location_id = %s and qty > 0 ",
(value, ))
prodlot_ids = filter(None, map(lambda x: x[0], cr.fetchall()))
return [('id', 'in', tuple(prodlot_ids))]
_columns = {
'last_location_id': fields.function(
_last_location_id,
fnct_search=_last_location_id_search,
type="many2one", relation="stock.location",
string="Last location",
help="Display the current stock location of this production lot"),
}
|
jianglu/mojo
|
refs/heads/master
|
third_party/libxml/src/check-xml-test-suite.py
|
347
|
#!/usr/bin/python
import sys
import time
import os
import string
sys.path.insert(0, "python")
import libxml2
test_nr = 0
test_succeed = 0
test_failed = 0
test_error = 0
#
# the testsuite description
#
CONF="xml-test-suite/xmlconf/xmlconf.xml"
LOG="check-xml-test-suite.log"
log = open(LOG, "w")
#
# Error and warning handlers
#
error_nr = 0
error_msg = ''
def errorHandler(ctx, str):
global error_nr
global error_msg
error_nr = error_nr + 1
if len(error_msg) < 300:
if len(error_msg) == 0 or error_msg[-1] == '\n':
error_msg = error_msg + " >>" + str
else:
error_msg = error_msg + str
libxml2.registerErrorHandler(errorHandler, None)
#warning_nr = 0
#warning = ''
#def warningHandler(ctx, str):
# global warning_nr
# global warning
#
# warning_nr = warning_nr + 1
# warning = warning + str
#
#libxml2.registerWarningHandler(warningHandler, None)
#
# Used to load the XML testsuite description
#
def loadNoentDoc(filename):
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return None
ctxt.replaceEntities(1)
ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if ctxt.wellFormed() != 1:
doc.freeDoc()
return None
return doc
#
# The conformance testing routines
#
def testNotWf(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc != None:
doc.freeDoc()
if ret == 0 or ctxt.wellFormed() != 0:
print "%s: error: Well Formedness error not detected" % (id)
log.write("%s: error: Well Formedness error not detected\n" % (id))
return 0
return 1
def testNotWfEnt(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.replaceEntities(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc != None:
doc.freeDoc()
if ret == 0 or ctxt.wellFormed() != 0:
print "%s: error: Well Formedness error not detected" % (id)
log.write("%s: error: Well Formedness error not detected\n" % (id))
return 0
return 1
def testNotWfEntDtd(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.replaceEntities(1)
ctxt.loadSubset(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc != None:
doc.freeDoc()
if ret == 0 or ctxt.wellFormed() != 0:
print "%s: error: Well Formedness error not detected" % (id)
log.write("%s: error: Well Formedness error not detected\n" % (id))
return 0
return 1
def testWfEntDtd(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.replaceEntities(1)
ctxt.loadSubset(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc == None or ret != 0 or ctxt.wellFormed() == 0:
print "%s: error: wrongly failed to parse the document" % (id)
log.write("%s: error: wrongly failed to parse the document\n" % (id))
if doc != None:
doc.freeDoc()
return 0
if error_nr != 0:
print "%s: warning: WF document generated an error msg" % (id)
        log.write("%s: warning: WF document generated an error msg\n" % (id))
doc.freeDoc()
return 2
doc.freeDoc()
return 1
def testError(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.replaceEntities(1)
ctxt.loadSubset(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc != None:
doc.freeDoc()
if ctxt.wellFormed() == 0:
print "%s: warning: failed to parse the document but accepted" % (id)
        log.write("%s: warning: failed to parse the document but accepted\n" % (id))
return 2
if error_nr != 0:
print "%s: warning: WF document generated an error msg" % (id)
        log.write("%s: warning: WF document generated an error msg\n" % (id))
return 2
return 1
def testInvalid(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.validate(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
valid = ctxt.isValid()
if doc == None:
print "%s: error: wrongly failed to parse the document" % (id)
log.write("%s: error: wrongly failed to parse the document\n" % (id))
return 0
if valid == 1:
print "%s: error: Validity error not detected" % (id)
log.write("%s: error: Validity error not detected\n" % (id))
doc.freeDoc()
return 0
if error_nr == 0:
print "%s: warning: Validity error not reported" % (id)
log.write("%s: warning: Validity error not reported\n" % (id))
doc.freeDoc()
return 2
doc.freeDoc()
return 1
def testValid(filename, id):
global error_nr
global error_msg
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.validate(1)
ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
valid = ctxt.isValid()
if doc == None:
print "%s: error: wrongly failed to parse the document" % (id)
log.write("%s: error: wrongly failed to parse the document\n" % (id))
return 0
if valid != 1:
print "%s: error: Validity check failed" % (id)
log.write("%s: error: Validity check failed\n" % (id))
doc.freeDoc()
return 0
if error_nr != 0 or valid != 1:
print "%s: warning: valid document reported an error" % (id)
log.write("%s: warning: valid document reported an error\n" % (id))
doc.freeDoc()
return 2
doc.freeDoc()
return 1
def runTest(test):
global test_nr
global test_succeed
global test_failed
global error_msg
global log
uri = test.prop('URI')
id = test.prop('ID')
if uri == None:
        print "Test without URI:", id
return -1
if id == None:
        print "Test without ID:", uri
return -1
base = test.getBase(None)
URI = libxml2.buildURI(uri, base)
if os.access(URI, os.R_OK) == 0:
print "Test %s missing: base %s uri %s" % (URI, base, uri)
return -1
type = test.prop('TYPE')
if type == None:
print "Test %s missing TYPE" % (id)
return -1
extra = None
if type == "invalid":
res = testInvalid(URI, id)
elif type == "valid":
res = testValid(URI, id)
elif type == "not-wf":
extra = test.prop('ENTITIES')
# print URI
#if extra == None:
# res = testNotWfEntDtd(URI, id)
#elif extra == 'none':
# res = testNotWf(URI, id)
#elif extra == 'general':
# res = testNotWfEnt(URI, id)
#elif extra == 'both' or extra == 'parameter':
res = testNotWfEntDtd(URI, id)
#else:
    #    print "Unknown value %s for an ENTITIES test value" % (extra)
# return -1
elif type == "error":
res = testError(URI, id)
else:
# TODO skipped for now
return -1
test_nr = test_nr + 1
if res > 0:
test_succeed = test_succeed + 1
elif res == 0:
test_failed = test_failed + 1
elif res < 0:
test_error = test_error + 1
    # Log the context
if res != 1:
log.write(" File: %s\n" % (URI))
content = string.strip(test.content)
while content[-1] == '\n':
content = content[0:-1]
if extra != None:
log.write(" %s:%s:%s\n" % (type, extra, content))
else:
log.write(" %s:%s\n\n" % (type, content))
if error_msg != '':
log.write(" ----\n%s ----\n" % (error_msg))
error_msg = ''
log.write("\n")
return 0
def runTestCases(case):
profile = case.prop('PROFILE')
if profile != None and \
string.find(profile, "IBM XML Conformance Test Suite - Production") < 0:
print "=>", profile
test = case.children
while test != None:
if test.name == 'TEST':
runTest(test)
if test.name == 'TESTCASES':
runTestCases(test)
test = test.next
conf = loadNoentDoc(CONF)
if conf == None:
print "Unable to load %s" % CONF
sys.exit(1)
testsuite = conf.getRootElement()
if testsuite.name != 'TESTSUITE':
print "Expecting TESTSUITE root element: aborting"
sys.exit(1)
profile = testsuite.prop('PROFILE')
if profile != None:
print profile
start = time.time()
case = testsuite.children
while case != None:
if case.name == 'TESTCASES':
old_test_nr = test_nr
old_test_succeed = test_succeed
old_test_failed = test_failed
old_test_error = test_error
runTestCases(case)
        print " Ran %d tests: %d succeeded, %d failed and %d generated an error" % (
test_nr - old_test_nr, test_succeed - old_test_succeed,
test_failed - old_test_failed, test_error - old_test_error)
case = case.next
conf.freeDoc()
log.close()
print "Ran %d tests: %d succeeded, %d failed and %d generated an error in %.2f s." % (
test_nr, test_succeed, test_failed, test_error, time.time() - start)
|
Elder-of-Ozone/i3pystatus
|
refs/heads/master
|
i3pystatus/pomodoro.py
|
6
|
import os
import subprocess
import locale
from datetime import datetime, timedelta
from i3pystatus import IntervalModule
class Pomodoro(IntervalModule):
"""
    This plugin shows a Pomodoro timer.
    Left click starts/restarts the timer.
    Right click stops it.
"""
settings = (
('sound',
'Path to sound file to play as alarm. Played by "aplay" utility'),
('pomodoro_duration',
'Working (pomodoro) interval duration in seconds'),
('break_duration', 'Short break duration in seconds'),
('long_break_duration', 'Long break duration in seconds'),
('short_break_count', 'Short break count before first long break'),
('format', 'format string, available formatters: current_pomodoro, '
'total_pomodoro, time')
)
required = ('sound',)
color_stopped = '#2ECCFA'
color_running = '#FFFF00'
color_break = '#37FF00'
interval = 1
short_break_count = 3
format = '☯ {current_pomodoro}/{total_pomodoro} {time}'
pomodoro_duration = 25 * 60
break_duration = 5 * 60
long_break_duration = 15 * 60
on_rightclick = "stop"
on_leftclick = "start"
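    # A minimal configuration sketch, assuming the usual i3pystatus
    # Status().register(...) setup (the sound path is a placeholder):
    #   status.register("pomodoro",
    #                   sound="/usr/share/sounds/alarm.wav",
    #                   pomodoro_duration=25 * 60,
    #                   break_duration=5 * 60)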
def init(self):
# state could be either running/break or stopped
self.state = 'stopped'
self.breaks = 0
self.time = None
def run(self):
if self.time and datetime.now() >= self.time:
if self.state == 'running':
self.state = 'break'
if self.breaks == self.short_break_count:
self.time = datetime.now() + \
timedelta(seconds=self.long_break_duration)
self.breaks = 0
else:
self.time = datetime.now() + \
timedelta(seconds=self.break_duration)
self.breaks += 1
text = 'Go for a break!'
else:
self.state = 'running'
self.time = datetime.now() + \
timedelta(seconds=self.pomodoro_duration)
text = 'Back to work!'
self._alarm(text)
if self.state == 'running' or self.state == 'break':
min, sec = divmod((self.time - datetime.now()).total_seconds(), 60)
text = '{:02}:{:02}'.format(int(min), int(sec))
color = self.color_running if self.state == 'running' else self.color_break
else:
self.output = {
'full_text': 'Stopped',
'color': self.color_stopped
}
return
sdict = {
'time': text,
'current_pomodoro': self.breaks,
'total_pomodoro': self.short_break_count + 1,
}
self.output = {
'full_text': self.format.format(**sdict),
'color': color
}
def start(self):
self.state = 'running'
self.time = datetime.now() + timedelta(seconds=self.pomodoro_duration)
self.breaks = 0
def stop(self):
self.state = 'stopped'
self.time = None
def _alarm(self, text):
subprocess.call(['notify-send',
'Alarm!',
text])
subprocess.Popen(['aplay',
self.sound,
'-q'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
|
SanchayanMaity/gem5
|
refs/heads/CS570
|
src/sim/power/PowerModel.py
|
12
|
# Copyright (c) 2016 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: David Guillen Fandos
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import Parent
# Represents a power model for a simobj
# The model itself is also a SimObject so we can make use of some
# nice features available such as Parent.any
class PowerModel(SimObject):
type = 'PowerModel'
cxx_header = "sim/power/power_model.hh"
@classmethod
def export_methods(cls, code):
code('''
double getDynamicPower() const;
double getStaticPower() const;
''')
# Keep a list of every model for every power state
pm = VectorParam.PowerModelState([], "List of per-state power models.")
# Need a reference to the system so we can query the thermal domain
# about temperature (temperature is needed for leakage calculation)
subsystem = Param.SubSystem(Parent.any, "subsystem")
|
mbox/django
|
refs/heads/master
|
django/contrib/flatpages/views.py
|
105
|
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.shortcuts import get_current_site
from django.http import Http404, HttpResponse, HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404
from django.template import loader, RequestContext
from django.utils.safestring import mark_safe
from django.views.decorators.csrf import csrf_protect
DEFAULT_TEMPLATE = 'flatpages/default.html'
# This view is called from FlatpageFallbackMiddleware.process_response
# when a 404 is raised, which often means CsrfViewMiddleware.process_view
# has not been called even if CsrfViewMiddleware is installed. So we need
# to use @csrf_protect, in case the template needs {% csrf_token %}.
# However, we can't just wrap this view; if no matching flatpage exists,
# or a redirect is required for authentication, the 404 needs to be returned
# without any CSRF checks. Therefore, we only
# CSRF protect the internal implementation.
def flatpage(request, url):
"""
Public interface to the flat page view.
Models: `flatpages.flatpages`
Templates: Uses the template defined by the ``template_name`` field,
or :template:`flatpages/default.html` if template_name is not defined.
Context:
flatpage
`flatpages.flatpages` object
"""
if not url.startswith('/'):
url = '/' + url
site_id = get_current_site(request).id
try:
f = get_object_or_404(FlatPage,
url=url, sites=site_id)
except Http404:
if not url.endswith('/') and settings.APPEND_SLASH:
url += '/'
f = get_object_or_404(FlatPage,
url=url, sites=site_id)
return HttpResponsePermanentRedirect('%s/' % request.path)
else:
raise
return render_flatpage(request, f)
@csrf_protect
def render_flatpage(request, f):
"""
Internal interface to the flat page view.
"""
# If registration is required for accessing this page, and the user isn't
# logged in, redirect to the login page.
if f.registration_required and not request.user.is_authenticated():
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.path)
if f.template_name:
t = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
else:
t = loader.get_template(DEFAULT_TEMPLATE)
# To avoid having to always use the "|safe" filter in flatpage templates,
# mark the title and content as already safe (since they are raw HTML
# content in the first place).
f.title = mark_safe(f.title)
f.content = mark_safe(f.content)
c = RequestContext(request, {
'flatpage': f,
})
response = HttpResponse(t.render(c))
return response
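# --- Usage sketch (belongs in a project urls.py, not in this module) --------
# One common way to expose the view explicitly instead of relying on the
# flatpage fallback middleware; the "pages/" prefix is an assumption.
#
#     from django.conf.urls import url
#     from django.contrib.flatpages import views
#
#     urlpatterns = [
#         url(r'^pages/(?P<url>.*/)$', views.flatpage),
#     ]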
|
gloaec/trifle
|
refs/heads/master
|
src/trifle/raft/store.py
|
1
|
import os
import errno
import uuid
import msgpack # we're using it anyway...
from trifle import config
def read_state(port):
sfile = os.path.join(config['TMP_DIR'], "raft-state-%d" % port)
try:
with open(sfile) as r:
return msgpack.unpackb(r.read())
except IOError as e:
if not e.errno == errno.ENOENT:
raise
# no state file exists; initialize with fresh values
return 0, None, None, {}, uuid.uuid4().hex
def write_state(port, term, voted, log, peers, uuid):
try:
os.makedirs(config['TMP_DIR'])
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(config['TMP_DIR']):
pass
else: raise
sfile = os.path.join(config['TMP_DIR'], "raft-state-%d" % port)
with open(sfile, 'w') as w:
w.write(msgpack.packb((term, voted, log, peers, uuid)))
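# A minimal round-trip sketch of the two helpers above; it assumes
# config['TMP_DIR'] is writable, and the port and peer values are invented.
if __name__ == '__main__':
    term, voted, log, peers, uid = read_state(9001)
    write_state(9001, term + 1, 'peer-a', log, {'peer-a': ('127.0.0.1', 9002)}, uid)
    assert read_state(9001)[0] == term + 1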
|
TangXT/edx-platform
|
refs/heads/master
|
common/djangoapps/heartbeat/views.py
|
199
|
from xmodule.modulestore.django import modulestore
from dogapi import dog_stats_api
from util.json_request import JsonResponse
from django.db import connection
from django.db.utils import DatabaseError
from xmodule.exceptions import HeartbeatFailure
@dog_stats_api.timed('edxapp.heartbeat')
def heartbeat(request):
"""
Simple view that a loadbalancer can check to verify that the app is up. Returns a json doc
of service id: status or message. If the status for any service is anything other than True,
it returns HTTP code 503 (Service Unavailable); otherwise, it returns 200.
"""
    # This refactoring merely delegates to the default modulestore (which, if it's a mixed modulestore, will
    # delegate to all configured modulestores) and runs a quick test of SQL. A later refactoring may allow
    # any service to register itself as participating in the heartbeat. It's important that all implementations
    # do as little as possible but give a sound determination that they are ready.
try:
output = modulestore().heartbeat()
except HeartbeatFailure as fail:
return JsonResponse({fail.service: unicode(fail)}, status=503)
cursor = connection.cursor()
try:
cursor.execute("SELECT CURRENT_DATE")
cursor.fetchone()
output['SQL'] = True
except DatabaseError as fail:
return JsonResponse({'SQL': unicode(fail)}, status=503)
return JsonResponse(output)
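# --- Usage sketch (belongs in a urls.py, not in this module) ----------------
# A hedged example of exposing the view so a loadbalancer can poll it; the
# path is an assumption and the urlconf style depends on the Django version.
#
#     from django.conf.urls import url
#     from heartbeat.views import heartbeat
#
#     urlpatterns = [
#         url(r'^heartbeat$', heartbeat, name='heartbeat'),
#     ]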
|
shankari/e-mission-server
|
refs/heads/master
|
emission/analysis/modelling/tour_model/prior_unused/exploratory_scripts/explore_smoothing_trajectories.py
|
2
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
import json
import logging
import numpy as np
import datetime as pydt
# Our imports
import emission.analysis.modelling.tour_model.prior_unused.truth_pipeline as tp
import emission.core.get_database as edb
import emission.analysis.section_features as fc
import emission.plotting.gmaps.gmap_display as mgp
import emission.analysis.modelling.tour_model.prior_unused.exploratory_scripts.generate_smoothing_from_ground_truth_clusters as gsfgtc
query = {'type': 'move',
'confirmed_mode': {'$ne': 9},
'section_start_datetime' : {'$gt': pydt.datetime(2015, 0o2, 14)},
'$expr': 'this.track_points.length>1'}
# Now find other sections that meet this criterion
# Manually, we pick the sections with the top 20 average speeds that are not air
def find_other_sections_manual(needsSmoothing, findWithoutSmoothing):
section_list = []
maxSpeed_list = []
for section in edb.get_section_db().find(query):
avg_speed = fc.calAvgSpeed(section)
if len(maxSpeed_list) == 0 or fc.calAvgSpeed(section) > max(maxSpeed_list):
maxSpeed_list.append(avg_speed)
section_list.append(section)
return section_list
def get_feature_row(section):
ret_arr = np.zeros((5))
ret_arr[0] = fc.calAvgSpeed(section)
ret_arr[1] = fc.getIthMaxSpeed(section, 1)
percentiles = np.percentile(fc.calSpeeds(section), [90, 95, 99])
ret_arr[2] = percentiles[0]
ret_arr[3] = percentiles[1]
ret_arr[4] = percentiles[2]
return ret_arr
def find_other_sections_auto(needsSmoothing, fineWithoutSmoothing):
from sklearn import tree
section_list = []
nPos = len(needsSmoothing)
nNeg = len(fineWithoutSmoothing)
nRows = nPos + nNeg
nCols = 5
training_feature_set = np.zeros((nRows, nCols))
result_vector = np.zeros(nRows)
for (i, section) in enumerate(needsSmoothing):
training_feature_set[i] = get_feature_row(section)
result_vector[i] = 1
for (i, section) in enumerate(fineWithoutSmoothing):
training_feature_set[nPos + i] = get_feature_row(section)
result_vector[nPos + i] = -1
    nTestSetRows = edb.get_section_db().count_documents(query)
test_feature_set = np.zeros((nTestSetRows, nCols))
testSection_list = []
    for (i, section) in enumerate(edb.get_section_db().find(query)):
test_feature_set[i] = get_feature_row(section)
testSection_list.append(section)
clf = tree.DecisionTreeClassifier()
clf = clf.fit(training_feature_set, result_vector)
predictions = clf.predict(test_feature_set)
for (i, section) in enumerate(testSection_list):
if predictions[i] == 1:
section_list.append(section)
return section_list
def generate_stats_for_candidates(sID_list):
pass
def plot_instances_for_gps_error_model():
smoothing_ground_truth_map = json.load(open("/Users/shankari/cluster_ground_truth/smoothing/caltrain/smoothing_removed_points"))
needsSmoothing = []
fineWithoutSmoothing = []
for (sid, rp_list) in smoothing_ground_truth_map.items():
        sectionJSON = edb.get_section_db().find_one({"_id": sid})
if sectionJSON is None:
print("Unable to find section %s in the database" % sid)
else:
if len(rp_list) > 0:
needsSmoothing.append(sectionJSON)
else:
fineWithoutSmoothing.append(sectionJSON)
print("-" * 20, "Needs smoothing", '-' * 20)
for section in needsSmoothing:
if section is not None:
print(section["_id"], fc.calAvgSpeed(section), fc.getIthMaxSpeed(section, 1), np.percentile(fc.calSpeeds(section), [90, 95, 99]))
print("-" * 20, "Fine without smoothing", '-' * 20)
for section in fineWithoutSmoothing:
if section is not None:
print(section["_id"], fc.calAvgSpeed(section), fc.getIthMaxSpeed(section, 1), np.percentile(fc.calSpeeds(section), [90, 95, 99]))
other_manual_candidates = find_other_sections_manual(needsSmoothing, fineWithoutSmoothing)
other_auto_candidates = find_other_sections_auto(needsSmoothing, fineWithoutSmoothing)
print(other_auto_candidates)
gsfgtc.generate_cluster_comparison(other_manual_candidates, "/tmp/other_manual")
gsfgtc.generate_cluster_comparison(other_auto_candidates, "/tmp/other_auto")
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
plot_instances_for_gps_error_model()
|
paour/weblate
|
refs/heads/master
|
weblate/trans/autofixes/whitespace.py
|
2
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2014 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import re
from django.utils.translation import ugettext_lazy as _
from weblate.trans.autofixes.base import AutoFix
class SameBookendingWhitespace(AutoFix):
'''
Help non-techy translators with their whitespace
'''
name = _('Trailing and leading whitespace')
def fix_single_target(self, target, source, unit):
# normalize newlines of source
source = re.compile(r'\r\n|\r|\n').sub('\n', source)
        # capture leading and trailing whitespace
start = re.compile(r'^(\s+)').search(source)
end = re.compile(r'(\s+)$').search(source)
head = start.group() if start else ''
tail = end.group() if end else ''
# add the whitespace around the target translation (ignore blanks)
stripped = target.strip()
if stripped:
newtarget = '%s%s%s' % (head, stripped, tail)
return newtarget, newtarget != target
return target, False
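# A standalone sketch of the same bookending idea, independent of Weblate's
# AutoFix machinery; the function name and sample strings are invented, and it
# only runs when the file is executed directly inside a working checkout.
if __name__ == '__main__':
    def bookend(target, source):
        # copy the source's leading/trailing whitespace onto a stripped target
        source = re.sub(r'\r\n|\r|\n', '\n', source)
        head = re.search(r'^\s+', source)
        tail = re.search(r'\s+$', source)
        stripped = target.strip()
        if not stripped:
            return target
        return '%s%s%s' % (head.group() if head else '',
                           stripped,
                           tail.group() if tail else '')

    assert bookend('Hallo', '  Hello\n') == '  Hallo\n'
    assert bookend('   ', 'Hello') == '   '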
|
jlnaudin/x-drone
|
refs/heads/master
|
MissionPlanner-master/packages/IronPython.StdLib.2.7.4/content/Lib/zipfile.py
|
81
|
"""
Read and write ZIP files.
"""
import struct, os, time, sys, shutil
import binascii, cStringIO, stat
import io
import re
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
__all__ = ["BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED", "is_zipfile",
"ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile" ]
class BadZipfile(Exception):
pass
class LargeZipFile(Exception):
"""
Raised when writing a zipfile, the zipfile requires ZIP64 extensions
and those extensions are disabled.
"""
error = BadZipfile # The exception raised by this module
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = 1 << 16
ZIP_MAX_COMMENT = (1 << 16) - 1
# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
# Other ZIP compression methods not supported
# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)
# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = "<4s4H2LH"
stringEndArchive = "PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)
_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9
# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = "PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)
# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18
# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = "PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11
# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = "PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = "PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)
_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
def _check_zipfile(fp):
try:
if _EndRecData(fp):
return True # file has correct magic number
except IOError:
pass
return False
def is_zipfile(filename):
"""Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
"""
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except IOError:
pass
return result
def _EndRecData64(fpin, offset, endrec):
"""
Read the ZIP64 end-of-archive records and use that to update endrec
"""
try:
fpin.seek(offset - sizeEndCentDir64Locator, 2)
except IOError:
# If the seek fails, the file is not large enough to contain a ZIP64
# end-of-archive record, so just return the end record we were given.
return endrec
data = fpin.read(sizeEndCentDir64Locator)
sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
if sig != stringEndArchive64Locator:
return endrec
if diskno != 0 or disks != 1:
raise BadZipfile("zipfiles that span multiple disks are not supported")
# Assume no 'zip64 extensible data'
fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
data = fpin.read(sizeEndCentDir64)
sig, sz, create_version, read_version, disk_num, disk_dir, \
dircount, dircount2, dirsize, diroffset = \
struct.unpack(structEndArchive64, data)
if sig != stringEndArchive64:
return endrec
# Update the original endrec using data from the ZIP64 record
endrec[_ECD_SIGNATURE] = sig
endrec[_ECD_DISK_NUMBER] = disk_num
endrec[_ECD_DISK_START] = disk_dir
endrec[_ECD_ENTRIES_THIS_DISK] = dircount
endrec[_ECD_ENTRIES_TOTAL] = dircount2
endrec[_ECD_SIZE] = dirsize
endrec[_ECD_OFFSET] = diroffset
return endrec
def _EndRecData(fpin):
"""Return data from the "End of Central Directory" record, or None.
The data is a list of the nine items in the ZIP "End of central dir"
record followed by a tenth item, the file seek offset of this record."""
# Determine file size
fpin.seek(0, 2)
filesize = fpin.tell()
# Check to see if this is ZIP file with no archive comment (the
# "end of central directory" structure should be the last item in the
# file if this is the case).
try:
fpin.seek(-sizeEndCentDir, 2)
except IOError:
return None
data = fpin.read()
if data[0:4] == stringEndArchive and data[-2:] == "\000\000":
# the signature is correct and there's no comment, unpack structure
endrec = struct.unpack(structEndArchive, data)
endrec=list(endrec)
# Append a blank comment and record start offset
endrec.append("")
endrec.append(filesize - sizeEndCentDir)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, -sizeEndCentDir, endrec)
# Either this is not a ZIP file, or it is a ZIP file with an archive
# comment. Search the end of the file for the "end of central directory"
# record signature. The comment is the last item in the ZIP file and may be
# up to 64K long. It is assumed that the "end of central directory" magic
# number does not appear in the comment.
maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
fpin.seek(maxCommentStart, 0)
data = fpin.read()
start = data.rfind(stringEndArchive)
if start >= 0:
# found the magic number; attempt to unpack and interpret
recData = data[start:start+sizeEndCentDir]
endrec = list(struct.unpack(structEndArchive, recData))
comment = data[start+sizeEndCentDir:]
# check that comment length is correct
if endrec[_ECD_COMMENT_SIZE] == len(comment):
# Append the archive comment and start offset
endrec.append(comment)
endrec.append(maxCommentStart + start)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, maxCommentStart + start - filesize,
endrec)
# Unable to find a valid end of central directory structure
return
class ZipInfo (object):
"""Class with attributes describing each file in the ZIP archive."""
__slots__ = (
'orig_filename',
'filename',
'date_time',
'compress_type',
'comment',
'extra',
'create_system',
'create_version',
'extract_version',
'reserved',
'flag_bits',
'volume',
'internal_attr',
'external_attr',
'header_offset',
'CRC',
'compress_size',
'file_size',
'_raw_time',
)
def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
self.orig_filename = filename # Original file name in archive
# Terminate the file name at the first null byte. Null bytes in file
# names are used as tricks by viruses in archives.
null_byte = filename.find(chr(0))
if null_byte >= 0:
filename = filename[0:null_byte]
# This is used to ensure paths in generated ZIP files always use
# forward slashes as the directory separator, as required by the
# ZIP format specification.
if os.sep != "/" and os.sep in filename:
filename = filename.replace(os.sep, "/")
self.filename = filename # Normalized file name
self.date_time = date_time # year, month, day, hour, min, sec
# Standard values:
self.compress_type = ZIP_STORED # Type of compression for the file
self.comment = "" # Comment for each file
self.extra = "" # ZIP extra data
if sys.platform == 'win32':
self.create_system = 0 # System which created ZIP archive
else:
# Assume everything else is unix-y
self.create_system = 3 # System which created ZIP archive
self.create_version = 20 # Version which created ZIP archive
self.extract_version = 20 # Version needed to extract archive
self.reserved = 0 # Must be zero
self.flag_bits = 0 # ZIP flag bits
self.volume = 0 # Volume number of file header
self.internal_attr = 0 # Internal attributes
self.external_attr = 0 # External file attributes
# Other attributes are set by class ZipFile:
# header_offset Byte offset to the file header
# CRC CRC-32 of the uncompressed file
# compress_size Size of the compressed file
# file_size Size of the uncompressed file
def FileHeader(self):
"""Return the per-file header as a string."""
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0
else:
CRC = self.CRC
compress_size = self.compress_size
file_size = self.file_size
extra = self.extra
if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
# File is larger than what fits into a 4 byte integer,
# fall back to the ZIP64 extension
fmt = '<HHQQ'
extra = extra + struct.pack(fmt,
1, struct.calcsize(fmt)-4, file_size, compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
self.extract_version = max(45, self.extract_version)
self.create_version = max(45, self.extract_version)
filename, flag_bits = self._encodeFilenameFlags()
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, flag_bits,
self.compress_type, dostime, dosdate, CRC,
compress_size, file_size,
len(filename), len(extra))
return header + filename + extra
def _encodeFilenameFlags(self):
if isinstance(self.filename, unicode):
try:
return self.filename.encode('ascii'), self.flag_bits
except UnicodeEncodeError:
return self.filename.encode('utf-8'), self.flag_bits | 0x800
else:
return self.filename, self.flag_bits
def _decodeFilename(self):
if self.flag_bits & 0x800:
return self.filename.decode('utf-8')
else:
return self.filename
def _decodeExtra(self):
# Try to decode the extra field.
extra = self.extra
unpack = struct.unpack
while extra:
tp, ln = unpack('<HH', extra[:4])
if tp == 1:
if ln >= 24:
counts = unpack('<QQQ', extra[4:28])
elif ln == 16:
counts = unpack('<QQ', extra[4:20])
elif ln == 8:
counts = unpack('<Q', extra[4:12])
elif ln == 0:
counts = ()
else:
raise RuntimeError, "Corrupt extra field %s"%(ln,)
idx = 0
# ZIP64 extension (large files and/or large archives)
if self.file_size in (0xffffffffffffffffL, 0xffffffffL):
self.file_size = counts[idx]
idx += 1
if self.compress_size == 0xFFFFFFFFL:
self.compress_size = counts[idx]
idx += 1
if self.header_offset == 0xffffffffL:
old = self.header_offset
self.header_offset = counts[idx]
idx+=1
extra = extra[ln+4:]
class _ZipDecrypter:
"""Class to handle decryption of files stored within a ZIP archive.
ZIP supports a password-based form of encryption. Even though known
plaintext attacks have been found against it, it is still useful
to be able to get data out of such a file.
Usage:
zd = _ZipDecrypter(mypwd)
plain_char = zd(cypher_char)
plain_text = map(zd, cypher_text)
"""
def _GenerateCRCTable():
"""Generate a CRC-32 table.
ZIP encryption uses the CRC32 one-byte primitive for scrambling some
internal keys. We noticed that a direct implementation is faster than
relying on binascii.crc32().
"""
poly = 0xedb88320
table = [0] * 256
for i in range(256):
crc = i
for j in range(8):
if crc & 1:
crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
else:
crc = ((crc >> 1) & 0x7FFFFFFF)
table[i] = crc
return table
crctable = _GenerateCRCTable()
def _crc32(self, ch, crc):
"""Compute the CRC32 primitive on one byte."""
return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ord(ch)) & 0xff]
def __init__(self, pwd):
self.key0 = 305419896
self.key1 = 591751049
self.key2 = 878082192
for p in pwd:
self._UpdateKeys(p)
def _UpdateKeys(self, c):
self.key0 = self._crc32(c, self.key0)
self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
self.key1 = (self.key1 * 134775813 + 1) & 4294967295
self.key2 = self._crc32(chr((self.key1 >> 24) & 255), self.key2)
def __call__(self, c):
"""Decrypt a single character."""
c = ord(c)
k = self.key2 | 2
c = c ^ (((k * (k^1)) >> 8) & 255)
c = chr(c)
self._UpdateKeys(c)
return c
class ZipExtFile(io.BufferedIOBase):
"""File-like object for reading an archive member.
Is returned by ZipFile.open().
"""
# Max size supported by decompressor.
MAX_N = 1 << 31 - 1
# Read from compressed files in 4k blocks.
MIN_READ_SIZE = 4096
# Search for universal newlines or line chunks.
PATTERN = re.compile(r'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')
def __init__(self, fileobj, mode, zipinfo, decrypter=None):
self._fileobj = fileobj
self._decrypter = decrypter
self._compress_type = zipinfo.compress_type
self._compress_size = zipinfo.compress_size
self._compress_left = zipinfo.compress_size
if self._compress_type == ZIP_DEFLATED:
self._decompressor = zlib.decompressobj(-15)
self._unconsumed = ''
self._readbuffer = ''
self._offset = 0
self._universal = 'U' in mode
self.newlines = None
# Adjust read size for encrypted files since the first 12 bytes
# are for the encryption/password information.
if self._decrypter is not None:
self._compress_left -= 12
self.mode = mode
self.name = zipinfo.filename
if hasattr(zipinfo, 'CRC'):
self._expected_crc = zipinfo.CRC
self._running_crc = crc32(b'') & 0xffffffff
else:
self._expected_crc = None
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if not self._universal and limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find('\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
if not self._universal:
return io.BufferedIOBase.readline(self, limit)
line = ''
while limit < 0 or len(line) < limit:
readahead = self.peek(2)
if readahead == '':
return line
#
# Search for universal newlines or line chunks.
#
# The pattern returns either a line chunk or a newline, but not
# both. Combined with peek(2), we are assured that the sequence
# '\r\n' is always retrieved completely and never split into
# separate newlines - '\r', '\n' due to coincidental readaheads.
#
match = self.PATTERN.search(readahead)
newline = match.group('newline')
if newline is not None:
if self.newlines is None:
self.newlines = []
if newline not in self.newlines:
self.newlines.append(newline)
self._offset += len(newline)
return line + '\n'
chunk = match.group('chunk')
if limit >= 0:
chunk = chunk[: limit - len(line)]
self._offset += len(chunk)
line += chunk
return line
def peek(self, n=1):
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512]
def readable(self):
return True
def read(self, n=-1):
"""Read and return up to n bytes.
        If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
"""
buf = ''
if n is None:
n = -1
while True:
if n < 0:
data = self.read1(n)
elif n > len(buf):
data = self.read1(n - len(buf))
else:
return buf
if len(data) == 0:
return buf
buf += data
def _update_crc(self, newdata, eof):
# Update the CRC using the given data.
if self._expected_crc is None:
# No need to compute the CRC if we don't have a reference value
return
self._running_crc = crc32(newdata, self._running_crc) & 0xffffffff
# Check the CRC if we're at the end of the file
if eof and self._running_crc != self._expected_crc:
raise BadZipfile("Bad CRC-32 for file %r" % self.name)
def read1(self, n):
"""Read up to n bytes with at most one read() system call."""
# Simplify algorithm (branching) by transforming negative n to large n.
if n < 0 or n is None:
n = self.MAX_N
# Bytes available in read buffer.
len_readbuffer = len(self._readbuffer) - self._offset
# Read from file.
if self._compress_left > 0 and n > len_readbuffer + len(self._unconsumed):
nbytes = n - len_readbuffer - len(self._unconsumed)
nbytes = max(nbytes, self.MIN_READ_SIZE)
nbytes = min(nbytes, self._compress_left)
data = self._fileobj.read(nbytes)
self._compress_left -= len(data)
if data and self._decrypter is not None:
data = ''.join(map(self._decrypter, data))
if self._compress_type == ZIP_STORED:
self._update_crc(data, eof=(self._compress_left==0))
self._readbuffer = self._readbuffer[self._offset:] + data
self._offset = 0
else:
# Prepare deflated bytes for decompression.
self._unconsumed += data
# Handle unconsumed data.
if (len(self._unconsumed) > 0 and n > len_readbuffer and
self._compress_type == ZIP_DEFLATED):
data = self._decompressor.decompress(
self._unconsumed,
max(n - len_readbuffer, self.MIN_READ_SIZE)
)
self._unconsumed = self._decompressor.unconsumed_tail
eof = len(self._unconsumed) == 0 and self._compress_left == 0
if eof:
data += self._decompressor.flush()
self._update_crc(data, eof=eof)
self._readbuffer = self._readbuffer[self._offset:] + data
self._offset = 0
# Read from buffer.
data = self._readbuffer[self._offset: self._offset + n]
self._offset += len(data)
return data
class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read "r", write "w" or append "a".
compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
"""Open the ZIP file with mode read "r", write "w" or append "a"."""
if mode not in ("r", "w", "a"):
raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')
if compression == ZIP_STORED:
pass
elif compression == ZIP_DEFLATED:
if not zlib:
raise RuntimeError,\
"Compression requires the (missing) zlib module"
else:
raise RuntimeError, "That compression method is not supported"
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = key = mode.replace('b', '')[0]
self.pwd = None
self.comment = ''
# Check if we were passed a file-like object
if isinstance(file, basestring):
self._filePassed = 0
self.filename = file
modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
try:
self.fp = open(file, modeDict[mode])
except IOError:
if mode == 'a':
mode = key = 'w'
self.fp = open(file, modeDict[mode])
else:
raise
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
if key == 'r':
self._GetContents()
elif key == 'w':
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
elif key == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir, 0)
except BadZipfile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
else:
if not self._filePassed:
self.fp.close()
self.fp = None
raise RuntimeError, 'Mode must be "r", "w" or "a"'
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _GetContents(self):
"""Read the directory, making sure we close the file if the format
is bad."""
try:
self._RealGetContents()
except BadZipfile:
if not self._filePassed:
self.fp.close()
self.fp = None
raise
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except IOError:
raise BadZipfile("File is not a zip file")
if not endrec:
raise BadZipfile, "File is not a zip file"
if self.debug > 1:
print endrec
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self.comment = endrec[_ECD_COMMENT] # archive comment
# "concat" is zero, unless zip was concatenated to another file
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
inferred = concat + offset_cd
print "given, inferred, offset", offset_cd, inferred, concat
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = cStringIO.StringIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if centdir[0:4] != stringCentralDir:
raise BadZipfile, "Bad magic number for central directory"
centdir = struct.unpack(structCentralDir, centdir)
if self.debug > 2:
print centdir
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
x.header_offset = x.header_offset + concat
x.filename = x._decodeFilename()
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
if self.debug > 2:
print "total", total
def namelist(self):
"""Return a list of file names in the archive."""
l = []
for data in self.filelist:
l.append(data.filename)
return l
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self):
"""Print a table of contents for the zip file."""
print "%-46s %19s %12s" % ("File Name", "Modified ", "Size")
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
f = self.open(zinfo.filename, "r")
while f.read(chunk_size): # Check CRC-32
pass
except BadZipfile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
self.pwd = pwd
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
return self.open(name, "r", pwd).read()
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError, 'open() requires mode "r", "U", or "rU"'
if not self.fp:
raise RuntimeError, \
"Attempt to read ZIP archive that was already closed"
# Only open a new file for instances where we were not
# given a file object in the constructor
if self._filePassed:
zef_file = self.fp
else:
zef_file = open(self.filename, 'rb')
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
zef_file.seek(zinfo.header_offset, 0)
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if fheader[0:4] != stringFileHeader:
raise BadZipfile, "Bad magic number for file header"
fheader = struct.unpack(structFileHeader, fheader)
fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
if fname != zinfo.orig_filename:
raise BadZipfile, \
'File name in directory "%s" and header "%s" differ.' % (
zinfo.orig_filename, fname)
# check for encrypted flag & handle password
is_encrypted = zinfo.flag_bits & 0x1
zd = None
if is_encrypted:
if not pwd:
pwd = self.pwd
if not pwd:
raise RuntimeError, "File %s is encrypted, " \
"password required for extraction" % name
zd = _ZipDecrypter(pwd)
# The first 12 bytes in the cypher stream is an encryption header
# used to strengthen the algorithm. The first 11 bytes are
# completely random, while the 12th contains the MSB of the CRC,
# or the MSB of the file time depending on the header type
# and is used to check the correctness of the password.
bytes = zef_file.read(12)
h = map(zd, bytes[0:12])
if zinfo.flag_bits & 0x8:
# compare against the file type from extended local headers
check_byte = (zinfo._raw_time >> 8) & 0xff
else:
# compare against the CRC otherwise
check_byte = (zinfo.CRC >> 24) & 0xff
if ord(h[11]) != check_byte:
raise RuntimeError("Bad password for file", name)
return ZipExtFile(zef_file, mode, zinfo, zd)
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
        # forward slashes with platform-specific separators.
# Strip trailing path separator, unless it represents the root.
if (targetpath[-1:] in (os.path.sep, os.path.altsep)
and len(os.path.splitdrive(targetpath)[1]) > 1):
targetpath = targetpath[:-1]
# don't include leading "/" from file name if present
if member.filename[0] == '/':
targetpath = os.path.join(targetpath, member.filename[1:])
else:
targetpath = os.path.join(targetpath, member.filename)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
source = self.open(member, pwd=pwd)
target = file(targetpath, "wb")
shutil.copyfileobj(source, target)
source.close()
target.close()
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
if self.debug: # Warning for duplicate names
print "Duplicate name:", zinfo.filename
if self.mode not in ("w", "a"):
raise RuntimeError, 'write() requires mode "w" or "a"'
if not self.fp:
raise RuntimeError, \
"Attempt to write ZIP archive that was already closed"
if zinfo.compress_type == ZIP_DEFLATED and not zlib:
raise RuntimeError, \
"Compression requires the (missing) zlib module"
if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
raise RuntimeError, \
"That compression method is not supported"
if zinfo.file_size > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
if zinfo.header_offset > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile("Zipfile size would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16L # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
zinfo.header_offset = self.fp.tell() # Start of header bytes
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader())
return
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
zinfo.file_size = file_size = 0
self.fp.write(zinfo.FileHeader())
if zinfo.compress_type == ZIP_DEFLATED:
cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
else:
cmpr = None
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
# Seek backwards and write CRC and file sizes
position = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset + 14, 0)
self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.seek(position, 0)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, bytes, compress_type=None):
"""Write a file into the archive. The contents is the string
'bytes'. 'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
zinfo.external_attr = 0600 << 16
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
if compress_type is not None:
zinfo.compress_type = compress_type
zinfo.file_size = len(bytes) # Uncompressed size
zinfo.header_offset = self.fp.tell() # Start of header bytes
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(bytes) & 0xffffffff # CRC-32 checksum
if zinfo.compress_type == ZIP_DEFLATED:
co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
bytes = co.compress(bytes) + co.flush()
zinfo.compress_size = len(bytes) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zinfo.header_offset = self.fp.tell() # Start of header bytes
self.fp.write(zinfo.FileHeader())
self.fp.write(bytes)
self.fp.flush()
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode "w" and "a" write the ending
records."""
if self.fp is None:
return
if self.mode in ("w", "a") and self._didModify: # write ending records
count = 0
pos1 = self.fp.tell()
for zinfo in self.filelist: # write central directory
count = count + 1
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffffL
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q'*len(extra),
1, 8*len(extra), *extra) + extra_data
extract_version = max(45, zinfo.extract_version)
create_version = max(45, zinfo.create_version)
else:
extract_version = zinfo.extract_version
create_version = zinfo.create_version
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print >>sys.stderr, (structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = count
centDirSize = pos2 - pos1
centDirOffset = pos1
if (centDirCount >= ZIP_FILECOUNT_LIMIT or
centDirOffset > ZIP64_LIMIT or
centDirSize > ZIP64_LIMIT):
# Need to write the ZIP64 end-of-archive records
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
# check for valid comment length
if len(self.comment) >= ZIP_MAX_COMMENT:
if self.debug > 0:
msg = 'Archive comment is too long; truncating to %d bytes' \
% ZIP_MAX_COMMENT
self.comment = self.comment[:ZIP_MAX_COMMENT]
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self.comment))
self.fp.write(endrec)
self.fp.write(self.comment)
self.fp.flush()
if not self._filePassed:
self.fp.close()
self.fp = None
class PyZipFile(ZipFile):
"""Class to create ZIP archives with Python library files and packages."""
def writepy(self, pathname, basename = ""):
"""Add all files from "pathname" to the ZIP archive.
If pathname is a package directory, search the directory and
all package subdirectories recursively for all *.py and enter
the modules into the archive. If pathname is a plain
directory, listdir *.py and enter all modules. Else, pathname
must be a Python *.py file and the module will be put into the
archive. Added modules are always module.pyo or module.pyc.
This method will compile the module.py into module.pyc if
necessary.
"""
dir, name = os.path.split(pathname)
if os.path.isdir(pathname):
initname = os.path.join(pathname, "__init__.py")
if os.path.isfile(initname):
# This is a package directory, add it
if basename:
basename = "%s/%s" % (basename, name)
else:
basename = name
if self.debug:
print "Adding package in", pathname, "as", basename
fname, arcname = self._get_codename(initname[0:-3], basename)
if self.debug:
print "Adding", arcname
self.write(fname, arcname)
dirlist = os.listdir(pathname)
dirlist.remove("__init__.py")
# Add all *.py files and package subdirectories
for filename in dirlist:
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if os.path.isdir(path):
if os.path.isfile(os.path.join(path, "__init__.py")):
# This is a package directory, add it
self.writepy(path, basename) # Recursive call
elif ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print "Adding", arcname
self.write(fname, arcname)
else:
# This is NOT a package directory, add its files at top level
if self.debug:
print "Adding files from directory", pathname
for filename in os.listdir(pathname):
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print "Adding", arcname
self.write(fname, arcname)
else:
if pathname[-3:] != ".py":
raise RuntimeError, \
'Files added with writepy() must end with ".py"'
fname, arcname = self._get_codename(pathname[0:-3], basename)
if self.debug:
print "Adding file", arcname
self.write(fname, arcname)
def _get_codename(self, pathname, basename):
"""Return (filename, archivename) for the path.
Given a module name path, return the correct file path and
archive name, compiling if necessary. For example, given
/python/lib/string, return (/python/lib/string.pyc, string).
"""
file_py = pathname + ".py"
file_pyc = pathname + ".pyc"
file_pyo = pathname + ".pyo"
if os.path.isfile(file_pyo) and \
os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime:
fname = file_pyo # Use .pyo file
elif not os.path.isfile(file_pyc) or \
os.stat(file_pyc).st_mtime < os.stat(file_py).st_mtime:
import py_compile
if self.debug:
print "Compiling", file_py
try:
py_compile.compile(file_py, file_pyc, None, True)
except py_compile.PyCompileError,err:
print err.msg
fname = file_pyc
else:
fname = file_pyc
archivename = os.path.split(fname)[1]
if basename:
archivename = "%s/%s" % (basename, archivename)
return (fname, archivename)
def main(args = None):
import textwrap
USAGE=textwrap.dedent("""\
Usage:
zipfile.py -l zipfile.zip # Show listing of a zipfile
zipfile.py -t zipfile.zip # Test if a zipfile is valid
zipfile.py -e zipfile.zip target # Extract zipfile into target dir
zipfile.py -c zipfile.zip src ... # Create zipfile from sources
""")
if args is None:
args = sys.argv[1:]
if not args or args[0] not in ('-l', '-c', '-e', '-t'):
print USAGE
sys.exit(1)
if args[0] == '-l':
if len(args) != 2:
print USAGE
sys.exit(1)
zf = ZipFile(args[1], 'r')
zf.printdir()
zf.close()
elif args[0] == '-t':
if len(args) != 2:
print USAGE
sys.exit(1)
zf = ZipFile(args[1], 'r')
badfile = zf.testzip()
if badfile:
print("The following enclosed file is corrupted: {!r}".format(badfile))
print "Done testing"
elif args[0] == '-e':
if len(args) != 3:
print USAGE
sys.exit(1)
zf = ZipFile(args[1], 'r')
out = args[2]
for path in zf.namelist():
if path.startswith('./'):
tgt = os.path.join(out, path[2:])
else:
tgt = os.path.join(out, path)
tgtdir = os.path.dirname(tgt)
if not os.path.exists(tgtdir):
os.makedirs(tgtdir)
with open(tgt, 'wb') as fp:
fp.write(zf.read(path))
zf.close()
elif args[0] == '-c':
if len(args) < 3:
print USAGE
sys.exit(1)
def addToZip(zf, path, zippath):
if os.path.isfile(path):
zf.write(path, zippath, ZIP_DEFLATED)
elif os.path.isdir(path):
for nm in os.listdir(path):
addToZip(zf,
os.path.join(path, nm), os.path.join(zippath, nm))
# else: ignore
zf = ZipFile(args[1], 'w', allowZip64=True)
for src in args[2:]:
addToZip(zf, src, os.path.basename(src))
zf.close()
if __name__ == "__main__":
main()
|
upsuper/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/webdriver/tests/sessions/new_session/support/create.py
|
15
|
# Note that we can only test things here that all implementations must support
valid_data = [
("acceptInsecureCerts", [False, None]),
("browserName", [None]),
("browserVersion", [None]),
("platformName", [None]),
("pageLoadStrategy", ["none", "eager", "normal", None]),
("proxy", [None]),
("timeouts", [{"script": 0, "pageLoad": 2.0, "implicit": 2**53 - 1},
{"script": 50, "pageLoad": 25},
{"script": 500},
{}]),
("unhandledPromptBehavior", ["dismiss", "accept", None]),
("test:extension", [True, "abc", 123, [], {"key": "value"}, None]),
]
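# A hedged sketch of consuming this table with pytest parametrization; the
# import path is an assumption, and a real test would feed each value into a
# New Session request rather than merely inspecting the table.
#
#     import pytest
#     from support.create import valid_data
#
#     @pytest.mark.parametrize("key, value",
#                              [(k, v) for k, values in valid_data for v in values])
#     def test_capability_is_accepted(key, value):
#         capability = {} if value is None else {key: value}
#         # pass `capability` into a New Session request and assert success
#         assert isinstance(capability, dict)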
|
Nicop06/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/nxos/nxos_vrf_af.py
|
23
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vrf_af
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VRF AF.
description:
- Manages VRF AF
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
  - Default, where supported, restores the parameter's default value.
options:
vrf:
description:
- Name of the VRF.
required: true
afi:
description:
- Address-Family Identifier (AFI).
required: true
choices: ['ipv4', 'ipv6']
default: null
safi:
description:
- Sub Address-Family Identifier (SAFI).
- Deprecated in 2.4
required: true
choices: ['unicast', 'multicast']
default: null
route_target_both_auto_evpn:
description:
- Enable/Disable the EVPN route-target 'auto' setting for both
import and export target communities.
required: false
choices: ['true', 'false']
default: null
state:
description:
- Determines whether the config should be present or
not on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_vrf_af:
vrf: ntc
afi: ipv4
route_target_both_auto_evpn: True
state: present
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["vrf context ntc", "address-family ipv4 unicast"]
'''
from ansible.module_utils.nxos import get_config, load_config
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import NetworkConfig
def main():
argument_spec = dict(
vrf=dict(required=True),
afi=dict(required=True, choices=['ipv4', 'ipv6']),
route_target_both_auto_evpn=dict(required=False, type='bool'),
state=dict(choices=['present', 'absent'], default='present'),
m_facts=dict(default=False, type='bool', removed_in_version="2.4"),
safi=dict(choices=['unicast', 'multicast'], removed_in_version="2.4"),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False, 'warnings': warnings}
config_text = get_config(module)
config = NetworkConfig(indent=2, contents=config_text)
path = ['vrf context %s' % module.params['vrf'],
'address-family %s unicast' % module.params['afi']]
try:
current = config.get_block_config(path)
except ValueError:
current = None
commands = list()
if current and module.params['state'] == 'absent':
commands.append('no address-family %s unicast' % module.params['afi'])
elif module.params['state'] == 'present':
if current:
have = 'route-target both auto evpn' in current
want = bool(module.params['route_target_both_auto_evpn'])
if want and not have:
commands.append('address-family %s unicast' % module.params['afi'])
commands.append('route-target both auto evpn')
elif have and not want:
commands.append('address-family %s unicast' % module.params['afi'])
commands.append('no route-target both auto evpn')
else:
commands.append('address-family %s unicast' % module.params['afi'])
if module.params['route_target_both_auto_evpn']:
commands.append('route-target both auto evpn')
if commands:
commands.insert(0, 'vrf context %s' % module.params['vrf'])
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
result['commands'] = commands
module.exit_json(**result)
if __name__ == '__main__':
main()
|
tunneln/CarnotKE
|
refs/heads/master
|
jyhton/lib-python/2.7/encodings/cp857.py
|
593
|
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP857.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp857',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x0130, # LATIN CAPITAL LETTER I WITH DOT ABOVE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x009f: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x011e, # LATIN CAPITAL LETTER G WITH BREVE
0x00a7: 0x011f, # LATIN SMALL LETTER G WITH BREVE
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00b8: 0x00a9, # COPYRIGHT SIGN
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x00a2, # CENT SIGN
0x00be: 0x00a5, # YEN SIGN
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00d1: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00d5: None, # UNDEFINED
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x00a6, # BROKEN BAR
0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: None, # UNDEFINED
0x00e8: 0x00d7, # MULTIPLICATION SIGN
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00ed: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x00ee: 0x00af, # MACRON
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: None, # UNDEFINED
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\u0131' # 0x008d -> LATIN SMALL LETTER DOTLESS I
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\u0130' # 0x0098 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\u015e' # 0x009e -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u015f' # 0x009f -> LATIN SMALL LETTER S WITH CEDILLA
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u011e' # 0x00a6 -> LATIN CAPITAL LETTER G WITH BREVE
u'\u011f' # 0x00a7 -> LATIN SMALL LETTER G WITH BREVE
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\xae' # 0x00a9 -> REGISTERED SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xa9' # 0x00b8 -> COPYRIGHT SIGN
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\xa2' # 0x00bd -> CENT SIGN
u'\xa5' # 0x00be -> YEN SIGN
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE
u'\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\xba' # 0x00d0 -> MASCULINE ORDINAL INDICATOR
u'\xaa' # 0x00d1 -> FEMININE ORDINAL INDICATOR
u'\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\ufffe' # 0x00d5 -> UNDEFINED
u'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\xa6' # 0x00dd -> BROKEN BAR
u'\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
u'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\ufffe' # 0x00e7 -> UNDEFINED
u'\xd7' # 0x00e8 -> MULTIPLICATION SIGN
u'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xec' # 0x00ec -> LATIN SMALL LETTER I WITH GRAVE
u'\xff' # 0x00ed -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xaf' # 0x00ee -> MACRON
u'\xb4' # 0x00ef -> ACUTE ACCENT
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\ufffe' # 0x00f2 -> UNDEFINED
u'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
u'\xb6' # 0x00f4 -> PILCROW SIGN
u'\xa7' # 0x00f5 -> SECTION SIGN
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\xb8' # 0x00f7 -> CEDILLA
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\xa8' # 0x00f9 -> DIAERESIS
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\xb9' # 0x00fb -> SUPERSCRIPT ONE
u'\xb3' # 0x00fc -> SUPERSCRIPT THREE
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x00bd, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a5: 0x00be, # YEN SIGN
0x00a6: 0x00dd, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00a9: 0x00b8, # COPYRIGHT SIGN
0x00aa: 0x00d1, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00ae: 0x00a9, # REGISTERED SIGN
0x00af: 0x00ee, # MACRON
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00fc, # SUPERSCRIPT THREE
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x00f4, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b8: 0x00f7, # CEDILLA
0x00b9: 0x00fb, # SUPERSCRIPT ONE
0x00ba: 0x00d0, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x00e8, # MULTIPLICATION SIGN
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x00ed, # LATIN SMALL LETTER Y WITH DIAERESIS
0x011e: 0x00a6, # LATIN CAPITAL LETTER G WITH BREVE
0x011f: 0x00a7, # LATIN SMALL LETTER G WITH BREVE
0x0130: 0x0098, # LATIN CAPITAL LETTER I WITH DOT ABOVE
0x0131: 0x008d, # LATIN SMALL LETTER DOTLESS I
0x015e: 0x009e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x015f: 0x009f, # LATIN SMALL LETTER S WITH CEDILLA
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
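# Hedged usage sketch (not part of the original module): round-trips a string
# containing a non-ASCII CP857 character through the Codec defined above; the
# sample text is illustrative only.
if __name__ == '__main__':
    text = u'\u015eeker'                  # starts with S WITH CEDILLA (0x9e in CP857)
    encoded, _ = Codec().encode(text)
    decoded, _ = Codec().decode(encoded)
    assert decoded == text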
|
jt6562/XX-Net
|
refs/heads/master
|
python27/1.0/lib/noarch/pyasn1/codec/cer/decoder.py
|
8
|
# CER decoder
from pyasn1.type import univ
from pyasn1.codec.ber import decoder
from pyasn1.compat.octets import oct2int
from pyasn1 import error
class BooleanDecoder(decoder.AbstractSimpleDecoder):
protoComponent = univ.Boolean(0)
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
if not head:
raise error.PyAsn1Error('Empty substrate')
byte = oct2int(head[0])
# CER/DER specifies encoding of TRUE as 0xFF and FALSE as 0x0, while
# BER allows any non-zero value as TRUE; cf. sections 8.2.2. and 11.1
# in http://www.itu.int/ITU-T/studygroups/com17/languages/X.690-0207.pdf
if byte == 0xff:
value = 1
elif byte == 0x00:
value = 0
else:
raise error.PyAsn1Error('Boolean CER violation: %s' % byte)
return self._createComponent(asn1Spec, tagSet, value), tail
tagMap = decoder.tagMap.copy()
tagMap.update({
univ.Boolean.tagSet: BooleanDecoder()
})
typeMap = decoder.typeMap
class Decoder(decoder.Decoder): pass
decode = Decoder(tagMap, decoder.typeMap)
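# Hedged usage sketch (not part of the original module): decoding a CER-encoded
# BOOLEAN with the decode callable defined above (tag 0x01, length 0x01, value
# 0xFF for TRUE; 0x00 for FALSE; anything else raises PyAsn1Error).
if __name__ == '__main__':
    value, rest = decode('\x01\x01\xff')
    assert int(value) == 1 and not rest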
|
drufat/sympy
|
refs/heads/master
|
sympy/strategies/util.py
|
124
|
from __future__ import print_function, division
from sympy import Basic
new = Basic.__new__
def assoc(d, k, v):
d = d.copy()
d[k] = v
return d
basic_fns = {'op': type,
'new': Basic.__new__,
'leaf': lambda x: not isinstance(x, Basic) or x.is_Atom,
'children': lambda x: x.args}
expr_fns = assoc(basic_fns, 'new', lambda op, *args: op(*args))
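# Hedged usage sketch (not part of the original file): rebuilding an expression
# from its op and children with the expr_fns helpers defined above.
if __name__ == '__main__':
    from sympy.abc import x, y
    expr = x + y
    op = expr_fns['op'](expr)                  # Add
    children = expr_fns['children'](expr)      # (x, y)
    assert expr_fns['new'](op, *children) == expr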
|
fugitifduck/exabgp
|
refs/heads/master
|
lib/exabgp/bgp/message/open/capability/addpath.py
|
1
|
# encoding: utf-8
"""
addpath.py
Created by Thomas Mangin on 2012-07-17.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from struct import pack
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
from exabgp.bgp.message.open.capability.capability import Capability
# ====================================================================== AddPath
#
class AddPath (Capability,dict):
ID = Capability.CODE.ADD_PATH
string = {
0: 'disabled',
1: 'receive',
2: 'send',
3: 'send/receive',
}
def __init__ (self, families=(),send_receive=0):
for afi,safi in families:
self.add_path(afi,safi,send_receive)
def add_path (self, afi, safi, send_receive):
self[(afi,safi)] = send_receive
def __str__ (self):
return 'AddPath(' + ','.join(["%s %s %s" % (self.string[self[aafi]],xafi,xsafi) for (aafi,xafi,xsafi) in [((afi,safi),str(afi),str(safi)) for (afi,safi) in self]]) + ')'
def json (self):
families = ','.join('"%s/%s": "%s"' % (xafi,xsafi,self.string[self[aafi]]) for (aafi,xafi,xsafi) in (((afi,safi),str(afi),str(safi)) for (afi,safi) in self))
return '{ "name": "addpath"%s%s }' % (', ' if families else '', families)
def extract (self):
rs = []
for v in self:
if self[v]:
rs.append(v[0].pack() + v[1].pack() + pack('!B',self[v]))
return rs
@staticmethod
def unpack_capability (instance, data, capability=None): # pylint: disable=W0613
# XXX: FIXME: should check that we have not yet seen the capability
while data:
afi = AFI.unpack(data[:2])
safi = SAFI.unpack(data[2])
sr = ord(data[3])
instance.add_path(afi,safi,sr)
data = data[4:]
return instance
|
Dfelker/ansible
|
refs/heads/devel
|
lib/ansible/utils/path.py
|
81
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import stat
from time import sleep
from errno import EEXIST
__all__ = ['is_executable', 'unfrackpath']
def is_executable(path):
'''is the given path executable?'''
return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE] or stat.S_IXGRP & os.stat(path)[stat.ST_MODE] or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
def unfrackpath(path):
'''
returns a path that is free of symlinks, environment
variables, relative path traversals and symbols (~)
example:
'$HOME/../../var/mail' becomes '/var/spool/mail'
'''
return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
def makedirs_safe(path, mode=None):
    '''Safe way to create dirs in multiprocess/thread environments'''
if not os.path.exists(path):
try:
if mode:
os.makedirs(path, mode)
else:
os.makedirs(path)
        except OSError as e:
if e.errno != EEXIST:
raise
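# Hedged usage sketch (not part of the original module): exercises the helpers
# above; the paths are illustrative only.
if __name__ == '__main__':
    print(unfrackpath('~/../tmp'))            # e.g. '/tmp', depending on $HOME
    makedirs_safe('/tmp/ansible_path_demo')   # safe to call repeatedly
    print(bool(is_executable('/bin/sh')))     # True on most POSIX systems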
|
bestwpw/ChromeWebLab
|
refs/heads/master
|
Sketchbots/sw/labqueue/config.py
|
7
|
# Copyright 2013 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is the labqueue configuration file. It contains various settings
which allow the customization of the system's behavior.
"""
import os
import datetime
# This is the worker GUID used to signify topics, tasks, etc. which
# were created internally by this system (as opposed to, say, a
# sketchbot connecting over the REST interface).
#
API_WORKER_GUID = 'api.openweblab'
# This setting specifies the maximum size of a file which can be POSTed
# directly to the binary storage section of the queue. Any content which
# exceeds this amount must be uploaded to a special dynamic URL generated by
# the server. Such URLs must be requested from the server just prior to
# uploading the content.
MAX_DIRECT_UPLOAD_FILE_SIZE_BYTES = 800000
# If ALLOW_UNAUTHENTICATED_USE_WITH_WARNING is True, the server will
# allow connection from sketchbots, etc. without any kind of security
# or authorization. In that case, the server will complain with a
# warning, but allow such requests to proceed.
#
# If it is False, then the server will require an authorization header
# with pre-shared security key to be included with all requests.
#
ALLOW_UNAUTHENTICATED_USE_WITH_WARNING = True
# The HELP_TEMPLATES_PATH and ADMIN_TEMPLATES_PATH settings should point
# to the location of the template source files used to render online API
# help and browser-based admin UI.
#
HELP_TEMPLATES_PATH = os.path.join(os.path.dirname(__file__), 'templates', 'help' )
ADMIN_TEMPLATES_PATH = os.path.join(os.path.dirname(__file__), 'templates', 'admin' )
# If LDCS_ONLY_EDITABLE_BY_ORIGINAL_CREATOR is True, then an LDC can only
# be modified by the same worker that created it. If False, then any worker
# can edit any LDC.
#
LDCS_ONLY_EDITABLE_BY_ORIGINAL_CREATOR = False
# The number of seconds a Task reservation will be held before being
# automatically released by the system.
#
TASK_RESERVATION_MAX_HOLD_TIME_SEC = 500
# If True, then Topics with task policies using a 'max_num_tasks'
# rule will get max_num_tasks new 'slots' each hour for new Tasks,
# even if the Tasks from the previous hour have not been completed.
# If False, then the topic will have an absolute cap at 'max_num_tasks'
# so that Tasks must be completed for new ones to get in.
#
TASK_POLICY_MAX_NUM_TASKS_USES_SLOT_MODE = False
# Records are kept to check the last time the system was contacted
# by a particular worker. Set DISABLE_WORKER_STATUS_WRITES to silently
# disable updates to these records. This can be useful for debugging
# or reducing the number of datastore writes.
#
DISABLE_WORKER_STATUS_WRITES = False
# The minimum time between allowed worker status updates, in seconds.
# If a worker tries to update its own status less than MIN_WORKER_STATUS_UPDATE_PERIOD_SEC
# seconds since its last update, the server will return an error. This is used
# to prevent over-active workers from gobbling up app engine quota.
# To reduce quota use, set this to a higher number (or better yet, make
# your robots check in less frequently).
#
MIN_WORKER_STATUS_UPDATE_PERIOD_SEC = 5
# When listing the recent status of all workers that have contacted
# the system, ANCIENT_WORKER_STATUS_CUTOFF_DAYS can be used to automatically
# filter out old entries. If non-None, then this should indicate the
# maximum age of a worker before they are dropped from status lists.
# Even ancient workers can have their status queried via directly
# requesting that single worker's status.
#
ANCIENT_WORKER_STATUS_CUTOFF_DAYS = None # 10
# This is the canonical list of valid touchpoint names. A touchpoint
# is a grouping of closely-related interactive exhibit pieces.
#
VALID_TOUCHPOINT_NAMES = [
'rob',
]
# This is the canonical list of valid activity space names. An activity
# space is a logical grouping of touchpoints.
#
VALID_ACTIVITY_SPACES = [
'www',
]
# If HTTP_RAISE_EXCEPTIONS_IN_REQUESTS is True, the RESTful HTTP interface
# will allow any Exceptions encountered in the labqueue code to bubble up
# as true Python Exceptions. This will cause any Exception-generating request
# to respond as HTTP status 500. That will potentially obscure any bugs from
# users connecting via HTTP, in exchange for allowing the system to be debugged
# via a live debugger. If it is False, however, the Exceptions will be caught
# and (hopefully) userful error responses will be returned over HTTP, with
# appropriate status codes.
#
HTTP_RAISE_EXCEPTIONS_IN_REQUESTS = False
# If HTTP_HELP is True, then a built-in human interface to the RESTful API
# will be accessible by appending ?HELP=GET (to try out GET requests) or
# ?HELP=POST (for POST requests) to any API URL.
#
HTTP_HELP = True
LDC_THUMBNAIL_PARAMS = {
'small': {
'all': {
'max_source_height': None,
'min_source_height': None,
'max_source_width': None,
'min_source_width': None,
'width': 140,
'height': 110,
'overlay_path': None,
'valign': 'middle',
# these are good for robot portraits:
'top_crop_pct': None,
'bottom_crop_pct': None,
'left_crop_pct': None,
'right_crop_pct': None,
'crop_x': None,
'crop_y': None,
'post_crop_uniform_scale_pct': None,
},
},
}
# The number of seconds to allow edge cache to hold LDC public media content
PUBLIC_MEDIA_CACHE_MAX_AGE_SEC = 61
|
gchp/django
|
refs/heads/master
|
tests/forms_tests/widget_tests/base.py
|
192
|
from django.test import SimpleTestCase
class WidgetTest(SimpleTestCase):
beatles = (('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))
def check_html(self, widget, name, value, html='', attrs=None, **kwargs):
output = widget.render(name, value, attrs=attrs, **kwargs)
self.assertHTMLEqual(output, html)
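# Hedged usage sketch (not part of the original file): how a concrete widget test
# might build on WidgetTest; the widget, field name and expected HTML are
# illustrative only.
#
# from django.forms import TextInput
#
# class TextInputTest(WidgetTest):
#     widget = TextInput()
#
#     def test_render(self):
#         self.check_html(self.widget, 'email', '', html='<input type="text" name="email">')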
|
2014c2g14/w16b_test
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/_functools.py
|
727
|
def partial(func, *args, **keywords):
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.keywords = keywords
return newfunc
def reduce(func,iterable,initializer=None):
args = iter(iterable)
if initializer is not None:
res = initializer
else:
res = next(args)
while True:
try:
res = func(res,next(args))
except StopIteration:
return res
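# Hedged usage sketch (not part of the original module): exercises the pure-Python
# partial() and reduce() defined above when the file is run directly.
if __name__ == '__main__':
    add = lambda a, b: a + b
    add_five = partial(add, 5)
    assert add_five(3) == 8
    assert reduce(add, [1, 2, 3, 4], 10) == 20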
|
linuxsoftware/python-holidays
|
refs/heads/master
|
holidays.py
|
1
|
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <ryanssdev@icloud.com>
# Website: https://github.com/ryanss/python-holidays
# License: MIT (see LICENSE file)
from datetime import date, datetime
from dateutil.easter import easter
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta as rd
from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU
import six
__version__ = '0.8.1'
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = range(7)
WEEKEND = (SATURDAY, SUNDAY)
class HolidayBase(dict):
PROVINCES = []
def __init__(self, years=[], expand=True, observed=True,
prov=None, state=None):
self.observed = observed
self.expand = expand
if isinstance(years, int):
years = [years, ]
self.years = set(years)
if not getattr(self, 'prov', False):
self.prov = prov
self.state = state
for year in list(self.years):
self._populate(year)
def __setattr__(self, key, value):
if key == 'observed' and len(self) > 0:
dict.__setattr__(self, key, value)
if value is True:
# Add (Observed) dates
years = list(self.years)
self.years = set()
self.clear()
for year in years:
self._populate(year)
else:
# Remove (Observed) dates
for k, v in list(self.items()):
if v.find("Observed") >= 0:
del self[k]
else:
return dict.__setattr__(self, key, value)
def __keytransform__(self, key):
if isinstance(key, datetime):
key = key.date()
elif isinstance(key, date):
key = key
elif isinstance(key, int) or isinstance(key, float):
key = datetime.utcfromtimestamp(key).date()
elif isinstance(key, six.string_types):
try:
key = parse(key).date()
except:
raise ValueError("Cannot parse date from string '%s'" % key)
else:
raise TypeError("Cannot convert type '%s' to date." % type(key))
if self.expand and key.year not in self.years:
self.years.add(key.year)
self._populate(key.year)
return key
def __contains__(self, key):
return dict.__contains__(self, self.__keytransform__(key))
def __getitem__(self, key):
return dict.__getitem__(self, self.__keytransform__(key))
def __setitem__(self, key, value):
if key in self:
if self.get(key).find(value) < 0 \
and value.find(self.get(key)) < 0:
value = "%s, %s" % (value, self.get(key))
else:
value = self.get(key)
return dict.__setitem__(self, self.__keytransform__(key), value)
def update(self, *args):
args = list(args)
for arg in args:
if isinstance(arg, dict):
for key, value in list(arg.items()):
self[key] = value
elif isinstance(arg, list):
for item in arg:
self[item] = "Holiday"
else:
self[arg] = "Holiday"
def append(self, *args):
return self.update(*args)
def get(self, key, default=None):
return dict.get(self, self.__keytransform__(key), default)
def get_list(self, key):
return [h for h in self.get(key, "").split(", ") if h]
def pop(self, key, default=None):
if default is None:
return dict.pop(self, self.__keytransform__(key))
return dict.pop(self, self.__keytransform__(key), default)
def __eq__(self, other):
return (dict.__eq__(self, other) and self.__dict__ == other.__dict__)
def __ne__(self, other):
return (dict.__ne__(self, other) or self.__dict__ != other.__dict__)
def __add__(self, other):
if isinstance(other, int) and other == 0:
# Required to sum() list of holidays
# sum([h1, h2]) is equivalent to (0 + h1 + h2)
return self
elif not isinstance(other, HolidayBase):
raise TypeError()
HolidaySum = createHolidaySum(self, other)
country = (getattr(self, 'country', None) or
getattr(other, 'country', None))
if self.country and other.country and self.country != other.country:
c1 = self.country
if not isinstance(c1, list):
c1 = [c1]
c2 = other.country
if not isinstance(c2, list):
c2 = [c2]
country = c1 + c2
prov = getattr(self, 'prov', None) or getattr(other, 'prov', None)
if self.prov and other.prov and self.prov != other.prov:
p1 = self.prov if isinstance(self.prov, list) else [self.prov]
p2 = other.prov if isinstance(other.prov, list) else [other.prov]
prov = p1 + p2
return HolidaySum(years=(self.years | other.years),
expand=(self.expand or other.expand),
observed=(self.observed or other.observed),
country=country, prov=prov)
def __radd__(self, other):
return self.__add__(other)
def _populate(self, year):
pass
def createHolidaySum(h1, h2):
class HolidaySum(HolidayBase):
def __init__(self, country, **kwargs):
self.country = country
self.holidays = []
if getattr(h1, 'holidays', False):
for h in h1.holidays:
self.holidays.append(h)
else:
self.holidays.append(h1)
if getattr(h2, 'holidays', False):
for h in h2.holidays:
self.holidays.append(h)
else:
self.holidays.append(h2)
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
for h in self.holidays[::-1]:
h._populate(year)
self.update(h)
return HolidaySum
class Canada(HolidayBase):
PROVINCES = ['AB', 'BC', 'MB', 'NB', 'NL', 'NS', 'NT', 'NU', 'ON', 'PE',
'QC', 'SK', 'YU']
def __init__(self, **kwargs):
self.country = 'CA'
self.prov = kwargs.pop('prov', 'ON')
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# New Year's Day
if year >= 1867:
name = "New Year's Day"
self[date(year, 1, 1)] = name
if self.observed and date(year, 1, 1).weekday() == 6:
self[date(year, 1, 1) + rd(days=+1)] = name + " (Observed)"
elif self.observed and date(year, 1, 1).weekday() == 5:
# Add Dec 31st from the previous year without triggering
# the entire year to be added
expand = self.expand
self.expand = False
self[date(year, 1, 1) + rd(days=-1)] = name + " (Observed)"
self.expand = expand
# The next year's observed New Year's Day can be in this year
# when it falls on a Friday (Jan 1st is a Saturday)
if self.observed and date(year, 12, 31).weekday() == 4:
self[date(year, 12, 31)] = name + " (Observed)"
# Family Day / Louis Riel Day (MB) / Islander Day (PE)
# / Heritage Day (NS, YU)
if self.prov in ('AB', 'SK', 'ON') and year >= 2008:
self[date(year, 2, 1) + rd(weekday=MO(+3))] = "Family Day"
elif self.prov in ('AB', 'SK') and year >= 2007:
self[date(year, 2, 1) + rd(weekday=MO(+3))] = "Family Day"
elif self.prov == 'AB' and year >= 1990:
self[date(year, 2, 1) + rd(weekday=MO(+3))] = "Family Day"
elif self.prov == 'BC' and year >= 2013:
self[date(year, 2, 1) + rd(weekday=MO(+2))] = "Family Day"
elif self.prov == 'MB' and year >= 2008:
self[date(year, 2, 1) + rd(weekday=MO(+3))] = "Louis Riel Day"
elif self.prov == 'PE' and year >= 2010:
self[date(year, 2, 1) + rd(weekday=MO(+3))] = "Islander Day"
elif self.prov == 'PE' and year == 2009:
self[date(year, 2, 1) + rd(weekday=MO(+2))] = "Islander Day"
        elif self.prov in ('NS',) and year >= 2015:
# http://novascotia.ca/lae/employmentrights/NovaScotiaHeritageDay.asp
self[date(year, 2, 1) + rd(weekday=MO(+3))] = "Heritage Day"
        elif self.prov in ('YU',):
# start date?
# http://heritageyukon.ca/programs/heritage-day
# https://en.wikipedia.org/wiki/Family_Day_(Canada)#Yukon_Heritage_Day
# Friday before the last Sunday in February
dt = date(year, 3, 1) + rd(weekday=SU(-1)) + rd(weekday=FR(-1))
self[dt] = "Heritage Day"
# St. Patrick's Day
if self.prov == 'NL' and year >= 1900:
dt = date(year, 3, 17)
# Nearest Monday to March 17
dt1 = date(year, 3, 17) + rd(weekday=MO(-1))
dt2 = date(year, 3, 17) + rd(weekday=MO(+1))
if dt2 - dt <= dt - dt1:
self[dt2] = "St. Patrick's Day"
else:
self[dt1] = "St. Patrick's Day"
# Good Friday
if self.prov != 'QC' and year >= 1867:
self[easter(year) + rd(weekday=FR(-1))] = "Good Friday"
# Easter Monday
if self.prov == 'QC' and year >= 1867:
self[easter(year) + rd(weekday=MO)] = "Easter Monday"
# St. George's Day
if self.prov == 'NL' and year == 2010:
# 4/26 is the Monday closer to 4/23 in 2010
# but the holiday was observed on 4/19? Crazy Newfies!
self[date(2010, 4, 19)] = "St. George's Day"
elif self.prov == 'NL' and year >= 1990:
dt = date(year, 4, 23)
# Nearest Monday to April 23
dt1 = dt + rd(weekday=MO(-1))
dt2 = dt + rd(weekday=MO(+1))
if dt2 - dt < dt - dt1:
self[dt2] = "St. George's Day"
else:
self[dt1] = "St. George's Day"
# Victoria Day / National Patriotes Day (QC)
if self.prov not in ('NB', 'NS', 'PE', 'NL', 'QC') and year >= 1953:
self[date(year, 5, 24) + rd(weekday=MO(-1))] = "Victoria Day"
elif self.prov == 'QC' and year >= 1953:
name = "National Patriotes Day"
self[date(year, 5, 24) + rd(weekday=MO(-1))] = name
# National Aboriginal Day
if self.prov == 'NT' and year >= 1996:
self[date(year, 6, 21)] = "National Aboriginal Day"
# St. Jean Baptiste Day
if self.prov == 'QC' and year >= 1925:
self[date(year, 6, 24)] = "St. Jean Baptiste Day"
if self.observed and date(year, 6, 24).weekday() == 6:
self[date(year, 6, 25)] = "St. Jean Baptiste Day (Observed)"
# Discovery Day
if self.prov == 'NL' and year >= 1997:
dt = date(year, 6, 24)
# Nearest Monday to June 24
dt1 = dt + rd(weekday=MO(-1))
dt2 = dt + rd(weekday=MO(+1))
if dt2 - dt <= dt - dt1:
self[dt2] = "Discovery Day"
else:
self[dt1] = "Discovery Day"
elif self.prov == 'YU' and year >= 1912:
self[date(year, 8, 1) + rd(weekday=MO(+3))] = "Discovery Day"
# Canada Day / Memorial Day (NL)
if self.prov != 'NL' and year >= 1867:
name = "Canada Day"
self[date(year, 7, 1)] = name
if self.observed and date(year, 7, 1).weekday() in (5, 6):
self[date(year, 7, 1) + rd(weekday=MO)] = name + " (Observed)"
elif year >= 1867:
name = "Memorial Day"
self[date(year, 7, 1)] = name
if self.observed and date(year, 7, 1).weekday() in (5, 6):
self[date(year, 7, 1) + rd(weekday=MO)] = name + " (Observed)"
# Nunavut Day
if self.prov == 'NU' and year >= 2001:
self[date(year, 7, 9)] = "Nunavut Day"
if self.observed and date(year, 7, 9).weekday() == 6:
self[date(year, 7, 10)] = "Nunavut Day (Observed)"
elif self.prov == 'NU' and year == 2000:
self[date(2000, 4, 1)] = "Nunavut Day"
# Civic Holiday
if self.prov in ('ON', 'MB', 'NT') and year >= 1900:
self[date(year, 8, 1) + rd(weekday=MO)] = "Civic Holiday"
        elif self.prov in ('AB',) and year >= 1974:
# https://en.wikipedia.org/wiki/Civic_Holiday#Alberta
self[date(year, 8, 1) + rd(weekday=MO)] = "Heritage Day"
        elif self.prov in ('BC',) and year >= 1974:
# https://en.wikipedia.org/wiki/Civic_Holiday
self[date(year, 8, 1) + rd(weekday=MO)] = "British Columbia Day"
        elif self.prov in ('NB',) and year >= 1900:
# https://en.wikipedia.org/wiki/Civic_Holiday
self[date(year, 8, 1) + rd(weekday=MO)] = "New Brunswick Day"
        elif self.prov in ('SK',) and year >= 1900:
# https://en.wikipedia.org/wiki/Civic_Holiday
self[date(year, 8, 1) + rd(weekday=MO)] = "Saskatchewan Day"
# Labour Day
if year >= 1894:
self[date(year, 9, 1) + rd(weekday=MO)] = "Labour Day"
# Thanksgiving
if self.prov not in ('NB', 'NS', 'PE', 'NL') and year >= 1931:
if year == 1935:
# in 1935, Canadian Thanksgiving was moved due to the General
# Election falling on the second Monday of October
# https://books.google.ca/books?id=KcwlQsmheG4C&pg=RA1-PA1940&lpg=RA1-PA1940&dq=canada+thanksgiving+1935&source=bl&ots=j4qYrcfGuY&sig=gxXeAQfXVsOF9fOwjSMswPHJPpM&hl=en&sa=X&ved=0ahUKEwjO0f3J2PjOAhVS4mMKHRzKBLAQ6AEIRDAG#v=onepage&q=canada%20thanksgiving%201935&f=false
self[date(1935, 10, 25)] = "Thanksgiving"
else:
self[date(year, 10, 1) + rd(weekday=MO(+2))] = "Thanksgiving"
# Remembrance Day
name = "Remembrance Day"
provinces = ('ON', 'QC', 'NS', 'NL', 'NT', 'PE', 'SK')
if self.prov not in provinces and year >= 1931:
self[date(year, 11, 11)] = name
elif self.prov in ('NS', 'NL', 'NT', 'PE', 'SK') and year >= 1931:
self[date(year, 11, 11)] = name
if self.observed and date(year, 11, 11).weekday() == 6:
name = name + " (Observed)"
self[date(year, 11, 11) + rd(weekday=MO)] = name
# Christmas Day
if year >= 1867:
self[date(year, 12, 25)] = "Christmas Day"
if self.observed and date(year, 12, 25).weekday() == 5:
self[date(year, 12, 24)] = "Christmas Day (Observed)"
elif self.observed and date(year, 12, 25).weekday() == 6:
self[date(year, 12, 26)] = "Christmas Day (Observed)"
# Boxing Day
if year >= 1867:
name = "Boxing Day"
name_observed = name + " (Observed)"
if self.observed and date(year, 12, 26).weekday() in (5, 6):
self[date(year, 12, 26) + rd(weekday=MO)] = name_observed
elif self.observed and date(year, 12, 26).weekday() == 0:
self[date(year, 12, 27)] = name_observed
else:
self[date(year, 12, 26)] = name
class CA(Canada):
pass
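# Hedged usage sketch (not part of the original module): basic lookups against the
# Canada holidays defined above; the year and province are illustrative only.
if __name__ == '__main__':
    ca_holidays = CA(years=[2016], prov='ON')
    assert date(2016, 7, 1) in ca_holidays               # Canada Day
    assert ca_holidays.get('2016-12-25') == "Christmas Day"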
class Colombia(HolidayBase):
# https://es.wikipedia.org/wiki/Anexo:D%C3%ADas_festivos_en_Colombia
def __init__(self, **kwargs):
self.country = 'CO'
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# Fixed date holidays!
# If observed=True and they fall on a weekend they are not observed.
# If observed=False there are 18 holidays
# New Year's Day
if self.observed and date(year, 1, 1).weekday() in WEEKEND:
pass
else:
self[date(year, 1, 1)] = "Año Nuevo [New Year's Day]"
# Labor Day
self[date(year, 5, 1)] = "Día del Trabajo [Labour Day]"
# Independence Day
name = "Día de la Independencia [Independence Day]"
if self.observed and date(year, 7, 20).weekday() in WEEKEND:
pass
else:
self[date(year, 7, 20)] = name
# Battle of Boyaca
self[date(year, 8, 7)] = "Batalla de Boyacá [Battle of Boyacá]"
# Immaculate Conception
if self.observed and date(year, 12, 8).weekday() in WEEKEND:
pass
else:
self[date(year, 12, 8)
] = "La Inmaculada Concepción [Immaculate Conception]"
# Christmas
self[date(year, 12, 25)] = "Navidad [Christmas]"
# Emiliani Law holidays!
# Unless they fall on a Monday they are observed the following monday
# Epiphany
name = "Día de los Reyes Magos [Epiphany]"
if date(year, 1, 6).weekday() == 0 or not self.observed:
self[date(year, 1, 6)] = name
else:
self[date(year, 1, 6) + rd(weekday=MO)] = name + "(Observed)"
# Saint Joseph's Day
name = "Día de San José [Saint Joseph's Day]"
if date(year, 3, 19).weekday() == 0 or not self.observed:
self[date(year, 3, 19)] = name
else:
self[date(year, 3, 19) + rd(weekday=MO)] = name + "(Observed)"
# Saint Peter and Saint Paul's Day
name = "San Pedro y San Pablo [Saint Peter and Saint Paul]"
if date(year, 6, 29).weekday() == 0 or not self.observed:
self[date(year, 6, 29)] = name
else:
self[date(year, 6, 29) + rd(weekday=MO)] = name + "(Observed)"
# Assumption of Mary
name = "La Asunción [Assumption of Mary]"
if date(year, 8, 15).weekday() == 0 or not self.observed:
self[date(year, 8, 15)] = name
else:
self[date(year, 8, 15) + rd(weekday=MO)] = name + "(Observed)"
# Discovery of America
name = "Descubrimiento de América [Discovery of America]"
if date(year, 10, 12).weekday() == 0 or not self.observed:
self[date(year, 10, 12)] = name
else:
self[date(year, 10, 12) + rd(weekday=MO)] = name + "(Observed)"
# All Saints’ Day
name = "Dia de Todos los Santos [All Saint's Day]"
if date(year, 11, 1).weekday() == 0 or not self.observed:
self[date(year, 11, 1)] = name
else:
self[date(year, 11, 1) + rd(weekday=MO)] = name + "(Observed)"
# Independence of Cartagena
name = "Independencia de Cartagena [Independence of Cartagena]"
if date(year, 11, 11).weekday() == 0 or not self.observed:
self[date(year, 11, 11)] = name
else:
self[date(year, 11, 11) + rd(weekday=MO)] = name + "(Observed)"
# Holidays based on Easter
# Maundy Thursday
self[easter(year) + rd(weekday=TH(-1))
] = "Jueves Santo [Maundy Thursday]"
# Good Friday
self[easter(year) + rd(weekday=FR(-1))
] = "Viernes Santo [Good Friday]"
# Holidays based on Easter but are observed the following monday
# (unless they occur on a monday)
# Ascension of Jesus
name = "Ascensión del señor [Ascension of Jesus]"
hdate = easter(year) + rd(days=+39)
if hdate.weekday() == 0 or not self.observed:
self[hdate] = name
else:
self[hdate + rd(weekday=MO)] = name + "(Observed)"
# Corpus Christi
name = "Corpus Christi [Corpus Christi]"
hdate = easter(year) + rd(days=+60)
if hdate.weekday() == 0 or not self.observed:
self[hdate] = name
else:
self[hdate + rd(weekday=MO)] = name + "(Observed)"
# Sacred Heart
name = "Sagrado Corazón [Sacred Heart]"
hdate = easter(year) + rd(days=+68)
if hdate.weekday() == 0 or not self.observed:
self[hdate] = name
else:
self[hdate + rd(weekday=MO)] = name + "(Observed)"
class CO(Colombia):
pass
class Mexico(HolidayBase):
def __init__(self, **kwargs):
self.country = 'MX'
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# New Year's Day
name = "Año Nuevo [New Year's Day]"
self[date(year, 1, 1)] = name
if self.observed and date(year, 1, 1).weekday() == 6:
self[date(year, 1, 1) + rd(days=+1)] = name + " (Observed)"
elif self.observed and date(year, 1, 1).weekday() == 5:
# Add Dec 31st from the previous year without triggering
# the entire year to be added
expand = self.expand
self.expand = False
self[date(year, 1, 1) + rd(days=-1)] = name + " (Observed)"
self.expand = expand
# The next year's observed New Year's Day can be in this year
# when it falls on a Friday (Jan 1st is a Saturday)
if self.observed and date(year, 12, 31).weekday() == 4:
self[date(year, 12, 31)] = name + " (Observed)"
# Constitution Day
name = "Día de la Constitución [Constitution Day]"
if 2006 >= year >= 1917:
self[date(year, 2, 5)] = name
elif year >= 2007:
self[date(year, 2, 1) + rd(weekday=MO(+1))] = name
# Benito Juárez's birthday
name = "Natalicio de Benito Juárez [Benito Juárez's birthday]"
if 2006 >= year >= 1917:
self[date(year, 3, 21)] = name
elif year >= 2007:
self[date(year, 3, 1) + rd(weekday=MO(+3))] = name
# Labor Day
        if year >= 1923:
            name = "Día del Trabajo [Labour Day]"
            self[date(year, 5, 1)] = name
            if self.observed and date(year, 5, 1).weekday() == 5:
                self[date(year, 5, 1) + rd(days=-1)] = name + " (Observed)"
            elif self.observed and date(year, 5, 1).weekday() == 6:
                self[date(year, 5, 1) + rd(days=+1)] = name + " (Observed)"
# Independence Day
name = "Día de la Independencia [Independence Day]"
self[date(year, 9, 16)] = name
if self.observed and date(year, 9, 16).weekday() == 5:
self[date(year, 9, 16) + rd(days=-1)] = name + " (Observed)"
elif self.observed and date(year, 9, 16).weekday() == 6:
self[date(year, 9, 16) + rd(days=+1)] = name + " (Observed)"
# Revolution Day
name = "Día de la Revolución [Revolution Day]"
if 2006 >= year >= 1917:
self[date(year, 11, 20)] = name
elif year >= 2007:
self[date(year, 11, 1) + rd(weekday=MO(+3))] = name
# Change of Federal Government
# Every six years--next observance 2018
name = "Transmisión del Poder Ejecutivo Federal"
name += " [Change of Federal Government]"
if (2018 - year) % 6 == 0:
self[date(year, 12, 1)] = name
if self.observed and date(year, 12, 1).weekday() == 5:
self[date(year, 12, 1) + rd(days=-1)] = name + " (Observed)"
elif self.observed and date(year, 12, 1).weekday() == 6:
self[date(year, 12, 1) + rd(days=+1)] = name + " (Observed)"
# Christmas
self[date(year, 12, 25)] = "Navidad [Christmas]"
if self.observed and date(year, 12, 25).weekday() == 5:
self[date(year, 12, 25) + rd(days=-1)] = name + " (Observed)"
elif self.observed and date(year, 12, 25).weekday() == 6:
self[date(year, 12, 25) + rd(days=+1)] = name + " (Observed)"
class MX(Mexico):
pass
class UnitedStates(HolidayBase):
# https://en.wikipedia.org/wiki/Public_holidays_in_the_United_States
STATES = ['AL', 'AK', 'AS', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'DC', 'FL',
'GA', 'GU', 'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME',
'MD', 'MH', 'MA', 'MI', 'FM', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV',
'NH', 'NJ', 'NM', 'NY', 'NC', 'ND', 'MP', 'OH', 'OK', 'OR', 'PW',
'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'VI',
'WA', 'WV', 'WI', 'WY']
def __init__(self, **kwargs):
self.country = 'US'
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# New Year's Day
if year > 1870:
name = "New Year's Day"
self[date(year, 1, 1)] = name
if self.observed and date(year, 1, 1).weekday() == 6:
self[date(year, 1, 1) + rd(days=+1)] = name + " (Observed)"
elif self.observed and date(year, 1, 1).weekday() == 5:
# Add Dec 31st from the previous year without triggering
# the entire year to be added
expand = self.expand
self.expand = False
self[date(year, 1, 1) + rd(days=-1)] = name + " (Observed)"
self.expand = expand
# The next year's observed New Year's Day can be in this year
# when it falls on a Friday (Jan 1st is a Saturday)
if self.observed and date(year, 12, 31).weekday() == 4:
self[date(year, 12, 31)] = name + " (Observed)"
# Epiphany
if self.state == 'PR':
self[date(year, 1, 6)] = "Epiphany"
# Three King's Day
if self.state == 'VI':
self[date(year, 1, 6)] = "Three King's Day"
# Lee Jackson Day
name = "Lee Jackson Day"
if self.state == 'VA' and year >= 2000:
dt = date(year, 1, 1) + rd(weekday=MO(+3)) + rd(weekday=FR(-1))
self[dt] = name
elif self.state == 'VA' and year >= 1983:
self[date(year, 1, 1) + rd(weekday=MO(+3))] = name
elif self.state == 'VA' and year >= 1889:
self[date(year, 1, 19)] = name
# Inauguration Day
if self.state in ('DC', 'LA', 'MD', 'VA') and year >= 1789:
name = "Inauguration Day"
if (year - 1789) % 4 == 0 and year >= 1937:
self[date(year, 1, 20)] = name
if date(year, 1, 20).weekday() == 6:
self[date(year, 1, 21)] = name + " (Observed)"
elif (year - 1789) % 4 == 0:
self[date(year, 3, 4)] = name
if date(year, 3, 4).weekday() == 6:
self[date(year, 3, 5)] = name + " (Observed)"
# Martin Luther King, Jr. Day
if year >= 1986:
name = "Martin Luther King, Jr. Day"
if self.state == 'AL':
name = "Robert E. Lee/Martin Luther King Birthday"
elif self.state in ('AS', 'MS'):
name = ("Dr. Martin Luther King Jr. "
"and Robert E. Lee's Birthdays")
elif self.state in ('AZ', 'NH'):
name = "Dr. Martin Luther King Jr./Civil Rights Day"
elif self.state == 'GA' and year < 2012:
name = "Robert E. Lee's Birthday"
elif self.state == 'ID' and year >= 2006:
name = "Martin Luther King, Jr. - Idaho Human Rights Day"
self[date(year, 1, 1) + rd(weekday=MO(+3))] = name
# Lincoln's Birthday
name = "Lincoln's Birthday"
if (self.state in ('CT', 'IL', 'IA', 'NJ', 'NY') and year >= 1971) \
or (self.state == 'CA' and year >= 1971 and year <= 2009):
self[date(year, 2, 12)] = name
if self.observed and date(year, 2, 12).weekday() == 5:
self[date(year, 2, 11)] = name + " (Observed)"
elif self.observed and date(year, 2, 12).weekday() == 6:
self[date(year, 2, 13)] = name + " (Observed)"
# Susan B. Anthony Day
if (self.state == 'CA' and year >= 2014) \
or (self.state == 'FL' and year >= 2011) \
or (self.state == 'NY' and year >= 2004) \
or (self.state == 'WI' and year >= 1976):
self[date(year, 2, 15)] = "Susan B. Anthony Day"
# Washington's Birthday
name = "Washington's Birthday"
if self.state == 'AL':
name = "George Washington/Thomas Jefferson Birthday"
elif self.state == 'AS':
name = "George Washington's Birthday and Daisy Gatson Bates Day"
elif self.state in ('PR', 'VI'):
name = "Presidents' Day"
if self.state not in ('DE', 'FL', 'GA', 'NM', 'PR'):
if year > 1970:
self[date(year, 2, 1) + rd(weekday=MO(+3))] = name
elif year >= 1879:
self[date(year, 2, 22)] = name
elif self.state == 'GA':
if date(year, 12, 24).weekday() != 2:
self[date(year, 12, 24)] = name
else:
self[date(year, 12, 26)] = name
elif self.state in ('PR', 'VI'):
self[date(year, 2, 1) + rd(weekday=MO(+3))] = name
# Mardi Gras
if self.state == 'LA' and year >= 1857:
self[easter(year) + rd(days=-47)] = "Mardi Gras"
# Guam Discovery Day
if self.state == 'GU' and year >= 1970:
self[date(year, 3, 1) + rd(weekday=MO)] = "Guam Discovery Day"
# Casimir Pulaski Day
if self.state == 'IL' and year >= 1978:
self[date(year, 3, 1) + rd(weekday=MO)] = "Casimir Pulaski Day"
# Texas Independence Day
if self.state == 'TX' and year >= 1874:
self[date(year, 3, 2)] = "Texas Independence Day"
# Town Meeting Day
if self.state == 'VT' and year >= 1800:
self[date(year, 3, 1) + rd(weekday=TU)] = "Town Meeting Day"
# Evacuation Day
if self.state == 'MA' and year >= 1901:
name = "Evacuation Day"
self[date(year, 3, 17)] = name
if date(year, 3, 17).weekday() in (5, 6):
self[date(year, 3, 17) + rd(weekday=MO)] = name + " (Observed)"
# Emancipation Day
if self.state == 'PR':
self[date(year, 3, 22)] = "Emancipation Day"
if self.observed and date(year, 3, 22).weekday() == 6:
self[date(year, 3, 23)] = "Emancipation Day (Observed)"
# Prince Jonah Kuhio Kalanianaole Day
if self.state == 'HI' and year >= 1949:
name = "Prince Jonah Kuhio Kalanianaole Day"
self[date(year, 3, 26)] = name
if self.observed and date(year, 3, 26).weekday() == 5:
self[date(year, 3, 25)] = name + " (Observed)"
elif self.observed and date(year, 3, 26).weekday() == 6:
self[date(year, 3, 27)] = name + " (Observed)"
# Steward's Day
name = "Steward's Day"
if self.state == 'AK' and year >= 1955:
self[date(year, 4, 1) + rd(days=-1, weekday=MO(-1))] = name
elif self.state == 'AK' and year >= 1918:
self[date(year, 3, 30)] = name
# César Chávez Day
name = "César Chávez Day"
if self.state == 'CA' and year >= 1995:
self[date(year, 3, 31)] = name
if self.observed and date(year, 3, 31).weekday() == 6:
self[date(year, 4, 1)] = name + " (Observed)"
elif self.state == 'TX' and year >= 2000:
self[date(year, 3, 31)] = name
# Transfer Day
if self.state == 'VI':
self[date(year, 3, 31)] = "Transfer Day"
# Emancipation Day
if self.state == 'DC' and year >= 2005:
name = "Emancipation Day"
self[date(year, 4, 16)] = name
if self.observed and date(year, 4, 16).weekday() == 5:
self[date(year, 4, 15)] = name + " (Observed)"
elif self.observed and date(year, 4, 16).weekday() == 6:
self[date(year, 4, 17)] = name + " (Observed)"
# Patriots' Day
if self.state in ('ME', 'MA') and year >= 1969:
self[date(year, 4, 1) + rd(weekday=MO(+3))] = "Patriots' Day"
elif self.state in ('ME', 'MA') and year >= 1894:
self[date(year, 4, 19)] = "Patriots' Day"
# Holy Thursday
if self.state == 'VI':
self[easter(year) + rd(weekday=TH(-1))] = "Holy Thursday"
# Good Friday
if self.state in ('CT', 'DE', 'GU', 'IN', 'KY', 'LA',
'NJ', 'NC', 'PR', 'TN', 'TX', 'VI'):
self[easter(year) + rd(weekday=FR(-1))] = "Good Friday"
# Easter Monday
if self.state == 'VI':
self[easter(year) + rd(weekday=MO)] = "Easter Monday"
# Confederate Memorial Day
name = "Confederate Memorial Day"
if self.state in ('AL', 'GA', 'MS', 'SC') and year >= 1866:
if self.state == 'GA' and year >= 2016:
name = "State Holiday"
self[date(year, 4, 1) + rd(weekday=MO(+4))] = name
elif self.state == 'TX' and year >= 1931:
self[date(year, 1, 19)] = name
# San Jacinto Day
if self.state == 'TX' and year >= 1875:
self[date(year, 4, 21)] = "San Jacinto Day"
# Arbor Day
if self.state == 'NE' and year >= 1989:
self[date(year, 4, 30) + rd(weekday=FR(-1))] = "Arbor Day"
elif self.state == 'NE' and year >= 1875:
self[date(year, 4, 22)] = "Arbor Day"
# Primary Election Day
if self.state == 'IN' and \
((year >= 2006 and year % 2 == 0) or year >= 2015):
dt = date(year, 5, 1) + rd(weekday=MO)
self[dt + rd(days=+1)] = "Primary Election Day"
# Truman Day
if self.state == 'MO' and year >= 1949:
name = "Truman Day"
self[date(year, 5, 8)] = name
if self.observed and date(year, 5, 8).weekday() == 5:
self[date(year, 5, 7)] = name + " (Observed)"
elif self.observed and date(year, 5, 8).weekday() == 6:
self[date(year, 5, 10)] = name + " (Observed)"
# Memorial Day
if year > 1970:
self[date(year, 5, 31) + rd(weekday=MO(-1))] = "Memorial Day"
elif year >= 1888:
self[date(year, 5, 30)] = "Memorial Day"
# Jefferson Davis Birthday
name = "Jefferson Davis Birthday"
if self.state == 'AL' and year >= 1890:
self[date(year, 6, 1) + rd(weekday=MO)] = name
# Kamehameha Day
if self.state == 'HI' and year >= 1872:
self[date(year, 6, 11)] = "Kamehameha Day"
if self.observed and year >= 2011:
if date(year, 6, 11).weekday() == 5:
self[date(year, 6, 10)] = "Kamehameha Day (Observed)"
elif date(year, 6, 11).weekday() == 6:
self[date(year, 6, 12)] = "Kamehameha Day (Observed)"
# Emancipation Day In Texas
if self.state == 'TX' and year >= 1980:
self[date(year, 6, 19)] = "Emancipation Day In Texas"
# West Virginia Day
name = "West Virginia Day"
if self.state == 'WV' and year >= 1927:
self[date(year, 6, 20)] = name
if self.observed and date(year, 6, 20).weekday() == 5:
self[date(year, 6, 19)] = name + " (Observed)"
elif self.observed and date(year, 6, 20).weekday() == 6:
self[date(year, 6, 21)] = name + " (Observed)"
# Emancipation Day in US Virgin Islands
if self.state == 'VI':
self[date(year, 7, 3)] = "Emancipation Day"
# Independence Day
if year > 1870:
name = "Independence Day"
self[date(year, 7, 4)] = name
if self.observed and date(year, 7, 4).weekday() == 5:
self[date(year, 7, 4) + rd(days=-1)] = name + " (Observed)"
elif self.observed and date(year, 7, 4).weekday() == 6:
self[date(year, 7, 4) + rd(days=+1)] = name + " (Observed)"
# Liberation Day (Guam)
if self.state == 'GU' and year >= 1945:
self[date(year, 7, 21)] = "Liberation Day (Guam)"
# Pioneer Day
if self.state == 'UT' and year >= 1849:
name = "Pioneer Day"
self[date(year, 7, 24)] = name
if self.observed and date(year, 7, 24).weekday() == 5:
self[date(year, 7, 24) + rd(days=-1)] = name + " (Observed)"
elif self.observed and date(year, 7, 24).weekday() == 6:
self[date(year, 7, 24) + rd(days=+1)] = name + " (Observed)"
# Constitution Day
if self.state == 'PR':
self[date(year, 7, 25)] = "Constitution Day"
if self.observed and date(year, 7, 25).weekday() == 6:
self[date(year, 7, 26)] = "Constitution Day (Observed)"
# Victory Day
if self.state == 'RI' and year >= 1948:
self[date(year, 8, 1) + rd(weekday=MO(+2))] = "Victory Day"
# Statehood Day (Hawaii)
if self.state == 'HI' and year >= 1959:
self[date(year, 8, 1) + rd(weekday=FR(+3))] = "Statehood Day"
# Bennington Battle Day
if self.state == 'VT' and year >= 1778:
name = "Bennington Battle Day"
self[date(year, 8, 16)] = name
if self.observed and date(year, 8, 16).weekday() == 5:
self[date(year, 8, 15)] = name + " (Observed)"
elif self.observed and date(year, 8, 16).weekday() == 6:
self[date(year, 8, 17)] = name + " (Observed)"
# Lyndon Baines Johnson Day
if self.state == 'TX' and year >= 1973:
self[date(year, 8, 27)] = "Lyndon Baines Johnson Day"
# Labor Day
if year >= 1894:
self[date(year, 9, 1) + rd(weekday=MO)] = "Labor Day"
# Columbus Day
if self.state not in ('AK', 'DE', 'FL', 'HI', 'NV'):
if self.state == 'SD':
name = "Native American Day"
elif self.state == 'VI':
name = "Columbus Day and Puerto Rico Friendship Day"
else:
name = "Columbus Day"
if year >= 1970:
self[date(year, 10, 1) + rd(weekday=MO(+2))] = name
elif year >= 1937:
self[date(year, 10, 12)] = name
# Alaska Day
        if self.state == 'AK' and year >= 1867:
            name = "Alaska Day"
            self[date(year, 10, 18)] = name
            if self.observed and date(year, 10, 18).weekday() == 5:
                self[date(year, 10, 18) + rd(days=-1)] = name + " (Observed)"
            elif self.observed and date(year, 10, 18).weekday() == 6:
                self[date(year, 10, 18) + rd(days=+1)] = name + " (Observed)"
# Nevada Day
if self.state == 'NV' and year >= 1933:
dt = date(year, 10, 31)
if year >= 2000:
dt += rd(weekday=FR(-1))
self[dt] = "Nevada Day"
if self.observed and dt.weekday() == 5:
self[dt + rd(days=-1)] = "Nevada Day (Observed)"
elif self.observed and dt.weekday() == 6:
self[dt + rd(days=+1)] = "Nevada Day (Observed)"
# Liberty Day
if self.state == 'VI':
self[date(year, 11, 1)] = "Liberty Day"
# Election Day
if (self.state in ('DE', 'HI', 'IL', 'IN', 'LA',
'MT', 'NH', 'NJ', 'NY', 'WV') and
year >= 2008 and year % 2 == 0) \
or (self.state in ('IN', 'NY') and year >= 2015):
dt = date(year, 11, 1) + rd(weekday=MO)
self[dt + rd(days=+1)] = "Election Day"
# All Souls' Day
if self.state == 'GU':
self[date(year, 11, 2)] = "All Souls' Day"
# Veterans Day
if year > 1953:
name = "Veterans Day"
else:
name = "Armistice Day"
if 1978 > year > 1970:
self[date(year, 10, 1) + rd(weekday=MO(+4))] = name
elif year >= 1938:
self[date(year, 11, 11)] = name
if self.observed and date(year, 11, 11).weekday() == 5:
self[date(year, 11, 11) + rd(days=-1)] = name + " (Observed)"
elif self.observed and date(year, 11, 11).weekday() == 6:
self[date(year, 11, 11) + rd(days=+1)] = name + " (Observed)"
# Discovery Day
if self.state == 'PR':
self[date(year, 11, 19)] = "Discovery Day"
if self.observed and date(year, 11, 19).weekday() == 6:
self[date(year, 11, 20)] = "Discovery Day (Observed)"
# Thanksgiving
if year > 1870:
self[date(year, 11, 1) + rd(weekday=TH(+4))] = "Thanksgiving"
# Day After Thanksgiving
# Friday After Thanksgiving
# Lincoln's Birthday
# American Indian Heritage Day
# Family Day
# New Mexico Presidents' Day
if (self.state in ('DE', 'FL', 'NH', 'NC', 'OK', 'TX', 'WV') and
year >= 1975) \
or (self.state == 'IN' and year >= 2010) \
or (self.state == 'MD' and year >= 2008) \
or self.state in ('NV', 'NM'):
if self.state in ('DE', 'NH', 'NC', 'OK', 'WV'):
name = "Day After Thanksgiving"
elif self.state in ('FL', 'TX'):
name = "Friday After Thanksgiving"
elif self.state == 'IN':
name = "Lincoln's Birthday"
elif self.state == 'MD' and year >= 2008:
name = "American Indian Heritage Day"
elif self.state == 'NV':
name = "Family Day"
elif self.state == 'NM':
name = "Presidents' Day"
dt = date(year, 11, 1) + rd(weekday=TH(+4))
self[dt + rd(days=+1)] = name
# Robert E. Lee's Birthday
if self.state == 'GA' and year >= 1986:
if year >= 2016:
name = "State Holiday"
else:
name = "Robert E. Lee's Birthday"
self[date(year, 11, 29) + rd(weekday=FR(-1))] = name
# Lady of Camarin Day
if self.state == 'GU':
self[date(year, 12, 8)] = "Lady of Camarin Day"
# Christmas Eve
if self.state == 'AS' or \
(self.state in ('KS', 'MI', 'NC') and year >= 2013) or \
(self.state == 'TX' and year >= 1981) or \
(self.state == 'WI' and year >= 2012):
name = "Christmas Eve"
self[date(year, 12, 24)] = name
name = name + " (Observed)"
# If on Friday, observed on Thursday
if self.observed and date(year, 12, 24).weekday() == 4:
self[date(year, 12, 24) + rd(days=-1)] = name
# If on Saturday or Sunday, observed on Friday
elif self.observed and date(year, 12, 24).weekday() in (5, 6):
self[date(year, 12, 24) + rd(weekday=FR(-1))] = name
# Christmas Day
if year > 1870:
name = "Christmas Day"
self[date(year, 12, 25)] = "Christmas Day"
if self.observed and date(year, 12, 25).weekday() == 5:
self[date(year, 12, 25) + rd(days=-1)] = name + " (Observed)"
elif self.observed and date(year, 12, 25).weekday() == 6:
self[date(year, 12, 25) + rd(days=+1)] = name + " (Observed)"
# Day After Christmas
if self.state == 'NC' and year >= 2013:
name = "Day After Christmas"
self[date(year, 12, 26)] = name
name = name + " (Observed)"
# If on Saturday or Sunday, observed on Monday
if self.observed and date(year, 12, 26).weekday() in (5, 6):
self[date(year, 12, 26) + rd(weekday=MO)] = name
# If on Monday, observed on Tuesday
elif self.observed and date(year, 12, 26).weekday() == 0:
self[date(year, 12, 26) + rd(days=+1)] = name
elif self.state == 'TX' and year >= 1981:
self[date(year, 12, 26)] = "Day After Christmas"
elif self.state == 'VI':
self[date(year, 12, 26)] = "Christmas Second Day"
# New Year's Eve
if (self.state in ('KY', 'MI') and year >= 2013) or \
(self.state == 'WI' and year >= 2012):
name = "New Year's Eve"
self[date(year, 12, 31)] = name
if self.observed and date(year, 12, 31).weekday() == 5:
self[date(year, 12, 30)] = name + " (Observed)"
class US(UnitedStates):
pass
class NewZealand(HolidayBase):
PROVINCES = ['NTL', 'AUK', 'TKI', 'HKB', 'WGN', 'MBH', 'NSN', 'CAN',
'STC', 'WTL', 'OTA', 'STL', 'CIT']
def __init__(self, **kwargs):
self.country = 'NZ'
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# Bank Holidays Act 1873
# The Employment of Females Act 1873
# Factories Act 1894
# Industrial Conciliation and Arbitration Act 1894
# Labour Day Act 1899
# Anzac Day Act 1920, 1949, 1956
# New Zealand Day Act 1973
# Waitangi Day Act 1960, 1976
# Sovereign's Birthday Observance Act 1937, 1952
# Holidays Act 1981, 2003
if year < 1894:
return
# New Year's Day
name = "New Year's Day"
jan1 = date(year, 1, 1)
self[jan1] = name
if self.observed and jan1.weekday() in WEEKEND:
self[date(year, 1, 3)] = name + " (Observed)"
name = "Day after New Year's Day"
jan2 = date(year, 1, 2)
self[jan2] = name
if self.observed and jan2.weekday() in WEEKEND:
self[date(year, 1, 4)] = name + " (Observed)"
# Waitangi Day
if year > 1973:
name = "New Zealand Day"
if year > 1976:
name = "Waitangi Day"
feb6 = date(year, 2, 6)
self[feb6] = name
if self.observed and year >= 2014 and feb6.weekday() in WEEKEND:
self[feb6 + rd(weekday=MO)] = name + " (Observed)"
# Easter
self[easter(year) + rd(weekday=FR(-1))] = "Good Friday"
self[easter(year) + rd(weekday=MO)] = "Easter Monday"
# Anzac Day
if year > 1920:
name = "Anzac Day"
apr25 = date(year, 4, 25)
self[apr25] = name
if self.observed and year >= 2014 and apr25.weekday() in WEEKEND:
self[apr25 + rd(weekday=MO)] = name + " (Observed)"
# Sovereign's Birthday
if year >= 1952:
name = "Queen's Birthday"
elif year > 1901:
name = "King's Birthday"
if year == 1952:
self[date(year, 6, 2)] = name # Elizabeth II
elif year > 1937:
self[date(year, 6, 1) + rd(weekday=MO(+1))] = name # EII & GVI
elif year == 1937:
self[date(year, 6, 9)] = name # George VI
elif year == 1936:
self[date(year, 6, 23)] = name # Edward VIII
elif year > 1911:
self[date(year, 6, 3)] = name # George V
elif year > 1901:
# http://paperspast.natlib.govt.nz/cgi-bin/paperspast?a=d&d=NZH19091110.2.67
self[date(year, 11, 9)] = name # Edward VII
# Labour Day
name = "Labour Day"
if year >= 1910:
self[date(year, 10, 1) + rd(weekday=MO(+4))] = name
elif year > 1899:
self[date(year, 10, 1) + rd(weekday=WE(+2))] = name
# Christmas Day
name = "Christmas Day"
dec25 = date(year, 12, 25)
self[dec25] = name
if self.observed and dec25.weekday() in WEEKEND:
self[date(year, 12, 27)] = name + " (Observed)"
# Boxing Day
name = "Boxing Day"
dec26 = date(year, 12, 26)
self[dec26] = name
if self.observed and dec26.weekday() in WEEKEND:
self[date(year, 12, 28)] = name + " (Observed)"
# Province Anniversary Day
if self.prov in ('NTL', 'Northland', 'AUK', 'Auckland'):
if 1963 < year <= 1973 and self.prov in ('NTL', 'Northland'):
name = "Waitangi Day"
dt = date(year, 2, 6)
else:
name = "Auckland Anniversary Day"
dt = date(year, 1, 29)
if dt.weekday() in (TUESDAY, WEDNESDAY, THURSDAY):
self[dt + rd(weekday=MO(-1))] = name
else:
self[dt + rd(weekday=MO)] = name
elif self.prov in ('TKI', 'Taranaki', 'New Plymouth'):
name = "Taranaki Anniversary Day"
self[date(year, 3, 1) + rd(weekday=MO(+2))] = name
elif self.prov in ('HKB', "Hawke's Bay"):
name = "Hawke's Bay Anniversary Day"
labour_day = date(year, 10, 1) + rd(weekday=MO(+4))
self[labour_day + rd(weekday=FR(-1))] = name
elif self.prov in ('WGN', 'Wellington'):
name = "Wellington Anniversary Day"
jan22 = date(year, 1, 22)
if jan22.weekday() in (TUESDAY, WEDNESDAY, THURSDAY):
self[jan22 + rd(weekday=MO(-1))] = name
else:
self[jan22 + rd(weekday=MO)] = name
elif self.prov in ('MBH', 'Marlborough'):
name = "Marlborough Anniversary Day"
labour_day = date(year, 10, 1) + rd(weekday=MO(+4))
self[labour_day + rd(weeks=1)] = name
elif self.prov in ('NSN', 'Nelson'):
name = "Nelson Anniversary Day"
feb1 = date(year, 2, 1)
if feb1.weekday() in (TUESDAY, WEDNESDAY, THURSDAY):
self[feb1 + rd(weekday=MO(-1))] = name
else:
self[feb1 + rd(weekday=MO)] = name
elif self.prov in ('CAN', 'Canterbury'):
name = "Canterbury Anniversary Day"
showday = date(year, 11, 1) + rd(weekday=TU) + rd(weekday=FR(+2))
self[showday] = name
elif self.prov in ('STC', 'South Canterbury'):
name = "South Canterbury Anniversary Day"
dominion_day = date(year, 9, 1) + rd(weekday=MO(4))
self[dominion_day] = name
elif self.prov in ('WTL', 'Westland'):
name = "Westland Anniversary Day"
dec1 = date(year, 12, 1)
# Observance varies?!?!
if year == 2005: # special case?!?!
self[date(year, 12, 5)] = name
elif dec1.weekday() in (TUESDAY, WEDNESDAY, THURSDAY):
self[dec1 + rd(weekday=MO(-1))] = name
else:
self[dec1 + rd(weekday=MO)] = name
elif self.prov in ('OTA', 'Otago'):
name = "Otago Anniversary Day"
mar23 = date(year, 3, 23)
# there is no easily determined single day of local observance?!?!
if mar23.weekday() in (TUESDAY, WEDNESDAY, THURSDAY):
dt = mar23 + rd(weekday=MO(-1))
else:
dt = mar23 + rd(weekday=MO)
if dt == easter(year) + rd(weekday=MO): # Avoid Easter Monday
dt += rd(days=1)
self[dt] = name
elif self.prov in ('STL', 'Southland'):
name = "Southland Anniversary Day"
jan17 = date(year, 1, 17)
if year > 2011:
self[easter(year) + rd(weekday=TU)] = name
else:
if jan17.weekday() in (TUESDAY, WEDNESDAY, THURSDAY):
self[jan17 + rd(weekday=MO(-1))] = name
else:
self[jan17 + rd(weekday=MO)] = name
elif self.prov in ('CIT', 'Chatham Islands'):
name = "Chatham Islands Anniversary Day"
nov30 = date(year, 11, 30)
if nov30.weekday() in (TUESDAY, WEDNESDAY, THURSDAY):
self[nov30 + rd(weekday=MO(-1))] = name
else:
self[nov30 + rd(weekday=MO)] = name
class NZ(NewZealand):
pass
class Australia(HolidayBase):
STATES = ['ACT', 'NSW', 'NT', 'QLD', 'SA', 'TAS', 'VIC', 'WA']
def __init__(self, **kwargs):
self.country = 'AU'
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# ACT: Holidays Act 1958
# NSW: Public Holidays Act 2010
# NT: Public Holidays Act 2013
# QLD: Holidays Act 1983
# SA: Holidays Act 1910
# TAS: Statutory Holidays Act 2000
# VIC: Public Holidays Act 1993
# WA: Public and Bank Holidays Act 1972
# New Year's Day
name = "New Year's Day"
jan1 = date(year, 1, 1)
self[jan1] = name
if self.observed and jan1.weekday() in WEEKEND:
self[jan1 + rd(weekday=MO)] = name + " (Observed)"
# Australia Day
jan26 = date(year, 1, 26)
if year >= 1935:
if self.state == 'NSW' and year < 1946:
name = "Anniversary Day"
else:
name = "Australia Day"
self[jan26] = name
if self.observed and year >= 1946 and jan26.weekday() in WEEKEND:
self[jan26 + rd(weekday=MO)] = name + " (Observed)"
elif year >= 1888 and self.state != 'SA':
name = "Anniversary Day"
self[jan26] = name
# Adelaide Cup
if self.state == 'SA':
name = "Adelaide Cup"
if year >= 2006:
# subject to proclamation ?!?!
self[date(year, 3, 1) + rd(weekday=MO(+2))] = name
else:
self[date(year, 3, 1) + rd(weekday=MO(+3))] = name
# Canberra Day
if self.state == 'ACT':
name = "Canberra Day"
self[date(year, 3, 1) + rd(weekday=MO(+2))] = name
# Easter
self[easter(year) + rd(weekday=FR(-1))] = "Good Friday"
if self.state in ('ACT', 'NSW', 'NT', 'QLD', 'SA', 'VIC'):
self[easter(year) + rd(weekday=SA(-1))] = "Easter Saturday"
if self.state == 'NSW':
self[easter(year)] = "Easter Sunday"
self[easter(year) + rd(weekday=MO)] = "Easter Monday"
# Anzac Day
if year > 1920:
name = "Anzac Day"
apr25 = date(year, 4, 25)
self[apr25] = name
if self.observed:
if apr25.weekday() == SATURDAY and self.state in ('WA', 'NT'):
self[apr25 + rd(weekday=MO)] = name + " (Observed)"
elif (apr25.weekday() == SUNDAY and
self.state in ('ACT', 'QLD', 'SA', 'WA', 'NT')):
self[apr25 + rd(weekday=MO)] = name + " (Observed)"
# Western Australia Day
if self.state == 'WA' and year > 1832:
if year >= 2015:
name = "Western Australia Day"
else:
name = "Foundation Day"
self[date(year, 6, 1) + rd(weekday=MO(+1))] = name
# Sovereign's Birthday
if year >= 1952:
name = "Queen's Birthday"
elif year > 1901:
name = "King's Birthday"
if year >= 1936:
name = "Queen's Birthday"
if self.state == 'QLD':
if year == 2012:
self[date(year, 10, 1)] = name
self[date(year, 6, 11)] = "Queen's Diamond Jubilee"
else:
dt = date(year, 6, 1) + rd(weekday=MO(+2))
self[dt] = name
elif self.state == 'WA':
# by proclamation ?!?!
self[date(year, 10, 1) + rd(weekday=MO(-1))] = name
else:
dt = date(year, 6, 1) + rd(weekday=MO(+2))
self[dt] = name
elif year > 1911:
self[date(year, 6, 3)] = name # George V
elif year > 1901:
self[date(year, 11, 9)] = name # Edward VII
# Picnic Day
if self.state == 'NT':
name = "Picnic Day"
self[date(year, 8, 1) + rd(weekday=MO)] = name
# Labour Day
name = "Labour Day"
if self.state in ('NSW', 'ACT', 'SA'):
self[date(year, 10, 1) + rd(weekday=MO)] = name
elif self.state == 'WA':
self[date(year, 3, 1) + rd(weekday=MO)] = name
elif self.state == 'VIC':
self[date(year, 3, 1) + rd(weekday=MO(+2))] = name
elif self.state == 'QLD':
if 2013 <= year <= 2015:
self[date(year, 10, 1) + rd(weekday=MO)] = name
else:
self[date(year, 5, 1) + rd(weekday=MO)] = name
elif self.state == 'NT':
name = "May Day"
self[date(year, 5, 1) + rd(weekday=MO)] = name
elif self.state == 'TAS':
name = "Eight Hours Day"
self[date(year, 3, 1) + rd(weekday=MO(+2))] = name
# Family & Community Day
if self.state == 'ACT':
name = "Family & Community Day"
if 2007 <= year <= 2009:
self[date(year, 11, 1) + rd(weekday=TU)] = name
else:
# First Monday of the September/October school holidays
# moved to the second Monday if this falls on Labour day
# The following formula works until at least 2020
# http://www.cmd.act.gov.au/communication/holidays
labour_day = date(year, 10, 1) + rd(weekday=MO)
if year == 2011:
dt = date(year, 10, 10) + rd(weekday=MO)
else:
dt = date(year, 9, 25) + rd(weekday=MO)
if dt == labour_day:
dt += rd(weekday=MO(+2))
self[dt] = name
# Melbourne Cup
if self.state == 'VIC':
name = "Melbourne Cup"
self[date(year, 11, 1) + rd(weekday=TU)] = name
# Christmas Day
name = "Christmas Day"
dec25 = date(year, 12, 25)
self[dec25] = name
if self.observed and dec25.weekday() in WEEKEND:
self[date(year, 12, 27)] = name + " (Observed)"
# Boxing Day
if self.state == 'SA':
name = "Proclamation Day"
else:
name = "Boxing Day"
dec26 = date(year, 12, 26)
self[dec26] = name
if self.observed and dec26.weekday() in WEEKEND:
self[date(year, 12, 28)] = name + " (Observed)"
class AU(Australia):
pass
class Germany(HolidayBase):
"""Official holidays for Germany in it's current form.
This class doesn't return any holidays before 1990-10-03.
Before that date the current Germany was separated into the "German
Democratic Republic" and the "Federal Republic of Germany" which both had
somewhat different holidays. Since this class is called "Germany" it
doesn't really make sense to include the days from the two former
countries.
    Note that Germany doesn't have rules for holidays that happen on a
    Sunday. Those days are still holidays, but there is no additional
    day to make up for the "lost" day.
    Also note that German holidays are partly declared by each province;
    there are some weird edge cases:
    - "Mariä Himmelfahrt" is only a holiday in Bavaria (BY) if your
      municipality is mostly Catholic, which in turn depends on census data.
      Since we don't have this data, but most municipalities in Bavaria
      *are* mostly Catholic, we count it as a holiday for the whole of Bavaria.
    - There is an "Augsburger Friedensfest" which only exists in the town of
      Augsburg. This is excluded for Bavaria.
    - "Gründonnerstag" (the Thursday before Easter) is not a holiday, but
      pupils don't have to go to school (only in Baden-Württemberg), which is
      handled by adjusting school holidays to include this day. It is
      excluded from our list.
- "Fronleichnam" is a holiday in certain, explicitly defined
municipalities in Saxony (SN) and Thuringia (TH). We exclude it from
both provinces.
"""
PROVINCES = ['BW', 'BY', 'BE', 'BB', 'HB', 'HH', 'HE', 'MV', 'NI', 'NW',
'RP', 'SL', 'SN', 'ST', 'SH', 'TH']
def __init__(self, **kwargs):
self.country = 'DE'
self.prov = kwargs.pop('prov', 'SH')
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
if year <= 1989:
return
if year > 1990:
self[date(year, 1, 1)] = 'Neujahr'
if self.prov in ('BW', 'BY', 'ST'):
self[date(year, 1, 6)] = 'Heilige Drei Könige'
self[easter(year) - rd(days=2)] = 'Karfreitag'
if self.prov == 'BB':
# will always be a Sunday and we have no "observed" rule so
# this is pretty pointless but it's nonetheless an official
# holiday by law
self[easter(year)] = 'Ostern'
self[easter(year) + rd(days=1)] = 'Ostermontag'
self[date(year, 5, 1)] = 'Maifeiertag'
self[easter(year) + rd(days=39)] = 'Christi Himmelfahrt'
if self.prov == 'BB':
# will always be a Sunday and we have no "observed" rule so
# this is pretty pointless but it's nonetheless an official
# holiday by law
self[easter(year) + rd(days=49)] = 'Pfingsten'
self[easter(year) + rd(days=50)] = 'Pfingstmontag'
if self.prov in ('BW', 'BY', 'HE', 'NW', 'RP', 'SL'):
self[easter(year) + rd(days=60)] = 'Fronleichnam'
if self.prov in ('BY', 'SL'):
self[date(year, 8, 15)] = 'Mariä Himmelfahrt'
self[date(year, 10, 3)] = 'Tag der Deutschen Einheit'
if self.prov in ('BB', 'MV', 'SN', 'ST', 'TH'):
self[date(year, 10, 31)] = 'Reformationstag'
if self.prov in ('BW', 'BY', 'NW', 'RP', 'SL'):
self[date(year, 11, 1)] = 'Allerheiligen'
if self.prov == 'SN':
# can be calculated as "last wednesday before year-11-23" which is
# why we need to go back two wednesdays if year-11-23 happens to be
# a wednesday
base_data = date(year, 11, 23)
weekday_delta = WE(-2) if base_data.weekday() == 2 else WE(-1)
self[base_data + rd(weekday=weekday_delta)] = 'Buß- und Bettag'
self[date(year, 12, 25)] = 'Erster Weihnachtstag'
self[date(year, 12, 26)] = 'Zweiter Weihnachtstag'
class DE(Germany):
pass
class Austria(HolidayBase):
PROVINCES = ['B', 'K', 'N', 'O', 'S', 'ST', 'T', 'V', 'W']
def __init__(self, **kwargs):
self.country = 'AT'
self.prov = kwargs.pop('prov', kwargs.pop('state', 'W'))
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# public holidays
self[date(year, 1, 1)] = "Neujahr"
self[date(year, 1, 6)] = "Heilige Drei Könige"
self[easter(year) + rd(weekday=MO)] = "Ostermontag"
self[date(year, 5, 1)] = "Staatsfeiertag"
self[easter(year) + rd(days=39)] = "Christi Himmelfahrt"
self[easter(year) + rd(days=50)] = "Pfingstmontag"
self[easter(year) + rd(days=60)] = "Fronleichnam"
self[date(year, 8, 15)] = "Maria Himmelfahrt"
if 1919 <= year <= 1934:
self[date(year, 11, 12)] = "Nationalfeiertag"
if year >= 1967:
self[date(year, 10, 26)] = "Nationalfeiertag"
self[date(year, 11, 1)] = "Allerheiligen"
self[date(year, 12, 8)] = "Maria Empfängnis"
self[date(year, 12, 25)] = "Christtag"
self[date(year, 12, 26)] = "Stefanitag"
class AT(Austria):
pass
class Denmark(HolidayBase):
# https://en.wikipedia.org/wiki/Public_holidays_in_Denmark
def __init__(self, **kwargs):
self.country = 'DK'
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# Public holidays
self[date(year, 1, 1)] = "Nytårsdag"
self[easter(year) + rd(weekday=TH(-1))] = "Skærtorsdag"
self[easter(year) + rd(weekday=FR(-1))] = "Langfredag"
self[easter(year)] = "Påskedag"
self[easter(year) + rd(weekday=MO)] = "Anden påskedag"
self[easter(year) + rd(weekday=FR(+4))] = "Store bededag"
self[easter(year) + rd(days=39)] = "Kristi himmelfartsdag"
self[easter(year) + rd(days=49)] = "Pinsedag"
self[easter(year) + rd(days=50)] = "Anden pinsedag"
self[date(year, 12, 25)] = "Juledag"
self[date(year, 12, 26)] = "Anden juledag"
class DK(Denmark):
pass
class UnitedKingdom(HolidayBase):
# https://en.wikipedia.org/wiki/Public_holidays_in_the_United_Kingdom
def __init__(self, **kwargs):
self.country = 'UK'
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# New Year's Day
if year >= 1974:
name = "New Year's Day"
self[date(year, 1, 1)] = name
if self.observed and date(year, 1, 1).weekday() == 6:
self[date(year, 1, 1) + rd(days=+1)] = name + " (Observed)"
elif self.observed and date(year, 1, 1).weekday() == 5:
self[date(year, 1, 1) + rd(days=+2)] = name + " (Observed)"
# New Year Holiday
if self.country in ('UK', 'Scotland'):
name = "New Year Holiday"
if self.country == 'UK':
name += " [Scotland]"
self[date(year, 1, 2)] = name
if self.observed and date(year, 1, 2).weekday() in (5, 6):
self[date(year, 1, 2) + rd(days=+2)] = name + " (Observed)"
elif self.observed and date(year, 1, 2).weekday() == 0:
self[date(year, 1, 2) + rd(days=+1)] = name + " (Observed)"
# St. Patrick's Day
if self.country in ('UK', 'Northern Ireland', 'Ireland'):
name = "St. Patrick's Day"
if self.country == 'UK':
name += " [Northern Ireland]"
self[date(year, 3, 17)] = name
if self.observed and date(year, 3, 17).weekday() in (5, 6):
self[date(year, 3, 17) + rd(weekday=MO)] = name + " (Observed)"
# Good Friday
if self.country != 'Ireland':
self[easter(year) + rd(weekday=FR(-1))] = "Good Friday"
# Easter Monday
if self.country != 'Scotland':
name = "Easter Monday"
if self.country == 'UK':
name += " [England, Wales, Northern Ireland]"
self[easter(year) + rd(weekday=MO)] = name
# May Day bank holiday (first Monday in May)
if year >= 1978:
name = "May Day"
if year == 1995:
dt = date(year, 5, 8)
else:
dt = date(year, 5, 1)
if dt.weekday() == 0:
self[dt] = name
elif dt.weekday() == 1:
self[dt + rd(days=+6)] = name
elif dt.weekday() == 2:
self[dt + rd(days=+5)] = name
elif dt.weekday() == 3:
self[dt + rd(days=+4)] = name
elif dt.weekday() == 4:
self[dt + rd(days=+3)] = name
elif dt.weekday() == 5:
self[dt + rd(days=+2)] = name
elif dt.weekday() == 6:
self[dt + rd(days=+1)] = name
# Spring bank holiday (last Monday in May)
if self.country != 'Ireland':
name = "Spring Bank Holiday"
if year == 2012:
self[date(year, 6, 4)] = name
elif year >= 1971:
self[date(year, 5, 31) + rd(weekday=MO(-1))] = name
# June bank holiday (first Monday in June)
if self.country == 'Ireland':
self[date(year, 6, 1) + rd(weekday=MO)] = "June Bank Holiday"
# TT bank holiday (first Friday in June)
if self.country == 'Isle of Man':
self[date(year, 6, 1) + rd(weekday=FR)] = "TT Bank Holiday"
# Tynwald Day
if self.country == 'Isle of Man':
self[date(year, 7, 5)] = "Tynwald Day"
# Battle of the Boyne
if self.country in ('UK', 'Northern Ireland'):
name = "Battle of the Boyne"
if self.country == 'UK':
name += " [Northern Ireland]"
self[date(year, 7, 12)] = name
# Summer bank holiday (first Monday in August)
if self.country in ('UK', 'Scotland', 'Ireland'):
name = "Summer Bank Holiday"
if self.country == 'UK':
name += " [Scotland]"
self[date(year, 8, 1) + rd(weekday=MO)] = name
# Late Summer bank holiday (last Monday in August)
if self.country not in ('Scotland', 'Ireland') and year >= 1971:
name = "Late Summer Bank Holiday"
if self.country == 'UK':
name += " [England, Wales, Northern Ireland]"
self[date(year, 8, 31) + rd(weekday=MO(-1))] = name
# October Bank Holiday (last Monday in October)
if self.country == 'Ireland':
name = "October Bank Holiday"
            self[date(year, 10, 31) + rd(weekday=MO(-1))] = name
# St. Andrew's Day
if self.country in ('UK', 'Scotland'):
name = "St. Andrew's Day"
if self.country == 'UK':
name += " [Scotland]"
self[date(year, 11, 30)] = name
# Christmas Day
name = "Christmas Day"
self[date(year, 12, 25)] = name
if self.observed and date(year, 12, 25).weekday() == 5:
self[date(year, 12, 27)] = name + " (Observed)"
elif self.observed and date(year, 12, 25).weekday() == 6:
self[date(year, 12, 27)] = name + " (Observed)"
# Boxing Day
name = "Boxing Day"
self[date(year, 12, 26)] = name
if self.observed and date(year, 12, 26).weekday() == 5:
self[date(year, 12, 28)] = name + " (Observed)"
elif self.observed and date(year, 12, 26).weekday() == 6:
self[date(year, 12, 28)] = name + " (Observed)"
# Special holidays
if self.country != 'Ireland':
if year == 1977:
self[date(year, 6, 7)] = "Silver Jubilee of Elizabeth II"
elif year == 1981:
self[date(year, 7, 29)] = "Wedding of Charles and Diana"
elif year == 1999:
self[date(year, 12, 31)] = "Millennium Celebrations"
elif year == 2002:
self[date(year, 6, 3)] = "Golden Jubilee of Elizabeth II"
elif year == 2011:
self[date(year, 4, 29)] = "Wedding of William and Catherine"
elif year == 2012:
self[date(year, 6, 5)] = "Diamond Jubilee of Elizabeth II"
class UK(UnitedKingdom):
pass
class England(UnitedKingdom):
def __init__(self, **kwargs):
self.country = 'England'
HolidayBase.__init__(self, **kwargs)
class Wales(UnitedKingdom):
def __init__(self, **kwargs):
self.country = 'Wales'
HolidayBase.__init__(self, **kwargs)
class Scotland(UnitedKingdom):
def __init__(self, **kwargs):
self.country = 'Scotland'
HolidayBase.__init__(self, **kwargs)
class IsleOfMan(UnitedKingdom):
def __init__(self, **kwargs):
self.country = 'Isle of Man'
HolidayBase.__init__(self, **kwargs)
class NorthernIreland(UnitedKingdom):
def __init__(self, **kwargs):
self.country = 'Northern Ireland'
HolidayBase.__init__(self, **kwargs)
class Ireland(UnitedKingdom):
def __init__(self, **kwargs):
self.country = 'Ireland'
HolidayBase.__init__(self, **kwargs)
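# Hedged illustration (not part of the library): the regional subclasses above
# reuse UnitedKingdom._populate, which branches on self.country. For example,
# assuming HolidayBase accepts a `years` keyword argument, Scotland(years=[2020])
# would contain "New Year Holiday" and "St. Andrew's Day" but not "Easter Monday",
# while England(years=[2020]) would contain "Easter Monday" but neither of those.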
class Spain(HolidayBase):
PROVINCES = ['AND', 'ARG', 'AST', 'CAN', 'CAM', 'CAL', 'CAT', 'CVA',
'EXT', 'GAL', 'IBA', 'ICA', 'MAD', 'MUR', 'NAV', 'PVA', 'RIO']
def __init__(self, **kwargs):
self.country = 'ES'
self.prov = kwargs.pop('prov', kwargs.pop('state', ''))
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
self[date(year, 1, 1)] = "Año nuevo"
self[date(year, 1, 6)] = "Epifanía del Senyor"
if self.prov and self.prov in ['CVA', 'MUR', 'MAD', 'NAV', 'PVA']:
self[date(year, 3, 19)] = "San José"
if self.prov and self.prov != 'CAT':
self[easter(year) + rd(weeks=-1, weekday=TH)] = "Jueves Santo"
self[easter(year) + rd(weeks=-1, weekday=FR)] = "Viernes Santo"
if self.prov and self.prov in ['CAT', 'PVA', 'NAV', 'CVA', 'IBA']:
self[easter(year) + rd(weekday=MO)] = "Lunes de Pascua"
self[date(year, 5, 1)] = "Día del Trabajador"
if self.prov and self.prov in ['CAT', 'GAL']:
self[date(year, 6, 24)] = "San Juan"
self[date(year, 8, 15)] = "Assunción de la Virgen"
self[date(year, 11, 1)] = "Todos los Santos"
self[date(year, 12, 6)] = "Día de la constitución Española"
self[date(year, 12, 8)] = "La Inmaculada Concepción"
self[date(year, 12, 25)] = "Navidad"
if self.prov and self.prov in ['CAT', 'IBA']:
self[date(year, 12, 26)] = "San Esteban"
# Provinces festive day
if self.prov:
if self.prov == 'AND':
self[date(year, 2, 28)] = "Día de Andalucia"
elif self.prov == 'ARG':
self[date(year, 4, 23)] = "Día de San Jorge"
elif self.prov == 'AST':
self[date(year, 3, 8)] = "Día de Asturias"
elif self.prov == 'CAN':
self[date(year, 2, 28)] = "Día de la Montaña"
elif self.prov == 'CAM':
self[date(year, 2, 28)] = "Día de Castilla - La Mancha"
elif self.prov == 'CAL':
self[date(year, 4, 23)] = "Día de Castilla y Leon"
elif self.prov == 'CAT':
self[date(year, 9, 11)] = "Día Nacional de Catalunya"
elif self.prov == 'CVA':
self[date(year, 10, 9)] = "Día de la Comunidad Valenciana"
elif self.prov == 'EXT':
self[date(year, 9, 8)] = "Día de Extremadura"
elif self.prov == 'GAL':
self[date(year, 7, 25)] = "Día Nacional de Galicia"
elif self.prov == 'IBA':
self[date(year, 3, 1)] = "Día de las Islas Baleares"
elif self.prov == 'ICA':
self[date(year, 5, 30)] = "Día de Canarias"
elif self.prov == 'MAD':
self[date(year, 5, 2)] = "Día de Comunidad De Madrid"
elif self.prov == 'MUR':
self[date(year, 6, 9)] = "Día de la Región de Murcia"
elif self.prov == 'NAV':
self[date(year, 9, 27)] = "Día de Navarra"
elif self.prov == 'PVA':
self[date(year, 10, 25)] = "Día del Páis Vasco"
elif self.prov == 'RIO':
self[date(year, 6, 9)] = "Día de La Rioja"
class ES(Spain):
pass
class EuropeanCentralBank(HolidayBase):
# https://en.wikipedia.org/wiki/TARGET2
# http://www.ecb.europa.eu/press/pr/date/2000/html/pr001214_4.en.html
def __init__(self, **kwargs):
self.country = 'EU'
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
self[date(year, 1, 1)] = "New Year's Day"
e = easter(year)
self[e - rd(days=2)] = "Good Friday"
self[e + rd(days=1)] = "Easter Monday"
self[date(year, 5, 1)] = "1 May (Labour Day)"
self[date(year, 12, 25)] = "Christmas Day"
self[date(year, 12, 26)] = "26 December"
class ECB(EuropeanCentralBank):
pass
class TAR(EuropeanCentralBank):
pass
class Czech(HolidayBase):
# https://en.wikipedia.org/wiki/Public_holidays_in_the_Czech_Republic
def __init__(self, **kwargs):
self.country = 'CZ'
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
self[date(year, 1, 1)] = "Den obnovy samostatného českého státu" \
if year >= 2000 else \
"Nový rok"
e = easter(year)
if year <= 1951 or year >= 2016:
self[e - rd(days=2)] = "Velký pátek"
self[e + rd(days=1)] = "Velikonoční pondělí"
if year >= 1951:
self[date(year, 5, 1)] = "Svátek práce"
if year >= 1992:
self[date(year, 5, 8)] = "Den vítězství"
elif year >= 1947:
self[date(year, 5, 9)] = "Den vítězství nad hitlerovským fašismem"
if year >= 1951:
self[date(year, 7, 5)] = "Den slovanských věrozvěstů " \
"Cyrila a Metoděje"
self[date(year, 7, 6)] = "Den upálení mistra Jana Husa"
if year >= 2000:
self[date(year, 9, 28)] = "Den české státnosti"
if year >= 1951:
self[date(year, 10, 28)] = "Den vzniku samostatného " \
"československého státu"
if year >= 1990:
self[date(year, 11, 17)] = "Den boje za svobodu a demokracii"
if year >= 1990:
self[date(year, 12, 24)] = "Štědrý den"
if year >= 1951:
self[date(year, 12, 25)] = "1. svátek vánoční"
self[date(year, 12, 26)] = "2. svátek vánoční"
class CZ(Czech):
pass
class Portugal(HolidayBase):
# https://en.wikipedia.org/wiki/Public_holidays_in_Portugal
def __init__(self, **kwargs):
self.country = 'PT'
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
self[date(year, 1, 1)] = "Ano Novo"
e = easter(year)
        # Carnival is no longer a holiday, but some companies let workers off.
        # @todo recollect the years in which it was a public holiday
# self[e - rd(days=47)] = "Carnaval"
self[e - rd(days=2)] = "Sexta-feira Santa"
self[e] = "Páscoa"
# Revoked holidays in 2013–2015
if year < 2013 or year > 2015:
self[e + rd(days=60)] = "Corpo de Deus"
self[date(year, 10, 5)] = "Implantação da República"
self[date(year, 11, 1)] = "Dia de Todos os Santos"
self[date(year, 12, 1)] = "Restauração da Independência"
self[date(year, 4, 25)] = "Dia da Liberdade"
self[date(year, 5, 1)] = "Dia do Trabalhador"
self[date(year, 6, 10)] = "Dia de Portugal"
self[date(year, 8, 15)] = "Assunção de Nossa Senhora"
self[date(year, 12, 8)] = "Imaculada Conceição"
self[date(year, 12, 25)] = "Christmas Day"
class PT(Portugal):
pass
class PortugalExt(Portugal):
"""
Adds extended days that most people have as a bonus from their companies:
- Carnival
    - the day before and after Christmas
    - the day before the new year
    - Lisbon's city holiday
"""
def _populate(self, year):
super(PortugalExt, self)._populate(year)
e = easter(year)
self[e - rd(days=47)] = "Carnaval"
self[date(year, 12, 24)] = "Vespera de Natal"
self[date(year, 12, 26)] = "26 de Dezembro"
self[date(year, 12, 31)] = "Vespera de Ano novo"
self[date(year, 6, 13)] = "Dia de Santo António"
# TODO add bridging days
        # - get holidays that occur on Tuesday and add Monday (-1 day)
        # - get holidays that occur on Thursday and add Friday (+1 day)
class PTE(PortugalExt):
pass
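# Hedged illustration (not part of the library): PortugalExt layers the company
# "bonus" days from its docstring on top of Portugal's public holidays, so --
# assuming HolidayBase accepts a `years` keyword argument -- date(2020, 12, 24)
# ('Vespera de Natal') would be in PTE(years=[2020]) but not in PT(years=[2020]).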
class Netherlands(HolidayBase):
SUNDAY = 6
def __init__(self, **kwargs):
# http://www.iamsterdam.com/en/visiting/plan-your-trip/practical-info/public-holidays
self.country = "NL"
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# New years
self[date(year, 1, 1)] = "Nieuwjaarsdag"
easter_date = easter(year)
# Easter
self[easter_date] = "Eerste paasdag"
# Second easter day
self[easter_date + rd(days=1)] = "Tweede paasdag"
# Ascension day
self[easter_date + rd(days=39)] = "Hemelvaart"
# Pentecost
self[easter_date + rd(days=49)] = "Eerste Pinksterdag"
# Pentecost monday
self[easter_date + rd(days=50)] = "Tweede Pinksterdag"
# First christmas
self[date(year, 12, 25)] = "Eerste Kerstdag"
# Second christmas
self[date(year, 12, 26)] = "Tweede Kerstdag"
# Liberation day
if year >= 1947 and year <= 2000:
self[date(year, 5, 5)] = "Bevrijdingsdag"
# Kingsday
if year >= 2014:
kings_day = date(year, 4, 27)
if kings_day.weekday() == self.SUNDAY:
kings_day = kings_day - rd(days=1)
self[kings_day] = "Koningsdag"
# Queen's day
if year >= 1891 and year <= 2013:
queens_day = date(year, 4, 30)
if year <= 1948:
queens_day = date(year, 8, 31)
if queens_day.weekday() == self.SUNDAY:
if year < 1980:
queens_day = queens_day + rd(days=1)
else:
queens_day = queens_day - rd(days=1)
self[queens_day] = "Koninginnedag"
class NL(Netherlands):
pass
class Norway(HolidayBase):
"""
Norwegian holidays.
    Note that holidays falling on a Sunday are "lost":
    they will not be moved to another day to make up for the collision.
    In Norway, ALL Sundays are considered a holiday (https://snl.no/helligdag).
    Initialize this class with include_sundays=False
    to not include Sundays as holidays.
Primary sources:
https://lovdata.no/dokument/NL/lov/1947-04-26-1
https://no.wikipedia.org/wiki/Helligdager_i_Norge
https://www.timeanddate.no/merkedag/norge/
"""
def __init__(self, include_sundays=True, **kwargs):
"""
:param include_sundays: Whether to consider sundays as a holiday
(which they are in Norway)
:param kwargs:
"""
self.country = "NO"
self.include_sundays = include_sundays
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# Add all the sundays of the year before adding the "real" holidays
if self.include_sundays:
first_day_of_year = date(year, 1, 1)
first_sunday_of_year = first_day_of_year\
+ rd(days=SUNDAY - first_day_of_year.weekday())
cur_date = first_sunday_of_year
while cur_date < date(year+1, 1, 1):
assert cur_date.weekday() == SUNDAY
self[cur_date] = "Søndag"
cur_date += rd(days=7)
# ========= Static holidays =========
self[date(year, 1, 1)] = "Første nyttårsdag"
# Source: https://lovdata.no/dokument/NL/lov/1947-04-26-1
if year >= 1947:
self[date(year, 5, 1)] = "Arbeidernes dag"
self[date(year, 5, 17)] = "Grunnlovsdag"
# According to https://no.wikipedia.org/wiki/F%C3%B8rste_juledag,
# these dates are only valid from year > 1700
# Wikipedia has no source for the statement, so leaving this be for now
self[date(year, 12, 25)] = "Første juledag"
self[date(year, 12, 26)] = "Andre juledag"
# ========= Moving holidays =========
# NOTE: These are probably subject to the same > 1700
# restriction as the above dates. The only source I could find for how
# long Easter has been celebrated in Norway was
# https://www.hf.uio.no/ikos/tjenester/kunnskap/samlinger/norsk-folkeminnesamling/livs-og-arshoytider/paske.html
# which says
# "(...) has been celebrated for over 1000 years (...)" (in Norway)
e = easter(year)
maundy_thursday = e - rd(days=3)
good_friday = e - rd(days=2)
resurrection_sunday = e
easter_monday = e + rd(days=1)
ascension_thursday = e + rd(days=39)
pentecost = e + rd(days=49)
pentecost_day_two = e + rd(days=50)
assert maundy_thursday.weekday() == THURSDAY
assert good_friday.weekday() == FRIDAY
assert resurrection_sunday.weekday() == SUNDAY
assert easter_monday.weekday() == MONDAY
assert ascension_thursday.weekday() == THURSDAY
assert pentecost.weekday() == SUNDAY
assert pentecost_day_two.weekday() == MONDAY
self[maundy_thursday] = "Skjærtorsdag"
self[good_friday] = "Langfredag"
self[resurrection_sunday] = "Første påskedag"
self[easter_monday] = "Andre påskedag"
self[ascension_thursday] = "Kristi himmelfartsdag"
self[pentecost] = "Første pinsedag"
self[pentecost_day_two] = "Andre pinsedag"
class NO(Norway):
pass
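# A minimal demo sketch, added for illustration only (it is not part of the
# original module). It assumes HolidayBase, defined earlier in this file,
# supports the `years`, `state`, `prov`, and `observed` keyword arguments that
# the country classes pass through, as well as dict-style membership and get()
# lookups keyed by date objects.
if __name__ == "__main__":
    us_2020 = US(state='CA', years=[2020])
    print(date(2020, 11, 26) in us_2020)       # Thanksgiving (4th Thursday of November)
    ca_2020 = CA(prov='ON', years=[2020])
    print(ca_2020.get(date(2020, 10, 12)))     # 'Thanksgiving' (2nd Monday of October)
    no_2020 = NO(include_sundays=False, years=[2020])
    print(no_2020.get(date(2020, 5, 17)))      # 'Grunnlovsdag'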
|
ayepezv/GAD_ERP
|
refs/heads/master
|
openerp/addons/base/ir/ir_exports.py
|
42
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class IrExports(models.Model):
_name = "ir.exports"
_order = 'name'
name = fields.Char(string='Export Name')
resource = fields.Char(index=True)
export_fields = fields.One2many('ir.exports.line', 'export_id', string='Export ID', copy=True)
class IrExportsLine(models.Model):
_name = 'ir.exports.line'
_order = 'id'
name = fields.Char(string='Field Name')
export_id = fields.Many2one('ir.exports', string='Export', index=True, ondelete='cascade')
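# Hedged usage sketch (illustration only, not part of this module): an export
# template and its lines could be created through the ORM roughly like this,
# assuming a standard Odoo environment `env` is available; the model name
# 'res.partner' and the field names below are just illustrative choices:
#
#     env['ir.exports'].create({
#         'name': 'Partner contact export',
#         'resource': 'res.partner',
#         'export_fields': [(0, 0, {'name': 'name'}), (0, 0, {'name': 'email'})],
#     })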
|
obi-two/Rebelion
|
refs/heads/master
|
data/scripts/templates/object/intangible/pet/shared_gnort_hue.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/intangible/pet/shared_gnort_hue.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
arnaudsj/titanium_mobile
|
refs/heads/master
|
drillbit/sdk_tests/unittest2/test/test_setups.py
|
111
|
import sys
from cStringIO import StringIO
import unittest2
from unittest2.test.support import resultFactory
class TestSetups(unittest2.TestCase):
def getRunner(self):
return unittest2.TextTestRunner(resultclass=resultFactory,
stream=StringIO())
def runTests(self, *cases):
suite = unittest2.TestSuite()
for case in cases:
tests = unittest2.defaultTestLoader.loadTestsFromTestCase(case)
suite.addTests(tests)
runner = self.getRunner()
# creating a nested suite exposes some potential bugs
realSuite = unittest2.TestSuite()
realSuite.addTest(suite)
# adding empty suites to the end exposes potential bugs
suite.addTest(unittest2.TestSuite())
realSuite.addTest(unittest2.TestSuite())
return runner.run(realSuite)
def test_setup_class(self):
class Test(unittest2.TestCase):
setUpCalled = 0
@classmethod
def setUpClass(cls):
Test.setUpCalled += 1
unittest2.TestCase.setUpClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(Test.setUpCalled, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_teardown_class(self):
class Test(unittest2.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test.tearDownCalled += 1
unittest2.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(Test.tearDownCalled, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_teardown_class_two_classes(self):
class Test(unittest2.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test.tearDownCalled += 1
unittest2.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest2.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test2.tearDownCalled += 1
unittest2.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test, Test2)
self.assertEqual(Test.tearDownCalled, 1)
self.assertEqual(Test2.tearDownCalled, 1)
self.assertEqual(result.testsRun, 4)
self.assertEqual(len(result.errors), 0)
def test_error_in_setupclass(self):
class BrokenTest(unittest2.TestCase):
@classmethod
def setUpClass(cls):
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(BrokenTest)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error),
'setUpClass (%s.BrokenTest)' % __name__)
def test_error_in_teardown_class(self):
class Test(unittest2.TestCase):
tornDown = 0
@classmethod
def tearDownClass(cls):
Test.tornDown += 1
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest2.TestCase):
tornDown = 0
@classmethod
def tearDownClass(cls):
Test2.tornDown += 1
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test, Test2)
self.assertEqual(result.testsRun, 4)
self.assertEqual(len(result.errors), 2)
self.assertEqual(Test.tornDown, 1)
self.assertEqual(Test2.tornDown, 1)
error, _ = result.errors[0]
self.assertEqual(str(error),
'tearDownClass (%s.Test)' % __name__)
def test_class_not_torndown_when_setup_fails(self):
class Test(unittest2.TestCase):
tornDown = False
@classmethod
def setUpClass(cls):
raise TypeError
@classmethod
def tearDownClass(cls):
Test.tornDown = True
raise TypeError('foo')
def test_one(self):
pass
self.runTests(Test)
self.assertFalse(Test.tornDown)
def test_class_not_setup_or_torndown_when_skipped(self):
class Test(unittest2.TestCase):
classSetUp = False
tornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.tornDown = True
def test_one(self):
pass
Test = unittest2.skip("hop")(Test)
self.runTests(Test)
self.assertFalse(Test.classSetUp)
self.assertFalse(Test.tornDown)
def test_setup_teardown_order_with_pathological_suite(self):
results = []
class Module1(object):
@staticmethod
def setUpModule():
results.append('Module1.setUpModule')
@staticmethod
def tearDownModule():
results.append('Module1.tearDownModule')
class Module2(object):
@staticmethod
def setUpModule():
results.append('Module2.setUpModule')
@staticmethod
def tearDownModule():
results.append('Module2.tearDownModule')
class Test1(unittest2.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 1')
@classmethod
def tearDownClass(cls):
results.append('teardown 1')
def testOne(self):
results.append('Test1.testOne')
def testTwo(self):
results.append('Test1.testTwo')
class Test2(unittest2.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 2')
@classmethod
def tearDownClass(cls):
results.append('teardown 2')
def testOne(self):
results.append('Test2.testOne')
def testTwo(self):
results.append('Test2.testTwo')
class Test3(unittest2.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 3')
@classmethod
def tearDownClass(cls):
results.append('teardown 3')
def testOne(self):
results.append('Test3.testOne')
def testTwo(self):
results.append('Test3.testTwo')
Test1.__module__ = Test2.__module__ = 'Module'
Test3.__module__ = 'Module2'
sys.modules['Module'] = Module1
sys.modules['Module2'] = Module2
first = unittest2.TestSuite((Test1('testOne'),))
second = unittest2.TestSuite((Test1('testTwo'),))
third = unittest2.TestSuite((Test2('testOne'),))
fourth = unittest2.TestSuite((Test2('testTwo'),))
fifth = unittest2.TestSuite((Test3('testOne'),))
sixth = unittest2.TestSuite((Test3('testTwo'),))
suite = unittest2.TestSuite((first, second, third, fourth, fifth, sixth))
runner = self.getRunner()
result = runner.run(suite)
self.assertEqual(result.testsRun, 6)
self.assertEqual(len(result.errors), 0)
self.assertEqual(results,
['Module1.setUpModule', 'setup 1',
'Test1.testOne', 'Test1.testTwo', 'teardown 1',
'setup 2', 'Test2.testOne', 'Test2.testTwo',
'teardown 2', 'Module1.tearDownModule',
'Module2.setUpModule', 'setup 3',
'Test3.testOne', 'Test3.testTwo',
'teardown 3', 'Module2.tearDownModule'])
def test_setup_module(self):
class Module(object):
moduleSetup = 0
@staticmethod
def setUpModule():
Module.moduleSetup += 1
class Test(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(Module.moduleSetup, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_error_in_setup_module(self):
class Module(object):
moduleSetup = 0
moduleTornDown = 0
@staticmethod
def setUpModule():
Module.moduleSetup += 1
raise TypeError('foo')
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
class Test(unittest2.TestCase):
classSetUp = False
classTornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.classTornDown = True
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
Test2.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test, Test2)
self.assertEqual(Module.moduleSetup, 1)
self.assertEqual(Module.moduleTornDown, 0)
self.assertEqual(result.testsRun, 0)
self.assertFalse(Test.classSetUp)
self.assertFalse(Test.classTornDown)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'setUpModule (Module)')
def test_testcase_with_missing_module(self):
class Test(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules.pop('Module', None)
result = self.runTests(Test)
self.assertEqual(result.testsRun, 2)
def test_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
class Test(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_error_in_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
raise TypeError('foo')
class Test(unittest2.TestCase):
classSetUp = False
classTornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.classTornDown = True
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
Test2.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test, Test2)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 4)
self.assertTrue(Test.classSetUp)
self.assertTrue(Test.classTornDown)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'tearDownModule (Module)')
def test_skiptest_in_setupclass(self):
class Test(unittest2.TestCase):
@classmethod
def setUpClass(cls):
raise unittest2.SkipTest('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
skipped = result.skipped[0][0]
self.assertEqual(str(skipped), 'setUpClass (%s.Test)' % __name__)
def test_skiptest_in_setupmodule(self):
class Test(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
class Module(object):
@staticmethod
def setUpModule():
raise unittest2.SkipTest('foo')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
skipped = result.skipped[0][0]
self.assertEqual(str(skipped), 'setUpModule (Module)')
def test_suite_debug_executes_setups_and_teardowns(self):
ordering = []
class Module(object):
@staticmethod
def setUpModule():
ordering.append('setUpModule')
@staticmethod
def tearDownModule():
ordering.append('tearDownModule')
class Test(unittest2.TestCase):
@classmethod
def setUpClass(cls):
ordering.append('setUpClass')
@classmethod
def tearDownClass(cls):
ordering.append('tearDownClass')
def test_something(self):
ordering.append('test_something')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
suite = unittest2.defaultTestLoader.loadTestsFromTestCase(Test)
suite.debug()
expectedOrder = ['setUpModule', 'setUpClass', 'test_something', 'tearDownClass', 'tearDownModule']
self.assertEqual(ordering, expectedOrder)
def test_suite_debug_propagates_exceptions(self):
class Module(object):
@staticmethod
def setUpModule():
if phase == 0:
raise Exception('setUpModule')
@staticmethod
def tearDownModule():
if phase == 1:
raise Exception('tearDownModule')
class Test(unittest2.TestCase):
@classmethod
def setUpClass(cls):
if phase == 2:
raise Exception('setUpClass')
@classmethod
def tearDownClass(cls):
if phase == 3:
raise Exception('tearDownClass')
def test_something(self):
if phase == 4:
raise Exception('test_something')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
_suite = unittest2.defaultTestLoader.loadTestsFromTestCase(Test)
suite = unittest2.TestSuite()
# nesting a suite again exposes a bug in the initial implementation
suite.addTest(_suite)
messages = ('setUpModule', 'tearDownModule', 'setUpClass', 'tearDownClass', 'test_something')
for phase, msg in enumerate(messages):
self.assertRaisesRegexp(Exception, msg, suite.debug)
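# Illustrative usage sketch (an assumption, not part of the original test
# module): it shows the fixture ordering that the tests above assert --
# setUpClass runs once before the first test of the class and tearDownClass
# once after the last, when the suite is run through a TextTestRunner.
def _example_class_fixture_ordering():
    calls = []
    class ExampleTest(unittest2.TestCase):
        @classmethod
        def setUpClass(cls):
            calls.append('setUpClass')      # once, before test_one
        @classmethod
        def tearDownClass(cls):
            calls.append('tearDownClass')   # once, after test_two
        def test_one(self):
            calls.append('test_one')
        def test_two(self):
            calls.append('test_two')
    suite = unittest2.defaultTestLoader.loadTestsFromTestCase(ExampleTest)
    unittest2.TextTestRunner(verbosity=0).run(suite)
    # expected: ['setUpClass', 'test_one', 'test_two', 'tearDownClass']
    return calls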
|
cellsrg/LOD-table-annotator
|
refs/heads/master
|
jython/Lib/site-packages/pip-9.0.1-py2.7.egg/pip/_vendor/distlib/database.py
|
334
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""PEP 376 implementation."""
from __future__ import unicode_literals
import base64
import codecs
import contextlib
import hashlib
import logging
import os
import posixpath
import sys
import zipimport
from . import DistlibException, resources
from .compat import StringIO
from .version import get_scheme, UnsupportedVersionError
from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME
from .util import (parse_requirement, cached_property, parse_name_and_version,
read_exports, write_exports, CSVReader, CSVWriter)
__all__ = ['Distribution', 'BaseInstalledDistribution',
'InstalledDistribution', 'EggInfoDistribution',
'DistributionPath']
logger = logging.getLogger(__name__)
EXPORTS_FILENAME = 'pydist-exports.json'
COMMANDS_FILENAME = 'pydist-commands.json'
DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
'RESOURCES', EXPORTS_FILENAME, 'SHARED')
DISTINFO_EXT = '.dist-info'
class _Cache(object):
"""
A simple cache mapping names and .dist-info paths to distributions
"""
def __init__(self):
"""
Initialise an instance. There is normally one for each DistributionPath.
"""
self.name = {}
self.path = {}
self.generated = False
def clear(self):
"""
Clear the cache, setting it to its initial state.
"""
self.name.clear()
self.path.clear()
self.generated = False
def add(self, dist):
"""
Add a distribution to the cache.
:param dist: The distribution to add.
"""
if dist.path not in self.path:
self.path[dist.path] = dist
self.name.setdefault(dist.key, []).append(dist)
class DistributionPath(object):
"""
Represents a set of distributions installed on a path (typically sys.path).
"""
def __init__(self, path=None, include_egg=False):
"""
Create an instance from a path, optionally including legacy (distutils/
setuptools/distribute) distributions.
:param path: The path to use, as a list of directories. If not specified,
sys.path is used.
:param include_egg: If True, this instance will look for and return legacy
distributions as well as those based on PEP 376.
"""
if path is None:
path = sys.path
self.path = path
self._include_dist = True
self._include_egg = include_egg
self._cache = _Cache()
self._cache_egg = _Cache()
self._cache_enabled = True
self._scheme = get_scheme('default')
def _get_cache_enabled(self):
return self._cache_enabled
def _set_cache_enabled(self, value):
self._cache_enabled = value
cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
def clear_cache(self):
"""
Clears the internal cache.
"""
self._cache.clear()
self._cache_egg.clear()
def _yield_distributions(self):
"""
Yield .dist-info and/or .egg(-info) distributions.
"""
# We need to check if we've seen some resources already, because on
# some Linux systems (e.g. some Debian/Ubuntu variants) there are
# symlinks which alias other files in the environment.
seen = set()
for path in self.path:
finder = resources.finder_for_path(path)
if finder is None:
continue
r = finder.find('')
if not r or not r.is_container:
continue
rset = sorted(r.resources)
for entry in rset:
r = finder.find(entry)
if not r or r.path in seen:
continue
if self._include_dist and entry.endswith(DISTINFO_EXT):
possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME]
for metadata_filename in possible_filenames:
metadata_path = posixpath.join(entry, metadata_filename)
pydist = finder.find(metadata_path)
if pydist:
break
else:
continue
with contextlib.closing(pydist.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
logger.debug('Found %s', r.path)
seen.add(r.path)
yield new_dist_class(r.path, metadata=metadata,
env=self)
elif self._include_egg and entry.endswith(('.egg-info',
'.egg')):
logger.debug('Found %s', r.path)
seen.add(r.path)
yield old_dist_class(r.path, self)
def _generate_cache(self):
"""
Scan the path for distributions and populate the cache with
those that are found.
"""
gen_dist = not self._cache.generated
gen_egg = self._include_egg and not self._cache_egg.generated
if gen_dist or gen_egg:
for dist in self._yield_distributions():
if isinstance(dist, InstalledDistribution):
self._cache.add(dist)
else:
self._cache_egg.add(dist)
if gen_dist:
self._cache.generated = True
if gen_egg:
self._cache_egg.generated = True
@classmethod
def distinfo_dirname(cls, name, version):
"""
The *name* and *version* parameters are converted into their
filename-escaped form, i.e. any ``'-'`` characters are replaced
with ``'_'`` other than the one in ``'dist-info'`` and the one
separating the name from the version number.
:parameter name: is converted to a standard distribution name by replacing
any runs of non- alphanumeric characters with a single
``'-'``.
:type name: string
:parameter version: is converted to a standard version string. Spaces
become dots, and all other non-alphanumeric characters
(except dots) become dashes, with runs of multiple
dashes condensed to a single dash.
:type version: string
:returns: directory name
:rtype: string"""
name = name.replace('-', '_')
return '-'.join([name, version]) + DISTINFO_EXT
def get_distributions(self):
"""
Provides an iterator that looks for distributions and returns
:class:`InstalledDistribution` or
:class:`EggInfoDistribution` instances for each one of them.
:rtype: iterator of :class:`InstalledDistribution` and
:class:`EggInfoDistribution` instances
"""
if not self._cache_enabled:
for dist in self._yield_distributions():
yield dist
else:
self._generate_cache()
for dist in self._cache.path.values():
yield dist
if self._include_egg:
for dist in self._cache_egg.path.values():
yield dist
def get_distribution(self, name):
"""
Looks for a named distribution on the path.
This function only returns the first result found, as no more than one
value is expected. If nothing is found, ``None`` is returned.
:rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
or ``None``
"""
result = None
name = name.lower()
if not self._cache_enabled:
for dist in self._yield_distributions():
if dist.key == name:
result = dist
break
else:
self._generate_cache()
if name in self._cache.name:
result = self._cache.name[name][0]
elif self._include_egg and name in self._cache_egg.name:
result = self._cache_egg.name[name][0]
return result
def provides_distribution(self, name, version=None):
"""
Iterates over all distributions to find which distributions provide *name*.
If a *version* is provided, it will be used to filter the results.
        Every matching distribution is yielded; if no installed distribution
        provides *name*, the iterator is simply empty.
:parameter version: a version specifier that indicates the version
required, conforming to the format in ``PEP-345``
:type name: string
:type version: string
"""
matcher = None
        if version is not None:
try:
matcher = self._scheme.matcher('%s (%s)' % (name, version))
except ValueError:
raise DistlibException('invalid name or version: %r, %r' %
(name, version))
for dist in self.get_distributions():
provided = dist.provides
for p in provided:
p_name, p_ver = parse_name_and_version(p)
if matcher is None:
if p_name == name:
yield dist
break
else:
if p_name == name and matcher.match(p_ver):
yield dist
break
def get_file_path(self, name, relative_path):
"""
Return the path to a resource file.
"""
dist = self.get_distribution(name)
if dist is None:
raise LookupError('no distribution named %r found' % name)
return dist.get_resource_path(relative_path)
def get_exported_entries(self, category, name=None):
"""
Return all of the exported entries in a particular category.
:param category: The category to search for entries.
:param name: If specified, only entries with that name are returned.
"""
for dist in self.get_distributions():
r = dist.exports
if category in r:
d = r[category]
if name is not None:
if name in d:
yield d[name]
else:
for v in d.values():
yield v
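# Illustrative sketch (not part of the distlib API itself): typical use of
# DistributionPath to enumerate what is installed on sys.path and to look a
# single project up by name.  The name 'pip' is only an example and may not
# be installed in a given environment.
def _example_distribution_path_usage():
    dist_path = DistributionPath(include_egg=True)  # also report .egg(-info) dists
    names = sorted(d.name for d in dist_path.get_distributions())
    single = dist_path.get_distribution('pip')      # None when not installed
    return names, single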
class Distribution(object):
"""
A base class for distributions, whether installed or from indexes.
Either way, it must have some metadata, so that's all that's needed
for construction.
"""
build_time_dependency = False
"""
Set to True if it's known to be only a build-time dependency (i.e.
not needed after installation).
"""
requested = False
"""A boolean that indicates whether the ``REQUESTED`` metadata file is
present (in other words, whether the package was installed by user
request or it was installed as a dependency)."""
def __init__(self, metadata):
"""
Initialise an instance.
:param metadata: The instance of :class:`Metadata` describing this
distribution.
"""
self.metadata = metadata
self.name = metadata.name
self.key = self.name.lower() # for case-insensitive comparisons
self.version = metadata.version
self.locator = None
self.digest = None
self.extras = None # additional features requested
self.context = None # environment marker overrides
self.download_urls = set()
self.digests = {}
@property
def source_url(self):
"""
The source archive download URL for this distribution.
"""
return self.metadata.source_url
download_url = source_url # Backward compatibility
@property
def name_and_version(self):
"""
A utility property which displays the name and version in parentheses.
"""
return '%s (%s)' % (self.name, self.version)
@property
def provides(self):
"""
A set of distribution names and versions provided by this distribution.
:return: A set of "name (version)" strings.
"""
plist = self.metadata.provides
s = '%s (%s)' % (self.name, self.version)
if s not in plist:
plist.append(s)
return plist
def _get_requirements(self, req_attr):
md = self.metadata
logger.debug('Getting requirements from metadata %r', md.todict())
reqts = getattr(md, req_attr)
return set(md.get_requirements(reqts, extras=self.extras,
env=self.context))
@property
def run_requires(self):
return self._get_requirements('run_requires')
@property
def meta_requires(self):
return self._get_requirements('meta_requires')
@property
def build_requires(self):
return self._get_requirements('build_requires')
@property
def test_requires(self):
return self._get_requirements('test_requires')
@property
def dev_requires(self):
return self._get_requirements('dev_requires')
def matches_requirement(self, req):
"""
Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
:rtype req: str
:return: True if it matches, else False.
"""
# Requirement may contain extras - parse to lose those
# from what's passed to the matcher
r = parse_requirement(req)
scheme = get_scheme(self.metadata.scheme)
try:
matcher = scheme.matcher(r.requirement)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
result = False
for p in self.provides:
p_name, p_ver = parse_name_and_version(p)
if p_name != name:
continue
try:
result = matcher.match(p_ver)
break
except UnsupportedVersionError:
pass
return result
def __repr__(self):
"""
        Return a textual representation of this instance.
"""
if self.source_url:
suffix = ' [%s]' % self.source_url
else:
suffix = ''
return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
def __eq__(self, other):
"""
See if this distribution is the same as another.
:param other: The distribution to compare with. To be equal to one
                      another, distributions must have the same type, name,
version and source_url.
:return: True if it is the same, else False.
"""
if type(other) is not type(self):
result = False
else:
result = (self.name == other.name and
self.version == other.version and
self.source_url == other.source_url)
return result
def __hash__(self):
"""
Compute hash in a way which matches the equality test.
"""
return hash(self.name) + hash(self.version) + hash(self.source_url)
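# Illustrative sketch (hypothetical project name): how ``provides`` and
# ``matches_requirement`` interact.  ``make_dist`` is the convenience helper
# defined near the end of this module; the reference is resolved at call time.
def _example_requirement_matching():
    dist = make_dist('projecta', '1.2.0')
    in_range = dist.matches_requirement('projecta (>= 1.0)')      # expected True
    out_of_range = dist.matches_requirement('projecta (>= 2.0)')  # expected False
    return dist.provides, in_range, out_of_range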
class BaseInstalledDistribution(Distribution):
"""
This is the base class for installed distributions (whether PEP 376 or
legacy).
"""
hasher = None
def __init__(self, metadata, path, env=None):
"""
Initialise an instance.
:param metadata: An instance of :class:`Metadata` which describes the
distribution. This will normally have been initialised
from a metadata file in the ``path``.
:param path: The path of the ``.dist-info`` or ``.egg-info``
directory for the distribution.
:param env: This is normally the :class:`DistributionPath`
instance where this distribution was found.
"""
super(BaseInstalledDistribution, self).__init__(metadata)
self.path = path
self.dist_path = env
def get_hash(self, data, hasher=None):
"""
Get the hash of some data, using a particular hash algorithm, if
specified.
:param data: The data to be hashed.
:type data: bytes
:param hasher: The name of a hash implementation, supported by hashlib,
or ``None``. Examples of valid values are ``'sha1'``,
``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and
``'sha512'``. If no hasher is specified, the ``hasher``
attribute of the :class:`InstalledDistribution` instance
is used. If the hasher is determined to be ``None``, MD5
is used as the hashing algorithm.
:returns: The hash of the data. If a hasher was explicitly specified,
the returned hash will be prefixed with the specified hasher
followed by '='.
:rtype: str
"""
        if hasher is None:
            hasher = self.hasher
        if hasher is None:
            hasher = hashlib.md5
            prefix = ''
        else:
            # prefix with the name of the algorithm actually used, which may
            # differ from self.hasher when a hasher argument was passed in
            prefix = '%s=' % hasher
            hasher = getattr(hashlib, hasher)
digest = hasher(data).digest()
digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
return '%s%s' % (prefix, digest)
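# Illustrative sketch (hypothetical values): the digest format produced by
# get_hash() -- urlsafe base64 with '=' padding stripped, optionally prefixed
# by the hasher name.  A bare instance is enough here because get_hash() only
# reads the ``hasher`` attribute.
def _example_get_hash_format():
    dist = BaseInstalledDistribution.__new__(BaseInstalledDistribution)
    dist.hasher = None
    unprefixed = dist.get_hash(b'payload')   # MD5 digest, no 'name=' prefix
    dist.hasher = 'sha256'
    prefixed = dist.get_hash(b'payload')     # e.g. 'sha256=...'
    return unprefixed, prefixed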
class InstalledDistribution(BaseInstalledDistribution):
"""
Created with the *path* of the ``.dist-info`` directory provided to the
constructor. It reads the metadata contained in ``pydist.json`` when it is
    instantiated, or uses a passed-in Metadata instance (useful for when
dry-run mode is being used).
"""
hasher = 'sha256'
def __init__(self, path, metadata=None, env=None):
self.finder = finder = resources.finder_for_path(path)
if finder is None:
            # without a finder the metadata cannot be located
            raise ValueError('finder unavailable for %s' % path)
if env and env._cache_enabled and path in env._cache.path:
metadata = env._cache.path[path].metadata
elif metadata is None:
r = finder.find(METADATA_FILENAME)
# Temporary - for Wheel 0.23 support
if r is None:
r = finder.find(WHEEL_METADATA_FILENAME)
# Temporary - for legacy support
if r is None:
r = finder.find('METADATA')
if r is None:
raise ValueError('no %s found in %s' % (METADATA_FILENAME,
path))
with contextlib.closing(r.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
super(InstalledDistribution, self).__init__(metadata, path, env)
if env and env._cache_enabled:
env._cache.add(self)
        r = finder.find('REQUESTED')
self.requested = r is not None
def __repr__(self):
return '<InstalledDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def _get_records(self):
"""
Get the list of installed files for the distribution
:return: A list of tuples of path, hash and size. Note that hash and
size might be ``None`` for some entries. The path is exactly
as stored in the file (which is as in PEP 376).
"""
results = []
r = self.get_distinfo_resource('RECORD')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as record_reader:
# Base location is parent dir of .dist-info dir
#base_location = os.path.dirname(self.path)
#base_location = os.path.abspath(base_location)
for row in record_reader:
missing = [None for i in range(len(row), 3)]
path, checksum, size = row + missing
#if not os.path.isabs(path):
# path = path.replace('/', os.sep)
# path = os.path.join(base_location, path)
results.append((path, checksum, size))
return results
@cached_property
def exports(self):
"""
Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a dict
of :class:`ExportEntry` instances describing the individual
export entries, and keyed by name.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
result = self.read_exports()
return result
def read_exports(self):
"""
Read exports data from a file in .ini format.
:return: A dictionary of exports, mapping an export category to a list
of :class:`ExportEntry` instances describing the individual
export entries.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
with contextlib.closing(r.as_stream()) as stream:
result = read_exports(stream)
return result
def write_exports(self, exports):
"""
Write a dictionary of exports to a file in .ini format.
:param exports: A dictionary of exports, mapping an export category to
a list of :class:`ExportEntry` instances describing the
individual export entries.
"""
rf = self.get_distinfo_file(EXPORTS_FILENAME)
with open(rf, 'w') as f:
write_exports(exports, f)
def get_resource_path(self, relative_path):
"""
NOTE: This API may change in the future.
Return the absolute path to a resource file with the given relative
path.
:param relative_path: The path, relative to .dist-info, of the resource
of interest.
:return: The absolute path where the resource is to be found.
"""
r = self.get_distinfo_resource('RESOURCES')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as resources_reader:
for relative, destination in resources_reader:
if relative == relative_path:
return destination
raise KeyError('no resource file with relative path %r '
'is installed' % relative_path)
def list_installed_files(self):
"""
Iterates over the ``RECORD`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: iterator of (path, hash, size)
"""
for result in self._get_records():
yield result
def write_installed_files(self, paths, prefix, dry_run=False):
"""
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
existing ``RECORD`` file is silently overwritten.
prefix is used to determine when to write absolute paths.
"""
prefix = os.path.join(prefix, '')
base = os.path.dirname(self.path)
base_under_prefix = base.startswith(prefix)
base = os.path.join(base, '')
record_path = self.get_distinfo_file('RECORD')
logger.info('creating %s', record_path)
if dry_run:
return None
with CSVWriter(record_path) as writer:
for path in paths:
if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
# do not put size and hash, as in PEP-376
hash_value = size = ''
else:
size = '%d' % os.path.getsize(path)
with open(path, 'rb') as fp:
hash_value = self.get_hash(fp.read())
if path.startswith(base) or (base_under_prefix and
path.startswith(prefix)):
path = os.path.relpath(path, base)
writer.writerow((path, hash_value, size))
# add the RECORD file itself
if record_path.startswith(base):
record_path = os.path.relpath(record_path, base)
writer.writerow((record_path, '', ''))
return record_path
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
base = os.path.dirname(self.path)
record_path = self.get_distinfo_file('RECORD')
for path, hash_value, size in self.list_installed_files():
if not os.path.isabs(path):
path = os.path.join(base, path)
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
elif os.path.isfile(path):
actual_size = str(os.path.getsize(path))
if size and actual_size != size:
mismatches.append((path, 'size', size, actual_size))
elif hash_value:
if '=' in hash_value:
hasher = hash_value.split('=', 1)[0]
else:
hasher = None
with open(path, 'rb') as f:
actual_hash = self.get_hash(f.read(), hasher)
if actual_hash != hash_value:
mismatches.append((path, 'hash', hash_value, actual_hash))
return mismatches
@cached_property
def shared_locations(self):
"""
A dictionary of shared locations whose keys are in the set 'prefix',
'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
The corresponding value is the absolute path of that category for
this distribution, and takes into account any paths selected by the
user at installation time (e.g. via command-line arguments). In the
case of the 'namespace' key, this would be a list of absolute paths
for the roots of namespace packages in this distribution.
The first time this property is accessed, the relevant information is
read from the SHARED file in the .dist-info directory.
"""
result = {}
shared_path = os.path.join(self.path, 'SHARED')
if os.path.isfile(shared_path):
with codecs.open(shared_path, 'r', encoding='utf-8') as f:
lines = f.read().splitlines()
for line in lines:
key, value = line.split('=', 1)
if key == 'namespace':
result.setdefault(key, []).append(value)
else:
result[key] = value
return result
def write_shared_locations(self, paths, dry_run=False):
"""
Write shared location information to the SHARED file in .dist-info.
:param paths: A dictionary as described in the documentation for
:meth:`shared_locations`.
:param dry_run: If True, the action is logged but no file is actually
written.
:return: The path of the file written to.
"""
shared_path = os.path.join(self.path, 'SHARED')
logger.info('creating %s', shared_path)
if dry_run:
return None
lines = []
for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
path = paths[key]
if os.path.isdir(paths[key]):
lines.append('%s=%s' % (key, path))
for ns in paths.get('namespace', ()):
lines.append('namespace=%s' % ns)
with codecs.open(shared_path, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
return shared_path
def get_distinfo_resource(self, path):
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
finder = resources.finder_for_path(self.path)
if finder is None:
raise DistlibException('Unable to get a finder for %s' % self.path)
return finder.find(path)
def get_distinfo_file(self, path):
"""
Returns a path located under the ``.dist-info`` directory. Returns a
string representing the path.
:parameter path: a ``'/'``-separated path relative to the
``.dist-info`` directory or an absolute path;
If *path* is an absolute path and doesn't start
with the ``.dist-info`` directory path,
a :class:`DistlibException` is raised
:type path: str
:rtype: str
"""
# Check if it is an absolute path # XXX use relpath, add tests
if path.find(os.sep) >= 0:
# it's an absolute path?
distinfo_dirname, path = path.split(os.sep)[-2:]
if distinfo_dirname != self.path.split(os.sep)[-1]:
raise DistlibException(
'dist-info file %r does not belong to the %r %s '
'distribution' % (path, self.name, self.version))
# The file must be relative
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
return os.path.join(self.path, path)
def list_distinfo_files(self):
"""
Iterates over the ``RECORD`` entries and returns paths for each line if
the path is pointing to a file located in the ``.dist-info`` directory
or one of its subdirectories.
:returns: iterator of paths
"""
base = os.path.dirname(self.path)
for path, checksum, size in self._get_records():
# XXX add separator or use real relpath algo
if not os.path.isabs(path):
path = os.path.join(base, path)
if path.startswith(self.path):
yield path
def __eq__(self, other):
return (isinstance(other, InstalledDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
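# Illustrative sketch (assumes a project, here called 'example-project', is
# installed as a .dist-info distribution on sys.path): reading RECORD entries
# and verifying them against the files on disk.
def _example_installed_distribution_checks():
    dist = DistributionPath().get_distribution('example-project')
    if dist is None:
        return None
    files = list(dist.list_installed_files())  # (path, hash, size) tuples from RECORD
    mismatches = dist.check_installed_files()  # [] when RECORD matches the disk
    return files, mismatches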
class EggInfoDistribution(BaseInstalledDistribution):
"""Created with the *path* of the ``.egg-info`` directory or file provided
to the constructor. It reads the metadata contained in the file itself, or
if the given path happens to be a directory, the metadata is read from the
file ``PKG-INFO`` under that directory."""
requested = True # as we have no way of knowing, assume it was
shared_locations = {}
def __init__(self, path, env=None):
def set_name_and_version(s, n, v):
s.name = n
s.key = n.lower() # for case-insensitive comparisons
s.version = v
self.path = path
self.dist_path = env
if env and env._cache_enabled and path in env._cache_egg.path:
metadata = env._cache_egg.path[path].metadata
set_name_and_version(self, metadata.name, metadata.version)
else:
metadata = self._get_metadata(path)
# Need to be set before caching
set_name_and_version(self, metadata.name, metadata.version)
if env and env._cache_enabled:
env._cache_egg.add(self)
super(EggInfoDistribution, self).__init__(metadata, path, env)
def _get_metadata(self, path):
requires = None
def parse_requires_data(data):
"""Create a list of dependencies from a requires.txt file.
*data*: the contents of a setuptools-produced requires.txt file.
"""
reqs = []
lines = data.splitlines()
for line in lines:
line = line.strip()
if line.startswith('['):
logger.warning('Unexpected line: quitting requirement scan: %r',
line)
break
r = parse_requirement(line)
if not r:
logger.warning('Not recognised as a requirement: %r', line)
continue
if r.extras:
logger.warning('extra requirements in requires.txt are '
'not supported')
if not r.constraints:
reqs.append(r.name)
else:
cons = ', '.join('%s%s' % c for c in r.constraints)
reqs.append('%s (%s)' % (r.name, cons))
return reqs
def parse_requires_path(req_path):
"""Create a list of dependencies from a requires.txt file.
*req_path*: the path to a setuptools-produced requires.txt file.
"""
reqs = []
try:
with codecs.open(req_path, 'r', 'utf-8') as fp:
reqs = parse_requires_data(fp.read())
except IOError:
pass
return reqs
if path.endswith('.egg'):
if os.path.isdir(path):
meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
metadata = Metadata(path=meta_path, scheme='legacy')
req_path = os.path.join(path, 'EGG-INFO', 'requires.txt')
requires = parse_requires_path(req_path)
else:
# FIXME handle the case where zipfile is not available
zipf = zipimport.zipimporter(path)
fileobj = StringIO(
zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
metadata = Metadata(fileobj=fileobj, scheme='legacy')
try:
data = zipf.get_data('EGG-INFO/requires.txt')
requires = parse_requires_data(data.decode('utf-8'))
except IOError:
requires = None
elif path.endswith('.egg-info'):
if os.path.isdir(path):
req_path = os.path.join(path, 'requires.txt')
requires = parse_requires_path(req_path)
path = os.path.join(path, 'PKG-INFO')
metadata = Metadata(path=path, scheme='legacy')
else:
raise DistlibException('path must end with .egg-info or .egg, '
'got %r' % path)
if requires:
metadata.add_requirements(requires)
return metadata
def __repr__(self):
return '<EggInfoDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
for path, _, _ in self.list_installed_files():
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
return mismatches
def list_installed_files(self):
"""
Iterates over the ``installed-files.txt`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: a list of (path, hash, size)
"""
def _md5(path):
f = open(path, 'rb')
try:
content = f.read()
finally:
f.close()
return hashlib.md5(content).hexdigest()
def _size(path):
return os.stat(path).st_size
record_path = os.path.join(self.path, 'installed-files.txt')
result = []
if os.path.exists(record_path):
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
p = os.path.normpath(os.path.join(self.path, line))
# "./" is present as a marker between installed files
# and installation metadata files
if not os.path.exists(p):
logger.warning('Non-existent file: %s', p)
if p.endswith(('.pyc', '.pyo')):
continue
#otherwise fall through and fail
if not os.path.isdir(p):
result.append((p, _md5(p), _size(p)))
result.append((record_path, None, None))
return result
def list_distinfo_files(self, absolute=False):
"""
Iterates over the ``installed-files.txt`` entries and returns paths for
each line if the path is pointing to a file located in the
``.egg-info`` directory or one of its subdirectories.
:parameter absolute: If *absolute* is ``True``, each returned path is
transformed into a local absolute path. Otherwise the
raw value from ``installed-files.txt`` is returned.
:type absolute: boolean
:returns: iterator of paths
"""
record_path = os.path.join(self.path, 'installed-files.txt')
skip = True
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line == './':
skip = False
continue
if not skip:
p = os.path.normpath(os.path.join(self.path, line))
if p.startswith(self.path):
if absolute:
yield p
else:
yield line
def __eq__(self, other):
return (isinstance(other, EggInfoDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
new_dist_class = InstalledDistribution
old_dist_class = EggInfoDistribution
class DependencyGraph(object):
"""
Represents a dependency graph between distributions.
The dependency relationships are stored in an ``adjacency_list`` that maps
distributions to a list of ``(other, label)`` tuples where ``other``
is a distribution and the edge is labeled with ``label`` (i.e. the version
specifier, if such was provided). Also, for more efficient traversal, for
every distribution ``x``, a list of predecessors is kept in
``reverse_list[x]``. An edge from distribution ``a`` to
distribution ``b`` means that ``a`` depends on ``b``. If any missing
dependencies are found, they are stored in ``missing``, which is a
dictionary that maps distributions to a list of requirements that were not
provided by any other distributions.
"""
def __init__(self):
self.adjacency_list = {}
self.reverse_list = {}
self.missing = {}
def add_distribution(self, distribution):
"""Add the *distribution* to the graph.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
"""
self.adjacency_list[distribution] = []
self.reverse_list[distribution] = []
#self.missing[distribution] = []
def add_edge(self, x, y, label=None):
"""Add an edge from distribution *x* to distribution *y* with the given
*label*.
:type x: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type y: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type label: ``str`` or ``None``
"""
self.adjacency_list[x].append((y, label))
# multiple edges are allowed, so be careful
if x not in self.reverse_list[y]:
self.reverse_list[y].append(x)
def add_missing(self, distribution, requirement):
"""
Add a missing *requirement* for the given *distribution*.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
:type requirement: ``str``
"""
logger.debug('%s missing %r', distribution, requirement)
self.missing.setdefault(distribution, []).append(requirement)
def _repr_dist(self, dist):
return '%s %s' % (dist.name, dist.version)
def repr_node(self, dist, level=1):
"""Prints only a subgraph"""
output = [self._repr_dist(dist)]
for other, label in self.adjacency_list[dist]:
dist = self._repr_dist(other)
if label is not None:
dist = '%s [%s]' % (dist, label)
output.append(' ' * level + str(dist))
suboutput = self.repr_node(other, level + 1)
subs = suboutput.split('\n')
output.extend(subs[1:])
return '\n'.join(output)
def to_dot(self, f, skip_disconnected=True):
"""Writes a DOT output for the graph to the provided file *f*.
If *skip_disconnected* is set to ``True``, then all distributions
that are not dependent on any other distribution are skipped.
:type f: has to support ``file``-like operations
:type skip_disconnected: ``bool``
"""
disconnected = []
f.write("digraph dependencies {\n")
for dist, adjs in self.adjacency_list.items():
if len(adjs) == 0 and not skip_disconnected:
disconnected.append(dist)
for other, label in adjs:
                if label is not None:
f.write('"%s" -> "%s" [label="%s"]\n' %
(dist.name, other.name, label))
else:
f.write('"%s" -> "%s"\n' % (dist.name, other.name))
if not skip_disconnected and len(disconnected) > 0:
f.write('subgraph disconnected {\n')
f.write('label = "Disconnected"\n')
f.write('bgcolor = red\n')
for dist in disconnected:
f.write('"%s"' % dist.name)
f.write('\n')
f.write('}\n')
f.write('}\n')
def topological_sort(self):
"""
Perform a topological sort of the graph.
:return: A tuple, the first element of which is a topologically sorted
list of distributions, and the second element of which is a
list of distributions that cannot be sorted because they have
circular dependencies and so form a cycle.
"""
result = []
# Make a shallow copy of the adjacency list
alist = {}
for k, v in self.adjacency_list.items():
alist[k] = v[:]
while True:
# See what we can remove in this run
to_remove = []
for k, v in list(alist.items())[:]:
if not v:
to_remove.append(k)
del alist[k]
if not to_remove:
# What's left in alist (if anything) is a cycle.
break
# Remove from the adjacency list of others
for k, v in alist.items():
alist[k] = [(d, r) for d, r in v if d not in to_remove]
logger.debug('Moving to result: %s',
['%s (%s)' % (d.name, d.version) for d in to_remove])
result.extend(to_remove)
return result, list(alist.keys())
def __repr__(self):
"""Representation of the graph"""
output = []
for dist, adjs in self.adjacency_list.items():
output.append(self.repr_node(dist))
return '\n'.join(output)
def make_graph(dists, scheme='default'):
"""Makes a dependency graph from the given distributions.
:parameter dists: a list of distributions
:type dists: list of :class:`distutils2.database.InstalledDistribution` and
:class:`distutils2.database.EggInfoDistribution` instances
:rtype: a :class:`DependencyGraph` instance
"""
scheme = get_scheme(scheme)
graph = DependencyGraph()
provided = {} # maps names to lists of (version, dist) tuples
# first, build the graph and find out what's provided
for dist in dists:
graph.add_distribution(dist)
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
provided.setdefault(name, []).append((version, dist))
# now make the edges
for dist in dists:
requires = (dist.run_requires | dist.meta_requires |
dist.build_requires | dist.dev_requires)
for req in requires:
try:
matcher = scheme.matcher(req)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
matched = False
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
graph.add_edge(dist, provider, req)
matched = True
break
if not matched:
graph.add_missing(dist, req)
return graph
def get_dependent_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
dependent on *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
dep = [dist] # dependent distributions
todo = graph.reverse_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()
dep.append(d)
for succ in graph.reverse_list[d]:
if succ not in dep:
todo.append(succ)
dep.pop(0) # remove dist from dep, was there to prevent infinite loops
return dep
def get_required_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
required by *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
req = [] # required distributions
todo = graph.adjacency_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()[0]
req.append(d)
for pred in graph.adjacency_list[d]:
if pred not in req:
todo.append(pred)
return req
def make_dist(name, version, **kwargs):
"""
A convenience method for making a dist given just a name and version.
"""
summary = kwargs.pop('summary', 'Placeholder for summary')
md = Metadata(**kwargs)
md.name = name
md.version = version
md.summary = summary or 'Placeholder for summary'
return Distribution(md)
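# Illustrative sketch (hypothetical project names): building a dependency
# graph from placeholder distributions, sorting it topologically and asking
# which distributions another one requires.
def _example_dependency_graph():
    app = make_dist('app', '1.0')
    lib = make_dist('lib', '2.0')
    app.metadata.add_requirements(['lib (>= 1.0)'])
    graph = make_graph([app, lib])
    ordered, cyclic = graph.topological_sort()      # expected: [lib, app], []
    required = get_required_dists([app, lib], app)  # expected: [lib]
    return ordered, cyclic, required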
|
ycaihua/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Tools/pynche/Switchboard.py
|
116
|
"""Switchboard class.
This class is used to coordinate updates among all Viewers. Every Viewer must
conform to the following interface:
- it must include a method called update_yourself() which takes three
  arguments: the red, green, and blue values of the selected color.
- When a Viewer selects a color and wishes to update all other Views, it
should call update_views() on the Switchboard object. Note that the
Viewer typically does *not* update itself before calling update_views(),
since this would cause it to get updated twice.
Optionally, Viewers can also implement:
- save_options() which takes an optiondb (a dictionary). Store into this
dictionary any values the Viewer wants to save in the persistent
~/.pynche file. This dictionary is saved using marshal. The namespace
for the keys is ad-hoc; make sure you don't clobber some other Viewer's
keys!
- withdraw() which takes no arguments. This is called when Pynche is
unmapped. All Viewers should implement this.
- colordb_changed() which takes a single argument, an instance of
ColorDB. This is called whenever the color name database is changed and
gives a chance for the Viewers to do something on those events. See
ListViewer for details.
External Viewers are found dynamically. Viewer modules should have names such
as FooViewer.py. If such a named module has a module global variable called
ADDTOVIEW and this variable is true, the Viewer will be added dynamically to
the `View' menu. ADDTOVIEW contains a string which is used as the menu item
to display the Viewer (one kludge: if the string contains a `%', this is used
to indicate that the next character will get an underline in the menu,
otherwise the first character is underlined).
FooViewer.py should contain a class called FooViewer, and its constructor
should take two arguments, an instance of Switchboard, and optionally a Tk
master window.
"""
import sys
import marshal
class Switchboard:
def __init__(self, initfile):
self.__initfile = initfile
self.__colordb = None
self.__optiondb = {}
self.__views = []
self.__red = 0
self.__green = 0
self.__blue = 0
self.__canceled = 0
# read the initialization file
fp = None
if initfile:
try:
try:
fp = open(initfile, 'rb')
self.__optiondb = marshal.load(fp)
if not isinstance(self.__optiondb, dict):
print('Problem reading options from file:', initfile,
file=sys.stderr)
self.__optiondb = {}
except (IOError, EOFError, ValueError):
pass
finally:
if fp:
fp.close()
def add_view(self, view):
self.__views.append(view)
def update_views(self, red, green, blue):
self.__red = red
self.__green = green
self.__blue = blue
for v in self.__views:
v.update_yourself(red, green, blue)
def update_views_current(self):
self.update_views(self.__red, self.__green, self.__blue)
def current_rgb(self):
return self.__red, self.__green, self.__blue
def colordb(self):
return self.__colordb
def set_colordb(self, colordb):
self.__colordb = colordb
for v in self.__views:
if hasattr(v, 'colordb_changed'):
v.colordb_changed(colordb)
self.update_views_current()
def optiondb(self):
return self.__optiondb
def save_views(self):
# save the current color
self.__optiondb['RED'] = self.__red
self.__optiondb['GREEN'] = self.__green
self.__optiondb['BLUE'] = self.__blue
for v in self.__views:
if hasattr(v, 'save_options'):
v.save_options(self.__optiondb)
# save the name of the file used for the color database. we'll try to
# load this first.
self.__optiondb['DBFILE'] = self.__colordb.filename()
fp = None
try:
try:
fp = open(self.__initfile, 'wb')
except IOError:
print('Cannot write options to file:', \
self.__initfile, file=sys.stderr)
else:
marshal.dump(self.__optiondb, fp)
finally:
if fp:
fp.close()
def withdraw_views(self):
for v in self.__views:
if hasattr(v, 'withdraw'):
v.withdraw()
def canceled(self, flag=1):
self.__canceled = flag
def canceled_p(self):
return self.__canceled
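# Illustrative sketch (not part of Pynche itself): the smallest object that
# satisfies the Viewer interface described in the module docstring, wired to
# a Switchboard created without a persistent init file.
class _EchoViewer:
    def __init__(self, switchboard):
        self.__switchboard = switchboard
        self.last_color = None
    def update_yourself(self, red, green, blue):
        # called by Switchboard.update_views() with the selected color
        self.last_color = (red, green, blue)
    def save_options(self, optiondb):
        optiondb['ECHOVIEWER_LAST'] = self.last_color
    def withdraw(self):
        pass
def _example_switchboard_usage():
    switchboard = Switchboard(None)      # no ~/.pynche option file is read
    viewer = _EchoViewer(switchboard)
    switchboard.add_view(viewer)
    switchboard.update_views(0x33, 0x66, 0x99)
    return viewer.last_color             # (0x33, 0x66, 0x99)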
|
colinnewell/odoo
|
refs/heads/8.0
|
openerp/osv/__init__.py
|
337
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import osv
import fields
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
valentin-krasontovitsch/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/avi/avi_serviceenginegroup.py
|
29
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_serviceenginegroup
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of ServiceEngineGroup Avi RESTful Object
description:
- This module is used to configure ServiceEngineGroup object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
active_standby:
description:
- Service engines in active/standby mode for ha failover.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
advertise_backend_networks:
description:
- Advertise reach-ability of backend server networks via adc through bgp for default gateway feature.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
aggressive_failure_detection:
description:
- Enable aggressive failover configuration for ha.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
algo:
description:
- In compact placement, virtual services are placed on existing ses until max_vs_per_se limit is reached.
- Enum options - PLACEMENT_ALGO_PACKED, PLACEMENT_ALGO_DISTRIBUTED.
- Default value when not specified in API or module is interpreted by Avi Controller as PLACEMENT_ALGO_PACKED.
allow_burst:
description:
- Allow ses to be created using burst license.
- Field introduced in 17.2.5.
version_added: "2.5"
type: bool
archive_shm_limit:
description:
- Amount of se memory in gb until which shared memory is collected in core archive.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as 8.
- Units(GB).
async_ssl:
description:
- Ssl handshakes will be handled by dedicated ssl threads.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.4"
type: bool
async_ssl_threads:
description:
- Number of async ssl threads per se_dp.
- Allowed values are 1-16.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
version_added: "2.4"
auto_rebalance:
description:
- If set, virtual services will be automatically migrated when load on an se is less than minimum or more than maximum thresholds.
- Only alerts are generated when the auto_rebalance is not set.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
auto_rebalance_capacity_per_se:
description:
- Capacities of se for auto rebalance for each criteria.
- Field introduced in 17.2.4.
version_added: "2.5"
auto_rebalance_criteria:
description:
- Set of criteria for se auto rebalance.
- Enum options - SE_AUTO_REBALANCE_CPU, SE_AUTO_REBALANCE_PPS, SE_AUTO_REBALANCE_MBPS, SE_AUTO_REBALANCE_OPEN_CONNS, SE_AUTO_REBALANCE_CPS.
- Field introduced in 17.2.3.
version_added: "2.5"
auto_rebalance_interval:
description:
- Frequency of rebalance, if 'auto rebalance' is enabled.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
- Units(SEC).
auto_redistribute_active_standby_load:
description:
- Redistribution of virtual services from the takeover se to the replacement se can cause momentary traffic loss.
- If the auto-redistribute load option is left in its default off state, any desired rebalancing requires calls to rest api.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
buffer_se:
description:
- Excess service engine capacity provisioned for ha failover.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
cloud_ref:
description:
- It is a reference to an object of type cloud.
connection_memory_percentage:
description:
- Percentage of memory for connection state.
- This will come at the expense of memory used for http in-memory cache.
- Allowed values are 10-90.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
- Units(PERCENT).
cpu_reserve:
description:
- Boolean flag to set cpu_reserve.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
cpu_socket_affinity:
description:
- Allocate all the cpu cores for the service engine virtual machines on the same cpu socket.
- Applicable only for vcenter cloud.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
custom_securitygroups_data:
description:
- Custom security groups to be associated with data vnics for se instances in openstack and aws clouds.
- Field introduced in 17.1.3.
custom_securitygroups_mgmt:
description:
- Custom security groups to be associated with management vnic for se instances in openstack and aws clouds.
- Field introduced in 17.1.3.
custom_tag:
description:
- Custom tag will be used to create the tags for se instance in aws.
- Note this is not the same as the prefix for se name.
dedicated_dispatcher_core:
description:
- Dedicate the core that handles packet receive/transmit from the network to just the dispatching function.
- Don't use it for tcp/ip and ssl functions.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
description:
description:
- User defined description for the object.
disable_csum_offloads:
description:
- Stop using tcp/udp and ip checksum offload features of nics.
- Field introduced in 17.1.14, 17.2.5.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.5"
type: bool
disable_gro:
description:
- Disable generic receive offload (gro) in dpdk poll-mode driver packet receive path.
- Gro is on by default on nics that do not support lro (large receive offload) or do not gain performance boost from lro.
- Field introduced in 17.2.5.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
version_added: "2.5"
type: bool
disable_tso:
description:
- Disable tcp segmentation offload (tso) in dpdk poll-mode driver packet transmit path.
- Tso is on by default on nics that support it.
- Field introduced in 17.2.5.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
version_added: "2.5"
type: bool
disk_per_se:
description:
- Amount of disk space for each of the service engine virtual machines.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
- Units(GB).
distribute_load_active_standby:
description:
- Use both the active and standby service engines for virtual service placement in the legacy active standby ha mode.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
enable_hsm_priming:
description:
- (this is a beta feature).
- Enable hsm key priming.
- If enabled, key handles on the hsm will be synced to se before processing client connections.
- Field introduced in 17.2.7.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.6"
type: bool
enable_routing:
description:
- Enable routing for this serviceenginegroup.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
enable_vip_on_all_interfaces:
description:
- Enable vip on all interfaces of se.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
enable_vmac:
description:
- Use virtual mac address for interfaces on which floating interface ips are placed.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
extra_config_multiplier:
description:
- Multiplier for extra config to support large vs/pool config.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.0.
extra_shared_config_memory:
description:
- Extra config memory to support large geo db configuration.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
- Units(MB).
floating_intf_ip:
description:
- If serviceenginegroup is configured for legacy 1+1 active standby ha mode, floating ips will be advertised only by the active se in the pair.
- Virtual services in this group must be disabled/enabled for any changes to the floating ips to take effect.
- Only the active se hosting the vs tagged with the active standby se 1 tag will advertise this floating ip when manual load distribution is enabled.
floating_intf_ip_se_2:
description:
- If serviceenginegroup is configured for legacy 1+1 active standby ha mode, floating ips will be advertised only by the active se in the pair.
- Virtual services in this group must be disabled/enabled for any changes to the floating ips to take effect.
- Only the active se hosting the vs tagged with the active standby se 2 tag will advertise this floating ip when manual load distribution is enabled.
flow_table_new_syn_max_entries:
description:
- Maximum number of flow table entries that have not completed tcp three-way handshake yet.
- Field introduced in 17.2.5.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
version_added: "2.5"
ha_mode:
description:
- High availability mode for all the virtual services using this service engine group.
- Enum options - HA_MODE_SHARED_PAIR, HA_MODE_SHARED, HA_MODE_LEGACY_ACTIVE_STANDBY.
- Default value when not specified in API or module is interpreted by Avi Controller as HA_MODE_SHARED.
hardwaresecuritymodulegroup_ref:
description:
- It is a reference to an object of type hardwaresecuritymodulegroup.
hm_on_standby:
description:
- Enable active health monitoring from the standby se for all placed virtual services.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
host_attribute_key:
description:
- Key of a (key, value) pair identifying a label for a set of nodes usually in container clouds.
- Needs to be specified together with host_attribute_value.
- Ses can be configured differently including ha modes across different se groups.
- May also be used for isolation between different classes of virtualservices.
- Virtualservices' se group may be specified via annotations/labels.
- An openshift/kubernetes namespace may be annotated with a matching se group label as openshift.io/node-selector apptype=prod.
- When multiple se groups are used in a cloud with host attributes specified, just a single se group can exist as a match-all se group without a host_attribute_key.
host_attribute_value:
description:
- Value of a (key, value) pair identifying a label for a set of nodes usually in container clouds.
- Needs to be specified together with host_attribute_key.
host_gateway_monitor:
description:
- Enable the host gateway monitor when service engine is deployed as docker container.
- Disabled by default.
- Field introduced in 17.2.4.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.5"
type: bool
hypervisor:
description:
- Override default hypervisor.
- Enum options - DEFAULT, VMWARE_ESX, KVM, VMWARE_VSAN, XEN.
ignore_rtt_threshold:
description:
- Ignore rtt samples if it is above threshold.
- Field introduced in 17.1.6,17.2.2.
- Default value when not specified in API or module is interpreted by Avi Controller as 5000.
- Units(MILLISECONDS).
version_added: "2.5"
ingress_access_data:
description:
- Program se security group ingress rules to allow vip data access from remote cidr type.
- Enum options - SG_INGRESS_ACCESS_NONE, SG_INGRESS_ACCESS_ALL, SG_INGRESS_ACCESS_VPC.
- Field introduced in 17.1.5.
- Default value when not specified in API or module is interpreted by Avi Controller as SG_INGRESS_ACCESS_ALL.
version_added: "2.5"
ingress_access_mgmt:
description:
- Program se security group ingress rules to allow ssh/icmp management access from remote cidr type.
- Enum options - SG_INGRESS_ACCESS_NONE, SG_INGRESS_ACCESS_ALL, SG_INGRESS_ACCESS_VPC.
- Field introduced in 17.1.5.
- Default value when not specified in API or module is interpreted by Avi Controller as SG_INGRESS_ACCESS_ALL.
version_added: "2.5"
instance_flavor:
description:
- Instance/flavor type for se instance.
iptables:
description:
- Iptable rules.
least_load_core_selection:
description:
- Select core with least load for new flow.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
license_tier:
description:
- Specifies the license tier which would be used.
- This field by default inherits the value from cloud.
- Enum options - ENTERPRISE_16, ENTERPRISE_18.
- Field introduced in 17.2.5.
version_added: "2.5"
license_type:
description:
- If no license type is specified then default license enforcement for the cloud type is chosen.
- Enum options - LIC_BACKEND_SERVERS, LIC_SOCKETS, LIC_CORES, LIC_HOSTS, LIC_SE_BANDWIDTH.
- Field introduced in 17.2.5.
version_added: "2.5"
log_disksz:
description:
- Maximum disk capacity (in mb) to be allocated to an se.
- This is exclusively used for debug and log data.
- Default value when not specified in API or module is interpreted by Avi Controller as 10000.
- Units(MB).
max_cpu_usage:
description:
- When cpu usage on an se exceeds this threshold, virtual services hosted on this se may be rebalanced to other ses to reduce load.
- A new se may be created as part of this process.
- Allowed values are 40-90.
- Default value when not specified in API or module is interpreted by Avi Controller as 80.
- Units(PERCENT).
max_scaleout_per_vs:
description:
- Maximum number of active service engines for the virtual service.
- Allowed values are 1-64.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.
max_se:
description:
- Maximum number of services engines in this group.
- Allowed values are 0-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
max_vs_per_se:
description:
- Maximum number of virtual services that can be placed on a single service engine.
- East west virtual services are excluded from this limit.
- Allowed values are 1-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
mem_reserve:
description:
- Boolean flag to set mem_reserve.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
memory_per_se:
description:
- Amount of memory for each of the service engine virtual machines.
- Default value when not specified in API or module is interpreted by Avi Controller as 2048.
mgmt_network_ref:
description:
- Management network to use for avi service engines.
- It is a reference to an object of type network.
mgmt_subnet:
description:
- Management subnet to use for avi service engines.
min_cpu_usage:
description:
- When cpu usage on an se falls below the minimum threshold, virtual services hosted on the se may be consolidated onto other underutilized ses.
- After consolidation, unused service engines may then be eligible for deletion.
- Allowed values are 20-60.
- Default value when not specified in API or module is interpreted by Avi Controller as 30.
- Units(PERCENT).
min_scaleout_per_vs:
description:
- Minimum number of active service engines for the virtual service.
- Allowed values are 1-64.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
name:
description:
- Name of the object.
required: true
non_significant_log_throttle:
description:
- This setting limits the number of non-significant logs generated per second per core on this se.
- Default is 100 logs per second.
- Set it to zero (0) to disable throttling.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as 100.
- Units(PER_SECOND).
num_flow_cores_sum_changes_to_ignore:
description:
- Number of changes in num flow cores sum to ignore.
- Default value when not specified in API or module is interpreted by Avi Controller as 8.
openstack_availability_zone:
description:
- Field deprecated in 17.1.1.
openstack_availability_zones:
description:
- Field introduced in 17.1.1.
openstack_mgmt_network_name:
description:
- Avi management network name.
openstack_mgmt_network_uuid:
description:
- Management network uuid.
os_reserved_memory:
description:
- Amount of extra memory to be reserved for use by the operating system on a service engine.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
per_app:
description:
- Per-app se mode is designed for deploying dedicated load balancers per app (vs).
- In this mode, each se is limited to a max of 2 vss.
- Vcpus in per-app ses count towards licensing usage at a 25% rate.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
placement_mode:
description:
- If placement mode is 'auto', virtual services are automatically placed on service engines.
- Enum options - PLACEMENT_MODE_AUTO.
- Default value when not specified in API or module is interpreted by Avi Controller as PLACEMENT_MODE_AUTO.
realtime_se_metrics:
description:
- Enable or disable real time se metrics.
se_bandwidth_type:
description:
- Select the se bandwidth for the bandwidth license.
- Enum options - SE_BANDWIDTH_UNLIMITED, SE_BANDWIDTH_25M, SE_BANDWIDTH_200M, SE_BANDWIDTH_1000M, SE_BANDWIDTH_10000M.
- Field introduced in 17.2.5.
version_added: "2.5"
se_deprovision_delay:
description:
- Duration to preserve unused service engine virtual machines before deleting them.
- If traffic to a virtual service were to spike up abruptly, this se would still be available to be utilized again rather than creating a new se.
- If this value is set to 0, the controller will never delete any ses and the administrator has to manually clean up unused ses.
- Allowed values are 0-525600.
- Default value when not specified in API or module is interpreted by Avi Controller as 120.
- Units(MIN).
se_dos_profile:
description:
- Dosthresholdprofile settings for serviceenginegroup.
se_ipc_udp_port:
description:
- Udp port for se_dp ipc in docker bridge mode.
- Field introduced in 17.1.2.
- Default value when not specified in API or module is interpreted by Avi Controller as 1500.
version_added: "2.4"
se_name_prefix:
description:
- Prefix to use for virtual machine name of service engines.
- Default value when not specified in API or module is interpreted by Avi Controller as Avi.
se_probe_port:
description:
- Tcp port on se where echo service will be run.
- Field introduced in 17.2.2.
- Default value when not specified in API or module is interpreted by Avi Controller as 7.
version_added: "2.5"
se_remote_punt_udp_port:
description:
- Udp port for punted packets in docker bridge mode.
- Field introduced in 17.1.2.
- Default value when not specified in API or module is interpreted by Avi Controller as 1501.
version_added: "2.4"
se_sb_dedicated_core:
description:
- Sideband traffic will be handled by a dedicated core.
- Field introduced in 16.5.2, 17.1.9, 17.2.3.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.5"
type: bool
se_sb_threads:
description:
- Number of sideband threads per se.
- Allowed values are 1-128.
- Field introduced in 16.5.2, 17.1.9, 17.2.3.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
version_added: "2.5"
se_thread_multiplier:
description:
- Multiplier for se threads based on vcpu.
- Allowed values are 1-10.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
se_tunnel_mode:
description:
- Determines if dsr from secondary se is active or not.
- 0 - automatically determine based on hypervisor type.
- 1 - disable dsr unconditionally.
- ~[0,1] - enable dsr unconditionally.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
se_tunnel_udp_port:
description:
- Udp port for tunneled packets from secondary to primary se in docker bridge mode.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as 1550.
se_udp_encap_ipc:
description:
- Determines if se-se ipc messages are encapsulated in a udp header.
- 0 - automatically determine based on hypervisor type.
- 1 - use udp encap unconditionally.
- ~[0,1] - don't use udp encap.
- Field introduced in 17.1.2.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
version_added: "2.4"
se_vs_hb_max_pkts_in_batch:
description:
- Maximum number of aggregated vs heartbeat packets to send in a batch.
- Allowed values are 1-256.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 8.
se_vs_hb_max_vs_in_pkt:
description:
- Maximum number of virtualservices for which heartbeat messages are aggregated in one packet.
- Allowed values are 1-1024.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 256.
service_ip_subnets:
description:
- Subnets assigned to the se group.
- Required for vs group placement.
- Field introduced in 17.1.1.
significant_log_throttle:
description:
- This setting limits the number of significant logs generated per second per core on this se.
- Default is 100 logs per second.
- Set it to zero (0) to disable throttling.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as 100.
- Units(PER_SECOND).
tenant_ref:
description:
- It is a reference to an object of type tenant.
udf_log_throttle:
description:
- This setting limits the number of udf logs generated per second per core on this se.
- Udf logs are generated due to the configured client log filters or the rules with logging enabled.
- Default is 100 logs per second.
- Set it to zero (0) to disable throttling.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as 100.
- Units(PER_SECOND).
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
vcenter_clusters:
description:
- Vcenterclusters settings for serviceenginegroup.
vcenter_datastore_mode:
description:
- Enum options - VCENTER_DATASTORE_ANY, VCENTER_DATASTORE_LOCAL, VCENTER_DATASTORE_SHARED.
- Default value when not specified in API or module is interpreted by Avi Controller as VCENTER_DATASTORE_ANY.
vcenter_datastores:
description:
- List of vcenterdatastore.
vcenter_datastores_include:
description:
- Boolean flag to set vcenter_datastores_include.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
vcenter_folder:
description:
- Folder to place all the service engine virtual machines in vcenter.
- Default value when not specified in API or module is interpreted by Avi Controller as AviSeFolder.
vcenter_hosts:
description:
- Vcenterhosts settings for serviceenginegroup.
vcpus_per_se:
description:
- Number of vcpus for each of the service engine virtual machines.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
vs_host_redundancy:
description:
- Ensure primary and secondary service engines are deployed on different physical hosts.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
vs_scalein_timeout:
description:
- Time to wait for the scaled in se to drain existing flows before marking the scalein done.
- Default value when not specified in API or module is interpreted by Avi Controller as 30.
- Units(SEC).
vs_scalein_timeout_for_upgrade:
description:
- During se upgrade, time to wait for the scaled-in se to drain existing flows before marking the scalein done.
- Default value when not specified in API or module is interpreted by Avi Controller as 30.
- Units(SEC).
vs_scaleout_timeout:
description:
- Time to wait for the scaled out se to become ready before marking the scaleout done.
- Default value when not specified in API or module is interpreted by Avi Controller as 30.
- Units(SEC).
vss_placement:
description:
- If set, virtual services will be placed on only a subset of the cores of an se.
- Field introduced in 17.2.5.
version_added: "2.5"
waf_mempool:
description:
- Enable memory pool for waf.
- Field introduced in 17.2.3.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
version_added: "2.5"
type: bool
waf_mempool_size:
description:
- Memory pool size used for waf.
- Field introduced in 17.2.3.
- Default value when not specified in API or module is interpreted by Avi Controller as 64.
- Units(KB).
version_added: "2.5"
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create ServiceEngineGroup object
avi_serviceenginegroup:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_serviceenginegroup
"""
RETURN = '''
obj:
description: ServiceEngineGroup (api/serviceenginegroup) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
active_standby=dict(type='bool',),
advertise_backend_networks=dict(type='bool',),
aggressive_failure_detection=dict(type='bool',),
algo=dict(type='str',),
allow_burst=dict(type='bool',),
archive_shm_limit=dict(type='int',),
async_ssl=dict(type='bool',),
async_ssl_threads=dict(type='int',),
auto_rebalance=dict(type='bool',),
auto_rebalance_capacity_per_se=dict(type='list',),
auto_rebalance_criteria=dict(type='list',),
auto_rebalance_interval=dict(type='int',),
auto_redistribute_active_standby_load=dict(type='bool',),
buffer_se=dict(type='int',),
cloud_ref=dict(type='str',),
connection_memory_percentage=dict(type='int',),
cpu_reserve=dict(type='bool',),
cpu_socket_affinity=dict(type='bool',),
custom_securitygroups_data=dict(type='list',),
custom_securitygroups_mgmt=dict(type='list',),
custom_tag=dict(type='list',),
dedicated_dispatcher_core=dict(type='bool',),
description=dict(type='str',),
disable_csum_offloads=dict(type='bool',),
disable_gro=dict(type='bool',),
disable_tso=dict(type='bool',),
disk_per_se=dict(type='int',),
distribute_load_active_standby=dict(type='bool',),
enable_hsm_priming=dict(type='bool',),
enable_routing=dict(type='bool',),
enable_vip_on_all_interfaces=dict(type='bool',),
enable_vmac=dict(type='bool',),
extra_config_multiplier=dict(type='float',),
extra_shared_config_memory=dict(type='int',),
floating_intf_ip=dict(type='list',),
floating_intf_ip_se_2=dict(type='list',),
flow_table_new_syn_max_entries=dict(type='int',),
ha_mode=dict(type='str',),
hardwaresecuritymodulegroup_ref=dict(type='str',),
hm_on_standby=dict(type='bool',),
host_attribute_key=dict(type='str',),
host_attribute_value=dict(type='str',),
host_gateway_monitor=dict(type='bool',),
hypervisor=dict(type='str',),
ignore_rtt_threshold=dict(type='int',),
ingress_access_data=dict(type='str',),
ingress_access_mgmt=dict(type='str',),
instance_flavor=dict(type='str',),
iptables=dict(type='list',),
least_load_core_selection=dict(type='bool',),
license_tier=dict(type='str',),
license_type=dict(type='str',),
log_disksz=dict(type='int',),
max_cpu_usage=dict(type='int',),
max_scaleout_per_vs=dict(type='int',),
max_se=dict(type='int',),
max_vs_per_se=dict(type='int',),
mem_reserve=dict(type='bool',),
memory_per_se=dict(type='int',),
mgmt_network_ref=dict(type='str',),
mgmt_subnet=dict(type='dict',),
min_cpu_usage=dict(type='int',),
min_scaleout_per_vs=dict(type='int',),
name=dict(type='str', required=True),
non_significant_log_throttle=dict(type='int',),
num_flow_cores_sum_changes_to_ignore=dict(type='int',),
openstack_availability_zone=dict(type='str',),
openstack_availability_zones=dict(type='list',),
openstack_mgmt_network_name=dict(type='str',),
openstack_mgmt_network_uuid=dict(type='str',),
os_reserved_memory=dict(type='int',),
per_app=dict(type='bool',),
placement_mode=dict(type='str',),
realtime_se_metrics=dict(type='dict',),
se_bandwidth_type=dict(type='str',),
se_deprovision_delay=dict(type='int',),
se_dos_profile=dict(type='dict',),
se_ipc_udp_port=dict(type='int',),
se_name_prefix=dict(type='str',),
se_probe_port=dict(type='int',),
se_remote_punt_udp_port=dict(type='int',),
se_sb_dedicated_core=dict(type='bool',),
se_sb_threads=dict(type='int',),
se_thread_multiplier=dict(type='int',),
se_tunnel_mode=dict(type='int',),
se_tunnel_udp_port=dict(type='int',),
se_udp_encap_ipc=dict(type='int',),
se_vs_hb_max_pkts_in_batch=dict(type='int',),
se_vs_hb_max_vs_in_pkt=dict(type='int',),
service_ip_subnets=dict(type='list',),
significant_log_throttle=dict(type='int',),
tenant_ref=dict(type='str',),
udf_log_throttle=dict(type='int',),
url=dict(type='str',),
uuid=dict(type='str',),
vcenter_clusters=dict(type='dict',),
vcenter_datastore_mode=dict(type='str',),
vcenter_datastores=dict(type='list',),
vcenter_datastores_include=dict(type='bool',),
vcenter_folder=dict(type='str',),
vcenter_hosts=dict(type='dict',),
vcpus_per_se=dict(type='int',),
vs_host_redundancy=dict(type='bool',),
vs_scalein_timeout=dict(type='int',),
vs_scalein_timeout_for_upgrade=dict(type='int',),
vs_scaleout_timeout=dict(type='int',),
vss_placement=dict(type='dict',),
waf_mempool=dict(type='bool',),
waf_mempool_size=dict(type='int',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'serviceenginegroup',
set([]))
if __name__ == '__main__':
main()
|
bmya/addons-yelizariev
|
refs/heads/8.0
|
product_custom/__init__.py
|
2148
|
import models
|
pgiri/asyncoro
|
refs/heads/master
|
examples/tut_coros.py
|
1
|
#!/usr/bin/env python
# program for creating coroutines (asynchronous concurrent
# programming); see http://asyncoro.sourceforge.net/tutorial.html for
# details.
import sys, random, time
import asyncoro
def coro_proc(n, coro=None):
s = random.uniform(0.5, 3)
print('%f: coroutine %d sleeping for %f seconds' % (time.time(), n, s))
yield coro.sleep(s)
print('%f: coroutine %d terminating' % (time.time(), n))
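# Note on the yield above: coro.sleep() suspends only that coroutine and hands
# control back to the asyncoro scheduler so the other coroutines keep running;
# a blocking time.sleep() call here would stall the scheduler thread instead.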
# create 10 clients
for i in range(10):
asyncoro.Coro(coro_proc, i)
|
vbelakov/h2o
|
refs/heads/master
|
py/testdir_multi_jvm/test_exec2_sum_cols.py
|
9
|
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_util, h2o_exec
zeroList = [
'Result0 = 0',
]
# the first column should use this
exprList = [
'Result<n> = sum(<keyX>[,<col1>])',
]
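# <n>, <keyX> and <col1> appear to be template placeholders that
# h2o_exec.exec_expr_list_across_cols() substitutes with the result index, the
# parsed dataset key and the current column number as it walks columns up to maxCol.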
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(2)
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
@classmethod
def tearDownClass(cls):
# wait while I inspect things
# time.sleep(1500)
h2o.tear_down_cloud()
def test_exec2_sum(self):
print "Replicating covtype.data by 2x for results comparison to 1x"
filename1x = 'covtype.data'
pathname1x = h2i.find_folder_and_filename('home-0xdiag-datasets', 'standard/covtype.data', returnFullPath=True)
filename2x = "covtype_2x.data"
pathname2x = SYNDATASETS_DIR + '/' + filename2x
h2o_util.file_cat(pathname1x, pathname1x, pathname2x)
csvAll = [
(pathname1x, "cA", 5, 1),
(pathname2x, "cB", 5, 2),
(pathname2x, "cC", 5, 2),
]
h2b.browseTheCloud()
lenNodes = len(h2o.nodes)
firstDone = False
for (csvPathname, hex_key, timeoutSecs, resultMult) in csvAll:
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=2000)
print "Parse result['Key']:", parseResult['destination_key']
# We should be able to see the parse result?
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvPathname
h2o_exec.exec_zero_list(zeroList)
colResultList = h2o_exec.exec_expr_list_across_cols(lenNodes, exprList, hex_key, maxCol=54,
timeoutSecs=timeoutSecs)
print "\ncolResultList", colResultList
if not firstDone:
colResultList0 = list(colResultList)
good = [float(x) for x in colResultList0]
firstDone = True
else:
print "\n", colResultList0, "\n", colResultList
# create the expected answer...i.e. N * first
compare = [float(x)/resultMult for x in colResultList]
print "\n", good, "\n", compare
self.assertEqual(good, compare, 'compare is not equal to good (first try * resultMult)')
if __name__ == '__main__':
h2o.unit_main()
|
kenshay/ImageScripter
|
refs/heads/master
|
ProgramData/Android/ADB/platform-tools/systrace/catapult/telemetry/telemetry/internal/platform/profiler/v8_profiler.py
|
7
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import tempfile
from telemetry.internal.platform import profiler
class V8Profiler(profiler.Profiler):
_V8_ARG = '--js-flags=--logfile=%s --prof --log-timer-events'
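  # %s is later filled with the profile log path; --prof turns on V8's sampling
  # CPU profiler and --log-timer-events records timer events into the same log,
  # which the tick-processor page referenced in CollectProfile() can consume.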
@classmethod
def name(cls):
return 'v8'
@classmethod
def is_supported(cls, browser_type):
return not browser_type.startswith('cros')
@classmethod
def CustomizeBrowserOptions(cls, browser_type, options):
if browser_type.startswith('android'):
dump_file = '/data/local/tmp/v8-profile.log'
else:
dump_file = tempfile.mkstemp()[1]
options.AppendExtraBrowserArgs([cls._V8_ARG % dump_file, '--no-sandbox'])
def CollectProfile(self):
# Find output filename from browser argument.
for i in self._browser_backend.browser_options.extra_browser_args:
match = re.match(self._V8_ARG % r'(\S+)', i)
if match:
output_file = match.groups(0)[0]
assert output_file
# On Android pull the output file to the host.
if self._platform_backend.GetOSName() == 'android':
host_output_file = '%s.log' % self._output_path
self._browser_backend.device.PullFile(output_file, host_output_file)
# Clean the device
self._browser_backend.device.RemovePath(output_file)
output_file = host_output_file
print 'V8 profile saved as %s' % output_file
print 'To view, open in ' \
'http://v8.googlecode.com/svn/trunk/tools/tick-processor.html'
return [output_file]
|
MountainWei/nova
|
refs/heads/master
|
nova/tests/unit/virt/xenapi/test_xenapi.py
|
13
|
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test suite for XenAPI."""
import ast
import base64
import contextlib
import copy
import functools
import os
import re
import uuid
import mock
from mox3 import mox
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
import six
import testtools
from nova.compute import api as compute_api
from nova.compute import arch
from nova.compute import flavors
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import context
from nova import crypto
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova import test
from nova.tests.unit.db import fakes as db_fakes
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
from nova.tests.unit import fake_processutils
import nova.tests.unit.image.fake as fake_image
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_aggregate
from nova.tests.unit import utils as test_utils
from nova.tests.unit.virt.xenapi import stubs
from nova.virt import fake
from nova.virt.xenapi import agent
from nova.virt.xenapi.client import session as xenapi_session
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import host
from nova.virt.xenapi.image import glance
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('network_manager', 'nova.service')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('default_availability_zone', 'nova.availability_zones')
CONF.import_opt('login_timeout', 'nova.virt.xenapi.client.session',
group="xenserver")
IMAGE_MACHINE = '1'
IMAGE_KERNEL = '2'
IMAGE_RAMDISK = '3'
IMAGE_RAW = '4'
IMAGE_VHD = '5'
IMAGE_ISO = '6'
IMAGE_IPXE_ISO = '7'
IMAGE_FROM_VOLUME = '8'
IMAGE_FIXTURES = {
IMAGE_MACHINE: {
'image_meta': {'name': 'fakemachine', 'size': 0,
'disk_format': 'ami',
'container_format': 'ami',
'id': 'fake-image'},
},
IMAGE_KERNEL: {
'image_meta': {'name': 'fakekernel', 'size': 0,
'disk_format': 'aki',
'container_format': 'aki',
'id': 'fake-kernel'},
},
IMAGE_RAMDISK: {
'image_meta': {'name': 'fakeramdisk', 'size': 0,
'disk_format': 'ari',
'container_format': 'ari',
'id': 'fake-ramdisk'},
},
IMAGE_RAW: {
'image_meta': {'name': 'fakeraw', 'size': 0,
'disk_format': 'raw',
'container_format': 'bare',
'id': 'fake-image-raw'},
},
IMAGE_VHD: {
'image_meta': {'name': 'fakevhd', 'size': 0,
'disk_format': 'vhd',
'container_format': 'ovf',
'id': 'fake-image-vhd'},
},
IMAGE_ISO: {
'image_meta': {'name': 'fakeiso', 'size': 0,
'disk_format': 'iso',
'container_format': 'bare',
'id': 'fake-image-iso'},
},
IMAGE_IPXE_ISO: {
'image_meta': {'name': 'fake_ipxe_iso', 'size': 0,
'disk_format': 'iso',
'container_format': 'bare',
'id': 'fake-image-pxe',
'properties': {'ipxe_boot': 'true'}},
},
IMAGE_FROM_VOLUME: {
'image_meta': {'name': 'fake_ipxe_iso',
'id': 'fake-image-volume',
'properties': {'foo': 'bar'}},
},
}
def get_session():
return xenapi_session.XenAPISession('test_url', 'root', 'test_pass')
def set_image_fixtures():
image_service = fake_image.FakeImageService()
image_service.images.clear()
for image_id, image_meta in IMAGE_FIXTURES.items():
image_meta = image_meta['image_meta']
image_meta['id'] = image_id
image_service.create(None, image_meta)
def get_fake_device_info():
# FIXME: 'sr_uuid', 'introduce_sr_keys', sr_type and vdi_uuid
# can be removed from the dict when LP bug #1087308 is fixed
fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None)
fake_vdi_uuid = xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid']
fake = {'block_device_mapping':
[{'connection_info': {'driver_volume_type': 'iscsi',
'data': {'sr_uuid': 'falseSR',
'introduce_sr_keys': ['sr_type'],
'sr_type': 'iscsi',
'vdi_uuid': fake_vdi_uuid,
'target_discovered': False,
'target_iqn': 'foo_iqn:foo_volid',
'target_portal': 'localhost:3260',
'volume_id': 'foo_volid',
'target_lun': 1,
'auth_password': 'my-p@55w0rd',
'auth_username': 'johndoe',
'auth_method': u'CHAP'}, },
'mount_device': 'vda',
'delete_on_termination': False}, ],
'root_device_name': '/dev/sda',
'ephemerals': [],
'swap': None, }
return fake
def stub_vm_utils_with_vdi_attached_here(function):
"""vm_utils.with_vdi_attached_here needs to be stubbed out because it
calls down to the filesystem to attach a vdi. This provides a
decorator to handle that.
"""
@functools.wraps(function)
def decorated_function(self, *args, **kwargs):
@contextlib.contextmanager
def fake_vdi_attached_here(*args, **kwargs):
fake_dev = 'fakedev'
yield fake_dev
def fake_image_download(*args, **kwargs):
pass
orig_vdi_attached_here = vm_utils.vdi_attached_here
orig_image_download = fake_image._FakeImageService.download
try:
vm_utils.vdi_attached_here = fake_vdi_attached_here
fake_image._FakeImageService.download = fake_image_download
return function(self, *args, **kwargs)
finally:
fake_image._FakeImageService.download = orig_image_download
vm_utils.vdi_attached_here = orig_vdi_attached_here
return decorated_function
def create_instance_with_system_metadata(context, instance_values):
inst = objects.Instance(context=context,
system_metadata={})
for k, v in instance_values.items():
setattr(inst, k, v)
inst.flavor = objects.Flavor.get_by_id(context,
instance_values['instance_type_id'])
inst.old_flavor = None
inst.new_flavor = None
inst.create()
inst.pci_devices = objects.PciDeviceList(objects=[])
return inst
class XenAPIVolumeTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for Volume operations."""
def setUp(self):
super(XenAPIVolumeTestCase, self).setUp()
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.instance = fake_instance.fake_db_instance(name='foo')
@classmethod
def _make_connection_info(cls):
target_iqn = 'iqn.2010-10.org.openstack:volume-00000001'
return {'driver_volume_type': 'iscsi',
'data': {'volume_id': 1,
'target_iqn': target_iqn,
'target_portal': '127.0.0.1:3260,fake',
'target_lun': None,
'auth_method': 'CHAP',
'auth_username': 'username',
'auth_password': 'password'}}
def test_attach_volume(self):
# This shows how to test Ops classes' methods.
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm = xenapi_fake.create_vm(self.instance['name'], 'Running')
conn_info = self._make_connection_info()
self.assertIsNone(
conn.attach_volume(None, conn_info, self.instance, '/dev/sdc'))
# check that the VM has a VBD attached to it
# Get XenAPI record for VBD
vbds = xenapi_fake.get_all('VBD')
vbd = xenapi_fake.get_record('VBD', vbds[0])
vm_ref = vbd['VM']
self.assertEqual(vm_ref, vm)
def test_attach_volume_raise_exception(self):
# This shows how to test when exceptions are raised.
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(self.instance['name'], 'Running')
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
None, {'driver_volume_type': 'nonexist'},
self.instance, '/dev/sdc')
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIVMTestCase(stubs.XenAPITestBase):
"""Unit tests for VM operations."""
def setUp(self):
super(XenAPIVMTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
self.network = importutils.import_object(CONF.network_manager)
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', 'fake_br1')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stub_out_vm_methods(self.stubs)
fake_processutils.stub_out_processutils_execute(self.stubs)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.conn._session.is_local_connection = False
fake_image.stub_out_image_service(self.stubs)
set_image_fixtures()
stubs.stubout_image_service_download(self.stubs)
stubs.stubout_stream_disk(self.stubs)
def fake_inject_instance_metadata(self, instance, vm):
pass
self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
fake_inject_instance_metadata)
def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
name_label = "fakenamelabel"
disk_type = "fakedisktype"
virtual_size = 777
return vm_utils.create_vdi(
session, sr_ref, instance, name_label, disk_type,
virtual_size)
self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)
def tearDown(self):
fake_image.FakeImageService_reset()
super(XenAPIVMTestCase, self).tearDown()
def test_init_host(self):
session = get_session()
vm = vm_utils._get_this_vm_ref(session)
# Local root disk
vdi0 = xenapi_fake.create_vdi('compute', None)
vbd0 = xenapi_fake.create_vbd(vm, vdi0)
# Instance VDI
vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
other_config={'nova_instance_uuid': 'aaaa'})
xenapi_fake.create_vbd(vm, vdi1)
# Only looks like instance VDI
vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
vbd2 = xenapi_fake.create_vbd(vm, vdi2)
self.conn.init_host(None)
self.assertEqual(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2]))
def test_instance_exists(self):
self.mox.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup(mox.IgnoreArg(), 'foo').AndReturn(True)
self.mox.ReplayAll()
self.stubs.Set(objects.Instance, 'name', 'foo')
instance = objects.Instance(uuid='fake-uuid')
self.assertTrue(self.conn.instance_exists(instance))
def test_instance_not_exists(self):
self.mox.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup(mox.IgnoreArg(), 'bar').AndReturn(None)
self.mox.ReplayAll()
self.stubs.Set(objects.Instance, 'name', 'bar')
instance = objects.Instance(uuid='fake-uuid')
self.assertFalse(self.conn.instance_exists(instance))
def test_list_instances_0(self):
instances = self.conn.list_instances()
self.assertEqual(instances, [])
def test_list_instance_uuids_0(self):
instance_uuids = self.conn.list_instance_uuids()
self.assertEqual(instance_uuids, [])
def test_list_instance_uuids(self):
uuids = []
for x in range(1, 4):
instance = self._create_instance()
uuids.append(instance['uuid'])
instance_uuids = self.conn.list_instance_uuids()
self.assertEqual(len(uuids), len(instance_uuids))
self.assertEqual(set(uuids), set(instance_uuids))
def test_get_rrd_server(self):
self.flags(connection_url='myscheme://myaddress/',
group='xenserver')
server_info = vm_utils._get_rrd_server()
self.assertEqual(server_info[0], 'myscheme')
self.assertEqual(server_info[1], 'myaddress')
expected_raw_diagnostics = {
'vbd_xvdb_write': '0.0',
'memory_target': '4294967296.0000',
'memory_internal_free': '1415564.0000',
'memory': '4294967296.0000',
'vbd_xvda_write': '0.0',
'cpu0': '0.0042',
'vif_0_tx': '287.4134',
'vbd_xvda_read': '0.0',
'vif_0_rx': '1816.0144',
'vif_2_rx': '0.0',
'vif_2_tx': '0.0',
'vbd_xvdb_read': '0.0',
'last_update': '1328795567',
}
def test_get_diagnostics(self):
def fake_get_rrd(host, vm_uuid):
path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(path, 'vm_rrd.xml')) as f:
return re.sub(r'\s', '', f.read())
self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
expected = self.expected_raw_diagnostics
instance = self._create_instance()
actual = self.conn.get_diagnostics(instance)
self.assertThat(actual, matchers.DictMatches(expected))
def test_get_instance_diagnostics(self):
def fake_get_rrd(host, vm_uuid):
path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(path, 'vm_rrd.xml')) as f:
return re.sub(r'\s', '', f.read())
self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
expected = {
'config_drive': False,
'state': 'running',
'driver': 'xenapi',
'version': '1.0',
'uptime': 0,
'hypervisor_os': None,
'cpu_details': [{'time': 0}, {'time': 0},
{'time': 0}, {'time': 0}],
'nic_details': [{'mac_address': '00:00:00:00:00:00',
'rx_drop': 0,
'rx_errors': 0,
'rx_octets': 0,
'rx_packets': 0,
'tx_drop': 0,
'tx_errors': 0,
'tx_octets': 0,
'tx_packets': 0}],
'disk_details': [{'errors_count': 0,
'id': '',
'read_bytes': 0,
'read_requests': 0,
'write_bytes': 0,
'write_requests': 0}],
'memory_details': {'maximum': 8192, 'used': 0}}
instance = self._create_instance()
actual = self.conn.get_instance_diagnostics(instance)
self.assertEqual(expected, actual.serialize())
def test_get_vnc_console(self):
instance = self._create_instance(obj=True)
session = get_session()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm_ref = vm_utils.lookup(session, instance['name'])
console = conn.get_vnc_console(self.context, instance)
# Note(sulo): We don't care about session id in test
# they will always differ so strip that out
actual_path = console.internal_access_path.split('&')[0]
expected_path = "/console?ref=%s" % str(vm_ref)
self.assertEqual(expected_path, actual_path)
def test_get_vnc_console_for_rescue(self):
instance = self._create_instance(obj=True)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
'Running')
# Set instance state to rescued
instance['vm_state'] = 'rescued'
console = conn.get_vnc_console(self.context, instance)
# Note(sulo): We don't care about session id in test
# they will always differ so strip that out
actual_path = console.internal_access_path.split('&')[0]
expected_path = "/console?ref=%s" % str(rescue_vm)
self.assertEqual(expected_path, actual_path)
def test_get_vnc_console_instance_not_ready(self):
instance = self._create_instance(obj=True, spawn=False)
instance.vm_state = 'building'
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InstanceNotFound,
conn.get_vnc_console, self.context, instance)
def test_get_vnc_console_rescue_not_ready(self):
instance = self._create_instance(obj=True, spawn=False)
instance.vm_state = 'rescued'
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InstanceNotReady,
conn.get_vnc_console, self.context, instance)
def test_instance_snapshot_fails_with_no_primary_vdi(self):
def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=False,
osvol=False):
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': 'fake',
'currently_attached': False}
vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd)
stubs.stubout_instance_snapshot(self.stubs)
# Stubbing out firewall driver as previous stub sets alters
# xml rpc result parsing
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
image_id = "my_snapshot_id"
self.assertRaises(exception.NovaException, self.conn.snapshot,
self.context, instance, image_id,
lambda *args, **kwargs: None)
def test_instance_snapshot(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
image_id = "my_snapshot_id"
stubs.stubout_instance_snapshot(self.stubs)
stubs.stubout_is_snapshot(self.stubs)
# Stubbing out firewall driver as previous stub sets alters
# xml rpc result parsing
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
self.fake_upload_called = False
def fake_image_upload(_self, ctx, session, inst, img_id, vdi_uuids):
self.fake_upload_called = True
self.assertEqual(ctx, self.context)
self.assertEqual(inst, instance)
self.assertIsInstance(vdi_uuids, list)
self.assertEqual(img_id, image_id)
self.stubs.Set(glance.GlanceStore, 'upload_image',
fake_image_upload)
self.conn.snapshot(self.context, instance, image_id,
func_call_matcher.call)
# Ensure VM was torn down
vm_labels = []
for vm_ref in xenapi_fake.get_all('VM'):
vm_rec = xenapi_fake.get_record('VM', vm_ref)
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
self.assertEqual(vm_labels, [instance['name']])
# Ensure VBDs were torn down
vbd_labels = []
for vbd_ref in xenapi_fake.get_all('VBD'):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
self.assertEqual(vbd_labels, [instance['name']])
# Ensure task states changed in correct order
self.assertIsNone(func_call_matcher.match())
# Ensure VDIs were torn down
for vdi_ref in xenapi_fake.get_all('VDI'):
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
name_label = vdi_rec["name_label"]
self.assertFalse(name_label.endswith('snapshot'))
self.assertTrue(self.fake_upload_called)
def create_vm_record(self, conn, os_type, name):
instances = conn.list_instances()
self.assertEqual(instances, [name])
# Get Nova record for VM
vm_info = conn.get_info({'name': name})
# Get XenAPI record for VM
vms = [rec for ref, rec
in six.iteritems(xenapi_fake.get_all_records('VM'))
if not rec['is_control_domain']]
vm = vms[0]
self.vm_info = vm_info
self.vm = vm
def check_vm_record(self, conn, instance_type_id, check_injection):
flavor = db.flavor_get(conn, instance_type_id)
mem_kib = long(flavor['memory_mb']) << 10
mem_bytes = str(mem_kib << 10)
vcpus = flavor['vcpus']
vcpu_weight = flavor['vcpu_weight']
self.assertEqual(self.vm_info.max_mem_kb, mem_kib)
self.assertEqual(self.vm_info.mem_kb, mem_kib)
self.assertEqual(self.vm['memory_static_max'], mem_bytes)
self.assertEqual(self.vm['memory_dynamic_max'], mem_bytes)
self.assertEqual(self.vm['memory_dynamic_min'], mem_bytes)
self.assertEqual(self.vm['VCPUs_max'], str(vcpus))
self.assertEqual(self.vm['VCPUs_at_startup'], str(vcpus))
if vcpu_weight is None:
self.assertEqual(self.vm['VCPUs_params'], {})
else:
self.assertEqual(self.vm['VCPUs_params'],
{'weight': str(vcpu_weight), 'cap': '0'})
# Check that the VM is running according to Nova
self.assertEqual(self.vm_info.state, power_state.RUNNING)
# Check that the VM is running according to XenAPI.
self.assertEqual(self.vm['power_state'], 'Running')
if check_injection:
xenstore_data = self.vm['xenstore_data']
self.assertNotIn('vm-data/hostname', xenstore_data)
key = 'vm-data/networking/DEADBEEF0001'
xenstore_value = xenstore_data[key]
tcpip_data = ast.literal_eval(xenstore_value)
self.assertJsonEqual({'broadcast': '192.168.1.255',
'dns': ['192.168.1.4', '192.168.1.3'],
'gateway': '192.168.1.1',
'gateway_v6': '2001:db8:0:1::1',
'ip6s': [{'enabled': '1',
'ip': '2001:db8:0:1:dcad:beff:feef:1',
'netmask': 64,
'gateway': '2001:db8:0:1::1'}],
'ips': [{'enabled': '1',
'ip': '192.168.1.100',
'netmask': '255.255.255.0',
'gateway': '192.168.1.1'},
{'enabled': '1',
'ip': '192.168.1.101',
'netmask': '255.255.255.0',
'gateway': '192.168.1.1'}],
'label': 'test1',
'mac': 'DE:AD:BE:EF:00:01'}, tcpip_data)
def check_vm_params_for_windows(self):
self.assertEqual(self.vm['platform']['nx'], 'true')
self.assertEqual(self.vm['HVM_boot_params'], {'order': 'dc'})
self.assertEqual(self.vm['HVM_boot_policy'], 'BIOS order')
# check that these are not set
self.assertEqual(self.vm['PV_args'], '')
self.assertEqual(self.vm['PV_bootloader'], '')
self.assertEqual(self.vm['PV_kernel'], '')
self.assertEqual(self.vm['PV_ramdisk'], '')
def check_vm_params_for_linux(self):
self.assertEqual(self.vm['platform']['nx'], 'false')
self.assertEqual(self.vm['PV_args'], '')
self.assertEqual(self.vm['PV_bootloader'], 'pygrub')
# check that these are not set
self.assertEqual(self.vm['PV_kernel'], '')
self.assertEqual(self.vm['PV_ramdisk'], '')
self.assertEqual(self.vm['HVM_boot_params'], {})
self.assertEqual(self.vm['HVM_boot_policy'], '')
def check_vm_params_for_linux_with_external_kernel(self):
self.assertEqual(self.vm['platform']['nx'], 'false')
self.assertEqual(self.vm['PV_args'], 'root=/dev/xvda1')
self.assertNotEqual(self.vm['PV_kernel'], '')
self.assertNotEqual(self.vm['PV_ramdisk'], '')
# check that these are not set
self.assertEqual(self.vm['HVM_boot_params'], {})
self.assertEqual(self.vm['HVM_boot_policy'], '')
def _list_vdis(self):
session = get_session()
return session.call_xenapi('VDI.get_all')
def _list_vms(self):
session = get_session()
return session.call_xenapi('VM.get_all')
def _check_vdis(self, start_list, end_list):
for vdi_ref in end_list:
if vdi_ref not in start_list:
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
# If the cache is turned on then the base disk will be
# there even after the cleanup
if 'other_config' in vdi_rec:
if 'image-id' not in vdi_rec['other_config']:
self.fail('Found unexpected VDI:%s' % vdi_ref)
else:
self.fail('Found unexpected VDI:%s' % vdi_ref)
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
hostname="test", architecture="x86-64", instance_id=1,
injected_files=None, check_injection=False,
create_record=True, empty_dns=False,
block_device_info=None,
key_data=None):
if injected_files is None:
injected_files = []
# Fake out inject_instance_metadata
def fake_inject_instance_metadata(self, instance, vm):
pass
self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
fake_inject_instance_metadata)
if create_record:
instance = objects.Instance(context=self.context)
instance.project_id = self.project_id
instance.user_id = self.user_id
instance.image_ref = image_ref
instance.kernel_id = kernel_id
instance.ramdisk_id = ramdisk_id
instance.root_gb = 20
instance.ephemeral_gb = 0
instance.instance_type_id = instance_type_id
instance.os_type = os_type
instance.hostname = hostname
instance.key_data = key_data
instance.architecture = architecture
instance.system_metadata = {}
flavor = objects.Flavor.get_by_id(self.context,
instance_type_id)
if instance_type_id == 5:
# NOTE(danms): xenapi test stubs have flavor 5 with no
# vcpu_weight
flavor.vcpu_weight = None
instance.flavor = flavor
instance.create()
else:
instance = objects.Instance.get_by_id(self.context, instance_id,
expected_attrs=['flavor'])
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
if empty_dns:
# NOTE(tr3buchet): this is a terrible way to do this...
network_info[0]['network']['subnets'][0]['dns'] = []
image_meta = IMAGE_FIXTURES[image_ref]["image_meta"]
self.conn.spawn(self.context, instance, image_meta, injected_files,
'herp', network_info, block_device_info)
self.create_vm_record(self.conn, os_type, instance['name'])
self.check_vm_record(self.conn, instance_type_id, check_injection)
self.assertEqual(instance['os_type'], os_type)
self.assertEqual(instance['architecture'], architecture)
def test_spawn_ipxe_iso_success(self):
self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
vm_utils.get_sr_path(mox.IgnoreArg()).AndReturn('/sr/path')
self.flags(ipxe_network_name='test1',
ipxe_boot_menu_url='http://boot.example.com',
ipxe_mkisofs_cmd='/root/mkisofs',
group='xenserver')
self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
self.conn._session.call_plugin_serialized(
'ipxe', 'inject', '/sr/path', mox.IgnoreArg(),
'http://boot.example.com', '192.168.1.100', '255.255.255.0',
'192.168.1.1', '192.168.1.3', '/root/mkisofs')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
def test_spawn_ipxe_iso_no_network_name(self):
self.flags(ipxe_network_name=None,
ipxe_boot_menu_url='http://boot.example.com',
group='xenserver')
# call_plugin_serialized shouldn't be called
self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
def test_spawn_ipxe_iso_no_boot_menu_url(self):
self.flags(ipxe_network_name='test1',
ipxe_boot_menu_url=None,
group='xenserver')
# call_plugin_serialized shouldn't be called
self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
def test_spawn_ipxe_iso_unknown_network_name(self):
self.flags(ipxe_network_name='test2',
ipxe_boot_menu_url='http://boot.example.com',
group='xenserver')
# call_plugin_serialized shouldn't be called
self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
def test_spawn_empty_dns(self):
# Test spawning with an empty dns list.
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
empty_dns=True)
self.check_vm_params_for_linux()
def test_spawn_not_enough_memory(self):
self.assertRaises(exception.InsufficientFreeMemory,
self._test_spawn,
'1', 2, 3, "4") # m1.xlarge
def test_spawn_fail_cleanup_1(self):
"""Simulates an error while downloading an image.
Verifies that the VM and VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
start_vms = self._list_vms()
stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, '1', 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
end_vms = self._list_vms()
self._check_vdis(vdi_recs_start, vdi_recs_end)
# No additional VMs should be found.
self.assertEqual(start_vms, end_vms)
def test_spawn_fail_cleanup_2(self):
"""Simulates an error while creating VM record.
Verifies that the VM and VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
start_vms = self._list_vms()
stubs.stubout_create_vm(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, '1', 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
end_vms = self._list_vms()
self._check_vdis(vdi_recs_start, vdi_recs_end)
# No additional VMs should be found.
self.assertEqual(start_vms, end_vms)
def test_spawn_fail_cleanup_3(self):
"""Simulates an error while attaching disks.
Verifies that the VM and VDIs created are properly cleaned up.
"""
stubs.stubout_attach_disks(self.stubs)
vdi_recs_start = self._list_vdis()
start_vms = self._list_vms()
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, '1', 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
end_vms = self._list_vms()
self._check_vdis(vdi_recs_start, vdi_recs_end)
# No additional VMs should be found.
self.assertEqual(start_vms, end_vms)
def test_spawn_raw_glance(self):
self._test_spawn(IMAGE_RAW, None, None, os_type=None)
self.check_vm_params_for_windows()
def test_spawn_vhd_glance_linux(self):
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_windows(self):
self._test_spawn(IMAGE_VHD, None, None,
os_type="windows", architecture="i386",
instance_type_id=5)
self.check_vm_params_for_windows()
def test_spawn_iso_glance(self):
self._test_spawn(IMAGE_ISO, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
def fake_fetch_disk_image(context, session, instance, name_label,
image_id, image_type):
sr_ref = vm_utils.safe_find_sr(session)
image_type_str = vm_utils.ImageType.to_string(image_type)
vdi_ref = vm_utils.create_vdi(session, sr_ref, instance,
name_label, image_type_str, "20")
vdi_role = vm_utils.ImageType.get_role(image_type)
vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
return {vdi_role: dict(uuid=vdi_uuid, file=None)}
self.stubs.Set(vm_utils, '_fetch_disk_image',
fake_fetch_disk_image)
self._test_spawn(IMAGE_MACHINE,
IMAGE_KERNEL,
IMAGE_RAMDISK)
self.check_vm_params_for_linux_with_external_kernel()
def test_spawn_boot_from_volume_no_glance_image_meta(self):
dev_info = get_fake_device_info()
self._test_spawn(IMAGE_FROM_VOLUME, None, None,
block_device_info=dev_info)
def test_spawn_boot_from_volume_with_image_meta(self):
dev_info = get_fake_device_info()
self._test_spawn(IMAGE_VHD, None, None,
block_device_info=dev_info)
@testtools.skipIf(test_utils.is_osx(),
'IPv6 pretty-printing broken on OSX, see bug 1409135')
def test_spawn_netinject_file(self):
self.flags(flat_injected=True)
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _tee_handler(cmd, **kwargs):
actual = kwargs.get('process_input', None)
expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether DE:AD:BE:EF:00:01
address 192.168.1.100
netmask 255.255.255.0
broadcast 192.168.1.255
gateway 192.168.1.1
dns-nameservers 192.168.1.3 192.168.1.4
iface eth0 inet6 static
hwaddress ether DE:AD:BE:EF:00:01
address 2001:db8:0:1:dcad:beff:feef:1
netmask 64
gateway 2001:db8:0:1::1
"""
self.assertEqual(expected, actual)
self._tee_executed = True
return '', ''
def _readlink_handler(cmd_parts, **kwargs):
return os.path.realpath(cmd_parts[2]), ''
fake_processutils.fake_execute_set_repliers([
# Capture the tee .../etc/network/interfaces command
(r'tee.*interfaces', _tee_handler),
(r'readlink -nm.*', _readlink_handler),
])
self._test_spawn(IMAGE_MACHINE,
IMAGE_KERNEL,
IMAGE_RAMDISK,
check_injection=True)
self.assertTrue(self._tee_executed)
@testtools.skipIf(test_utils.is_osx(),
'IPv6 pretty-printing broken on OSX, see bug 1409135')
def test_spawn_netinject_xenstore(self):
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
# When mounting, create real files under the mountpoint to simulate
# files in the mounted filesystem
# mount point will be the last item of the command list
self._tmpdir = cmd[len(cmd) - 1]
LOG.debug('Creating files in %s to simulate guest agent',
self._tmpdir)
os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
# Touch the file using open
open(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'), 'w').close()
return '', ''
def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
# Umount would normally make files in the mounted filesystem
# disappear, so do that here
LOG.debug('Removing simulated guest agent files in %s',
self._tmpdir)
os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'))
os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
os.rmdir(os.path.join(self._tmpdir, 'usr'))
return '', ''
def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
self._tee_executed = True
return '', ''
fake_processutils.fake_execute_set_repliers([
(r'mount', _mount_handler),
(r'umount', _umount_handler),
(r'tee.*interfaces', _tee_handler)])
self._test_spawn('1', 2, 3, check_injection=True)
# tee must not run in this case, where an injection-capable
# guest agent is detected
self.assertFalse(self._tee_executed)
def test_spawn_injects_auto_disk_config_to_xenstore(self):
instance = self._create_instance(spawn=False, obj=True)
self.mox.StubOutWithMock(self.conn._vmops, '_inject_auto_disk_config')
self.conn._vmops._inject_auto_disk_config(instance, mox.IgnoreArg())
self.mox.ReplayAll()
self.conn.spawn(self.context, instance,
IMAGE_FIXTURES['1']["image_meta"], [], 'herp', '')
def test_spawn_vlanmanager(self):
self.flags(network_manager='nova.network.manager.VlanManager',
vlan_interface='fake0')
def dummy(*args, **kwargs):
pass
self.stubs.Set(vmops.VMOps, '_create_vifs', dummy)
# Reset network table
xenapi_fake.reset_table('network')
# Instance 2 will use vlan network (see db/fakes.py)
ctxt = self.context.elevated()
inst2 = self._create_instance(False, obj=True)
networks = self.network.db.network_get_all(ctxt)
with mock.patch('nova.objects.network.Network._from_db_object'):
for network in networks:
self.network.set_network_host(ctxt, network)
self.network.allocate_for_instance(ctxt,
instance_id=inst2.id,
instance_uuid=inst2.uuid,
host=CONF.host,
vpn=None,
rxtx_factor=3,
project_id=self.project_id,
macs=None)
self._test_spawn(IMAGE_MACHINE,
IMAGE_KERNEL,
IMAGE_RAMDISK,
instance_id=inst2.id,
create_record=False)
# TODO(salvatore-orlando): a complete test here would require
# a check for making sure the bridge for the VM's VIF is
        # consistent with the bridge specified in the nova db
def test_spawn_with_network_qos(self):
self._create_instance()
for vif_ref in xenapi_fake.get_all('VIF'):
vif_rec = xenapi_fake.get_record('VIF', vif_ref)
self.assertEqual(vif_rec['qos_algorithm_type'], 'ratelimit')
self.assertEqual(vif_rec['qos_algorithm_params']['kbps'],
str(3 * 10 * 1024))
def test_spawn_ssh_key_injection(self):
# Test spawning with key_data on an instance. Should use
# agent file injection.
self.flags(use_agent_default=True,
group='xenserver')
actual_injected_files = []
def fake_inject_file(self, method, args):
path = base64.b64decode(args['b64_path'])
contents = base64.b64decode(args['b64_contents'])
actual_injected_files.append((path, contents))
return jsonutils.dumps({'returncode': '0', 'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_inject_file', fake_inject_file)
def fake_encrypt_text(sshkey, new_pass):
self.assertEqual("ssh-rsa fake_keydata", sshkey)
return "fake"
self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
expected_data = ('\n# The following ssh key was injected by '
'Nova\nssh-rsa fake_keydata\n')
injected_files = [('/root/.ssh/authorized_keys', expected_data)]
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
key_data='ssh-rsa fake_keydata')
self.assertEqual(actual_injected_files, injected_files)
def test_spawn_ssh_key_injection_non_rsa(self):
# Test spawning with key_data on an instance. Should use
# agent file injection.
self.flags(use_agent_default=True,
group='xenserver')
actual_injected_files = []
def fake_inject_file(self, method, args):
path = base64.b64decode(args['b64_path'])
contents = base64.b64decode(args['b64_contents'])
actual_injected_files.append((path, contents))
return jsonutils.dumps({'returncode': '0', 'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_inject_file', fake_inject_file)
def fake_encrypt_text(sshkey, new_pass):
raise NotImplementedError("Should not be called")
self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
expected_data = ('\n# The following ssh key was injected by '
'Nova\nssh-dsa fake_keydata\n')
injected_files = [('/root/.ssh/authorized_keys', expected_data)]
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
key_data='ssh-dsa fake_keydata')
self.assertEqual(actual_injected_files, injected_files)
def test_spawn_injected_files(self):
# Test spawning with injected_files.
self.flags(use_agent_default=True,
group='xenserver')
actual_injected_files = []
def fake_inject_file(self, method, args):
path = base64.b64decode(args['b64_path'])
contents = base64.b64decode(args['b64_contents'])
actual_injected_files.append((path, contents))
return jsonutils.dumps({'returncode': '0', 'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_inject_file', fake_inject_file)
injected_files = [('/tmp/foo', 'foobar')]
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
injected_files=injected_files)
self.check_vm_params_for_linux()
self.assertEqual(actual_injected_files, injected_files)
@mock.patch('nova.db.agent_build_get_by_triple')
def test_spawn_agent_upgrade(self, mock_get):
self.flags(use_agent_default=True,
group='xenserver')
mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
"hypervisor": "xen", "os": "windows",
"url": "url", "md5hash": "asdf",
'created_at': None, 'updated_at': None,
'deleted_at': None, 'deleted': False,
'id': 1}
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
@mock.patch('nova.db.agent_build_get_by_triple')
def test_spawn_agent_upgrade_fails_silently(self, mock_get):
mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
"hypervisor": "xen", "os": "windows",
"url": "url", "md5hash": "asdf",
'created_at': None, 'updated_at': None,
'deleted_at': None, 'deleted': False,
'id': 1}
self._test_spawn_fails_silently_with(exception.AgentError,
method="_plugin_agent_agentupdate", failure="fake_error")
def test_spawn_with_resetnetwork_alternative_returncode(self):
self.flags(use_agent_default=True,
group='xenserver')
def fake_resetnetwork(self, method, args):
fake_resetnetwork.called = True
# NOTE(johngarbutt): as returned by FreeBSD and Gentoo
return jsonutils.dumps({'returncode': '500',
'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_resetnetwork', fake_resetnetwork)
fake_resetnetwork.called = False
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.assertTrue(fake_resetnetwork.called)
def _test_spawn_fails_silently_with(self, expected_exception_cls,
method="_plugin_agent_version",
failure=None, value=None):
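        # Stub out the named agent call to either raise a XenAPI failure or
        # return the given value, then spawn and verify the resulting error
        # is swallowed but recorded via add_instance_fault_from_exc.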
self.flags(use_agent_default=True,
agent_version_timeout=0,
group='xenserver')
def fake_agent_call(self, method, args):
if failure:
raise xenapi_fake.Failure([failure])
else:
return value
self.stubs.Set(stubs.FakeSessionForVMTests,
method, fake_agent_call)
called = {}
def fake_add_instance_fault(*args, **kwargs):
called["fake_add_instance_fault"] = args[2]
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
fake_add_instance_fault)
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
actual_exception = called["fake_add_instance_fault"]
self.assertIsInstance(actual_exception, expected_exception_cls)
def test_spawn_fails_silently_with_agent_timeout(self):
self._test_spawn_fails_silently_with(exception.AgentTimeout,
failure="TIMEOUT:fake")
def test_spawn_fails_silently_with_agent_not_implemented(self):
self._test_spawn_fails_silently_with(exception.AgentNotImplemented,
failure="NOT IMPLEMENTED:fake")
def test_spawn_fails_silently_with_agent_error(self):
self._test_spawn_fails_silently_with(exception.AgentError,
failure="fake_error")
def test_spawn_fails_silently_with_agent_bad_return(self):
error = jsonutils.dumps({'returncode': -1, 'message': 'fake'})
self._test_spawn_fails_silently_with(exception.AgentError,
value=error)
def test_spawn_sets_last_dom_id(self):
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.assertEqual(self.vm['domid'],
self.vm['other_config']['last_dom_id'])
def test_rescue(self):
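        # Rescue should boot a new '<name>-rescue' VM: its own root disk takes
        # userdevice 0, the original root is re-attached at userdevice 1, swap
        # and ephemeral disks keep their device numbers, and volume-backed
        # (osvol) VBDs are not carried over.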
instance = self._create_instance(spawn=False, obj=True)
xenapi_fake.create_vm(instance['name'], 'Running')
session = get_session()
vm_ref = vm_utils.lookup(session, instance['name'])
swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
root_vdi_ref = xenapi_fake.create_vdi('root', None)
eph1_vdi_ref = xenapi_fake.create_vdi('eph', None)
eph2_vdi_ref = xenapi_fake.create_vdi('eph', None)
vol_vdi_ref = xenapi_fake.create_vdi('volume', None)
xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=2)
xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0)
xenapi_fake.create_vbd(vm_ref, eph1_vdi_ref, userdevice=4)
xenapi_fake.create_vbd(vm_ref, eph2_vdi_ref, userdevice=5)
xenapi_fake.create_vbd(vm_ref, vol_vdi_ref, userdevice=6,
other_config={'osvol': True})
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
image_meta = {'id': IMAGE_VHD,
'disk_format': 'vhd',
'properties': {'vm_mode': 'xen'}}
conn.rescue(self.context, instance, [], image_meta, '')
vm = xenapi_fake.get_record('VM', vm_ref)
rescue_name = "%s-rescue" % vm["name_label"]
rescue_ref = vm_utils.lookup(session, rescue_name)
rescue_vm = xenapi_fake.get_record('VM', rescue_ref)
vdi_refs = {}
for vbd_ref in rescue_vm['VBDs']:
vbd = xenapi_fake.get_record('VBD', vbd_ref)
vdi_refs[vbd['VDI']] = vbd['userdevice']
self.assertEqual('1', vdi_refs[root_vdi_ref])
self.assertEqual('2', vdi_refs[swap_vdi_ref])
self.assertEqual('4', vdi_refs[eph1_vdi_ref])
self.assertEqual('5', vdi_refs[eph2_vdi_ref])
self.assertNotIn(vol_vdi_ref, vdi_refs)
def test_rescue_preserve_disk_on_failure(self):
# test that the original disk is preserved if rescue setup fails
# bug #1227898
instance = self._create_instance(obj=True)
session = get_session()
image_meta = {'id': IMAGE_VHD,
'disk_format': 'vhd',
'properties': {'vm_mode': 'xen'}}
vm_ref = vm_utils.lookup(session, instance['name'])
vdi_ref, vdi_rec = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
# raise an error in the spawn setup process and trigger the
# undo manager logic:
def fake_start(*args, **kwargs):
raise test.TestingException('Start Error')
self.stubs.Set(self.conn._vmops, '_start', fake_start)
self.assertRaises(test.TestingException, self.conn.rescue,
self.context, instance, [], image_meta, '')
# confirm original disk still exists:
vdi_ref2, vdi_rec2 = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
self.assertEqual(vdi_ref, vdi_ref2)
self.assertEqual(vdi_rec['uuid'], vdi_rec2['uuid'])
def test_unrescue(self):
instance = self._create_instance(obj=True)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Unrescue expects the original instance to be powered off
conn.power_off(instance)
xenapi_fake.create_vm(instance['name'] + '-rescue', 'Running')
conn.unrescue(instance, None)
def test_unrescue_not_in_rescue(self):
instance = self._create_instance(obj=True)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Ensure that it will not unrescue a non-rescued instance.
self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
instance, None)
def test_finish_revert_migration(self):
instance = self._create_instance()
class VMOpsMock(object):
def __init__(self):
self.finish_revert_migration_called = False
def finish_revert_migration(self, context, instance, block_info,
power_on):
self.finish_revert_migration_called = True
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn._vmops = VMOpsMock()
conn.finish_revert_migration(self.context, instance, None)
self.assertTrue(conn._vmops.finish_revert_migration_called)
def test_reboot_hard(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.reboot(self.context, instance, None, "HARD")
def test_poll_rebooting_instances(self):
self.mox.StubOutWithMock(compute_api.API, 'reboot')
compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.ReplayAll()
instance = self._create_instance()
instances = [instance]
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.poll_rebooting_instances(60, instances)
def test_reboot_soft(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.reboot(self.context, instance, None, "SOFT")
def test_reboot_halted(self):
session = get_session()
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance['name'], 'Halted')
conn.reboot(self.context, instance, None, "SOFT")
vm_ref = vm_utils.lookup(session, instance['name'])
vm = xenapi_fake.get_record('VM', vm_ref)
self.assertEqual(vm['power_state'], 'Running')
def test_reboot_unknown_state(self):
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance['name'], 'Unknown')
self.assertRaises(xenapi_fake.Failure, conn.reboot, self.context,
instance, None, "SOFT")
def test_reboot_rescued(self):
instance = self._create_instance()
instance['vm_state'] = vm_states.RESCUED
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
real_result = vm_utils.lookup(conn._session, instance['name'])
self.mox.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup(conn._session, instance['name'],
True).AndReturn(real_result)
self.mox.ReplayAll()
conn.reboot(self.context, instance, None, "SOFT")
def test_get_console_output_succeeds(self):
def fake_get_console_output(instance):
self.assertEqual("instance", instance)
return "console_log"
self.stubs.Set(self.conn._vmops, 'get_console_output',
fake_get_console_output)
self.assertEqual(self.conn.get_console_output('context', "instance"),
"console_log")
def _test_maintenance_mode(self, find_host, find_aggregate):
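        # Drive host_maintenance_mode('bar', 'on_maintenance') and verify the
        # running instance is live-migrated away via VM.pool_migrate;
        # find_host/find_aggregate select the failure paths exercised by the
        # tests below.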
real_call_xenapi = self.conn._session.call_xenapi
instance = self._create_instance(spawn=True)
api_calls = {}
# Record all the xenapi calls, and return a fake list of hosts
# for the host.get_all call
def fake_call_xenapi(method, *args):
api_calls[method] = args
if method == 'host.get_all':
return ['foo', 'bar', 'baz']
return real_call_xenapi(method, *args)
self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)
def fake_aggregate_get(context, host, key):
if find_aggregate:
return [test_aggregate.fake_aggregate]
else:
return []
self.stubs.Set(db, 'aggregate_get_by_host',
fake_aggregate_get)
def fake_host_find(context, session, src, dst):
if find_host:
return 'bar'
else:
raise exception.NoValidHost("I saw this one coming...")
self.stubs.Set(host, '_host_find', fake_host_find)
result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
self.assertEqual(result, 'on_maintenance')
        # We expect VM.pool_migrate to have been called to migrate our
        # instance to the 'bar' host
vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
host_ref = "foo"
expected = (vm_ref, host_ref, {"live": "true"})
self.assertEqual(api_calls.get('VM.pool_migrate'), expected)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
self.assertEqual(instance['task_state'], task_states.MIGRATING)
def test_maintenance_mode(self):
self._test_maintenance_mode(True, True)
def test_maintenance_mode_no_host(self):
self.assertRaises(exception.NoValidHost,
self._test_maintenance_mode, False, True)
def test_maintenance_mode_no_aggregate(self):
self.assertRaises(exception.NotFound,
self._test_maintenance_mode, True, False)
def test_uuid_find(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
fake_inst = fake_instance.fake_db_instance(id=123)
fake_inst2 = fake_instance.fake_db_instance(id=456)
db.instance_get_all_by_host(self.context, fake_inst['host'],
columns_to_join=None,
use_slave=False
).AndReturn([fake_inst, fake_inst2])
self.mox.ReplayAll()
expected_name = CONF.instance_name_template % fake_inst['id']
inst_uuid = host._uuid_find(self.context, fake_inst['host'],
expected_name)
self.assertEqual(inst_uuid, fake_inst['uuid'])
def test_session_virtapi(self):
was = {'called': False}
def fake_aggregate_get_by_host(self, *args, **kwargs):
was['called'] = True
raise test.TestingException()
self.stubs.Set(db, "aggregate_get_by_host",
fake_aggregate_get_by_host)
self.stubs.Set(self.conn._session, "is_slave", True)
self.assertRaises(test.TestingException,
self.conn._session._get_host_uuid)
self.assertTrue(was['called'])
def test_per_instance_usage_running(self):
instance = self._create_instance(spawn=True)
flavor = flavors.get_flavor(3)
expected = {instance['uuid']: {'memory_mb': flavor['memory_mb'],
'uuid': instance['uuid']}}
actual = self.conn.get_per_instance_usage()
self.assertEqual(expected, actual)
# Paused instances still consume resources:
self.conn.pause(instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual(expected, actual)
def test_per_instance_usage_suspended(self):
# Suspended instances do not consume memory:
instance = self._create_instance(spawn=True)
self.conn.suspend(self.context, instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual({}, actual)
def test_per_instance_usage_halted(self):
instance = self._create_instance(spawn=True, obj=True)
self.conn.power_off(instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual({}, actual)
def _create_instance(self, spawn=True, obj=False, **attrs):
"""Creates and spawns a test instance."""
instance_values = {
'uuid': str(uuid.uuid4()),
'display_name': 'host-',
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 80,
'ephemeral_gb': 0,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'vm_mode': 'hvm',
'architecture': 'x86-64'}
instance_values.update(attrs)
instance = create_instance_with_system_metadata(self.context,
instance_values)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
image_meta = {'id': IMAGE_VHD,
'disk_format': 'vhd'}
if spawn:
self.conn.spawn(self.context, instance, image_meta, [], 'herp',
network_info)
if obj:
return instance
return base.obj_to_primitive(instance)
def test_destroy_clean_up_kernel_and_ramdisk(self):
def fake_lookup_kernel_ramdisk(session, vm_ref):
return "kernel", "ramdisk"
self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
fake_lookup_kernel_ramdisk)
def fake_destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
fake_destroy_kernel_ramdisk.called = True
self.assertEqual("kernel", kernel)
self.assertEqual("ramdisk", ramdisk)
fake_destroy_kernel_ramdisk.called = False
self.stubs.Set(vm_utils, "destroy_kernel_ramdisk",
fake_destroy_kernel_ramdisk)
instance = self._create_instance(spawn=True, obj=True)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
self.conn.destroy(self.context, instance, network_info)
vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
self.assertIsNone(vm_ref)
self.assertTrue(fake_destroy_kernel_ramdisk.called)
class XenAPIDiffieHellmanTestCase(test.NoDBTestCase):
"""Unit tests for Diffie-Hellman code."""
def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp()
self.alice = agent.SimpleDH()
self.bob = agent.SimpleDH()
def test_shared(self):
alice_pub = self.alice.get_public()
bob_pub = self.bob.get_public()
alice_shared = self.alice.compute_shared(bob_pub)
bob_shared = self.bob.compute_shared(alice_pub)
self.assertEqual(alice_shared, bob_shared)
def _test_encryption(self, message):
enc = self.alice.encrypt(message)
self.assertFalse(enc.endswith('\n'))
dec = self.bob.decrypt(enc)
self.assertEqual(dec, message)
def test_encrypt_simple_message(self):
self._test_encryption('This is a simple message.')
def test_encrypt_message_with_newlines_at_end(self):
self._test_encryption('This message has a newline at the end.\n')
def test_encrypt_many_newlines_at_end(self):
self._test_encryption('Message with lotsa newlines.\n\n\n')
def test_encrypt_newlines_inside_message(self):
self._test_encryption('Message\nwith\ninterior\nnewlines.')
def test_encrypt_with_leading_newlines(self):
self._test_encryption('\n\nMessage with leading newlines.')
def test_encrypt_really_long_message(self):
self._test_encryption(''.join(['abcd' for i in range(1024)]))
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIMigrateInstance(stubs.XenAPITestBase):
"""Unit test for verifying migration-related actions."""
REQUIRES_LOCKING = True
def setUp(self):
super(XenAPIMigrateInstance, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', 'fake_br1')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.instance_values = {
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': None,
'ramdisk_id': None,
'root_gb': 80,
'ephemeral_gb': 0,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
migration_values = {
'source_compute': 'nova-compute',
'dest_compute': 'nova-compute',
'dest_host': '10.127.5.114',
'status': 'post-migrating',
'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
'old_instance_type_id': 5,
'new_instance_type_id': 1
}
self.migration = db.migration_create(
context.get_admin_context(), migration_values)
fake_processutils.stub_out_processutils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs)
stubs.stubout_get_this_vm_uuid(self.stubs)
def fake_inject_instance_metadata(self, instance, vm):
pass
self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
fake_inject_instance_metadata)
def test_migrate_disk_and_power_off(self):
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=80,
ephemeral_gb=0)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm_ref = vm_utils.lookup(conn._session, instance['name'])
self.mox.StubOutWithMock(volume_utils, 'is_booted_from_volume')
volume_utils.is_booted_from_volume(conn._session, vm_ref)
self.mox.ReplayAll()
conn.migrate_disk_and_power_off(self.context, instance,
'127.0.0.1', flavor, None)
def test_migrate_disk_and_power_off_passes_exceptions(self):
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=80,
ephemeral_gb=0)
def fake_raise(*args, **kwargs):
raise exception.MigrationError(reason='test failure')
self.stubs.Set(vmops.VMOps, "_migrate_disk_resizing_up", fake_raise)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', flavor, None)
def test_migrate_disk_and_power_off_throws_on_zero_gb_resize_down(self):
instance = db.instance_create(self.context, self.instance_values)
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0,
ephemeral_gb=0)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.ResizeError,
conn.migrate_disk_and_power_off,
self.context, instance,
'fake_dest', flavor, None)
def test_migrate_disk_and_power_off_with_zero_gb_old_and_new_works(self):
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0,
ephemeral_gb=0)
values = copy.copy(self.instance_values)
values["root_gb"] = 0
values["ephemeral_gb"] = 0
instance = db.instance_create(self.context, values)
xenapi_fake.create_vm(instance['name'], 'Running')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm_ref = vm_utils.lookup(conn._session, instance['name'])
self.mox.StubOutWithMock(volume_utils, 'is_booted_from_volume')
volume_utils.is_booted_from_volume(conn._session, vm_ref)
self.mox.ReplayAll()
conn.migrate_disk_and_power_off(self.context, instance,
'127.0.0.1', flavor, None)
def _test_revert_migrate(self, power_on):
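        # finish_migration should resize the root VDI online and only start
        # the VM when power_on is set; finish_revert_migration is then
        # expected to be delegated to the stubbed VMOps method.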
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
self.called = False
self.fake_vm_start_called = False
self.fake_finish_revert_migration_called = False
context = 'fake_context'
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
def fake_finish_revert_migration(*args, **kwargs):
self.fake_finish_revert_migration_called = True
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
fake_finish_revert_migration)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(4, 0, 0),
product_brand='XenServer')
self.mox.StubOutWithMock(volume_utils, 'is_booted_from_volume')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
base = xenapi_fake.create_vdi('hurr', 'fake')
base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
cow = xenapi_fake.create_vdi('durr', 'fake')
cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid']
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy=base_uuid, cow=cow_uuid),
network_info, image_meta, resize_instance=True,
block_device_info=None, power_on=power_on)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, power_on)
conn.finish_revert_migration(context, instance, network_info)
self.assertEqual(self.fake_finish_revert_migration_called, True)
def test_revert_migrate_power_on(self):
self._test_revert_migrate(True)
def test_revert_migrate_power_off(self):
self._test_revert_migrate(False)
def _test_finish_migrate(self, power_on):
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
self.called = False
self.fake_vm_start_called = False
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(4, 0, 0),
product_brand='XenServer')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True,
block_device_info=None, power_on=power_on)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, power_on)
def test_finish_migrate_power_on(self):
self._test_finish_migrate(True)
def test_finish_migrate_power_off(self):
self._test_finish_migrate(False)
def test_finish_migrate_no_local_storage(self):
values = copy.copy(self.instance_values)
values["root_gb"] = 0
values["ephemeral_gb"] = 0
instance = create_instance_with_system_metadata(self.context, values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
def test_finish_migrate_no_resize_vdi(self):
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        # Whether to resize the instance is determined by the compute call
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=False)
@stub_vm_utils_with_vdi_attached_here
def test_migrate_too_many_partitions_no_resize_down(self):
instance_values = self.instance_values
instance = db.instance_create(self.context, instance_values)
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = db.flavor_get_by_name(self.context, 'm1.small')
flavor = fake_flavor.fake_flavor_obj(self.context, **flavor)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_partitions(partition):
return [(1, 2, 3, 4, "", ""), (1, 2, 3, 4, "", "")]
self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
self.assertRaises(exception.InstanceFaultRollback,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', flavor, None)
@stub_vm_utils_with_vdi_attached_here
def test_migrate_bad_fs_type_no_resize_down(self):
instance_values = self.instance_values
instance = db.instance_create(self.context, instance_values)
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = db.flavor_get_by_name(self.context, 'm1.small')
flavor = fake_flavor.fake_flavor_obj(self.context, **flavor)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_partitions(partition):
return [(1, 2, 3, "ext2", "", "boot")]
self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
self.assertRaises(exception.InstanceFaultRollback,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', flavor, None)
def test_migrate_rollback_when_resize_down_fs_fails(self):
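        # If migrate_vhd fails part-way through a resize down, the newly
        # created VDI must be destroyed, the original VM restored, and the
        # failure re-raised as InstanceFaultRollback.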
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
self.mox.StubOutWithMock(vmops, '_resize_ensure_vm_is_shutdown')
self.mox.StubOutWithMock(vmops, '_apply_orig_vm_name_label')
self.mox.StubOutWithMock(vm_utils, 'resize_disk')
self.mox.StubOutWithMock(vm_utils, 'migrate_vhd')
self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
self.mox.StubOutWithMock(vm_utils, 'get_vdi_for_vm_safely')
self.mox.StubOutWithMock(vmops, '_restore_orig_vm_and_cleanup_orphan')
instance = objects.Instance(context=self.context,
auto_disk_config=True, uuid='uuid')
instance.obj_reset_changes()
vm_ref = "vm_ref"
dest = "dest"
flavor = "type"
sr_path = "sr_path"
vmops._resize_ensure_vm_is_shutdown(instance, vm_ref)
vmops._apply_orig_vm_name_label(instance, vm_ref)
old_vdi_ref = "old_ref"
vm_utils.get_vdi_for_vm_safely(vmops._session, vm_ref).AndReturn(
(old_vdi_ref, None))
new_vdi_ref = "new_ref"
new_vdi_uuid = "new_uuid"
vm_utils.resize_disk(vmops._session, instance, old_vdi_ref,
flavor).AndReturn((new_vdi_ref, new_vdi_uuid))
vm_utils.migrate_vhd(vmops._session, instance, new_vdi_uuid, dest,
sr_path, 0).AndRaise(
exception.ResizeError(reason="asdf"))
vm_utils.destroy_vdi(vmops._session, new_vdi_ref)
vmops._restore_orig_vm_and_cleanup_orphan(instance)
self.mox.ReplayAll()
with mock.patch.object(instance, 'save') as mock_save:
self.assertRaises(exception.InstanceFaultRollback,
vmops._migrate_disk_resizing_down, self.context,
instance, dest, flavor, vm_ref, sr_path)
self.assertEqual(3, mock_save.call_count)
self.assertEqual(60.0, instance.progress)
def test_resize_ensure_vm_is_shutdown_cleanly(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(True)
self.mox.ReplayAll()
vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
def test_resize_ensure_vm_is_shutdown_forced(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(False)
vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(True)
self.mox.ReplayAll()
vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
def test_resize_ensure_vm_is_shutdown_fails(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(False)
vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.ResizeError,
vmops._resize_ensure_vm_is_shutdown, fake_instance, "ref")
def test_resize_ensure_vm_is_shutdown_already_shutdown(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(True)
self.mox.ReplayAll()
vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
class XenAPIImageTypeTestCase(test.NoDBTestCase):
"""Test ImageType class."""
def test_to_string(self):
# Can convert from type id to type string.
self.assertEqual(
vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
vm_utils.ImageType.KERNEL_STR)
def _assert_role(self, expected_role, image_type_id):
self.assertEqual(
expected_role,
vm_utils.ImageType.get_role(image_type_id))
def test_get_image_role_kernel(self):
self._assert_role('kernel', vm_utils.ImageType.KERNEL)
def test_get_image_role_ramdisk(self):
self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK)
def test_get_image_role_disk(self):
self._assert_role('root', vm_utils.ImageType.DISK)
def test_get_image_role_disk_raw(self):
self._assert_role('root', vm_utils.ImageType.DISK_RAW)
def test_get_image_role_disk_vhd(self):
self._assert_role('root', vm_utils.ImageType.DISK_VHD)
class XenAPIDetermineDiskImageTestCase(test.NoDBTestCase):
"""Unit tests for code that detects the ImageType."""
def assert_disk_type(self, image_meta, expected_disk_type):
actual = vm_utils.determine_disk_image_type(image_meta)
self.assertEqual(expected_disk_type, actual)
def test_machine(self):
image_meta = objects.ImageMeta.from_dict(
{'disk_format': 'ami'})
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK)
def test_raw(self):
image_meta = objects.ImageMeta.from_dict(
{'disk_format': 'raw'})
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW)
def test_vhd(self):
image_meta = objects.ImageMeta.from_dict(
{'disk_format': 'vhd'})
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIHostTestCase(stubs.XenAPITestBase):
"""Tests HostState, which holds metrics from XenServer that get
reported back to the Schedulers.
"""
def setUp(self):
super(XenAPIHostTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.context = context.get_admin_context()
self.flags(use_local=True, group='conductor')
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.instance = fake_instance.fake_db_instance(name='foo')
def test_host_state(self):
stats = self.conn.host_state.get_host_stats(False)
# Values from fake.create_local_srs (ext SR)
self.assertEqual(stats['disk_total'], 40000)
self.assertEqual(stats['disk_used'], 20000)
# Values from fake._plugin_xenhost_host_data
self.assertEqual(stats['host_memory_total'], 10)
self.assertEqual(stats['host_memory_overhead'], 20)
self.assertEqual(stats['host_memory_free'], 30)
self.assertEqual(stats['host_memory_free_computed'], 40)
self.assertEqual(stats['hypervisor_hostname'], 'fake-xenhost')
self.assertEqual(stats['host_cpu_info']['cpu_count'], 4)
self.assertThat({
'vendor': 'GenuineIntel',
'model': 'Intel(R) Xeon(R) CPU X3430 @ 2.40GHz',
'topology': {
'sockets': 1,
'cores': 4,
'threads': 1,
},
'features': [
'fpu', 'de', 'tsc', 'msr', 'pae', 'mce',
'cx8', 'apic', 'sep', 'mtrr', 'mca',
'cmov', 'pat', 'clflush', 'acpi', 'mmx',
'fxsr', 'sse', 'sse2', 'ss', 'ht',
'nx', 'constant_tsc', 'nonstop_tsc',
'aperfmperf', 'pni', 'vmx', 'est', 'ssse3',
'sse4_1', 'sse4_2', 'popcnt', 'hypervisor',
'ida', 'tpr_shadow', 'vnmi', 'flexpriority',
'ept', 'vpid',
]},
matchers.DictMatches(stats['cpu_model']))
# No VMs running
self.assertEqual(stats['vcpus_used'], 0)
def test_host_state_vcpus_used(self):
stats = self.conn.host_state.get_host_stats(True)
self.assertEqual(stats['vcpus_used'], 0)
xenapi_fake.create_vm(self.instance['name'], 'Running')
stats = self.conn.host_state.get_host_stats(True)
self.assertEqual(stats['vcpus_used'], 4)
def test_pci_passthrough_devices(self):
stats = self.conn.host_state.get_host_stats(False)
self.assertEqual(len(stats['pci_passthrough_devices']), 2)
def test_host_state_missing_sr(self):
# Must trigger construction of 'host_state' property
# before introducing the stub which raises the error
hs = self.conn.host_state
def fake_safe_find_sr(session):
raise exception.StorageRepositoryNotFound('not there')
self.stubs.Set(vm_utils, 'safe_find_sr', fake_safe_find_sr)
self.assertRaises(exception.StorageRepositoryNotFound,
hs.get_host_stats,
refresh=True)
def _test_host_action(self, method, action, expected=None):
result = method('host', action)
if not expected:
expected = action
self.assertEqual(result, expected)
def _test_host_action_no_param(self, method, action, expected=None):
result = method(action)
if not expected:
expected = action
self.assertEqual(result, expected)
def test_host_reboot(self):
self._test_host_action_no_param(self.conn.host_power_action, 'reboot')
def test_host_shutdown(self):
self._test_host_action_no_param(self.conn.host_power_action,
'shutdown')
def test_host_startup(self):
self.assertRaises(NotImplementedError,
self.conn.host_power_action, 'startup')
def test_host_maintenance_on(self):
self._test_host_action(self.conn.host_maintenance_mode,
True, 'on_maintenance')
def test_host_maintenance_off(self):
self._test_host_action(self.conn.host_maintenance_mode,
False, 'off_maintenance')
def test_set_enable_host_enable(self):
_create_service_entries(self.context, values={'nova': ['fake-mini']})
self._test_host_action_no_param(self.conn.set_host_enabled,
True, 'enabled')
service = db.service_get_by_host_and_binary(self.context, 'fake-mini',
'nova-compute')
self.assertEqual(service.disabled, False)
def test_set_enable_host_disable(self):
_create_service_entries(self.context, values={'nova': ['fake-mini']})
self._test_host_action_no_param(self.conn.set_host_enabled,
False, 'disabled')
service = db.service_get_by_host_and_binary(self.context, 'fake-mini',
'nova-compute')
self.assertEqual(service.disabled, True)
def test_get_host_uptime(self):
result = self.conn.get_host_uptime()
self.assertEqual(result, 'fake uptime')
def test_supported_instances_is_included_in_host_state(self):
stats = self.conn.host_state.get_host_stats(False)
self.assertIn('supported_instances', stats)
def test_supported_instances_is_calculated_by_to_supported_instances(self):
def to_supported_instances(somedata):
return "SOMERETURNVALUE"
self.stubs.Set(host, 'to_supported_instances', to_supported_instances)
stats = self.conn.host_state.get_host_stats(False)
self.assertEqual("SOMERETURNVALUE", stats['supported_instances'])
def test_update_stats_caches_hostname(self):
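        # The hypervisor hostname is cached on the first refresh, so a later
        # change in the plugin's host_data must not show up in the stats.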
self.mox.StubOutWithMock(host, 'call_xenhost')
self.mox.StubOutWithMock(vm_utils, 'scan_default_sr')
self.mox.StubOutWithMock(vm_utils, 'list_vms')
self.mox.StubOutWithMock(self.conn._session, 'call_xenapi')
data = {'disk_total': 0,
'disk_used': 0,
'disk_available': 0,
'supported_instances': 0,
'host_capabilities': [],
'host_hostname': 'foo',
'vcpus_used': 0,
}
sr_rec = {
'physical_size': 0,
'physical_utilisation': 0,
'virtual_allocation': 0,
}
for i in range(3):
host.call_xenhost(mox.IgnoreArg(), 'host_data', {}).AndReturn(data)
vm_utils.scan_default_sr(self.conn._session).AndReturn("ref")
vm_utils.list_vms(self.conn._session).AndReturn([])
self.conn._session.call_xenapi('SR.get_record', "ref").AndReturn(
sr_rec)
if i == 2:
# On the third call (the second below) change the hostname
data = dict(data, host_hostname='bar')
self.mox.ReplayAll()
stats = self.conn.host_state.get_host_stats(refresh=True)
self.assertEqual('foo', stats['hypervisor_hostname'])
stats = self.conn.host_state.get_host_stats(refresh=True)
self.assertEqual('foo', stats['hypervisor_hostname'])
class ToSupportedInstancesTestCase(test.NoDBTestCase):
def test_default_return_value(self):
self.assertEqual([],
host.to_supported_instances(None))
def test_return_value(self):
self.assertEqual([(arch.X86_64, hv_type.XEN, 'xen')],
host.to_supported_instances([u'xen-3.0-x86_64']))
def test_invalid_values_do_not_break(self):
self.assertEqual([(arch.X86_64, hv_type.XEN, 'xen')],
host.to_supported_instances([u'xen-3.0-x86_64', 'spam']))
def test_multiple_values(self):
self.assertEqual(
[
(arch.X86_64, hv_type.XEN, 'xen'),
(arch.I686, hv_type.XEN, 'hvm')
],
host.to_supported_instances([u'xen-3.0-x86_64', 'hvm-3.0-x86_32'])
)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
def setUp(self):
super(XenAPIAutoDiskConfigTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.user_id = 'fake'
self.project_id = 'fake'
self.instance_values = {
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 80,
'ephemeral_gb': 0,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
self.context = context.RequestContext(self.user_id, self.project_id)
def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=True,
osvol=False):
pass
self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
def assertIsPartitionCalled(self, called):
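        # Run _attach_disks against a fresh VM/VDI pair and check whether the
        # partition resize helper (_resize_part_and_fs) was invoked.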
marker = {"partition_called": False}
def fake_resize_part_and_fs(dev, start, old_sectors, new_sectors,
flags):
marker["partition_called"] = True
self.stubs.Set(vm_utils, "_resize_part_and_fs",
fake_resize_part_and_fs)
context.RequestContext(self.user_id, self.project_id)
session = get_session()
disk_image_type = vm_utils.ImageType.DISK_VHD
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
image_meta = {'id': 'null',
'disk_format': 'vhd',
'properties': {'vm_mode': 'xen'}}
self.conn._vmops._attach_disks(instance, image_meta, vm_ref,
instance['name'], vdis, disk_image_type, "fake_nw_inf")
self.assertEqual(marker["partition_called"], called)
def test_instance_not_auto_disk_config(self):
"""Should not partition unless instance is marked as
auto_disk_config.
"""
self.instance_values['auto_disk_config'] = False
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_fails_safe_two_partitions(self):
# Should not partition unless fail safes pass.
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
            return [(1, 0, 100, 'ext4', "", ""), (2, 100, 200, 'ext4', "", "")]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_fails_safe_badly_numbered(self):
# Should not partition unless fail safes pass.
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(2, 100, 200, 'ext4', "", "")]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_fails_safe_bad_fstype(self):
# Should not partition unless fail safes pass.
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(1, 100, 200, 'asdf', "", "")]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_passes_fail_safes(self):
"""Should partition if instance is marked as auto_disk_config=True and
virt-layer specific fail-safe checks pass.
"""
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(1, 0, 100, 'ext4', "", "boot")]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(True)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIGenerateLocal(stubs.XenAPITestBase):
"""Test generating of local disks, like swap and ephemeral."""
def setUp(self):
super(XenAPIGenerateLocal, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.user_id = 'fake'
self.project_id = 'fake'
self.instance_values = {
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 80,
'ephemeral_gb': 0,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
self.context = context.RequestContext(self.user_id, self.project_id)
def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=True,
osvol=False, empty=False, unpluggable=True):
return session.call_xenapi('VBD.create', {'VM': vm_ref,
'VDI': vdi_ref})
self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
def assertCalled(self, instance,
disk_image_type=vm_utils.ImageType.DISK_VHD):
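        # Attach the disks for the given instance and assert that the stubbed
        # generator (swap, ephemeral or blank ISO root) set self.called.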
context.RequestContext(self.user_id, self.project_id)
session = get_session()
vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
vdi_key = 'root'
if disk_image_type == vm_utils.ImageType.DISK_ISO:
vdi_key = 'iso'
vdis = {vdi_key: {'uuid': vdi_uuid, 'ref': vdi_ref}}
self.called = False
image_meta = {'id': 'null',
'disk_format': 'vhd',
'properties': {'vm_mode': 'xen'}}
self.conn._vmops._attach_disks(instance, image_meta, vm_ref,
instance['name'], vdis, disk_image_type, "fake_nw_inf")
self.assertTrue(self.called)
def test_generate_swap(self):
# Test swap disk generation.
instance_values = dict(self.instance_values, instance_type_id=5)
instance = create_instance_with_system_metadata(self.context,
instance_values)
def fake_generate_swap(*args, **kwargs):
self.called = True
self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)
self.assertCalled(instance)
def test_generate_ephemeral(self):
# Test ephemeral disk generation.
instance_values = dict(self.instance_values, instance_type_id=4)
instance = create_instance_with_system_metadata(self.context,
instance_values)
def fake_generate_ephemeral(*args):
self.called = True
self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
self.assertCalled(instance)
def test_generate_iso_blank_root_disk(self):
instance_values = dict(self.instance_values, instance_type_id=4)
instance_values.pop('kernel_id')
instance_values.pop('ramdisk_id')
instance = create_instance_with_system_metadata(self.context,
instance_values)
def fake_generate_ephemeral(*args):
pass
self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
def fake_generate_iso(*args):
self.called = True
self.stubs.Set(vm_utils, 'generate_iso_blank_root_disk',
fake_generate_iso)
self.assertCalled(instance, vm_utils.ImageType.DISK_ISO)
class XenAPIBWCountersTestCase(stubs.XenAPITestBaseNoDB):
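    # Two fake VM records keyed by opaque ref; _vifmap maps VIF device index
    # to MAC address, and domid ties each VM to the bandwidth data returned
    # by _fake_fetch_bandwidth below.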
FAKE_VMS = {'test1:ref': dict(name_label='test1',
other_config=dict(nova_uuid='hash'),
domid='12',
_vifmap={'0': "a:b:c:d...",
'1': "e:f:12:q..."}),
'test2:ref': dict(name_label='test2',
other_config=dict(nova_uuid='hash'),
domid='42',
_vifmap={'0': "a:3:c:d...",
'1': "e:f:42:q..."}),
}
def setUp(self):
super(XenAPIBWCountersTestCase, self).setUp()
self.stubs.Set(vm_utils, 'list_vms',
XenAPIBWCountersTestCase._fake_list_vms)
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def _fake_get_vif_device_map(vm_rec):
return vm_rec['_vifmap']
self.stubs.Set(self.conn._vmops, "_get_vif_device_map",
_fake_get_vif_device_map)
@classmethod
def _fake_list_vms(cls, session):
return six.iteritems(cls.FAKE_VMS)
@staticmethod
def _fake_fetch_bandwidth_mt(session):
return {}
@staticmethod
def _fake_fetch_bandwidth(session):
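        # Bandwidth counters keyed by domid and VIF device: domid '12'
        # belongs to test1 and '42' to test2 (see FAKE_VMS).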
return {'42':
{'0': {'bw_in': 21024, 'bw_out': 22048},
'1': {'bw_in': 231337, 'bw_out': 221212121}},
'12':
{'0': {'bw_in': 1024, 'bw_out': 2048},
'1': {'bw_in': 31337, 'bw_out': 21212121}},
}
def test_get_all_bw_counters(self):
instances = [dict(name='test1', uuid='1-2-3'),
dict(name='test2', uuid='4-5-6')]
self.stubs.Set(vm_utils, 'fetch_bandwidth',
self._fake_fetch_bandwidth)
result = self.conn.get_all_bw_counters(instances)
self.assertEqual(len(result), 4)
self.assertIn(dict(uuid='1-2-3',
mac_address="a:b:c:d...",
bw_in=1024,
bw_out=2048), result)
self.assertIn(dict(uuid='1-2-3',
mac_address="e:f:12:q...",
bw_in=31337,
bw_out=21212121), result)
self.assertIn(dict(uuid='4-5-6',
mac_address="a:3:c:d...",
bw_in=21024,
bw_out=22048), result)
self.assertIn(dict(uuid='4-5-6',
mac_address="e:f:42:q...",
bw_in=231337,
bw_out=221212121), result)
def test_get_all_bw_counters_in_failure_case(self):
"""Test that get_all_bw_conters returns an empty list when
no data returned from Xenserver. c.f. bug #910045.
"""
instances = [dict(name='instance-0001', uuid='1-2-3-4-5')]
self.stubs.Set(vm_utils, 'fetch_bandwidth',
self._fake_fetch_bandwidth_mt)
result = self.conn.get_all_bw_counters(instances)
self.assertEqual(result, [])
# TODO(salvatore-orlando): this class and
# nova.tests.unit.virt.test_libvirt.IPTablesFirewallDriverTestCase
# share a lot of code. Consider abstracting common code in a base
# class for firewall driver testing.
#
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
REQUIRES_LOCKING = True
_in_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
':PREROUTING ACCEPT [1170:189210]',
':INPUT ACCEPT [844:71028]',
':OUTPUT ACCEPT [5149:405186]',
':POSTROUTING ACCEPT [5063:386098]',
'# Completed on Mon Dec 6 11:54:13 2010',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*mangle',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -o virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'[0:0] -A FORWARD -i virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -o virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'[0:0] -A FORWARD -i virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
]
_in6_filter_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [349256:75777230]',
'COMMIT',
'# Completed on Tue Jan 18 23:47:56 2011',
]
def setUp(self):
super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.user_id = 'mappin'
self.project_id = 'fake'
stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
test_case=self)
self.context = context.RequestContext(self.user_id, self.project_id)
self.network = importutils.import_object(CONF.network_manager)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.fw = self.conn._vmops.firewall_driver
def _create_instance_ref(self):
return db.instance_create(self.context,
{'user_id': self.user_id,
'project_id': self.project_id,
'instance_type_id': 1})
def _create_test_security_group(self):
admin_ctxt = context.get_admin_context()
secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testgroup',
'description': 'test group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': 8,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'cidr': '192.168.10.0/24'})
return secgroup
def _validate_security_group(self):
in_rules = filter(lambda l: not l.startswith('#'),
self._in_rules)
for rule in in_rules:
if 'nova' not in rule:
self.assertIn(rule, self._out_rules,
'Rule went missing: %s' % rule)
instance_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
# last two octets change
if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
instance_chain = rule.split(' ')[-1]
break
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
break
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp'
' -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp'
' --icmp-type 8 -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81'
' -s 192.168.10.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
def test_static_filters(self):
instance_ref = self._create_instance_ref()
src_instance_ref = self._create_instance_ref()
admin_ctxt = context.get_admin_context()
secgroup = self._create_test_security_group()
src_secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testsourcegroup',
'description': 'src group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'group_id': src_secgroup['id']})
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
src_secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
network_model = fake_network.fake_get_instance_nw_info(self.stubs, 1)
from nova.compute import utils as compute_utils # noqa
self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
lambda instance: network_model)
self.fw.prepare_instance_filter(instance_ref, network_model)
self.fw.apply_instance_filter(instance_ref, network_model)
self._validate_security_group()
# Extra test for TCP acceptance rules
for ip in network_model.fixed_ips():
if ip['version'] != 4:
continue
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp'
' --dport 80:81 -s %s' % ip['address'])
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_filters_for_instance_with_ip_v6(self):
self.flags(use_ipv6=True)
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEqual(len(rulesv4), 2)
self.assertEqual(len(rulesv6), 1)
def test_filters_for_instance_without_ip_v6(self):
self.flags(use_ipv6=False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEqual(len(rulesv4), 2)
self.assertEqual(len(rulesv6), 0)
def test_multinic_iptables(self):
ipv4_rules_per_addr = 1
ipv4_addr_per_network = 2
ipv6_rules_per_addr = 1
ipv6_addr_per_network = 1
networks_count = 5
instance_ref = self._create_instance_ref()
_get_instance_nw_info = fake_network.fake_get_instance_nw_info
network_info = _get_instance_nw_info(self.stubs,
networks_count,
ipv4_addr_per_network)
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
'1.1.1.1'
ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
network_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
ipv4 = self.fw.iptables.ipv4['filter'].rules
ipv6 = self.fw.iptables.ipv6['filter'].rules
ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
# Extra rules are for the DHCP request
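        # i.e. (1 rule/address * 2 addresses/network * 5 networks) = 10
        # per-address rules, plus the 2 extra DHCP-related rules = 12.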
rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
networks_count) + 2
self.assertEqual(ipv4_network_rules, rules)
self.assertEqual(ipv6_network_rules,
ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
def test_do_refresh_security_group_rules(self):
admin_ctxt = context.get_admin_context()
instance_ref = self._create_instance_ref()
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
secgroup = self._create_test_security_group()
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.instance_info[instance_ref['id']] = (instance_ref,
network_info)
self._validate_security_group()
# add a rule to the security group
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'udp',
'from_port': 200,
'to_port': 299,
'cidr': '192.168.99.0/24'})
# validate the extra rule
self.fw.refresh_security_group_rules(secgroup)
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299'
' -s 192.168.99.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"Rules were not updated properly. "
"The rule for UDP acceptance is missing")
def test_provider_firewall_rules(self):
# setup basic instance data
instance_ref = self._create_instance_ref()
# FRAGILE: as in libvirt tests
# peeks at how the firewall names chains
chain_name = 'inst-%s' % instance_ref['id']
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
self.fw.prepare_instance_filter(instance_ref, network_info)
self.assertIn('provider', self.fw.iptables.ipv4['filter'].chains)
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(0, len(rules))
admin_ctxt = context.get_admin_context()
# add a rule and send the update message, check for 1 rule
db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'tcp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
# Add another, refresh, and make sure number of rules goes to two
provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'udp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(2, len(rules))
# create the instance filter and make sure it has a jump rule
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == chain_name]
jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
provjump_rules = []
# IptablesTable doesn't make rules unique internally
for rule in jump_rules:
if 'provider' in rule.rule and rule not in provjump_rules:
provjump_rules.append(rule)
self.assertEqual(1, len(provjump_rules))
# remove a rule from the db, cast to compute to refresh rule
db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
class XenAPISRSelectionTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for testing we find the right SR."""
def test_safe_find_sr_raise_exception(self):
        # Ensure StorageRepositoryNotFound is raised when the filter is wrong.
self.flags(sr_matching_filter='yadayadayada', group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = get_session()
self.assertRaises(exception.StorageRepositoryNotFound,
vm_utils.safe_find_sr, session)
def test_safe_find_sr_local_storage(self):
# Ensure the default local-storage is found.
self.flags(sr_matching_filter='other-config:i18n-key=local-storage',
group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = get_session()
# This test is only guaranteed if there is one host in the pool
self.assertEqual(len(xenapi_fake.get_all('host')), 1)
host_ref = xenapi_fake.get_all('host')[0]
pbd_refs = xenapi_fake.get_all('PBD')
for pbd_ref in pbd_refs:
pbd_rec = xenapi_fake.get_record('PBD', pbd_ref)
if pbd_rec['host'] != host_ref:
continue
sr_rec = xenapi_fake.get_record('SR', pbd_rec['SR'])
if sr_rec['other_config']['i18n-key'] == 'local-storage':
local_sr = pbd_rec['SR']
expected = vm_utils.safe_find_sr(session)
self.assertEqual(local_sr, expected)
def test_safe_find_sr_by_other_criteria(self):
# Ensure the SR is found when using a different filter.
self.flags(sr_matching_filter='other-config:my_fake_sr=true',
group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = get_session()
host_ref = xenapi_fake.get_all('host')[0]
local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
type='lvm',
other_config={'my_fake_sr': 'true'},
host_ref=host_ref)
expected = vm_utils.safe_find_sr(session)
self.assertEqual(local_sr, expected)
def test_safe_find_sr_default(self):
# Ensure the default SR is found regardless of other-config.
self.flags(sr_matching_filter='default-sr:true',
group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = get_session()
pool_ref = session.call_xenapi('pool.get_all')[0]
expected = vm_utils.safe_find_sr(session)
self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
expected)
def _create_service_entries(context, values={'avail_zone1': ['fake_host1',
'fake_host2'],
'avail_zone2': ['fake_host3'], }):
for avail_zone, hosts in six.iteritems(values):
for service_host in hosts:
db.service_create(context,
{'host': service_host,
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0})
return values
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIAggregateTestCase(stubs.XenAPITestBase):
"""Unit tests for aggregate operations."""
def setUp(self):
super(XenAPIAggregateTestCase, self).setUp()
self.flags(connection_url='http://test_url',
connection_username='test_user',
connection_password='test_pass',
group='xenserver')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
host='host',
compute_driver='xenapi.XenAPIDriver',
default_availability_zone='avail_zone1')
self.flags(use_local=True, group='conductor')
host_ref = xenapi_fake.get_all('host')[0]
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.context = context.get_admin_context()
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.compute = importutils.import_object(CONF.compute_manager)
self.api = compute_api.AggregateAPI()
values = {'name': 'test_aggr',
'metadata': {'availability_zone': 'test_zone',
pool_states.POOL_FLAG: 'XenAPI'}}
self.aggr = objects.Aggregate(context=self.context, id=1,
**values)
self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
'master_compute': 'host',
'availability_zone': 'fake_zone',
pool_states.KEY: pool_states.ACTIVE,
'host': xenapi_fake.get_record('host',
host_ref)['uuid']}
def test_pool_add_to_aggregate_called_by_driver(self):
calls = []
def pool_add_to_aggregate(context, aggregate, host, slave_info=None):
self.assertEqual("CONTEXT", context)
self.assertEqual("AGGREGATE", aggregate)
self.assertEqual("HOST", host)
self.assertEqual("SLAVEINFO", slave_info)
calls.append(pool_add_to_aggregate)
self.stubs.Set(self.conn._pool,
"add_to_aggregate",
pool_add_to_aggregate)
self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST",
slave_info="SLAVEINFO")
self.assertIn(pool_add_to_aggregate, calls)
def test_pool_remove_from_aggregate_called_by_driver(self):
calls = []
def pool_remove_from_aggregate(context, aggregate, host,
slave_info=None):
self.assertEqual("CONTEXT", context)
self.assertEqual("AGGREGATE", aggregate)
self.assertEqual("HOST", host)
self.assertEqual("SLAVEINFO", slave_info)
calls.append(pool_remove_from_aggregate)
self.stubs.Set(self.conn._pool,
"remove_from_aggregate",
pool_remove_from_aggregate)
self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST",
slave_info="SLAVEINFO")
self.assertIn(pool_remove_from_aggregate, calls)
def test_add_to_aggregate_for_first_host_sets_metadata(self):
def fake_init_pool(id, name):
fake_init_pool.called = True
self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool)
aggregate = self._aggregate_setup()
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
result = db.aggregate_get(self.context, aggregate['id'])
self.assertTrue(fake_init_pool.called)
self.assertThat(self.fake_metadata,
matchers.DictMatches(result['metadetails']))
def test_join_slave(self):
# Ensure join_slave gets called when the request gets to master.
def fake_join_slave(id, compute_uuid, host, url, user, password):
fake_join_slave.called = True
self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
self.conn._pool.add_to_aggregate(self.context, aggregate, "host2",
dict(compute_uuid='fake_uuid',
url='fake_url',
user='fake_user',
passwd='fake_pass',
xenhost_uuid='fake_uuid'))
self.assertTrue(fake_join_slave.called)
def test_add_to_aggregate_first_host(self):
def fake_pool_set_name_label(self, session, pool_ref, name):
fake_pool_set_name_label.called = True
self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label",
fake_pool_set_name_label)
self.conn._session.call_xenapi("pool.create", {"name": "asdf"})
metadata = {'availability_zone': 'fake_zone',
pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.CREATED}
aggregate = objects.Aggregate(context=self.context)
aggregate.name = 'fake_aggregate'
aggregate.metadata = dict(metadata)
aggregate.create()
aggregate.add_host('host')
self.assertEqual(["host"], aggregate.hosts)
self.assertEqual(metadata, aggregate.metadata)
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
self.assertTrue(fake_pool_set_name_label.called)
def test_remove_from_aggregate_called(self):
def fake_remove_from_aggregate(context, aggregate, host):
fake_remove_from_aggregate.called = True
self.stubs.Set(self.conn._pool,
"remove_from_aggregate",
fake_remove_from_aggregate)
self.conn.remove_from_aggregate(None, None, None)
self.assertTrue(fake_remove_from_aggregate.called)
def test_remove_from_empty_aggregate(self):
result = self._aggregate_setup()
self.assertRaises(exception.InvalidAggregateActionDelete,
self.conn._pool.remove_from_aggregate,
self.context, result, "test_host")
def test_remove_slave(self):
# Ensure eject slave gets called.
def fake_eject_slave(id, compute_uuid, host_uuid):
fake_eject_slave.called = True
self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
self.fake_metadata['host2'] = 'fake_host2_uuid'
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
self.assertTrue(fake_eject_slave.called)
def test_remove_master_solo(self):
# Ensure metadata are cleared after removal.
def fake_clear_pool(id):
fake_clear_pool.called = True
self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)
aggregate = self._aggregate_setup(metadata=self.fake_metadata)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
result = db.aggregate_get(self.context, aggregate['id'])
self.assertTrue(fake_clear_pool.called)
self.assertThat({'availability_zone': 'fake_zone',
pool_states.POOL_FLAG: 'XenAPI',
pool_states.KEY: pool_states.ACTIVE},
matchers.DictMatches(result['metadetails']))
    def test_remove_master_non_empty_pool(self):
        # Ensure InvalidAggregateActionDelete is raised when removing
        # the master from a pool that still has other hosts.
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
self.assertRaises(exception.InvalidAggregateActionDelete,
self.conn._pool.remove_from_aggregate,
self.context, aggregate, "host")
def _aggregate_setup(self, aggr_name='fake_aggregate',
aggr_zone='fake_zone',
aggr_state=pool_states.CREATED,
hosts=['host'], metadata=None):
aggregate = objects.Aggregate(context=self.context)
aggregate.name = aggr_name
aggregate.metadata = {'availability_zone': aggr_zone,
pool_states.POOL_FLAG: 'XenAPI',
pool_states.KEY: aggr_state,
}
if metadata:
aggregate.metadata.update(metadata)
aggregate.create()
for aggregate_host in hosts:
aggregate.add_host(aggregate_host)
return aggregate
def test_add_host_to_aggregate_invalid_changing_status(self):
"""Ensure InvalidAggregateActionAdd is raised when adding host while
aggregate is not ready.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
ex = self.assertRaises(exception.InvalidAggregateActionAdd,
self.conn.add_to_aggregate, self.context,
aggregate, 'host')
self.assertIn('setup in progress', str(ex))
def test_add_host_to_aggregate_invalid_dismissed_status(self):
"""Ensure InvalidAggregateActionAdd is raised when aggregate is
deleted.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
ex = self.assertRaises(exception.InvalidAggregateActionAdd,
self.conn.add_to_aggregate, self.context,
aggregate, 'fake_host')
self.assertIn('aggregate deleted', str(ex))
def test_add_host_to_aggregate_invalid_error_status(self):
"""Ensure InvalidAggregateActionAdd is raised when aggregate is
in error.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR)
ex = self.assertRaises(exception.InvalidAggregateActionAdd,
self.conn.add_to_aggregate, self.context,
aggregate, 'fake_host')
self.assertIn('aggregate in error', str(ex))
def test_remove_host_from_aggregate_error(self):
# Ensure we can remove a host from an aggregate even if in error.
values = _create_service_entries(self.context)
        fake_zone = list(values.keys())[0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
# let's mock the fact that the aggregate is ready!
metadata = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.ACTIVE}
db.aggregate_metadata_add(self.context, aggr['id'], metadata)
for aggregate_host in values[fake_zone]:
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], aggregate_host)
# let's mock the fact that the aggregate is in error!
expected = self.api.remove_host_from_aggregate(self.context,
aggr['id'],
values[fake_zone][0])
self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
self.assertEqual(expected['metadata'][pool_states.KEY],
pool_states.ACTIVE)
def test_remove_host_from_aggregate_invalid_dismissed_status(self):
"""Ensure InvalidAggregateActionDelete is raised when aggregate is
deleted.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
self.assertRaises(exception.InvalidAggregateActionDelete,
self.conn.remove_from_aggregate, self.context,
aggregate, 'fake_host')
def test_remove_host_from_aggregate_invalid_changing_status(self):
"""Ensure InvalidAggregateActionDelete is raised when aggregate is
changing.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
self.assertRaises(exception.InvalidAggregateActionDelete,
self.conn.remove_from_aggregate, self.context,
aggregate, 'fake_host')
def test_add_aggregate_host_raise_err(self):
# Ensure the undo operation works correctly on add.
def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
raise exception.AggregateError(
aggregate_id='', action='', reason='')
self.stubs.Set(self.compute.driver, "add_to_aggregate",
fake_driver_add_to_aggregate)
metadata = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.ACTIVE}
self.aggr.metadata = metadata
self.aggr.hosts = ['fake_host']
self.assertRaises(exception.AggregateError,
self.compute.add_aggregate_host,
self.context, host="fake_host",
aggregate=self.aggr,
slave_info=None)
self.assertEqual(self.aggr.metadata[pool_states.KEY],
pool_states.ERROR)
self.assertEqual(self.aggr.hosts, ['fake_host'])
class MockComputeAPI(object):
def __init__(self):
self._mock_calls = []
def add_aggregate_host(self, ctxt, aggregate,
host_param, host, slave_info):
self._mock_calls.append((
self.add_aggregate_host, ctxt, aggregate,
host_param, host, slave_info))
def remove_aggregate_host(self, ctxt, aggregate_id, host_param,
host, slave_info):
self._mock_calls.append((
self.remove_aggregate_host, ctxt, aggregate_id,
host_param, host, slave_info))
class StubDependencies(object):
"""Stub dependencies for ResourcePool."""
def __init__(self):
self.compute_rpcapi = MockComputeAPI()
def _is_hv_pool(self, *_ignore):
return True
def _get_metadata(self, *_ignore):
return {
pool_states.KEY: {},
'master_compute': 'master'
}
def _create_slave_info(self, *ignore):
return "SLAVE_INFO"
class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
"""A ResourcePool, use stub dependencies."""
class HypervisorPoolTestCase(test.NoDBTestCase):
fake_aggregate = {
'id': 98,
'hosts': [],
'metadata': {
'master_compute': 'master',
pool_states.POOL_FLAG: {},
pool_states.KEY: {}
}
}
def test_slave_asks_master_to_add_slave_to_pool(self):
slave = ResourcePoolWithStubs()
slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave")
self.assertIn(
(slave.compute_rpcapi.add_aggregate_host,
"CONTEXT", jsonutils.to_primitive(self.fake_aggregate),
"slave", "master", "SLAVE_INFO"),
slave.compute_rpcapi._mock_calls)
def test_slave_asks_master_to_remove_slave_from_pool(self):
slave = ResourcePoolWithStubs()
slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave")
self.assertIn(
(slave.compute_rpcapi.remove_aggregate_host,
"CONTEXT", 98, "slave", "master", "SLAVE_INFO"),
slave.compute_rpcapi._mock_calls)
class SwapXapiHostTestCase(test.NoDBTestCase):
def test_swapping(self):
self.assertEqual(
"http://otherserver:8765/somepath",
pool.swap_xapi_host(
"http://someserver:8765/somepath", 'otherserver'))
def test_no_port(self):
self.assertEqual(
"http://otherserver/somepath",
pool.swap_xapi_host(
"http://someserver/somepath", 'otherserver'))
def test_no_path(self):
self.assertEqual(
"http://otherserver",
pool.swap_xapi_host(
"http://someserver", 'otherserver'))
class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for live_migration."""
def setUp(self):
super(XenAPILiveMigrateTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
host='host')
db_fakes.stub_out_db_instance_api(self.stubs)
self.context = context.get_admin_context()
def test_live_migration_calls_vmops(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_live_migrate(context, instance_ref, dest, post_method,
recover_method, block_migration, migrate_data):
fake_live_migrate.called = True
self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate)
self.conn.live_migration(None, None, None, None, None)
self.assertTrue(fake_live_migrate.called)
def test_pre_live_migration(self):
# ensure method is present
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.conn.pre_live_migration(None, None, None, None, None)
def test_post_live_migration_at_destination(self):
# ensure method is present
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
fake_instance = {"name": "name"}
fake_network_info = "network_info"
def fake_fw(instance, network_info):
self.assertEqual(instance, fake_instance)
self.assertEqual(network_info, fake_network_info)
fake_fw.call_count += 1
def fake_create_kernel_and_ramdisk(context, session, instance,
name_label):
return "fake-kernel-file", "fake-ramdisk-file"
fake_fw.call_count = 0
_vmops = self.conn._vmops
self.stubs.Set(_vmops.firewall_driver,
'setup_basic_filtering', fake_fw)
self.stubs.Set(_vmops.firewall_driver,
'prepare_instance_filter', fake_fw)
self.stubs.Set(_vmops.firewall_driver,
'apply_instance_filter', fake_fw)
self.stubs.Set(vm_utils, "create_kernel_and_ramdisk",
fake_create_kernel_and_ramdisk)
def fake_get_vm_opaque_ref(instance):
fake_get_vm_opaque_ref.called = True
self.stubs.Set(_vmops, "_get_vm_opaque_ref", fake_get_vm_opaque_ref)
fake_get_vm_opaque_ref.called = False
def fake_strip_base_mirror_from_vdis(session, vm_ref):
fake_strip_base_mirror_from_vdis.called = True
self.stubs.Set(vm_utils, "strip_base_mirror_from_vdis",
fake_strip_base_mirror_from_vdis)
fake_strip_base_mirror_from_vdis.called = False
self.conn.post_live_migration_at_destination(None, fake_instance,
fake_network_info, None)
self.assertEqual(fake_fw.call_count, 3)
self.assertTrue(fake_get_vm_opaque_ref.called)
self.assertTrue(fake_strip_base_mirror_from_vdis.called)
def test_check_can_live_migrate_destination_with_block_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
expected = {'block_migration': True,
'migrate_data': {
'migrate_send_data': "fake_migrate_data",
'destination_sr_ref': 'asdf'
}
}
result = self.conn.check_can_live_migrate_destination(self.context,
{'host': 'host'},
{}, {},
True, False)
self.assertEqual(expected, result)
def test_check_live_migrate_destination_verifies_ip(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
for pif_ref in xenapi_fake.get_all('PIF'):
pif_rec = xenapi_fake.get_record('PIF', pif_ref)
pif_rec['IP'] = ''
pif_rec['IPv6'] = ''
self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'},
{}, {},
True, False)
def test_check_can_live_migrate_destination_block_migration_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'},
{}, {},
True, False)
def _add_default_live_migrate_stubs(self, conn):
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
pass
def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
return []
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
def fake_lookup_kernel_ramdisk(session, vm):
return ("fake_PV_kernel", "fake_PV_ramdisk")
self.stubs.Set(conn._vmops, "_generate_vdi_map",
fake_generate_vdi_map)
self.stubs.Set(conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
self.stubs.Set(conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
fake_lookup_kernel_ramdisk)
def test_check_can_live_migrate_source_with_block_migrate(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
result = self.conn.check_can_live_migrate_source(self.context,
{'host': 'host'},
dest_check_data)
self.assertEqual(dest_check_data, result)
def test_check_can_live_migrate_source_with_block_migrate_iscsi(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
return ['sr_ref']
self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
def fake_make_plugin_call(plugin, method, **args):
return "true"
self.stubs.Set(self.conn._vmops, "_make_plugin_call",
fake_make_plugin_call)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
result = self.conn.check_can_live_migrate_source(self.context,
{'host': 'host'},
dest_check_data)
self.assertEqual(dest_check_data, result)
def test_check_can_live_migrate_source_with_block_iscsi_fails(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
return ['sr_ref']
self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
def fake_make_plugin_call(plugin, method, **args):
return {'returncode': 'error', 'message': 'Plugin not found'}
self.stubs.Set(self.conn._vmops, "_make_plugin_call",
fake_make_plugin_call)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_source,
self.context, {'host': 'host'},
{})
def test_check_can_live_migrate_source_with_block_migrate_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_source,
self.context,
{'host': 'host'},
dest_check_data)
def test_check_can_live_migrate_works(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_aggregate_get_by_host(context, host, key=None):
self.assertEqual(CONF.host, host)
return [dict(test_aggregate.fake_aggregate,
metadetails={"host": "test_host_uuid"})]
self.stubs.Set(db, "aggregate_get_by_host",
fake_aggregate_get_by_host)
self.conn.check_can_live_migrate_destination(self.context,
{'host': 'host'}, False, False)
def test_check_can_live_migrate_fails(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_aggregate_get_by_host(context, host, key=None):
self.assertEqual(CONF.host, host)
return [dict(test_aggregate.fake_aggregate,
metadetails={"dest_other": "test_host_uuid"})]
self.stubs.Set(db, "aggregate_get_by_host",
fake_aggregate_get_by_host)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'}, None, None)
def test_live_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def fake_get_host_opaque_ref(context, destination_hostname):
return "fake_host"
self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
fake_get_host_opaque_ref)
def post_method(context, instance, destination_hostname,
block_migration, migrate_data):
post_method.called = True
self.conn.live_migration(self.conn, None, None, post_method, None)
self.assertTrue(post_method.called, "post_method.called")
def test_live_migration_on_failure(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def fake_get_host_opaque_ref(context, destination_hostname):
return "fake_host"
self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
fake_get_host_opaque_ref)
def fake_call_xenapi(*args):
raise NotImplementedError()
self.stubs.Set(self.conn._vmops._session, "call_xenapi",
fake_call_xenapi)
def recover_method(context, instance, destination_hostname,
block_migration):
recover_method.called = True
self.assertRaises(NotImplementedError, self.conn.live_migration,
self.conn, None, None, None, recover_method)
self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migration_calls_post_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def post_method(context, instance, destination_hostname,
block_migration, migrate_data):
post_method.called = True
# pass block_migration = True and migrate data
migrate_data = {"destination_sr_ref": "foo",
"migrate_send_data": "bar"}
self.conn.live_migration(self.conn, None, None, post_method, None,
True, migrate_data)
self.assertTrue(post_method.called, "post_method.called")
def test_live_migration_block_cleans_srs(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def fake_get_iscsi_srs(context, instance):
return ['sr_ref']
self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
def fake_forget_sr(context, instance):
fake_forget_sr.called = True
self.stubs.Set(volume_utils, "forget_sr",
fake_forget_sr)
def post_method(context, instance, destination_hostname,
block_migration, migrate_data):
post_method.called = True
migrate_data = {"destination_sr_ref": "foo",
"migrate_send_data": "bar"}
self.conn.live_migration(self.conn, None, None, post_method, None,
True, migrate_data)
self.assertTrue(post_method.called, "post_method.called")
self.assertTrue(fake_forget_sr.called, "forget_sr.called")
def test_live_migration_with_block_migration_raises_invalid_param(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def recover_method(context, instance, destination_hostname,
block_migration):
recover_method.called = True
# pass block_migration = True and no migrate data
self.assertRaises(exception.InvalidParameterValue,
self.conn.live_migration, self.conn,
None, None, None, recover_method, True, None)
self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migration_with_block_migration_fails_migrate_send(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def recover_method(context, instance, destination_hostname,
block_migration):
recover_method.called = True
# pass block_migration = True and migrate data
migrate_data = dict(destination_sr_ref='foo', migrate_send_data='bar')
self.assertRaises(exception.MigrationError,
self.conn.live_migration, self.conn,
None, None, None, recover_method, True, migrate_data)
self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migrate_block_migration_xapi_call_parameters(self):
fake_vdi_map = object()
class Session(xenapi_fake.SessionBase):
def VM_migrate_send(self_, session, vmref, migrate_data, islive,
vdi_map, vif_map, options):
self.assertEqual('SOMEDATA', migrate_data)
self.assertEqual(fake_vdi_map, vdi_map)
stubs.stubout_session(self.stubs, Session)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(conn)
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
return fake_vdi_map
self.stubs.Set(conn._vmops, "_generate_vdi_map",
fake_generate_vdi_map)
def dummy_callback(*args, **kwargs):
pass
conn.live_migration(
self.context, instance=dict(name='ignore'), dest=None,
post_method=dummy_callback, recover_method=dummy_callback,
block_migration="SOMEDATA",
migrate_data=dict(migrate_send_data='SOMEDATA',
destination_sr_ref="TARGET_SR_OPAQUE_REF"))
def test_live_migrate_pool_migration_xapi_call_parameters(self):
class Session(xenapi_fake.SessionBase):
def VM_pool_migrate(self_, session, vm_ref, host_ref, options):
self.assertEqual("fake_ref", host_ref)
self.assertEqual({"live": "true"}, options)
raise IOError()
stubs.stubout_session(self.stubs, Session)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(conn)
def fake_get_host_opaque_ref(context, destination):
return "fake_ref"
self.stubs.Set(conn._vmops, "_get_host_opaque_ref",
fake_get_host_opaque_ref)
def dummy_callback(*args, **kwargs):
pass
self.assertRaises(IOError, conn.live_migration,
self.context, instance=dict(name='ignore'), dest=None,
post_method=dummy_callback, recover_method=dummy_callback,
block_migration=False, migrate_data={})
def test_generate_vdi_map(self):
stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm_ref = "fake_vm_ref"
def fake_find_sr(_session):
self.assertEqual(conn._session, _session)
return "source_sr_ref"
self.stubs.Set(vm_utils, "safe_find_sr", fake_find_sr)
def fake_get_instance_vdis_for_sr(_session, _vm_ref, _sr_ref):
self.assertEqual(conn._session, _session)
self.assertEqual(vm_ref, _vm_ref)
self.assertEqual("source_sr_ref", _sr_ref)
return ["vdi0", "vdi1"]
self.stubs.Set(vm_utils, "get_instance_vdis_for_sr",
fake_get_instance_vdis_for_sr)
result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref)
self.assertEqual({"vdi0": "dest_sr_ref",
"vdi1": "dest_sr_ref"}, result)
def test_rollback_live_migration_at_destination(self):
stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(conn, "destroy") as mock_destroy:
conn.rollback_live_migration_at_destination("context",
"instance", [], {'block_device_mapping': []})
self.assertFalse(mock_destroy.called)
class XenAPIInjectMetadataTestCase(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(XenAPIInjectMetadataTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.xenstore = dict(persist={}, ephem={})
self.called_fake_get_vm_opaque_ref = False
def fake_get_vm_opaque_ref(inst, instance):
self.called_fake_get_vm_opaque_ref = True
if instance["uuid"] == "not_found":
raise exception.NotFound
self.assertEqual(instance, {'uuid': 'fake'})
return 'vm_ref'
def fake_add_to_param_xenstore(inst, vm_ref, key, val):
self.assertEqual(vm_ref, 'vm_ref')
self.xenstore['persist'][key] = val
def fake_remove_from_param_xenstore(inst, vm_ref, key):
self.assertEqual(vm_ref, 'vm_ref')
if key in self.xenstore['persist']:
del self.xenstore['persist'][key]
def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None):
self.assertEqual(instance, {'uuid': 'fake'})
self.assertEqual(vm_ref, 'vm_ref')
self.xenstore['ephem'][path] = jsonutils.dumps(value)
def fake_delete_from_xenstore(inst, instance, path, vm_ref=None):
self.assertEqual(instance, {'uuid': 'fake'})
self.assertEqual(vm_ref, 'vm_ref')
if path in self.xenstore['ephem']:
del self.xenstore['ephem'][path]
self.stubs.Set(vmops.VMOps, '_get_vm_opaque_ref',
fake_get_vm_opaque_ref)
self.stubs.Set(vmops.VMOps, '_add_to_param_xenstore',
fake_add_to_param_xenstore)
self.stubs.Set(vmops.VMOps, '_remove_from_param_xenstore',
fake_remove_from_param_xenstore)
self.stubs.Set(vmops.VMOps, '_write_to_xenstore',
fake_write_to_xenstore)
self.stubs.Set(vmops.VMOps, '_delete_from_xenstore',
fake_delete_from_xenstore)
def test_inject_instance_metadata(self):
# Add some system_metadata to ensure it doesn't get added
# to xenstore
instance = dict(metadata=[{'key': 'a', 'value': 1},
{'key': 'b', 'value': 2},
{'key': 'c', 'value': 3},
# Check xenstore key sanitizing
{'key': 'hi.there', 'value': 4},
{'key': 'hi!t.e/e', 'value': 5}],
                        # system_metadata should not end up in the xenstore
system_metadata=[{'key': 'sys_a', 'value': 1},
{'key': 'sys_b', 'value': 2},
{'key': 'sys_c', 'value': 3}],
uuid='fake')
self.conn._vmops._inject_instance_metadata(instance, 'vm_ref')
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
'vm-data/user-metadata/hi_there': '4',
'vm-data/user-metadata/hi_t_e_e': '5',
},
'ephem': {},
})
def test_change_instance_metadata_add(self):
# Test XenStore key sanitizing here, too.
diff = {'test.key': ['+', 4]}
instance = {'uuid': 'fake'}
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
}
self.conn._vmops.change_instance_metadata(instance, diff)
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
'vm-data/user-metadata/test_key': '4',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
'vm-data/user-metadata/test_key': '4',
},
})
def test_change_instance_metadata_update(self):
diff = dict(b=['+', 4])
instance = {'uuid': 'fake'}
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
}
self.conn._vmops.change_instance_metadata(instance, diff)
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '4',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '4',
'vm-data/user-metadata/c': '3',
},
})
def test_change_instance_metadata_delete(self):
diff = dict(b=['-'])
instance = {'uuid': 'fake'}
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
}
self.conn._vmops.change_instance_metadata(instance, diff)
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/c': '3',
},
})
def test_change_instance_metadata_not_found(self):
instance = {'uuid': 'not_found'}
self.conn._vmops.change_instance_metadata(instance, "fake_diff")
self.assertTrue(self.called_fake_get_vm_opaque_ref)
class XenAPISessionTestCase(test.NoDBTestCase):
def _get_mock_xapisession(self, software_version):
class MockXapiSession(xenapi_session.XenAPISession):
def __init__(_ignore):
"Skip the superclass's dirty init"
def _get_software_version(_ignore):
return software_version
return MockXapiSession()
def test_local_session(self):
session = self._get_mock_xapisession({})
session.is_local_connection = True
session.XenAPI = self.mox.CreateMockAnything()
session.XenAPI.xapi_local().AndReturn("local_connection")
self.mox.ReplayAll()
self.assertEqual("local_connection",
session._create_session("unix://local"))
def test_remote_session(self):
session = self._get_mock_xapisession({})
session.is_local_connection = False
session.XenAPI = self.mox.CreateMockAnything()
session.XenAPI.Session("url").AndReturn("remote_connection")
self.mox.ReplayAll()
self.assertEqual("remote_connection", session._create_session("url"))
def test_get_product_version_product_brand_does_not_fail(self):
session = self._get_mock_xapisession({
'build_number': '0',
'date': '2012-08-03',
'hostname': 'komainu',
'linux': '3.2.0-27-generic',
'network_backend': 'bridge',
'platform_name': 'XCP_Kronos',
'platform_version': '1.6.0',
'xapi': '1.3',
'xen': '4.1.2',
'xencenter_max': '1.10',
'xencenter_min': '1.10'
})
self.assertEqual(
((1, 6, 0), None),
session._get_product_version_and_brand()
)
def test_get_product_version_product_brand_xs_6(self):
session = self._get_mock_xapisession({
'product_brand': 'XenServer',
'product_version': '6.0.50',
'platform_version': '0.0.1'
})
self.assertEqual(
((6, 0, 50), 'XenServer'),
session._get_product_version_and_brand()
)
def test_verify_plugin_version_same(self):
session = self._get_mock_xapisession({})
session.PLUGIN_REQUIRED_VERSION = '2.4'
self.mox.StubOutWithMock(session, 'call_plugin_serialized')
session.call_plugin_serialized('nova_plugin_version', 'get_version',
).AndReturn("2.4")
self.mox.ReplayAll()
session._verify_plugin_version()
def test_verify_plugin_version_compatible(self):
session = self._get_mock_xapisession({})
session.XenAPI = xenapi_fake.FakeXenAPI()
session.PLUGIN_REQUIRED_VERSION = '2.4'
self.mox.StubOutWithMock(session, 'call_plugin_serialized')
session.call_plugin_serialized('nova_plugin_version', 'get_version',
).AndReturn("2.5")
self.mox.ReplayAll()
session._verify_plugin_version()
def test_verify_plugin_version_bad_maj(self):
session = self._get_mock_xapisession({})
session.XenAPI = xenapi_fake.FakeXenAPI()
session.PLUGIN_REQUIRED_VERSION = '2.4'
self.mox.StubOutWithMock(session, 'call_plugin_serialized')
session.call_plugin_serialized('nova_plugin_version', 'get_version',
).AndReturn("3.0")
self.mox.ReplayAll()
self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)
def test_verify_plugin_version_bad_min(self):
session = self._get_mock_xapisession({})
session.XenAPI = xenapi_fake.FakeXenAPI()
session.PLUGIN_REQUIRED_VERSION = '2.4'
self.mox.StubOutWithMock(session, 'call_plugin_serialized')
session.call_plugin_serialized('nova_plugin_version', 'get_version',
).AndReturn("2.3")
self.mox.ReplayAll()
self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)
def test_verify_current_version_matches(self):
session = self._get_mock_xapisession({})
# Import the plugin to extract its version
path = os.path.dirname(__file__)
rel_path_elem = "../../../../../plugins/xenserver/xenapi/etc/xapi.d/" \
"plugins/nova_plugin_version"
for elem in rel_path_elem.split('/'):
path = os.path.join(path, elem)
path = os.path.realpath(path)
plugin_version = None
with open(path) as plugin_file:
for line in plugin_file:
if "PLUGIN_VERSION = " in line:
plugin_version = line.strip()[17:].strip('"')
self.assertEqual(session.PLUGIN_REQUIRED_VERSION,
plugin_version)
class XenAPIFakeTestCase(test.NoDBTestCase):
def test_query_matches(self):
record = {'a': '1', 'b': '2', 'c_d': '3'}
tests = {'field "a"="1"': True,
'field "b"="2"': True,
'field "b"="4"': False,
'not field "b"="4"': True,
'field "a"="1" and field "b"="4"': False,
'field "a"="1" or field "b"="4"': True,
'field "c__d"="3"': True,
'field \'b\'=\'2\'': True,
}
for query in tests.keys():
expected = tests[query]
fail_msg = "for test '%s'" % query
self.assertEqual(xenapi_fake._query_matches(record, query),
expected, fail_msg)
def test_query_bad_format(self):
record = {'a': '1', 'b': '2', 'c': '3'}
tests = ['"a"="1" or "b"="4"',
'a=1',
]
for query in tests:
fail_msg = "for test '%s'" % query
self.assertFalse(xenapi_fake._query_matches(record, query),
fail_msg)
|
florianholzapfel/home-assistant
|
refs/heads/dev
|
homeassistant/components/automation/time.py
|
19
|
"""
Offer time listening automation rules.
For more details about this automation rule, please refer to the documentation
at https://home-assistant.io/components/automation/#time-trigger
"""
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import CONF_AFTER, CONF_PLATFORM
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import async_track_time_change
CONF_HOURS = "hours"
CONF_MINUTES = "minutes"
CONF_SECONDS = "seconds"
_LOGGER = logging.getLogger(__name__)
TRIGGER_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_PLATFORM): 'time',
CONF_AFTER: cv.time,
CONF_HOURS: vol.Any(vol.Coerce(int), vol.Coerce(str)),
CONF_MINUTES: vol.Any(vol.Coerce(int), vol.Coerce(str)),
CONF_SECONDS: vol.Any(vol.Coerce(int), vol.Coerce(str)),
}), cv.has_at_least_one_key(CONF_HOURS, CONF_MINUTES,
CONF_SECONDS, CONF_AFTER))
def async_trigger(hass, config, action):
"""Listen for state changes based on configuration."""
if CONF_AFTER in config:
after = config.get(CONF_AFTER)
hours, minutes, seconds = after.hour, after.minute, after.second
else:
hours = config.get(CONF_HOURS)
minutes = config.get(CONF_MINUTES)
seconds = config.get(CONF_SECONDS)
@callback
def time_automation_listener(now):
"""Listen for time changes and calls action."""
hass.async_run_job(action, {
'trigger': {
'platform': 'time',
'now': now,
},
})
return async_track_time_change(hass, time_automation_listener,
hour=hours, minute=minutes, second=seconds)
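# Hedged usage sketch (added for illustration; not part of the original
# component). It shows how a trigger configuration could be validated with
# TRIGGER_SCHEMA above; the literal hour/minute values are assumptions chosen
# only for this example.
if __name__ == '__main__':
    _example = {CONF_PLATFORM: 'time', CONF_HOURS: 7, CONF_MINUTES: 30}
    # TRIGGER_SCHEMA coerces the numeric fields and enforces that at least
    # one of hours/minutes/seconds/after is present.
    print(TRIGGER_SCHEMA(_example))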
|
ChristopherHogan/numpy
|
refs/heads/master
|
numpy/polynomial/tests/test_legendre.py
|
123
|
"""Tests for legendre module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.legendre as leg
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
L0 = np.array([1])
L1 = np.array([0, 1])
L2 = np.array([-1, 0, 3])/2
L3 = np.array([0, -3, 0, 5])/2
L4 = np.array([3, 0, -30, 0, 35])/8
L5 = np.array([0, 15, 0, -70, 0, 63])/8
L6 = np.array([-5, 0, 105, 0, -315, 0, 231])/16
L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429])/16
L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435])/128
L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155])/128
Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9]
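# Note added for illustration: the coefficient arrays above (stored lowest
# degree first) follow Bonnet's recurrence
#     (n + 1)*P_{n+1}(x) = (2*n + 1)*x*P_n(x) - n*P_{n-1}(x),
# where multiplying by x shifts coefficients up one position. Worked example
# for n = 2: 5*[0, -1, 0, 3]/2 - 2*[0, 1, 0, 0] = [0, -9, 0, 15]/2, and
# dividing by 3 gives [0, -3, 0, 5]/2, i.e. L3.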
def trim(x):
return leg.legtrim(x, tol=1e-6)
class TestConstants(TestCase):
def test_legdomain(self):
assert_equal(leg.legdomain, [-1, 1])
def test_legzero(self):
assert_equal(leg.legzero, [0])
def test_legone(self):
assert_equal(leg.legone, [1])
def test_legx(self):
assert_equal(leg.legx, [0, 1])
class TestArithmetic(TestCase):
x = np.linspace(-1, 1, 100)
def test_legadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = leg.legadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_legsub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = leg.legsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_legmulx(self):
assert_equal(leg.legmulx([0]), [0])
assert_equal(leg.legmulx([1]), [0, 1])
for i in range(1, 5):
tmp = 2*i + 1
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp]
assert_equal(leg.legmulx(ser), tgt)
def test_legmul(self):
# check values of result
for i in range(5):
pol1 = [0]*i + [1]
val1 = leg.legval(self.x, pol1)
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
pol2 = [0]*j + [1]
val2 = leg.legval(self.x, pol2)
pol3 = leg.legmul(pol1, pol2)
val3 = leg.legval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1*val2, err_msg=msg)
def test_legdiv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = leg.legadd(ci, cj)
quo, rem = leg.legdiv(tgt, ci)
res = leg.legadd(leg.legmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(TestCase):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2., 2., 2.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_legval(self):
#check empty input
assert_equal(leg.legval([], [1]).size, 0)
        #check normal input
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Llist]
for i in range(10):
msg = "At i=%d" % i
tgt = y[i]
res = leg.legval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
assert_equal(leg.legval(x, [1]).shape, dims)
assert_equal(leg.legval(x, [1, 0]).shape, dims)
assert_equal(leg.legval(x, [1, 0, 0]).shape, dims)
def test_legval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = leg.legval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = leg.legval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_legval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = leg.legval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = leg.legval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_leggrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = leg.leggrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = leg.leggrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_leggrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = leg.leggrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = leg.leggrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase):
def test_legint(self):
# check exceptions
assert_raises(ValueError, leg.legint, [0], .5)
assert_raises(ValueError, leg.legint, [0], -1)
assert_raises(ValueError, leg.legint, [0], 1, [0, 0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = leg.legint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i])
res = leg.leg2poly(legint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(leg.legval(-1, legint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i], scl=2)
res = leg.leg2poly(legint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = leg.legint(tgt, m=1)
res = leg.legint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = leg.legint(tgt, m=1, k=[k])
res = leg.legint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1)
res = leg.legint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = leg.legint(tgt, m=1, k=[k], scl=2)
res = leg.legint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_legint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([leg.legint(c) for c in c2d.T]).T
res = leg.legint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([leg.legint(c) for c in c2d])
res = leg.legint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([leg.legint(c, k=3) for c in c2d])
res = leg.legint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(TestCase):
def test_legder(self):
# check exceptions
assert_raises(ValueError, leg.legder, [0], .5)
assert_raises(ValueError, leg.legder, [0], -1)
# check that zeroth derivative does nothing
for i in range(5):
tgt = [0]*i + [1]
res = leg.legder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = leg.legder(leg.legint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_legder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([leg.legder(c) for c in c2d.T]).T
res = leg.legder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([leg.legder(c) for c in c2d])
res = leg.legder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(TestCase):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_legvander(self):
# check for 1d x
x = np.arange(3)
v = leg.legvander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], leg.legval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = leg.legvander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], leg.legval(x, coef))
def test_legvander2d(self):
# also tests legval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = leg.legvander2d(x1, x2, [1, 2])
tgt = leg.legval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = leg.legvander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_legvander3d(self):
# also tests legval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = leg.legvander3d(x1, x2, x3, [1, 2, 3])
tgt = leg.legval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(TestCase):
def test_legfit(self):
def f(x):
return x*(x - 1)*(x - 2)
# Test exceptions
assert_raises(ValueError, leg.legfit, [1], [1], -1)
assert_raises(TypeError, leg.legfit, [[1]], [1], 0)
assert_raises(TypeError, leg.legfit, [], [1], 0)
assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0)
assert_raises(TypeError, leg.legfit, [1, 2], [1], 0)
assert_raises(TypeError, leg.legfit, [1], [1, 2], 0)
assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = leg.legfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(leg.legval(x, coef3), y)
#
coef4 = leg.legfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(leg.legval(x, coef4), y)
#
coef2d = leg.legfit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
yw[0::2] = 0
wcoef3 = leg.legfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
# test scaling with complex-valued x points whose square
# is zero when summed.
x = [1, 1j, -1, -1j]
assert_almost_equal(leg.legfit(x, x, 1), [0, 1])
class TestCompanion(TestCase):
def test_raises(self):
assert_raises(ValueError, leg.legcompanion, [])
assert_raises(ValueError, leg.legcompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(leg.legcompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(leg.legcompanion([1, 2])[0, 0] == -.5)
class TestGauss(TestCase):
def test_100(self):
x, w = leg.leggauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = leg.legvander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1/np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = 2.0
assert_almost_equal(w.sum(), tgt)
class TestMisc(TestCase):
def test_legfromroots(self):
res = leg.legfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
pol = leg.legfromroots(roots)
res = leg.legval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(leg.leg2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_legroots(self):
assert_almost_equal(leg.legroots([1]), [])
assert_almost_equal(leg.legroots([1, 2]), [-.5])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = leg.legroots(leg.legfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_legtrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, leg.legtrim, coef, -1)
# Test results
assert_equal(leg.legtrim(coef), coef[:-1])
assert_equal(leg.legtrim(coef, 1), coef[:-3])
assert_equal(leg.legtrim(coef, 2), [0])
def test_legline(self):
assert_equal(leg.legline(3, 4), [3, 4])
def test_leg2poly(self):
for i in range(10):
assert_almost_equal(leg.leg2poly([0]*i + [1]), Llist[i])
def test_poly2leg(self):
for i in range(10):
assert_almost_equal(leg.poly2leg(Llist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(-1, 1, 11)
tgt = 1.
res = leg.legweight(x)
assert_almost_equal(res, tgt)
if __name__ == "__main__":
run_module_suite()
|
JTarball/docker-django-polymer-starter-kit
|
refs/heads/master
|
docker/app/app/backend/apps/_archive/blog_old__/migrations/0002_blog_fixture.py
|
8
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
from django.core.management import call_command
call_command("loaddata", "blog_fixtures.json")
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'accounts.accountsuser': {
'Meta': {'object_name': 'AccountsUser'},
'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_subscribed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'blog.category': {
'Meta': {'ordering': "['title']", 'object_name': 'Category'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'blog.post': {
'Meta': {'ordering': "['-created_at', 'title']", 'object_name': 'Post'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': u"orm['accounts.AccountsUser']"}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['blog.Category']", 'null': 'True', 'blank': 'True'}),
'changelog': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['blog.Update']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'content_markdown': ('django.db.models.fields.TextField', [], {}),
'content_markup': ('django.db.models.fields.TextField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta_description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'blog.update': {
'Meta': {'object_name': 'Update'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['blog']
symmetrical = True
|
AnimeshSinha1309/Website-Edunet
|
refs/heads/master
|
WebsiteEdunet/env/Lib/site-packages/pip/_vendor/requests/packages/chardet/euctwprober.py
|
2993
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTWSMModel
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCTWSMModel)
self._mDistributionAnalyzer = EUCTWDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-TW"
|
bob123bob/Sick-Beard
|
refs/heads/development
|
lib/subliminal/language.py
|
23
|
# -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from .utils import to_unicode
import re
import logging
logger = logging.getLogger("subliminal")
COUNTRIES = [('AF', 'AFG', '004', u'Afghanistan'),
('AX', 'ALA', '248', u'Åland Islands'),
('AL', 'ALB', '008', u'Albania'),
('DZ', 'DZA', '012', u'Algeria'),
('AS', 'ASM', '016', u'American Samoa'),
('AD', 'AND', '020', u'Andorra'),
('AO', 'AGO', '024', u'Angola'),
('AI', 'AIA', '660', u'Anguilla'),
('AQ', 'ATA', '010', u'Antarctica'),
('AG', 'ATG', '028', u'Antigua and Barbuda'),
('AR', 'ARG', '032', u'Argentina'),
('AM', 'ARM', '051', u'Armenia'),
('AW', 'ABW', '533', u'Aruba'),
('AU', 'AUS', '036', u'Australia'),
('AT', 'AUT', '040', u'Austria'),
('AZ', 'AZE', '031', u'Azerbaijan'),
('BS', 'BHS', '044', u'Bahamas'),
('BH', 'BHR', '048', u'Bahrain'),
('BD', 'BGD', '050', u'Bangladesh'),
('BB', 'BRB', '052', u'Barbados'),
('BY', 'BLR', '112', u'Belarus'),
('BE', 'BEL', '056', u'Belgium'),
('BZ', 'BLZ', '084', u'Belize'),
('BJ', 'BEN', '204', u'Benin'),
('BM', 'BMU', '060', u'Bermuda'),
('BT', 'BTN', '064', u'Bhutan'),
('BO', 'BOL', '068', u'Bolivia, Plurinational State of'),
('BQ', 'BES', '535', u'Bonaire, Sint Eustatius and Saba'),
('BA', 'BIH', '070', u'Bosnia and Herzegovina'),
('BW', 'BWA', '072', u'Botswana'),
('BV', 'BVT', '074', u'Bouvet Island'),
('BR', 'BRA', '076', u'Brazil'),
('IO', 'IOT', '086', u'British Indian Ocean Territory'),
('BN', 'BRN', '096', u'Brunei Darussalam'),
('BG', 'BGR', '100', u'Bulgaria'),
('BF', 'BFA', '854', u'Burkina Faso'),
('BI', 'BDI', '108', u'Burundi'),
('KH', 'KHM', '116', u'Cambodia'),
('CM', 'CMR', '120', u'Cameroon'),
('CA', 'CAN', '124', u'Canada'),
('CV', 'CPV', '132', u'Cape Verde'),
('KY', 'CYM', '136', u'Cayman Islands'),
('CF', 'CAF', '140', u'Central African Republic'),
('TD', 'TCD', '148', u'Chad'),
('CL', 'CHL', '152', u'Chile'),
('CN', 'CHN', '156', u'China'),
('CX', 'CXR', '162', u'Christmas Island'),
('CC', 'CCK', '166', u'Cocos (Keeling) Islands'),
('CO', 'COL', '170', u'Colombia'),
('KM', 'COM', '174', u'Comoros'),
('CG', 'COG', '178', u'Congo'),
('CD', 'COD', '180', u'Congo, The Democratic Republic of the'),
('CK', 'COK', '184', u'Cook Islands'),
('CR', 'CRI', '188', u'Costa Rica'),
('CI', 'CIV', '384', u'Côte d\'Ivoire'),
('HR', 'HRV', '191', u'Croatia'),
('CU', 'CUB', '192', u'Cuba'),
('CW', 'CUW', '531', u'Curaçao'),
('CY', 'CYP', '196', u'Cyprus'),
('CZ', 'CZE', '203', u'Czech Republic'),
('DK', 'DNK', '208', u'Denmark'),
('DJ', 'DJI', '262', u'Djibouti'),
('DM', 'DMA', '212', u'Dominica'),
('DO', 'DOM', '214', u'Dominican Republic'),
('EC', 'ECU', '218', u'Ecuador'),
('EG', 'EGY', '818', u'Egypt'),
('SV', 'SLV', '222', u'El Salvador'),
('GQ', 'GNQ', '226', u'Equatorial Guinea'),
('ER', 'ERI', '232', u'Eritrea'),
('EE', 'EST', '233', u'Estonia'),
('ET', 'ETH', '231', u'Ethiopia'),
('FK', 'FLK', '238', u'Falkland Islands (Malvinas)'),
('FO', 'FRO', '234', u'Faroe Islands'),
('FJ', 'FJI', '242', u'Fiji'),
('FI', 'FIN', '246', u'Finland'),
('FR', 'FRA', '250', u'France'),
('GF', 'GUF', '254', u'French Guiana'),
('PF', 'PYF', '258', u'French Polynesia'),
('TF', 'ATF', '260', u'French Southern Territories'),
('GA', 'GAB', '266', u'Gabon'),
('GM', 'GMB', '270', u'Gambia'),
('GE', 'GEO', '268', u'Georgia'),
('DE', 'DEU', '276', u'Germany'),
('GH', 'GHA', '288', u'Ghana'),
('GI', 'GIB', '292', u'Gibraltar'),
('GR', 'GRC', '300', u'Greece'),
('GL', 'GRL', '304', u'Greenland'),
('GD', 'GRD', '308', u'Grenada'),
('GP', 'GLP', '312', u'Guadeloupe'),
('GU', 'GUM', '316', u'Guam'),
('GT', 'GTM', '320', u'Guatemala'),
('GG', 'GGY', '831', u'Guernsey'),
('GN', 'GIN', '324', u'Guinea'),
('GW', 'GNB', '624', u'Guinea-Bissau'),
('GY', 'GUY', '328', u'Guyana'),
('HT', 'HTI', '332', u'Haiti'),
('HM', 'HMD', '334', u'Heard Island and McDonald Islands'),
('VA', 'VAT', '336', u'Holy See (Vatican City State)'),
('HN', 'HND', '340', u'Honduras'),
('HK', 'HKG', '344', u'Hong Kong'),
('HU', 'HUN', '348', u'Hungary'),
('IS', 'ISL', '352', u'Iceland'),
('IN', 'IND', '356', u'India'),
('ID', 'IDN', '360', u'Indonesia'),
('IR', 'IRN', '364', u'Iran, Islamic Republic of'),
('IQ', 'IRQ', '368', u'Iraq'),
('IE', 'IRL', '372', u'Ireland'),
('IM', 'IMN', '833', u'Isle of Man'),
('IL', 'ISR', '376', u'Israel'),
('IT', 'ITA', '380', u'Italy'),
('JM', 'JAM', '388', u'Jamaica'),
('JP', 'JPN', '392', u'Japan'),
('JE', 'JEY', '832', u'Jersey'),
('JO', 'JOR', '400', u'Jordan'),
('KZ', 'KAZ', '398', u'Kazakhstan'),
('KE', 'KEN', '404', u'Kenya'),
('KI', 'KIR', '296', u'Kiribati'),
('KP', 'PRK', '408', u'Korea, Democratic People\'s Republic of'),
('KR', 'KOR', '410', u'Korea, Republic of'),
('KW', 'KWT', '414', u'Kuwait'),
('KG', 'KGZ', '417', u'Kyrgyzstan'),
('LA', 'LAO', '418', u'Lao People\'s Democratic Republic'),
('LV', 'LVA', '428', u'Latvia'),
('LB', 'LBN', '422', u'Lebanon'),
('LS', 'LSO', '426', u'Lesotho'),
('LR', 'LBR', '430', u'Liberia'),
('LY', 'LBY', '434', u'Libya'),
('LI', 'LIE', '438', u'Liechtenstein'),
('LT', 'LTU', '440', u'Lithuania'),
('LU', 'LUX', '442', u'Luxembourg'),
('MO', 'MAC', '446', u'Macao'),
('MK', 'MKD', '807', u'Macedonia, Republic of'),
('MG', 'MDG', '450', u'Madagascar'),
('MW', 'MWI', '454', u'Malawi'),
('MY', 'MYS', '458', u'Malaysia'),
('MV', 'MDV', '462', u'Maldives'),
('ML', 'MLI', '466', u'Mali'),
('MT', 'MLT', '470', u'Malta'),
('MH', 'MHL', '584', u'Marshall Islands'),
('MQ', 'MTQ', '474', u'Martinique'),
('MR', 'MRT', '478', u'Mauritania'),
('MU', 'MUS', '480', u'Mauritius'),
('YT', 'MYT', '175', u'Mayotte'),
('MX', 'MEX', '484', u'Mexico'),
('FM', 'FSM', '583', u'Micronesia, Federated States of'),
('MD', 'MDA', '498', u'Moldova, Republic of'),
('MC', 'MCO', '492', u'Monaco'),
('MN', 'MNG', '496', u'Mongolia'),
('ME', 'MNE', '499', u'Montenegro'),
('MS', 'MSR', '500', u'Montserrat'),
('MA', 'MAR', '504', u'Morocco'),
('MZ', 'MOZ', '508', u'Mozambique'),
('MM', 'MMR', '104', u'Myanmar'),
('NA', 'NAM', '516', u'Namibia'),
('NR', 'NRU', '520', u'Nauru'),
('NP', 'NPL', '524', u'Nepal'),
('NL', 'NLD', '528', u'Netherlands'),
('NC', 'NCL', '540', u'New Caledonia'),
('NZ', 'NZL', '554', u'New Zealand'),
('NI', 'NIC', '558', u'Nicaragua'),
('NE', 'NER', '562', u'Niger'),
('NG', 'NGA', '566', u'Nigeria'),
('NU', 'NIU', '570', u'Niue'),
('NF', 'NFK', '574', u'Norfolk Island'),
('MP', 'MNP', '580', u'Northern Mariana Islands'),
('NO', 'NOR', '578', u'Norway'),
('OM', 'OMN', '512', u'Oman'),
('PK', 'PAK', '586', u'Pakistan'),
('PW', 'PLW', '585', u'Palau'),
('PS', 'PSE', '275', u'Palestinian Territory, Occupied'),
('PA', 'PAN', '591', u'Panama'),
('PG', 'PNG', '598', u'Papua New Guinea'),
('PY', 'PRY', '600', u'Paraguay'),
('PE', 'PER', '604', u'Peru'),
('PH', 'PHL', '608', u'Philippines'),
('PN', 'PCN', '612', u'Pitcairn'),
('PL', 'POL', '616', u'Poland'),
('PT', 'PRT', '620', u'Portugal'),
('PR', 'PRI', '630', u'Puerto Rico'),
('QA', 'QAT', '634', u'Qatar'),
('RE', 'REU', '638', u'Réunion'),
('RO', 'ROU', '642', u'Romania'),
('RU', 'RUS', '643', u'Russian Federation'),
('RW', 'RWA', '646', u'Rwanda'),
('BL', 'BLM', '652', u'Saint Barthélemy'),
('SH', 'SHN', '654', u'Saint Helena, Ascension and Tristan da Cunha'),
('KN', 'KNA', '659', u'Saint Kitts and Nevis'),
('LC', 'LCA', '662', u'Saint Lucia'),
('MF', 'MAF', '663', u'Saint Martin (French part)'),
('PM', 'SPM', '666', u'Saint Pierre and Miquelon'),
('VC', 'VCT', '670', u'Saint Vincent and the Grenadines'),
('WS', 'WSM', '882', u'Samoa'),
('SM', 'SMR', '674', u'San Marino'),
('ST', 'STP', '678', u'Sao Tome and Principe'),
('SA', 'SAU', '682', u'Saudi Arabia'),
('SN', 'SEN', '686', u'Senegal'),
('RS', 'SRB', '688', u'Serbia'),
('SC', 'SYC', '690', u'Seychelles'),
('SL', 'SLE', '694', u'Sierra Leone'),
('SG', 'SGP', '702', u'Singapore'),
('SX', 'SXM', '534', u'Sint Maarten (Dutch part)'),
('SK', 'SVK', '703', u'Slovakia'),
('SI', 'SVN', '705', u'Slovenia'),
('SB', 'SLB', '090', u'Solomon Islands'),
('SO', 'SOM', '706', u'Somalia'),
('ZA', 'ZAF', '710', u'South Africa'),
('GS', 'SGS', '239', u'South Georgia and the South Sandwich Islands'),
('ES', 'ESP', '724', u'Spain'),
('LK', 'LKA', '144', u'Sri Lanka'),
('SD', 'SDN', '729', u'Sudan'),
('SR', 'SUR', '740', u'Suriname'),
('SS', 'SSD', '728', u'South Sudan'),
('SJ', 'SJM', '744', u'Svalbard and Jan Mayen'),
('SZ', 'SWZ', '748', u'Swaziland'),
('SE', 'SWE', '752', u'Sweden'),
('CH', 'CHE', '756', u'Switzerland'),
('SY', 'SYR', '760', u'Syrian Arab Republic'),
('TW', 'TWN', '158', u'Taiwan, Province of China'),
('TJ', 'TJK', '762', u'Tajikistan'),
('TZ', 'TZA', '834', u'Tanzania, United Republic of'),
('TH', 'THA', '764', u'Thailand'),
('TL', 'TLS', '626', u'Timor-Leste'),
('TG', 'TGO', '768', u'Togo'),
('TK', 'TKL', '772', u'Tokelau'),
('TO', 'TON', '776', u'Tonga'),
('TT', 'TTO', '780', u'Trinidad and Tobago'),
('TN', 'TUN', '788', u'Tunisia'),
('TR', 'TUR', '792', u'Turkey'),
('TM', 'TKM', '795', u'Turkmenistan'),
('TC', 'TCA', '796', u'Turks and Caicos Islands'),
('TV', 'TUV', '798', u'Tuvalu'),
('UG', 'UGA', '800', u'Uganda'),
('UA', 'UKR', '804', u'Ukraine'),
('AE', 'ARE', '784', u'United Arab Emirates'),
('GB', 'GBR', '826', u'United Kingdom'),
('US', 'USA', '840', u'United States'),
('UM', 'UMI', '581', u'United States Minor Outlying Islands'),
('UY', 'URY', '858', u'Uruguay'),
('UZ', 'UZB', '860', u'Uzbekistan'),
('VU', 'VUT', '548', u'Vanuatu'),
('VE', 'VEN', '862', u'Venezuela, Bolivarian Republic of'),
('VN', 'VNM', '704', u'Viet Nam'),
('VG', 'VGB', '092', u'Virgin Islands, British'),
('VI', 'VIR', '850', u'Virgin Islands, U.S.'),
('WF', 'WLF', '876', u'Wallis and Futuna'),
('EH', 'ESH', '732', u'Western Sahara'),
('YE', 'YEM', '887', u'Yemen'),
('ZM', 'ZMB', '894', u'Zambia'),
('ZW', 'ZWE', '716', u'Zimbabwe')]
LANGUAGES = [('aar', '', 'aa', u'Afar', u'afar'),
('abk', '', 'ab', u'Abkhazian', u'abkhaze'),
('ace', '', '', u'Achinese', u'aceh'),
('ach', '', '', u'Acoli', u'acoli'),
('ada', '', '', u'Adangme', u'adangme'),
('ady', '', '', u'Adyghe; Adygei', u'adyghé'),
('afa', '', '', u'Afro-Asiatic languages', u'afro-asiatiques, langues'),
('afh', '', '', u'Afrihili', u'afrihili'),
('afr', '', 'af', u'Afrikaans', u'afrikaans'),
('ain', '', '', u'Ainu', u'aïnou'),
('aka', '', 'ak', u'Akan', u'akan'),
('akk', '', '', u'Akkadian', u'akkadien'),
('alb', 'sqi', 'sq', u'Albanian', u'albanais'),
('ale', '', '', u'Aleut', u'aléoute'),
('alg', '', '', u'Algonquian languages', u'algonquines, langues'),
('alt', '', '', u'Southern Altai', u'altai du Sud'),
('amh', '', 'am', u'Amharic', u'amharique'),
('ang', '', '', u'English, Old (ca.450-1100)', u'anglo-saxon (ca.450-1100)'),
('anp', '', '', u'Angika', u'angika'),
('apa', '', '', u'Apache languages', u'apaches, langues'),
('ara', '', 'ar', u'Arabic', u'arabe'),
('arc', '', '', u'Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)', u'araméen d\'empire (700-300 BCE)'),
('arg', '', 'an', u'Aragonese', u'aragonais'),
('arm', 'hye', 'hy', u'Armenian', u'arménien'),
('arn', '', '', u'Mapudungun; Mapuche', u'mapudungun; mapuche; mapuce'),
('arp', '', '', u'Arapaho', u'arapaho'),
('art', '', '', u'Artificial languages', u'artificielles, langues'),
('arw', '', '', u'Arawak', u'arawak'),
('asm', '', 'as', u'Assamese', u'assamais'),
('ast', '', '', u'Asturian; Bable; Leonese; Asturleonese', u'asturien; bable; léonais; asturoléonais'),
('ath', '', '', u'Athapascan languages', u'athapascanes, langues'),
('aus', '', '', u'Australian languages', u'australiennes, langues'),
('ava', '', 'av', u'Avaric', u'avar'),
('ave', '', 'ae', u'Avestan', u'avestique'),
('awa', '', '', u'Awadhi', u'awadhi'),
('aym', '', 'ay', u'Aymara', u'aymara'),
('aze', '', 'az', u'Azerbaijani', u'azéri'),
('bad', '', '', u'Banda languages', u'banda, langues'),
('bai', '', '', u'Bamileke languages', u'bamiléké, langues'),
('bak', '', 'ba', u'Bashkir', u'bachkir'),
('bal', '', '', u'Baluchi', u'baloutchi'),
('bam', '', 'bm', u'Bambara', u'bambara'),
('ban', '', '', u'Balinese', u'balinais'),
('baq', 'eus', 'eu', u'Basque', u'basque'),
('bas', '', '', u'Basa', u'basa'),
('bat', '', '', u'Baltic languages', u'baltes, langues'),
('bej', '', '', u'Beja; Bedawiyet', u'bedja'),
('bel', '', 'be', u'Belarusian', u'biélorusse'),
('bem', '', '', u'Bemba', u'bemba'),
('ben', '', 'bn', u'Bengali', u'bengali'),
('ber', '', '', u'Berber languages', u'berbères, langues'),
('bho', '', '', u'Bhojpuri', u'bhojpuri'),
('bih', '', 'bh', u'Bihari languages', u'langues biharis'),
('bik', '', '', u'Bikol', u'bikol'),
('bin', '', '', u'Bini; Edo', u'bini; edo'),
('bis', '', 'bi', u'Bislama', u'bichlamar'),
('bla', '', '', u'Siksika', u'blackfoot'),
('bnt', '', '', u'Bantu (Other)', u'bantoues, autres langues'),
('bos', '', 'bs', u'Bosnian', u'bosniaque'),
('bra', '', '', u'Braj', u'braj'),
('bre', '', 'br', u'Breton', u'breton'),
('btk', '', '', u'Batak languages', u'batak, langues'),
('bua', '', '', u'Buriat', u'bouriate'),
('bug', '', '', u'Buginese', u'bugi'),
('bul', '', 'bg', u'Bulgarian', u'bulgare'),
('bur', 'mya', 'my', u'Burmese', u'birman'),
('byn', '', '', u'Blin; Bilin', u'blin; bilen'),
('cad', '', '', u'Caddo', u'caddo'),
('cai', '', '', u'Central American Indian languages', u'amérindiennes de L\'Amérique centrale, langues'),
('car', '', '', u'Galibi Carib', u'karib; galibi; carib'),
('cat', '', 'ca', u'Catalan; Valencian', u'catalan; valencien'),
('cau', '', '', u'Caucasian languages', u'caucasiennes, langues'),
('ceb', '', '', u'Cebuano', u'cebuano'),
('cel', '', '', u'Celtic languages', u'celtiques, langues; celtes, langues'),
('cha', '', 'ch', u'Chamorro', u'chamorro'),
('chb', '', '', u'Chibcha', u'chibcha'),
('che', '', 'ce', u'Chechen', u'tchétchène'),
('chg', '', '', u'Chagatai', u'djaghataï'),
('chi', 'zho', 'zh', u'Chinese', u'chinois'),
('chk', '', '', u'Chuukese', u'chuuk'),
('chm', '', '', u'Mari', u'mari'),
('chn', '', '', u'Chinook jargon', u'chinook, jargon'),
('cho', '', '', u'Choctaw', u'choctaw'),
('chp', '', '', u'Chipewyan; Dene Suline', u'chipewyan'),
('chr', '', '', u'Cherokee', u'cherokee'),
('chu', '', 'cu', u'Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic', u'slavon d\'église; vieux slave; slavon liturgique; vieux bulgare'),
('chv', '', 'cv', u'Chuvash', u'tchouvache'),
('chy', '', '', u'Cheyenne', u'cheyenne'),
('cmc', '', '', u'Chamic languages', u'chames, langues'),
('cop', '', '', u'Coptic', u'copte'),
('cor', '', 'kw', u'Cornish', u'cornique'),
('cos', '', 'co', u'Corsican', u'corse'),
('cpe', '', '', u'Creoles and pidgins, English based', u'créoles et pidgins basés sur l\'anglais'),
('cpf', '', '', u'Creoles and pidgins, French-based ', u'créoles et pidgins basés sur le français'),
('cpp', '', '', u'Creoles and pidgins, Portuguese-based ', u'créoles et pidgins basés sur le portugais'),
('cre', '', 'cr', u'Cree', u'cree'),
('crh', '', '', u'Crimean Tatar; Crimean Turkish', u'tatar de Crimé'),
('crp', '', '', u'Creoles and pidgins ', u'créoles et pidgins'),
('csb', '', '', u'Kashubian', u'kachoube'),
('cus', '', '', u'Cushitic languages', u'couchitiques, langues'),
('cze', 'ces', 'cs', u'Czech', u'tchèque'),
('dak', '', '', u'Dakota', u'dakota'),
('dan', '', 'da', u'Danish', u'danois'),
('dar', '', '', u'Dargwa', u'dargwa'),
('day', '', '', u'Land Dayak languages', u'dayak, langues'),
('del', '', '', u'Delaware', u'delaware'),
('den', '', '', u'Slave (Athapascan)', u'esclave (athapascan)'),
('dgr', '', '', u'Dogrib', u'dogrib'),
('din', '', '', u'Dinka', u'dinka'),
('div', '', 'dv', u'Divehi; Dhivehi; Maldivian', u'maldivien'),
('doi', '', '', u'Dogri', u'dogri'),
('dra', '', '', u'Dravidian languages', u'dravidiennes, langues'),
('dsb', '', '', u'Lower Sorbian', u'bas-sorabe'),
('dua', '', '', u'Duala', u'douala'),
('dum', '', '', u'Dutch, Middle (ca.1050-1350)', u'néerlandais moyen (ca. 1050-1350)'),
('dut', 'nld', 'nl', u'Dutch; Flemish', u'néerlandais; flamand'),
('dyu', '', '', u'Dyula', u'dioula'),
('dzo', '', 'dz', u'Dzongkha', u'dzongkha'),
('efi', '', '', u'Efik', u'efik'),
('egy', '', '', u'Egyptian (Ancient)', u'égyptien'),
('eka', '', '', u'Ekajuk', u'ekajuk'),
('elx', '', '', u'Elamite', u'élamite'),
('eng', '', 'en', u'English', u'anglais'),
('enm', '', '', u'English, Middle (1100-1500)', u'anglais moyen (1100-1500)'),
('epo', '', 'eo', u'Esperanto', u'espéranto'),
('est', '', 'et', u'Estonian', u'estonien'),
('ewe', '', 'ee', u'Ewe', u'éwé'),
('ewo', '', '', u'Ewondo', u'éwondo'),
('fan', '', '', u'Fang', u'fang'),
('fao', '', 'fo', u'Faroese', u'féroïen'),
('fat', '', '', u'Fanti', u'fanti'),
('fij', '', 'fj', u'Fijian', u'fidjien'),
('fil', '', '', u'Filipino; Pilipino', u'filipino; pilipino'),
('fin', '', 'fi', u'Finnish', u'finnois'),
('fiu', '', '', u'Finno-Ugrian languages', u'finno-ougriennes, langues'),
('fon', '', '', u'Fon', u'fon'),
('fre', 'fra', 'fr', u'French', u'français'),
('frm', '', '', u'French, Middle (ca.1400-1600)', u'français moyen (1400-1600)'),
('fro', '', '', u'French, Old (842-ca.1400)', u'français ancien (842-ca.1400)'),
('frr', '', '', u'Northern Frisian', u'frison septentrional'),
('frs', '', '', u'Eastern Frisian', u'frison oriental'),
('fry', '', 'fy', u'Western Frisian', u'frison occidental'),
('ful', '', 'ff', u'Fulah', u'peul'),
('fur', '', '', u'Friulian', u'frioulan'),
('gaa', '', '', u'Ga', u'ga'),
('gay', '', '', u'Gayo', u'gayo'),
('gba', '', '', u'Gbaya', u'gbaya'),
('gem', '', '', u'Germanic languages', u'germaniques, langues'),
('geo', 'kat', 'ka', u'Georgian', u'géorgien'),
('ger', 'deu', 'de', u'German', u'allemand'),
('gez', '', '', u'Geez', u'guèze'),
('gil', '', '', u'Gilbertese', u'kiribati'),
('gla', '', 'gd', u'Gaelic; Scottish Gaelic', u'gaélique; gaélique écossais'),
('gle', '', 'ga', u'Irish', u'irlandais'),
('glg', '', 'gl', u'Galician', u'galicien'),
('glv', '', 'gv', u'Manx', u'manx; mannois'),
('gmh', '', '', u'German, Middle High (ca.1050-1500)', u'allemand, moyen haut (ca. 1050-1500)'),
('goh', '', '', u'German, Old High (ca.750-1050)', u'allemand, vieux haut (ca. 750-1050)'),
('gon', '', '', u'Gondi', u'gond'),
('gor', '', '', u'Gorontalo', u'gorontalo'),
('got', '', '', u'Gothic', u'gothique'),
('grb', '', '', u'Grebo', u'grebo'),
('grc', '', '', u'Greek, Ancient (to 1453)', u'grec ancien (jusqu\'à 1453)'),
('gre', 'ell', 'el', u'Greek, Modern (1453-)', u'grec moderne (après 1453)'),
('grn', '', 'gn', u'Guarani', u'guarani'),
('gsw', '', '', u'Swiss German; Alemannic; Alsatian', u'suisse alémanique; alémanique; alsacien'),
('guj', '', 'gu', u'Gujarati', u'goudjrati'),
('gwi', '', '', u'Gwich\'in', u'gwich\'in'),
('hai', '', '', u'Haida', u'haida'),
('hat', '', 'ht', u'Haitian; Haitian Creole', u'haïtien; créole haïtien'),
('hau', '', 'ha', u'Hausa', u'haoussa'),
('haw', '', '', u'Hawaiian', u'hawaïen'),
('heb', '', 'he', u'Hebrew', u'hébreu'),
('her', '', 'hz', u'Herero', u'herero'),
('hil', '', '', u'Hiligaynon', u'hiligaynon'),
('him', '', '', u'Himachali languages; Western Pahari languages', u'langues himachalis; langues paharis occidentales'),
('hin', '', 'hi', u'Hindi', u'hindi'),
('hit', '', '', u'Hittite', u'hittite'),
('hmn', '', '', u'Hmong; Mong', u'hmong'),
('hmo', '', 'ho', u'Hiri Motu', u'hiri motu'),
('hrv', '', 'hr', u'Croatian', u'croate'),
('hsb', '', '', u'Upper Sorbian', u'haut-sorabe'),
('hun', '', 'hu', u'Hungarian', u'hongrois'),
('hup', '', '', u'Hupa', u'hupa'),
('iba', '', '', u'Iban', u'iban'),
('ibo', '', 'ig', u'Igbo', u'igbo'),
('ice', 'isl', 'is', u'Icelandic', u'islandais'),
('ido', '', 'io', u'Ido', u'ido'),
('iii', '', 'ii', u'Sichuan Yi; Nuosu', u'yi de Sichuan'),
('ijo', '', '', u'Ijo languages', u'ijo, langues'),
('iku', '', 'iu', u'Inuktitut', u'inuktitut'),
('ile', '', 'ie', u'Interlingue; Occidental', u'interlingue'),
('ilo', '', '', u'Iloko', u'ilocano'),
('ina', '', 'ia', u'Interlingua (International Auxiliary Language Association)', u'interlingua (langue auxiliaire internationale)'),
('inc', '', '', u'Indic languages', u'indo-aryennes, langues'),
('ind', '', 'id', u'Indonesian', u'indonésien'),
('ine', '', '', u'Indo-European languages', u'indo-européennes, langues'),
('inh', '', '', u'Ingush', u'ingouche'),
('ipk', '', 'ik', u'Inupiaq', u'inupiaq'),
('ira', '', '', u'Iranian languages', u'iraniennes, langues'),
('iro', '', '', u'Iroquoian languages', u'iroquoises, langues'),
('ita', '', 'it', u'Italian', u'italien'),
('jav', '', 'jv', u'Javanese', u'javanais'),
('jbo', '', '', u'Lojban', u'lojban'),
('jpn', '', 'ja', u'Japanese', u'japonais'),
('jpr', '', '', u'Judeo-Persian', u'judéo-persan'),
('jrb', '', '', u'Judeo-Arabic', u'judéo-arabe'),
('kaa', '', '', u'Kara-Kalpak', u'karakalpak'),
('kab', '', '', u'Kabyle', u'kabyle'),
('kac', '', '', u'Kachin; Jingpho', u'kachin; jingpho'),
('kal', '', 'kl', u'Kalaallisut; Greenlandic', u'groenlandais'),
('kam', '', '', u'Kamba', u'kamba'),
('kan', '', 'kn', u'Kannada', u'kannada'),
('kar', '', '', u'Karen languages', u'karen, langues'),
('kas', '', 'ks', u'Kashmiri', u'kashmiri'),
('kau', '', 'kr', u'Kanuri', u'kanouri'),
('kaw', '', '', u'Kawi', u'kawi'),
('kaz', '', 'kk', u'Kazakh', u'kazakh'),
('kbd', '', '', u'Kabardian', u'kabardien'),
('kha', '', '', u'Khasi', u'khasi'),
('khi', '', '', u'Khoisan languages', u'khoïsan, langues'),
('khm', '', 'km', u'Central Khmer', u'khmer central'),
('kho', '', '', u'Khotanese; Sakan', u'khotanais; sakan'),
('kik', '', 'ki', u'Kikuyu; Gikuyu', u'kikuyu'),
('kin', '', 'rw', u'Kinyarwanda', u'rwanda'),
('kir', '', 'ky', u'Kirghiz; Kyrgyz', u'kirghiz'),
('kmb', '', '', u'Kimbundu', u'kimbundu'),
('kok', '', '', u'Konkani', u'konkani'),
('kom', '', 'kv', u'Komi', u'kom'),
('kon', '', 'kg', u'Kongo', u'kongo'),
('kor', '', 'ko', u'Korean', u'coréen'),
('kos', '', '', u'Kosraean', u'kosrae'),
('kpe', '', '', u'Kpelle', u'kpellé'),
('krc', '', '', u'Karachay-Balkar', u'karatchai balkar'),
('krl', '', '', u'Karelian', u'carélien'),
('kro', '', '', u'Kru languages', u'krou, langues'),
('kru', '', '', u'Kurukh', u'kurukh'),
('kua', '', 'kj', u'Kuanyama; Kwanyama', u'kuanyama; kwanyama'),
('kum', '', '', u'Kumyk', u'koumyk'),
('kur', '', 'ku', u'Kurdish', u'kurde'),
('kut', '', '', u'Kutenai', u'kutenai'),
('lad', '', '', u'Ladino', u'judéo-espagnol'),
('lah', '', '', u'Lahnda', u'lahnda'),
('lam', '', '', u'Lamba', u'lamba'),
('lao', '', 'lo', u'Lao', u'lao'),
('lat', '', 'la', u'Latin', u'latin'),
('lav', '', 'lv', u'Latvian', u'letton'),
('lez', '', '', u'Lezghian', u'lezghien'),
('lim', '', 'li', u'Limburgan; Limburger; Limburgish', u'limbourgeois'),
('lin', '', 'ln', u'Lingala', u'lingala'),
('lit', '', 'lt', u'Lithuanian', u'lituanien'),
('lol', '', '', u'Mongo', u'mongo'),
('loz', '', '', u'Lozi', u'lozi'),
('ltz', '', 'lb', u'Luxembourgish; Letzeburgesch', u'luxembourgeois'),
('lua', '', '', u'Luba-Lulua', u'luba-lulua'),
('lub', '', 'lu', u'Luba-Katanga', u'luba-katanga'),
('lug', '', 'lg', u'Ganda', u'ganda'),
('lui', '', '', u'Luiseno', u'luiseno'),
('lun', '', '', u'Lunda', u'lunda'),
('luo', '', '', u'Luo (Kenya and Tanzania)', u'luo (Kenya et Tanzanie)'),
('lus', '', '', u'Lushai', u'lushai'),
('mac', 'mkd', 'mk', u'Macedonian', u'macédonien'),
('mad', '', '', u'Madurese', u'madourais'),
('mag', '', '', u'Magahi', u'magahi'),
('mah', '', 'mh', u'Marshallese', u'marshall'),
('mai', '', '', u'Maithili', u'maithili'),
('mak', '', '', u'Makasar', u'makassar'),
('mal', '', 'ml', u'Malayalam', u'malayalam'),
('man', '', '', u'Mandingo', u'mandingue'),
('mao', 'mri', 'mi', u'Maori', u'maori'),
('map', '', '', u'Austronesian languages', u'austronésiennes, langues'),
('mar', '', 'mr', u'Marathi', u'marathe'),
('mas', '', '', u'Masai', u'massaï'),
('may', 'msa', 'ms', u'Malay', u'malais'),
('mdf', '', '', u'Moksha', u'moksa'),
('mdr', '', '', u'Mandar', u'mandar'),
('men', '', '', u'Mende', u'mendé'),
('mga', '', '', u'Irish, Middle (900-1200)', u'irlandais moyen (900-1200)'),
('mic', '', '', u'Mi\'kmaq; Micmac', u'mi\'kmaq; micmac'),
('min', '', '', u'Minangkabau', u'minangkabau'),
('mkh', '', '', u'Mon-Khmer languages', u'môn-khmer, langues'),
('mlg', '', 'mg', u'Malagasy', u'malgache'),
('mlt', '', 'mt', u'Maltese', u'maltais'),
('mnc', '', '', u'Manchu', u'mandchou'),
('mni', '', '', u'Manipuri', u'manipuri'),
('mno', '', '', u'Manobo languages', u'manobo, langues'),
('moh', '', '', u'Mohawk', u'mohawk'),
('mon', '', 'mn', u'Mongolian', u'mongol'),
('mos', '', '', u'Mossi', u'moré'),
('mun', '', '', u'Munda languages', u'mounda, langues'),
('mus', '', '', u'Creek', u'muskogee'),
('mwl', '', '', u'Mirandese', u'mirandais'),
('mwr', '', '', u'Marwari', u'marvari'),
('myn', '', '', u'Mayan languages', u'maya, langues'),
('myv', '', '', u'Erzya', u'erza'),
('nah', '', '', u'Nahuatl languages', u'nahuatl, langues'),
('nai', '', '', u'North American Indian languages', u'nord-amérindiennes, langues'),
('nap', '', '', u'Neapolitan', u'napolitain'),
('nau', '', 'na', u'Nauru', u'nauruan'),
('nav', '', 'nv', u'Navajo; Navaho', u'navaho'),
('nbl', '', 'nr', u'Ndebele, South; South Ndebele', u'ndébélé du Sud'),
('nde', '', 'nd', u'Ndebele, North; North Ndebele', u'ndébélé du Nord'),
('ndo', '', 'ng', u'Ndonga', u'ndonga'),
('nds', '', '', u'Low German; Low Saxon; German, Low; Saxon, Low', u'bas allemand; bas saxon; allemand, bas; saxon, bas'),
('nep', '', 'ne', u'Nepali', u'népalais'),
('new', '', '', u'Nepal Bhasa; Newari', u'nepal bhasa; newari'),
('nia', '', '', u'Nias', u'nias'),
('nic', '', '', u'Niger-Kordofanian languages', u'nigéro-kordofaniennes, langues'),
('niu', '', '', u'Niuean', u'niué'),
('nno', '', 'nn', u'Norwegian Nynorsk; Nynorsk, Norwegian', u'norvégien nynorsk; nynorsk, norvégien'),
('nob', '', 'nb', u'Bokmål, Norwegian; Norwegian Bokmål', u'norvégien bokmål'),
('nog', '', '', u'Nogai', u'nogaï; nogay'),
('non', '', '', u'Norse, Old', u'norrois, vieux'),
('nor', '', 'no', u'Norwegian', u'norvégien'),
('nqo', '', '', u'N\'Ko', u'n\'ko'),
('nso', '', '', u'Pedi; Sepedi; Northern Sotho', u'pedi; sepedi; sotho du Nord'),
('nub', '', '', u'Nubian languages', u'nubiennes, langues'),
('nwc', '', '', u'Classical Newari; Old Newari; Classical Nepal Bhasa', u'newari classique'),
('nya', '', 'ny', u'Chichewa; Chewa; Nyanja', u'chichewa; chewa; nyanja'),
('nym', '', '', u'Nyamwezi', u'nyamwezi'),
('nyn', '', '', u'Nyankole', u'nyankolé'),
('nyo', '', '', u'Nyoro', u'nyoro'),
('nzi', '', '', u'Nzima', u'nzema'),
('oci', '', 'oc', u'Occitan (post 1500); Provençal', u'occitan (après 1500); provençal'),
('oji', '', 'oj', u'Ojibwa', u'ojibwa'),
('ori', '', 'or', u'Oriya', u'oriya'),
('orm', '', 'om', u'Oromo', u'galla'),
('osa', '', '', u'Osage', u'osage'),
('oss', '', 'os', u'Ossetian; Ossetic', u'ossète'),
('ota', '', '', u'Turkish, Ottoman (1500-1928)', u'turc ottoman (1500-1928)'),
('oto', '', '', u'Otomian languages', u'otomi, langues'),
('paa', '', '', u'Papuan languages', u'papoues, langues'),
('pag', '', '', u'Pangasinan', u'pangasinan'),
('pal', '', '', u'Pahlavi', u'pahlavi'),
('pam', '', '', u'Pampanga; Kapampangan', u'pampangan'),
('pan', '', 'pa', u'Panjabi; Punjabi', u'pendjabi'),
('pap', '', '', u'Papiamento', u'papiamento'),
('pau', '', '', u'Palauan', u'palau'),
('peo', '', '', u'Persian, Old (ca.600-400 B.C.)', u'perse, vieux (ca. 600-400 av. J.-C.)'),
('per', 'fas', 'fa', u'Persian', u'persan'),
('phi', '', '', u'Philippine languages', u'philippines, langues'),
('phn', '', '', u'Phoenician', u'phénicien'),
('pli', '', 'pi', u'Pali', u'pali'),
('pol', '', 'pl', u'Polish', u'polonais'),
('pon', '', '', u'Pohnpeian', u'pohnpei'),
('pob', '', 'pb', u'Brazilian Portuguese', u'brazilian portuguese'),
('por', '', 'pt', u'Portuguese', u'portugais'),
('pra', '', '', u'Prakrit languages', u'prâkrit, langues'),
('pro', '', '', u'Provençal, Old (to 1500)', u'provençal ancien (jusqu\'à 1500)'),
('pus', '', 'ps', u'Pushto; Pashto', u'pachto'),
('que', '', 'qu', u'Quechua', u'quechua'),
('raj', '', '', u'Rajasthani', u'rajasthani'),
('rap', '', '', u'Rapanui', u'rapanui'),
('rar', '', '', u'Rarotongan; Cook Islands Maori', u'rarotonga; maori des îles Cook'),
('roa', '', '', u'Romance languages', u'romanes, langues'),
('roh', '', 'rm', u'Romansh', u'romanche'),
('rom', '', '', u'Romany', u'tsigane'),
('rum', 'ron', 'ro', u'Romanian; Moldavian; Moldovan', u'roumain; moldave'),
('run', '', 'rn', u'Rundi', u'rundi'),
('rup', '', '', u'Aromanian; Arumanian; Macedo-Romanian', u'aroumain; macédo-roumain'),
('rus', '', 'ru', u'Russian', u'russe'),
('sad', '', '', u'Sandawe', u'sandawe'),
('sag', '', 'sg', u'Sango', u'sango'),
('sah', '', '', u'Yakut', u'iakoute'),
('sai', '', '', u'South American Indian (Other)', u'indiennes d\'Amérique du Sud, autres langues'),
('sal', '', '', u'Salishan languages', u'salishennes, langues'),
('sam', '', '', u'Samaritan Aramaic', u'samaritain'),
('san', '', 'sa', u'Sanskrit', u'sanskrit'),
('sas', '', '', u'Sasak', u'sasak'),
('sat', '', '', u'Santali', u'santal'),
('scn', '', '', u'Sicilian', u'sicilien'),
('sco', '', '', u'Scots', u'écossais'),
('sel', '', '', u'Selkup', u'selkoupe'),
('sem', '', '', u'Semitic languages', u'sémitiques, langues'),
('sga', '', '', u'Irish, Old (to 900)', u'irlandais ancien (jusqu\'à 900)'),
('sgn', '', '', u'Sign Languages', u'langues des signes'),
('shn', '', '', u'Shan', u'chan'),
('sid', '', '', u'Sidamo', u'sidamo'),
('sin', '', 'si', u'Sinhala; Sinhalese', u'singhalais'),
('sio', '', '', u'Siouan languages', u'sioux, langues'),
('sit', '', '', u'Sino-Tibetan languages', u'sino-tibétaines, langues'),
('sla', '', '', u'Slavic languages', u'slaves, langues'),
('slo', 'slk', 'sk', u'Slovak', u'slovaque'),
('slv', '', 'sl', u'Slovenian', u'slovène'),
('sma', '', '', u'Southern Sami', u'sami du Sud'),
('sme', '', 'se', u'Northern Sami', u'sami du Nord'),
('smi', '', '', u'Sami languages', u'sames, langues'),
('smj', '', '', u'Lule Sami', u'sami de Lule'),
('smn', '', '', u'Inari Sami', u'sami d\'Inari'),
('smo', '', 'sm', u'Samoan', u'samoan'),
('sms', '', '', u'Skolt Sami', u'sami skolt'),
('sna', '', 'sn', u'Shona', u'shona'),
('snd', '', 'sd', u'Sindhi', u'sindhi'),
('snk', '', '', u'Soninke', u'soninké'),
('sog', '', '', u'Sogdian', u'sogdien'),
('som', '', 'so', u'Somali', u'somali'),
('son', '', '', u'Songhai languages', u'songhai, langues'),
('sot', '', 'st', u'Sotho, Southern', u'sotho du Sud'),
('spa', '', 'es', u'Spanish; Castilian', u'espagnol; castillan'),
('srd', '', 'sc', u'Sardinian', u'sarde'),
('srn', '', '', u'Sranan Tongo', u'sranan tongo'),
('srp', '', 'sr', u'Serbian', u'serbe'),
('srr', '', '', u'Serer', u'sérère'),
('ssa', '', '', u'Nilo-Saharan languages', u'nilo-sahariennes, langues'),
('ssw', '', 'ss', u'Swati', u'swati'),
('suk', '', '', u'Sukuma', u'sukuma'),
('sun', '', 'su', u'Sundanese', u'soundanais'),
('sus', '', '', u'Susu', u'soussou'),
('sux', '', '', u'Sumerian', u'sumérien'),
('swa', '', 'sw', u'Swahili', u'swahili'),
('swe', '', 'sv', u'Swedish', u'suédois'),
('syc', '', '', u'Classical Syriac', u'syriaque classique'),
('syr', '', '', u'Syriac', u'syriaque'),
('tah', '', 'ty', u'Tahitian', u'tahitien'),
('tai', '', '', u'Tai languages', u'tai, langues'),
('tam', '', 'ta', u'Tamil', u'tamoul'),
('tat', '', 'tt', u'Tatar', u'tatar'),
('tel', '', 'te', u'Telugu', u'télougou'),
('tem', '', '', u'Timne', u'temne'),
('ter', '', '', u'Tereno', u'tereno'),
('tet', '', '', u'Tetum', u'tetum'),
('tgk', '', 'tg', u'Tajik', u'tadjik'),
('tgl', '', 'tl', u'Tagalog', u'tagalog'),
('tha', '', 'th', u'Thai', u'thaï'),
('tib', 'bod', 'bo', u'Tibetan', u'tibétain'),
('tig', '', '', u'Tigre', u'tigré'),
('tir', '', 'ti', u'Tigrinya', u'tigrigna'),
('tiv', '', '', u'Tiv', u'tiv'),
('tkl', '', '', u'Tokelau', u'tokelau'),
('tlh', '', '', u'Klingon; tlhIngan-Hol', u'klingon'),
('tli', '', '', u'Tlingit', u'tlingit'),
('tmh', '', '', u'Tamashek', u'tamacheq'),
('tog', '', '', u'Tonga (Nyasa)', u'tonga (Nyasa)'),
('ton', '', 'to', u'Tonga (Tonga Islands)', u'tongan (Îles Tonga)'),
('tpi', '', '', u'Tok Pisin', u'tok pisin'),
('tsi', '', '', u'Tsimshian', u'tsimshian'),
('tsn', '', 'tn', u'Tswana', u'tswana'),
('tso', '', 'ts', u'Tsonga', u'tsonga'),
('tuk', '', 'tk', u'Turkmen', u'turkmène'),
('tum', '', '', u'Tumbuka', u'tumbuka'),
('tup', '', '', u'Tupi languages', u'tupi, langues'),
('tur', '', 'tr', u'Turkish', u'turc'),
('tut', '', '', u'Altaic languages', u'altaïques, langues'),
('tvl', '', '', u'Tuvalu', u'tuvalu'),
('twi', '', 'tw', u'Twi', u'twi'),
('tyv', '', '', u'Tuvinian', u'touva'),
('udm', '', '', u'Udmurt', u'oudmourte'),
('uga', '', '', u'Ugaritic', u'ougaritique'),
('uig', '', 'ug', u'Uighur; Uyghur', u'ouïgour'),
('ukr', '', 'uk', u'Ukrainian', u'ukrainien'),
('umb', '', '', u'Umbundu', u'umbundu'),
('und', '', '', u'Undetermined', u'indéterminée'),
('urd', '', 'ur', u'Urdu', u'ourdou'),
('uzb', '', 'uz', u'Uzbek', u'ouszbek'),
('vai', '', '', u'Vai', u'vaï'),
('ven', '', 've', u'Venda', u'venda'),
('vie', '', 'vi', u'Vietnamese', u'vietnamien'),
('vol', '', 'vo', u'Volapük', u'volapük'),
('vot', '', '', u'Votic', u'vote'),
('wak', '', '', u'Wakashan languages', u'wakashanes, langues'),
('wal', '', '', u'Walamo', u'walamo'),
('war', '', '', u'Waray', u'waray'),
('was', '', '', u'Washo', u'washo'),
('wel', 'cym', 'cy', u'Welsh', u'gallois'),
('wen', '', '', u'Sorbian languages', u'sorabes, langues'),
('wln', '', 'wa', u'Walloon', u'wallon'),
('wol', '', 'wo', u'Wolof', u'wolof'),
('xal', '', '', u'Kalmyk; Oirat', u'kalmouk; oïrat'),
('xho', '', 'xh', u'Xhosa', u'xhosa'),
('yao', '', '', u'Yao', u'yao'),
('yap', '', '', u'Yapese', u'yapois'),
('yid', '', 'yi', u'Yiddish', u'yiddish'),
('yor', '', 'yo', u'Yoruba', u'yoruba'),
('ypk', '', '', u'Yupik languages', u'yupik, langues'),
('zap', '', '', u'Zapotec', u'zapotèque'),
('zbl', '', '', u'Blissymbols; Blissymbolics; Bliss', u'symboles Bliss; Bliss'),
('zen', '', '', u'Zenaga', u'zenaga'),
('zha', '', 'za', u'Zhuang; Chuang', u'zhuang; chuang'),
('znd', '', '', u'Zande languages', u'zandé, langues'),
('zul', '', 'zu', u'Zulu', u'zoulou'),
('zun', '', '', u'Zuni', u'zuni'),
('zza', '', '', u'Zaza; Dimili; Dimli; Kirdki; Kirmanjki; Zazaki', u'zaza; dimili; dimli; kirdki; kirmanjki; zazaki')]
class Country(object):
"""Country according to ISO-3166
:param string country: country name, alpha2 code, alpha3 code or numeric code
:param list countries: all countries
:type countries: see :data:`~subliminal.language.COUNTRIES`
"""
def __init__(self, country, countries=None):
countries = countries or COUNTRIES
country = to_unicode(country.strip().lower())
country_tuple = None
# Try to find the country
if len(country) == 2:
country_tuple = dict((c[0].lower(), c) for c in countries).get(country)
elif len(country) == 3 and not country.isdigit():
country_tuple = dict((c[1].lower(), c) for c in countries).get(country)
elif len(country) == 3 and country.isdigit():
country_tuple = dict((c[2].lower(), c) for c in countries).get(country)
if country_tuple is None:
country_tuple = dict((c[3].lower(), c) for c in countries).get(country)
# Raise ValueError if nothing is found
if country_tuple is None:
raise ValueError('Country %s does not exist' % country)
# Set default attrs
self.alpha2 = country_tuple[0]
self.alpha3 = country_tuple[1]
self.numeric = country_tuple[2]
self.name = country_tuple[3]
def __hash__(self):
return hash(self.alpha3)
def __eq__(self, other):
if isinstance(other, Country):
return self.alpha3 == other.alpha3
return False
def __ne__(self, other):
return not self == other
def __unicode__(self):
return self.name
def __str__(self):
return unicode(self).encode('utf-8')
def __repr__(self):
return 'Country(%s)' % self
class Language(object):
"""Language according to ISO-639
:param string language: language name (English or French), alpha2 code, alpha3 code, terminologic code or numeric code, optionally with a country
:param country: country of the language
:type country: :class:`Country` or string
:param languages: all languages
:type languages: see :data:`~subliminal.language.LANGUAGES`
:param countries: all countries
:type countries: see :data:`~subliminal.language.COUNTRIES`
:param bool strict: whether to raise a ValueError on unknown language or not
:class:`Language` implements the inclusion test, with the ``in`` keyword::
>>> Language('pt-BR') in Language('pt') # Portuguese (Brazil) is included in Portuguese
True
>>> Language('pt') in Language('pt-BR') # Portuguese is not included in Portuguese (Brazil)
False
"""
with_country_regexps = [re.compile('(.*)\((.*)\)'), re.compile('(.*)[-_](.*)')]
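# (added note) These patterns match inputs such as u'English (US)', u'pt-BR'
# or u'pt_BR': group(1) captures the language part and group(2) the country
# part resolved below.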
def __init__(self, language, country=None, languages=None, countries=None, strict=True):
languages = languages or LANGUAGES
countries = countries or COUNTRIES
# Get the country
self.country = None
if isinstance(country, Country):
self.country = country
elif isinstance(country, basestring):
try:
self.country = Country(country, countries)
except ValueError:
logger.warning(u'Country %s could not be identified' % country)
if strict:
raise
# Language + Country format
#TODO: Improve this part
if country is None:
for regexp in [r.match(language) for r in self.with_country_regexps]:
if regexp:
language = regexp.group(1)
try:
self.country = Country(regexp.group(2), countries)
except ValueError:
logger.warning(u'Country %s could not be identified' % regexp.group(2))
if strict:
raise
break
# Try to find the language
language = to_unicode(language.strip().lower())
language_tuple = None
if len(language) == 2:
language_tuple = dict((l[2].lower(), l) for l in languages).get(language)
elif len(language) == 3:
language_tuple = dict((l[0].lower(), l) for l in languages).get(language)
if language_tuple is None:
language_tuple = dict((l[1].lower(), l) for l in languages).get(language)
if language_tuple is None:
language_tuple = dict((l[3].split('; ')[0].lower(), l) for l in languages).get(language)
if language_tuple is None:
language_tuple = dict((l[4].split('; ')[0].lower(), l) for l in languages).get(language)
# Raise ValueError if strict or continue with Undetermined
if language_tuple is None:
if strict:
raise ValueError('Language %s does not exist' % language)
language_tuple = dict((l[0].lower(), l) for l in languages).get('und')
# Set attributes
self.alpha2 = language_tuple[2]
self.alpha3 = language_tuple[0]
self.terminologic = language_tuple[1]
self.name = language_tuple[3]
self.french_name = language_tuple[4]
def __hash__(self):
if self.country is None:
return hash(self.alpha3)
return hash(self.alpha3 + self.country.alpha3)
def __eq__(self, other):
if isinstance(other, Language):
return self.alpha3 == other.alpha3 and self.country == other.country
return False
def __contains__(self, item):
if isinstance(item, Language):
if self == item:
return True
if self.country is None:
return self.alpha3 == item.alpha3
return False
def __ne__(self, other):
return not self == other
def __nonzero__(self):
return self.alpha3 != 'und'
def __unicode__(self):
if self.country is None:
return self.name
return '%s (%s)' % (self.name, self.country)
def __str__(self):
return unicode(self).encode('utf-8')
def __repr__(self):
if self.country is None:
return 'Language(%s)' % self.name.encode('utf-8')
return 'Language(%s, country=%s)' % (self.name.encode('utf-8'), self.country)
class language_set(set):
"""Set of :class:`Language` with some specificities.
:param iterable: where to take elements from
:type iterable: iterable of :class:`Languages <Language>` or string
:param languages: all languages
:type languages: see :data:`~subliminal.language.LANGUAGES`
:param bool strict: whether to raise a ValueError on invalid language or not
The following redefinitions are meant to reflect the inclusion logic in :class:`Language`
* Inclusion test, with the ``in`` keyword
* Intersection
* Subtraction
Here is an illustration of the previous points::
>>> Language('en') in language_set(['en-US', 'en-CA'])
False
>>> Language('en-US') in language_set(['en', 'fr'])
True
>>> language_set(['en']) & language_set(['en-US', 'en-CA'])
language_set([Language(English, country=Canada), Language(English, country=United States)])
>>> language_set(['en-US', 'en-CA', 'fr']) - language_set(['en'])
language_set([Language(French)])
"""
def __init__(self, iterable=None, languages=None, strict=True):
iterable = iterable or []
languages = languages or LANGUAGES
items = []
for i in iterable:
if isinstance(i, Language):
items.append(i)
continue
if isinstance(i, tuple):
items.append(Language(i[0], languages=languages, strict=strict))
continue
items.append(Language(i, languages=languages, strict=strict))
super(language_set, self).__init__(items)
def __contains__(self, item):
for i in self:
if item in i:
return True
return super(language_set, self).__contains__(item)
def __and__(self, other):
results = language_set()
for i in self:
for j in other:
if i in j:
results.add(i)
for i in other:
for j in self:
if i in j:
results.add(i)
return results
def __sub__(self, other):
results = language_set()
for i in self:
if i not in other:
results.add(i)
return results
class language_list(list):
"""List of :class:`Language` with some specificities.
:param iterable: where to take elements from
:type iterable: iterable of :class:`Languages <Language>` or string
:param languages: all languages
:type languages: see :data:`~subliminal.language.LANGUAGES`
:param bool strict: whether to raise a ValueError on invalid language or not
The following redefinitions are meant to reflect the inclusion logic in :class:`Language`
* Inclusion test, with the ``in`` keyword
* Index
Here is an illustration of the previous points::
>>> Language('en') in language_list(['en-US', 'en-CA'])
False
>>> Language('en-US') in language_list(['en', 'fr-BE'])
True
>>> language_list(['en', 'fr-BE']).index(Language('en-US'))
0
"""
def __init__(self, iterable=None, languages=None, strict=True):
iterable = iterable or []
languages = languages or LANGUAGES
items = []
for i in iterable:
if isinstance(i, Language):
items.append(i)
continue
if isinstance(i, tuple):
items.append(Language(i[0], languages=languages, strict=strict))
continue
items.append(Language(i, languages=languages, strict=strict))
super(language_list, self).__init__(items)
def __contains__(self, item):
for i in self:
if item in i:
return True
return super(language_list, self).__contains__(item)
def index(self, x, strict=False):
if not strict:
for i in range(len(self)):
if x in self[i]:
return i
return super(language_list, self).index(x)
|
titu1994/Neural-Style-Transfer
|
refs/heads/master
|
script_helper/Script/neural_doodle.py
|
2
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import time
import argparse
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
from scipy.misc import imread, imsave, imresize, fromimage, toimage, imfilter
from keras import backend as K
from keras.layers import Input, AveragePooling2D
from keras.models import Model
from keras.preprocessing.image import load_img, img_to_array
from keras.applications import vgg16
"""
Neural Doodle in Keras using Keras 1.2.2
Based on the original script available at : https://github.com/fchollet/keras/blob/master/examples/neural_doodle.py
References:
[Dmitry Ulyanov's blog on fast-neural-doodle](http://dmitryulyanov.github.io/feed-forward-neural-doodle/)
[Torch code for fast-neural-doodle](https://github.com/DmitryUlyanov/fast-neural-doodle)
[Torch code for online-neural-doodle](https://github.com/DmitryUlyanov/online-neural-doodle)
[Paper Texture Networks: Feed-forward Synthesis of Textures and Stylized Images](http://arxiv.org/abs/1603.03417)
[Discussion on parameter tuning](https://github.com/fchollet/keras/issues/3705)
"""
# Command line arguments
parser = argparse.ArgumentParser(description='Keras neural doodle example')
parser.add_argument('--nlabels', type=int, help='number of semantic labels (regions in different colors)'
                    ' in style_mask/target_mask')
parser.add_argument('--style-image', type=str, help='path to image to learn style from')
parser.add_argument('--style-mask', type=str, help='path to semantic mask of style image')
parser.add_argument('--target-mask', type=str, help='path to semantic mask of target image')
parser.add_argument('--content-image', type=str, default=None, help='path to optional content image')
parser.add_argument('--target-image-prefix', type=str, help='path prefix for generated results')
parser.add_argument("--img_size", type=int, default=-1, help='Image size will be rescaled to these dimensions. '
'Use -1 for no rescaling of input images')
parser.add_argument("--num_iter", dest="num_iter", default=10, type=int, help="Number of iterations")
parser.add_argument('--preserve_color', dest='color', default="False", type=str,
help='Preserve original color in image')
parser.add_argument("--min_improvement", default=0.0, type=float,
help="Minimum improvement required to continue training")
parser.add_argument("--content_weight", dest="content_weight", default=0.1, type=float, help="Weight of content")
parser.add_argument("--style_weight", dest="style_weight", default=1, type=float, help="Weight of content")
parser.add_argument("--tv_weight", dest="tv_weight", default=8.5e-5, type=float,
help="Total Variation in the Weights")
parser.add_argument("--region_style_weight", dest="region_weight", default=1.0, type=float, help="Region Style Weight")
args = parser.parse_args()
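# Illustrative invocation (added sketch; the file names below are placeholders and
# not part of the original script):
#   python neural_doodle.py --nlabels 4 --style-image style.png \
#       --style-mask style_mask.png --target-mask target_mask.png \
#       --target-image-prefix generated/doodle --num_iter 50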
def str_to_bool(v):
return v.lower() in ("true", "yes", "t", "1")
style_img_path = args.style_image
style_mask_path = args.style_mask
target_mask_path = args.target_mask
content_img_path = args.content_image
target_img_prefix = args.target_image_prefix
use_content_img = content_img_path is not None
nb_labels = args.nlabels
nb_colors = 3 # RGB
# determine image sizes based on target_mask
ref_img = imread(target_mask_path)
if args.img_size != -1:
aspect_ratio = float(ref_img.shape[1]) / float(ref_img.shape[0])
ref_img = imresize(ref_img, (int(args.img_size), int(args.img_size * aspect_ratio)))
img_nrows, img_ncols = ref_img.shape[:2]
total_variation_weight = float(args.tv_weight)
style_weight = float(args.style_weight)
content_weight = float(args.content_weight) if use_content_img else 0
region_style_weight = float(args.region_weight)
content_feature_layers = ['block5_conv2']
# To get better generation quality, use more conv layers for style features
style_feature_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1',
'block4_conv1', 'block5_conv1']
preserve_color = str_to_bool(args.color)
# helper functions for reading/processing images
def preprocess_image(image_path):
img = load_img(image_path, target_size=(img_nrows, img_ncols))
img = img_to_array(img)
img = np.expand_dims(img, axis=0)
img = vgg16.preprocess_input(img)
return img
def deprocess_image(x):
if K.image_dim_ordering() == 'th':
x = x.reshape((3, img_nrows, img_ncols))
x = x.transpose((1, 2, 0))
else:
x = x.reshape((img_nrows, img_ncols, 3))
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
# BGR to RGB
x = x[:, :, ::-1]
x = np.clip(x, 0, 255).astype('uint8')
return x
# util function to preserve image color
def original_color_transform(content, generated):
generated = fromimage(toimage(generated, mode='RGB'), mode='YCbCr') # Convert to YCbCr color space
generated[:, :, 1:] = content[:, :, 1:] # Generated CbCr = Content CbCr
generated = fromimage(toimage(generated, mode='YCbCr'), mode='RGB') # Convert to RGB color space
return generated
def kmeans(xs, k):
assert xs.ndim == 2
try:
from sklearn.cluster import k_means
_, labels, _ = k_means(xs.astype("float64"), k)
except ImportError:
from scipy.cluster.vq import kmeans2
_, labels = kmeans2(xs, k, missing='raise')
return labels
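# The clustering above is what load_mask_labels() below relies on: pixels from the
# style and target masks are stacked into a single (n_pixels, 3) colour array and
# grouped into nb_labels clusters, so both masks share the same label id per region.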
def load_mask_labels():
'''Load both target and style masks.
A mask image (nr x nc) with m labels/colors will be loaded
as a 4D boolean tensor: (1, m, nr, nc) for 'th' or (1, nr, nc, m) for 'tf'
'''
target_mask_img = load_img(target_mask_path,
target_size=(img_nrows, img_ncols))
target_mask_img = img_to_array(target_mask_img)
style_mask_img = load_img(style_mask_path,
target_size=(img_nrows, img_ncols))
style_mask_img = img_to_array(style_mask_img)
if K.image_dim_ordering() == 'th':
mask_vecs = np.vstack([style_mask_img.reshape((3, -1)).T,
target_mask_img.reshape((3, -1)).T])
else:
mask_vecs = np.vstack([style_mask_img.reshape((-1, 3)),
target_mask_img.reshape((-1, 3))])
labels = kmeans(mask_vecs, nb_labels)
style_mask_label = labels[:img_nrows *
img_ncols].reshape((img_nrows, img_ncols))
target_mask_label = labels[img_nrows *
img_ncols:].reshape((img_nrows, img_ncols))
stack_axis = 0 if K.image_dim_ordering() == 'th' else -1
style_mask = np.stack([style_mask_label == r for r in range(nb_labels)],
axis=stack_axis)
target_mask = np.stack([target_mask_label == r for r in range(nb_labels)],
axis=stack_axis)
return (np.expand_dims(style_mask, axis=0),
np.expand_dims(target_mask, axis=0))
# Create tensor variables for images
if K.image_dim_ordering() == 'th':
shape = (1, nb_colors, img_nrows, img_ncols)
else:
shape = (1, img_nrows, img_ncols, nb_colors)
style_image = K.variable(preprocess_image(style_img_path))
target_image = K.placeholder(shape=shape)
if use_content_img:
content_image = K.variable(preprocess_image(content_img_path))
else:
content_image = K.zeros(shape=shape)
images = K.concatenate([style_image, target_image, content_image], axis=0)
# Create tensor variables for masks
raw_style_mask, raw_target_mask = load_mask_labels()
style_mask = K.variable(raw_style_mask.astype("float32"))
target_mask = K.variable(raw_target_mask.astype("float32"))
masks = K.concatenate([style_mask, target_mask], axis=0)
# index constants for images and tasks variables
STYLE, TARGET, CONTENT = 0, 1, 2
# Build image model, mask model and use layer outputs as features
# image model as VGG16
image_model = vgg16.VGG16(include_top=False, input_tensor=images)
# mask model as a series of pooling
mask_input = Input(tensor=masks, shape=(None, None, None), name="mask_input")
x = mask_input
for layer in image_model.layers[1:]:
name = 'mask_%s' % layer.name
if 'conv' in layer.name:
x = AveragePooling2D((3, 3), strides=(1, 1), name=name, border_mode="same")(x)
elif 'pool' in layer.name:
x = AveragePooling2D((2, 2), name=name)(x)
mask_model = Model(mask_input, x)
# Collect features from image_model and mask_model
image_features = {}
mask_features = {}
for img_layer, mask_layer in zip(image_model.layers, mask_model.layers):
if 'conv' in img_layer.name:
assert 'mask_' + img_layer.name == mask_layer.name
layer_name = img_layer.name
img_feat, mask_feat = img_layer.output, mask_layer.output
image_features[layer_name] = img_feat
mask_features[layer_name] = mask_feat
# Define loss functions
def gram_matrix(x):
assert K.ndim(x) == 3
features = K.batch_flatten(x)
gram = K.dot(features, K.transpose(features))
return gram
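# Shape note (added comment): gram_matrix receives features of shape
# (channels, rows, cols); batch_flatten turns them into (channels, rows * cols) and
# the resulting Gram matrix is (channels, channels), where entry (i, j) is the inner
# product of feature maps i and j over all spatial positions.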
def region_style_loss(style_image, target_image, style_mask, target_mask):
'''Calculate style loss between style_image and target_image,
for one common region specified by their (boolean) masks
'''
assert 3 == K.ndim(style_image) == K.ndim(target_image)
assert 2 == K.ndim(style_mask) == K.ndim(target_mask)
if K.image_dim_ordering() == 'th':
masked_style = style_image * style_mask
masked_target = target_image * target_mask
nb_channels = K.shape(style_image)[0]
else:
masked_style = K.permute_dimensions(
style_image, (2, 0, 1)) * style_mask
masked_target = K.permute_dimensions(
target_image, (2, 0, 1)) * target_mask
nb_channels = K.shape(style_image)[-1]
s = gram_matrix(masked_style) / K.mean(style_mask) / nb_channels
c = gram_matrix(masked_target) / K.mean(target_mask) / nb_channels
return K.mean(K.square(s - c))
def style_loss(style_image, target_image, style_masks, target_masks):
'''Calculate style loss between style_image and target_image,
in all regions.
'''
assert 3 == K.ndim(style_image) == K.ndim(target_image)
assert 3 == K.ndim(style_masks) == K.ndim(target_masks)
loss = K.variable(0)
for i in range(nb_labels):
if K.image_dim_ordering() == 'th':
style_mask = style_masks[i, :, :]
target_mask = target_masks[i, :, :]
else:
style_mask = style_masks[:, :, i]
target_mask = target_masks[:, :, i]
loss += region_style_weight * region_style_loss(style_image, target_image, style_mask, target_mask)
return loss
def content_loss(content_image, target_image):
return K.sum(K.square(target_image - content_image))
def total_variation_loss(x):
assert 4 == K.ndim(x)
if K.image_dim_ordering() == 'th':
a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
x[:, :, 1:, :img_ncols - 1])
b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
x[:, :, :img_nrows - 1, 1:])
else:
a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
x[:, 1:, :img_ncols - 1, :])
b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
x[:, :img_nrows - 1, 1:, :])
return K.sum(K.pow(a + b, 1.25))
# Overall loss is the weighted sum of content_loss, style_loss and tv_loss
# Each individual loss uses features from image/mask models.
loss = K.variable(0)
for layer in content_feature_layers:
content_feat = image_features[layer][CONTENT, :, :, :]
target_feat = image_features[layer][TARGET, :, :, :]
loss += content_weight * content_loss(content_feat, target_feat)
for layer in style_feature_layers:
style_feat = image_features[layer][STYLE, :, :, :]
target_feat = image_features[layer][TARGET, :, :, :]
style_masks = mask_features[layer][STYLE, :, :, :]
target_masks = mask_features[layer][TARGET, :, :, :]
sl = style_loss(style_feat, target_feat, style_masks, target_masks)
loss += (style_weight / len(style_feature_layers)) * sl
loss += total_variation_weight * total_variation_loss(target_image)
loss_grads = K.gradients(loss, target_image)
# Evaluator class for computing efficiency
outputs = [loss]
if type(loss_grads) in {list, tuple}:
outputs += loss_grads
else:
outputs.append(loss_grads)
f_outputs = K.function([target_image], outputs)
def eval_loss_and_grads(x):
if K.image_dim_ordering() == 'th':
x = x.reshape((1, 3, img_nrows, img_ncols))
else:
x = x.reshape((1, img_nrows, img_ncols, 3))
outs = f_outputs([x])
loss_value = outs[0]
if len(outs[1:]) == 1:
grad_values = outs[1].flatten().astype('float64')
else:
grad_values = np.array(outs[1:]).flatten().astype('float64')
return loss_value, grad_values
class Evaluator(object):
def __init__(self):
self.loss_value = None
self.grads_values = None
def loss(self, x):
assert self.loss_value is None
loss_value, grad_values = eval_loss_and_grads(x)
self.loss_value = loss_value
self.grad_values = grad_values
return self.loss_value
def grads(self, x):
assert self.loss_value is not None
grad_values = np.copy(self.grad_values)
self.loss_value = None
self.grad_values = None
return grad_values
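# fmin_l_bfgs_b asks for the loss and the gradient through two separate callbacks
# (evaluator.loss and evaluator.grads below); the Evaluator caches both results from
# a single f_outputs call so the expensive graph evaluation runs only once per step.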
evaluator = Evaluator()
# Generate images by iterative optimization
if use_content_img:
x = preprocess_image(content_img_path)
else:
if K.image_dim_ordering() == 'th':
x = np.random.uniform(0, 255, (1, 3, img_nrows, img_ncols)) - 128.
else:
x = np.random.uniform(0, 255, (1, img_nrows, img_ncols, 3)) - 128.
# We require original image if we are to preserve color in YCbCr mode
if preserve_color and use_content_img:
content = imread(content_img_path, mode="YCbCr")
content = imresize(content, (img_nrows, img_ncols))
prev_min_val = 0.
for i in range(args.num_iter):
print('Start of iteration', i + 1)
start_time = time.time()
x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.grads, maxfun=20)
if prev_min_val == 0:
improvement = 0
else:
improvement = (prev_min_val - min_val) / prev_min_val * 100
print("Current loss value:", min_val, " Improvement : %0.3f" % improvement, "%")
prev_min_val = min_val
# save current generated image
img = deprocess_image(x.copy())
if not use_content_img:
img = imfilter(img, ftype='smooth')
img = imfilter(img, ftype='sharpen')
if use_content_img and preserve_color and content is not None:
img = original_color_transform(content, img)
fname = target_img_prefix + '_at_iteration_%d.png' % (i + 1)
imsave(fname, img)
end_time = time.time()
print('Image saved as', fname)
print('Iteration %d completed in %ds' % (i + 1, end_time - start_time))
if args.min_improvement != 0.0:
if improvement < args.min_improvement and i > 1:
print("Script is early stopping since improvement (%0.2f) < min improvement (%0.2f)" %
(improvement, args.min_improvement))
output_image = target_img_prefix + '.png'
imsave(output_image, img)
exit()
|
mbalasso/mynumpy
|
refs/heads/master
|
numpy/fft/setup.py
|
48
|
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('fft',parent_package,top_path)
config.add_data_dir('tests')
# Configure fftpack_lite
config.add_extension('fftpack_lite',
sources=['fftpack_litemodule.c', 'fftpack.c']
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
|
abalkin/numpy
|
refs/heads/master
|
numpy/core/tests/test_indexing.py
|
6
|
import sys
import warnings
import functools
import operator
import numpy as np
from numpy.core._multiarray_tests import array_indexing
from itertools import product
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, assert_warns,
HAS_REFCOUNT,
)
class TestIndexing:
def test_index_no_floats(self):
a = np.array([[[5]]])
assert_raises(IndexError, lambda: a[0.0])
assert_raises(IndexError, lambda: a[0, 0.0])
assert_raises(IndexError, lambda: a[0.0, 0])
assert_raises(IndexError, lambda: a[0.0,:])
assert_raises(IndexError, lambda: a[:, 0.0])
assert_raises(IndexError, lambda: a[:, 0.0,:])
assert_raises(IndexError, lambda: a[0.0,:,:])
assert_raises(IndexError, lambda: a[0, 0, 0.0])
assert_raises(IndexError, lambda: a[0.0, 0, 0])
assert_raises(IndexError, lambda: a[0, 0.0, 0])
assert_raises(IndexError, lambda: a[-1.4])
assert_raises(IndexError, lambda: a[0, -1.4])
assert_raises(IndexError, lambda: a[-1.4, 0])
assert_raises(IndexError, lambda: a[-1.4,:])
assert_raises(IndexError, lambda: a[:, -1.4])
assert_raises(IndexError, lambda: a[:, -1.4,:])
assert_raises(IndexError, lambda: a[-1.4,:,:])
assert_raises(IndexError, lambda: a[0, 0, -1.4])
assert_raises(IndexError, lambda: a[-1.4, 0, 0])
assert_raises(IndexError, lambda: a[0, -1.4, 0])
assert_raises(IndexError, lambda: a[0.0:, 0.0])
assert_raises(IndexError, lambda: a[0.0:, 0.0,:])
def test_slicing_no_floats(self):
a = np.array([[5]])
# start as float.
assert_raises(TypeError, lambda: a[0.0:])
assert_raises(TypeError, lambda: a[0:, 0.0:2])
assert_raises(TypeError, lambda: a[0.0::2, :0])
assert_raises(TypeError, lambda: a[0.0:1:2,:])
assert_raises(TypeError, lambda: a[:, 0.0:])
# stop as float.
assert_raises(TypeError, lambda: a[:0.0])
assert_raises(TypeError, lambda: a[:0, 1:2.0])
assert_raises(TypeError, lambda: a[:0.0:2, :0])
assert_raises(TypeError, lambda: a[:0.0,:])
assert_raises(TypeError, lambda: a[:, 0:4.0:2])
# step as float.
assert_raises(TypeError, lambda: a[::1.0])
assert_raises(TypeError, lambda: a[0:, :2:2.0])
assert_raises(TypeError, lambda: a[1::4.0, :0])
assert_raises(TypeError, lambda: a[::5.0,:])
assert_raises(TypeError, lambda: a[:, 0:4:2.0])
# mixed.
assert_raises(TypeError, lambda: a[1.0:2:2.0])
assert_raises(TypeError, lambda: a[1.0::2.0])
assert_raises(TypeError, lambda: a[0:, :2.0:2.0])
assert_raises(TypeError, lambda: a[1.0:1:4.0, :0])
assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:])
assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0])
# should still get the DeprecationWarning if step = 0.
assert_raises(TypeError, lambda: a[::0.0])
def test_index_no_array_to_index(self):
# No non-scalar arrays.
a = np.array([[[1]]])
assert_raises(TypeError, lambda: a[a:a:a])
def test_none_index(self):
# `None` index adds newaxis
a = np.array([1, 2, 3])
assert_equal(a[None], a[np.newaxis])
assert_equal(a[None].ndim, a.ndim + 1)
def test_empty_tuple_index(self):
# Empty tuple index creates a view
a = np.array([1, 2, 3])
assert_equal(a[()], a)
assert_(a[()].base is a)
a = np.array(0)
assert_(isinstance(a[()], np.int_))
def test_void_scalar_empty_tuple(self):
s = np.zeros((), dtype='V4')
assert_equal(s[()].dtype, s.dtype)
assert_equal(s[()], s)
assert_equal(type(s[...]), np.ndarray)
def test_same_kind_index_casting(self):
# Indexes should be cast with same-kind and not safe, even if that
# is somewhat unsafe. So test various different code paths.
index = np.arange(5)
u_index = index.astype(np.uintp)
arr = np.arange(10)
assert_array_equal(arr[index], arr[u_index])
arr[u_index] = np.arange(5)
assert_array_equal(arr, np.arange(10))
arr = np.arange(10).reshape(5, 2)
assert_array_equal(arr[index], arr[u_index])
arr[u_index] = np.arange(5)[:,None]
assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1))
arr = np.arange(25).reshape(5, 5)
assert_array_equal(arr[u_index, u_index], arr[index, index])
def test_empty_fancy_index(self):
# Empty list index creates an empty array
# with the same dtype (but with weird shape)
a = np.array([1, 2, 3])
assert_equal(a[[]], [])
assert_equal(a[[]].dtype, a.dtype)
b = np.array([], dtype=np.intp)
assert_equal(a[[]], [])
assert_equal(a[[]].dtype, a.dtype)
b = np.array([])
assert_raises(IndexError, a.__getitem__, b)
def test_ellipsis_index(self):
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert_(a[...] is not a)
assert_equal(a[...], a)
# `a[...]` was `a` in numpy <1.9.
assert_(a[...].base is a)
# Slicing with ellipsis can skip an
# arbitrary number of dimensions
assert_equal(a[0, ...], a[0])
assert_equal(a[0, ...], a[0,:])
assert_equal(a[..., 0], a[:, 0])
# Slicing with ellipsis always results
# in an array, not a scalar
assert_equal(a[0, ..., 1], np.array(2))
# Assignment with `(Ellipsis,)` on 0-d arrays
b = np.array(1)
b[(Ellipsis,)] = 2
assert_equal(b, 2)
def test_single_int_index(self):
# Single integer index selects one row
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert_equal(a[0], [1, 2, 3])
assert_equal(a[-1], [7, 8, 9])
# Index out of bounds produces IndexError
assert_raises(IndexError, a.__getitem__, 1 << 30)
# Index overflow produces IndexError
assert_raises(IndexError, a.__getitem__, 1 << 64)
def test_single_bool_index(self):
# Single boolean index
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert_equal(a[np.array(True)], a[None])
assert_equal(a[np.array(False)], a[None][0:0])
def test_boolean_shape_mismatch(self):
arr = np.ones((5, 4, 3))
index = np.array([True])
assert_raises(IndexError, arr.__getitem__, index)
index = np.array([False] * 6)
assert_raises(IndexError, arr.__getitem__, index)
index = np.zeros((4, 4), dtype=bool)
assert_raises(IndexError, arr.__getitem__, index)
assert_raises(IndexError, arr.__getitem__, (slice(None), index))
def test_boolean_indexing_onedim(self):
# Indexing a 2-dimensional array with
# boolean array of length one
a = np.array([[ 0., 0., 0.]])
b = np.array([ True], dtype=bool)
assert_equal(a[b], a)
# boolean assignment
a[b] = 1.
assert_equal(a, [[1., 1., 1.]])
def test_boolean_assignment_value_mismatch(self):
# A boolean assignment should fail when the shape of the values
# cannot be broadcast to the subscription. (see also gh-3458)
a = np.arange(4)
def f(a, v):
a[a > -1] = v
assert_raises(ValueError, f, a, [])
assert_raises(ValueError, f, a, [1, 2, 3])
assert_raises(ValueError, f, a[:1], [1, 2, 3])
def test_boolean_assignment_needs_api(self):
# See also gh-7666
# This caused a segfault on Python 2 due to the GIL not being
# held when the iterator does not need it, but the transfer function
# does
arr = np.zeros(1000)
indx = np.zeros(1000, dtype=bool)
indx[:100] = True
arr[indx] = np.ones(100, dtype=object)
expected = np.zeros(1000)
expected[:100] = 1
assert_array_equal(arr, expected)
def test_boolean_indexing_twodim(self):
# Indexing a 2-dimensional array with
# 2-dimensional boolean array
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
b = np.array([[ True, False, True],
[False, True, False],
[ True, False, True]])
assert_equal(a[b], [1, 3, 5, 7, 9])
assert_equal(a[b[1]], [[4, 5, 6]])
assert_equal(a[b[0]], a[b[2]])
# boolean assignment
a[b] = 0
assert_equal(a, [[0, 2, 0],
[4, 0, 6],
[0, 8, 0]])
def test_boolean_indexing_list(self):
# Regression test for #13715. It's a use-after-free bug which the
# test won't directly catch, but it will show up in valgrind.
a = np.array([1, 2, 3])
b = [True, False, True]
# Two variants of the test because the first takes a fast path
assert_equal(a[b], [1, 3])
assert_equal(a[None, b], [[1, 3]])
def test_reverse_strides_and_subspace_bufferinit(self):
# This tests that the strides are not reversed for simple and
# subspace fancy indexing.
a = np.ones(5)
b = np.zeros(5, dtype=np.intp)[::-1]
c = np.arange(5)[::-1]
a[b] = c
# If the strides are not reversed, the 0 in the arange comes last.
assert_equal(a[0], 0)
# This also tests that the subspace buffer is initialized:
a = np.ones((5, 2))
c = np.arange(10).reshape(5, 2)[::-1]
a[b, :] = c
assert_equal(a[0], [0, 1])
def test_reversed_strides_result_allocation(self):
# Test a bug when calculating the output strides for a result array
# when the subspace size was 1 (and test other cases as well)
a = np.arange(10)[:, None]
i = np.arange(10)[::-1]
assert_array_equal(a[i], a[i.copy('C')])
a = np.arange(20).reshape(-1, 2)
def test_uncontiguous_subspace_assignment(self):
# During development there was a bug activating a skip logic
# based on ndim instead of size.
a = np.full((3, 4, 2), -1)
b = np.full((3, 4, 2), -1)
a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T
b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy()
assert_equal(a, b)
def test_too_many_fancy_indices_special_case(self):
# Just documents behaviour, this is a small limitation.
a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS
assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32)
def test_scalar_array_bool(self):
# NumPy bools can be used as boolean index (python ones as of yet not)
a = np.array(1)
assert_equal(a[np.bool_(True)], a[np.array(True)])
assert_equal(a[np.bool_(False)], a[np.array(False)])
# After deprecating bools as integers:
#a = np.array([0,1,2])
#assert_equal(a[True, :], a[None, :])
#assert_equal(a[:, True], a[:, None])
#
#assert_(not np.may_share_memory(a, a[True, :]))
def test_everything_returns_views(self):
# Before `...` would return a itself.
a = np.arange(5)
assert_(a is not a[()])
assert_(a is not a[...])
assert_(a is not a[:])
def test_broaderrors_indexing(self):
a = np.zeros((5, 5))
assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))
assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0)
def test_trivial_fancy_out_of_bounds(self):
a = np.zeros(5)
ind = np.ones(20, dtype=np.intp)
ind[-1] = 10
assert_raises(IndexError, a.__getitem__, ind)
assert_raises(IndexError, a.__setitem__, ind, 0)
ind = np.ones(20, dtype=np.intp)
ind[0] = 11
assert_raises(IndexError, a.__getitem__, ind)
assert_raises(IndexError, a.__setitem__, ind, 0)
def test_trivial_fancy_not_possible(self):
# Test that the fast path for trivial assignment is not incorrectly
# used when the index is not contiguous or 1D, see also gh-11467.
a = np.arange(6)
idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0]
assert_array_equal(a[idx], idx)
# this case must not go into the fast path, note that idx is
        # a non-contiguous, non-1D array here.
a[idx] = -1
res = np.arange(6)
res[0] = -1
res[3] = -1
assert_array_equal(a, res)
def test_nonbaseclass_values(self):
class SubClass(np.ndarray):
def __array_finalize__(self, old):
# Have array finalize do funny things
self.fill(99)
a = np.zeros((5, 5))
s = a.copy().view(type=SubClass)
s.fill(1)
a[[0, 1, 2, 3, 4], :] = s
assert_((a == 1).all())
# Subspace is last, so transposing might want to finalize
a[:, [0, 1, 2, 3, 4]] = s
assert_((a == 1).all())
a.fill(0)
a[...] = s
assert_((a == 1).all())
def test_subclass_writeable(self):
d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)],
dtype=[('target', 'S20'), ('V_mag', '>f4')])
ind = np.array([False, True, True], dtype=bool)
assert_(d[ind].flags.writeable)
ind = np.array([0, 1])
assert_(d[ind].flags.writeable)
assert_(d[...].flags.writeable)
assert_(d[0].flags.writeable)
def test_memory_order(self):
# This is not necessary to preserve. Memory layouts for
# more complex indices are not as simple.
a = np.arange(10)
b = np.arange(10).reshape(5,2).T
assert_(a[b].flags.f_contiguous)
# Takes a different implementation branch:
a = a.reshape(-1, 1)
assert_(a[b, 0].flags.f_contiguous)
def test_scalar_return_type(self):
# Full scalar indices should return scalars and object
# arrays should not call PyArray_Return on their items
class Zero:
# The most basic valid indexing
def __index__(self):
return 0
z = Zero()
class ArrayLike:
# Simple array, should behave like the array
def __array__(self):
return np.array(0)
a = np.zeros(())
assert_(isinstance(a[()], np.float_))
a = np.zeros(1)
assert_(isinstance(a[z], np.float_))
a = np.zeros((1, 1))
assert_(isinstance(a[z, np.array(0)], np.float_))
assert_(isinstance(a[z, ArrayLike()], np.float_))
# And object arrays do not call it too often:
b = np.array(0)
a = np.array(0, dtype=object)
a[()] = b
assert_(isinstance(a[()], np.ndarray))
a = np.array([b, None])
assert_(isinstance(a[z], np.ndarray))
a = np.array([[b, None]])
assert_(isinstance(a[z, np.array(0)], np.ndarray))
assert_(isinstance(a[z, ArrayLike()], np.ndarray))
def test_small_regressions(self):
# Reference count of intp for index checks
a = np.array([0])
if HAS_REFCOUNT:
refcount = sys.getrefcount(np.dtype(np.intp))
# item setting always checks indices in separate function:
a[np.array([0], dtype=np.intp)] = 1
a[np.array([0], dtype=np.uint8)] = 1
assert_raises(IndexError, a.__setitem__,
np.array([1], dtype=np.intp), 1)
assert_raises(IndexError, a.__setitem__,
np.array([1], dtype=np.uint8), 1)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)
def test_unaligned(self):
v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]
d = v.view(np.dtype("S8"))
# unaligned source
x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7]
x = x.view(np.dtype("S8"))
x[...] = np.array("b" * 8, dtype="S")
b = np.arange(d.size)
#trivial
assert_equal(d[b], d)
d[b] = x
# nontrivial
# unaligned index array
b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)]
b = b.view(np.intp)[:d.size]
b[...] = np.arange(d.size)
assert_equal(d[b.astype(np.int16)], d)
d[b.astype(np.int16)] = x
# boolean
d[b % 2 == 0]
d[b % 2 == 0] = x[::2]
def test_tuple_subclass(self):
arr = np.ones((5, 5))
# A tuple subclass should also be an nd-index
class TupleSubclass(tuple):
pass
index = ([1], [1])
index = TupleSubclass(index)
assert_(arr[index].shape == (1,))
# Unlike the non nd-index:
assert_(arr[index,].shape != (1,))
def test_broken_sequence_not_nd_index(self):
# See gh-5063:
# If we have an object which claims to be a sequence, but fails
# on item getting, this should not be converted to an nd-index (tuple)
# If this object happens to be a valid index otherwise, it should work
# This object here is very dubious and probably bad though:
class SequenceLike:
def __index__(self):
return 0
def __len__(self):
return 1
def __getitem__(self, item):
raise IndexError('Not possible')
arr = np.arange(10)
assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
# also test that field indexing does not segfault
# for a similar reason, by indexing a structured array
arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')])
assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
def test_indexing_array_weird_strides(self):
# See also gh-6221
# the shapes used here come from the issue and create the correct
# size for the iterator buffering size.
x = np.ones(10)
x2 = np.ones((10, 2))
ind = np.arange(10)[:, None, None, None]
ind = np.broadcast_to(ind, (10, 55, 4, 4))
# single advanced index case
assert_array_equal(x[ind], x[ind.copy()])
# higher dimensional advanced index
zind = np.zeros(4, dtype=np.intp)
assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])
def test_indexing_array_negative_strides(self):
# From gh-8264,
# core dumps if negative strides are used in iteration
arro = np.zeros((4, 4))
arr = arro[::-1, ::-1]
slices = (slice(None), [0, 1, 2, 3])
arr[slices] = 10
assert_array_equal(arr, 10.)
class TestFieldIndexing:
def test_scalar_return_type(self):
# Field access on an array should return an array, even if it
# is 0-d.
a = np.zeros((), [('a','f8')])
assert_(isinstance(a['a'], np.ndarray))
assert_(isinstance(a[['a']], np.ndarray))
class TestBroadcastedAssignments:
def assign(self, a, ind, val):
a[ind] = val
return a
def test_prepending_ones(self):
a = np.zeros((3, 2))
a[...] = np.ones((1, 3, 2))
# Fancy with subspace with and without transpose
a[[0, 1, 2], :] = np.ones((1, 3, 2))
a[:, [0, 1]] = np.ones((1, 3, 2))
# Fancy without subspace (with broadcasting)
a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2))
def test_prepend_not_one(self):
assign = self.assign
s_ = np.s_
a = np.zeros(5)
# Too large and not only ones.
assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1)))
assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1)))
assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1)))
def test_simple_broadcasting_errors(self):
assign = self.assign
s_ = np.s_
a = np.zeros((5, 1))
assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2)))
assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0)))
assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2)))
assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0)))
assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1)))
def test_index_is_larger(self):
# Simple case of fancy index broadcasting of the index.
a = np.zeros((5, 5))
a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4]
assert_((a[:3, :3] == [2, 3, 4]).all())
def test_broadcast_subspace(self):
a = np.zeros((100, 100))
v = np.arange(100)[:,None]
b = np.arange(100)[::-1]
a[b] = v
assert_((a[::-1] == v).all())
class TestSubclasses:
def test_basic(self):
# Test that indexing in various ways produces SubClass instances,
# and that the base is set up correctly: the original subclass
# instance for views, and a new ndarray for advanced/boolean indexing
# where a copy was made (latter a regression test for gh-11983).
class SubClass(np.ndarray):
pass
a = np.arange(5)
s = a.view(SubClass)
s_slice = s[:3]
assert_(type(s_slice) is SubClass)
assert_(s_slice.base is s)
assert_array_equal(s_slice, a[:3])
s_fancy = s[[0, 1, 2]]
assert_(type(s_fancy) is SubClass)
assert_(s_fancy.base is not s)
assert_(type(s_fancy.base) is np.ndarray)
assert_array_equal(s_fancy, a[[0, 1, 2]])
assert_array_equal(s_fancy.base, a[[0, 1, 2]])
s_bool = s[s > 0]
assert_(type(s_bool) is SubClass)
assert_(s_bool.base is not s)
assert_(type(s_bool.base) is np.ndarray)
assert_array_equal(s_bool, a[a > 0])
assert_array_equal(s_bool.base, a[a > 0])
def test_fancy_on_read_only(self):
# Test that fancy indexing on read-only SubClass does not make a
# read-only copy (gh-14132)
class SubClass(np.ndarray):
pass
a = np.arange(5)
s = a.view(SubClass)
s.flags.writeable = False
s_fancy = s[[0, 1, 2]]
assert_(s_fancy.flags.writeable)
def test_finalize_gets_full_info(self):
# Array finalize should be called on the filled array.
class SubClass(np.ndarray):
def __array_finalize__(self, old):
self.finalize_status = np.array(self)
self.old = old
s = np.arange(10).view(SubClass)
new_s = s[:3]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
new_s = s[[0,1,2,3]]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
new_s = s[s > 0]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
class TestFancyIndexingCast:
def test_boolean_index_cast_assign(self):
# Setup the boolean index and float arrays.
shape = (8, 63)
bool_index = np.zeros(shape).astype(bool)
bool_index[0, 1] = True
zero_array = np.zeros(shape)
# Assigning float is fine.
zero_array[bool_index] = np.array([1])
assert_equal(zero_array[0, 1], 1)
# Fancy indexing works, although we get a cast warning.
assert_warns(np.ComplexWarning,
zero_array.__setitem__, ([0], [1]), np.array([2 + 1j]))
assert_equal(zero_array[0, 1], 2) # No complex part
# Cast complex to float, throwing away the imaginary portion.
assert_warns(np.ComplexWarning,
zero_array.__setitem__, bool_index, np.array([1j]))
assert_equal(zero_array[0, 1], 0)
class TestFancyIndexingEquivalence:
def test_object_assign(self):
# Check that the field and object special case using copyto is active.
# The right hand side cannot be converted to an array here.
a = np.arange(5, dtype=object)
b = a.copy()
a[:3] = [1, (1,2), 3]
b[[0, 1, 2]] = [1, (1,2), 3]
assert_array_equal(a, b)
# test same for subspace fancy indexing
b = np.arange(5, dtype=object)[None, :]
b[[0], :3] = [[1, (1,2), 3]]
assert_array_equal(a, b[0])
# Check that swapping of axes works.
# There was a bug that made the later assignment throw a ValueError
        # due to an incorrectly transposed temporary right hand side (gh-5714)
b = b.T
b[:3, [0]] = [[1], [(1,2)], [3]]
assert_array_equal(a, b[:, 0])
# Another test for the memory order of the subspace
arr = np.ones((3, 4, 5), dtype=object)
# Equivalent slicing assignment for comparison
cmp_arr = arr.copy()
cmp_arr[:1, ...] = [[[1], [2], [3], [4]]]
arr[[0], ...] = [[[1], [2], [3], [4]]]
assert_array_equal(arr, cmp_arr)
arr = arr.copy('F')
arr[[0], ...] = [[[1], [2], [3], [4]]]
assert_array_equal(arr, cmp_arr)
def test_cast_equivalence(self):
# Yes, normal slicing uses unsafe casting.
a = np.arange(5)
b = a.copy()
a[:3] = np.array(['2', '-3', '-1'])
b[[0, 2, 1]] = np.array(['2', '-1', '-3'])
assert_array_equal(a, b)
# test the same for subspace fancy indexing
b = np.arange(5)[None, :]
b[[0], :3] = np.array([['2', '-3', '-1']])
assert_array_equal(a, b[0])
class TestMultiIndexingAutomated:
"""
These tests use code to mimic the C-Code indexing for selection.
NOTE:
* This still lacks tests for complex item setting.
* If you change behavior of indexing, you might want to modify
these tests to try more combinations.
* Behavior was written to match numpy version 1.8. (though a
first version matched 1.7.)
* Only tuple indices are supported by the mimicking code.
(and tested as of writing this)
* Error types should match most of the time as long as there
is only one error. For multiple errors, what gets raised
will usually not be the same one. They are *not* tested.
Update 2016-11-30: It is probably not worth maintaining this test
indefinitely and it can be dropped if maintenance becomes a burden.
"""
def setup(self):
self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
self.b = np.empty((3, 0, 5, 6))
self.complex_indices = ['skip', Ellipsis,
0,
# Boolean indices, up to 3-d for some special cases of eating up
# dimensions, also need to test all False
np.array([True, False, False]),
np.array([[True, False], [False, True]]),
np.array([[[False, False], [False, False]]]),
# Some slices:
slice(-5, 5, 2),
slice(1, 1, 100),
slice(4, -1, -2),
slice(None, None, -3),
# Some Fancy indexes:
np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast
np.array([0, 1, -2]),
np.array([[2], [0], [1]]),
np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),
np.array([2, -1], dtype=np.int8),
np.zeros([1]*31, dtype=int), # trigger too large array.
np.array([0., 1.])] # invalid datatype
# Some simpler indices that still cover a bit more
self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]),
'skip']
# Very simple ones to fill the rest:
self.fill_indices = [slice(None, None), 0]
def _get_multi_index(self, arr, indices):
"""Mimic multi dimensional indexing.
Parameters
----------
arr : ndarray
Array to be indexed.
indices : tuple of index objects
Returns
-------
out : ndarray
An array equivalent to the indexing operation (but always a copy).
`arr[indices]` should be identical.
no_copy : bool
Whether the indexing operation requires a copy. If this is `True`,
`np.may_share_memory(arr, arr[indices])` should be `True` (with
some exceptions for scalars and possibly 0-d arrays).
Notes
-----
While the function may mostly match the errors of normal indexing this
is generally not the case.
"""
in_indices = list(indices)
indices = []
# if False, this is a fancy or boolean index
no_copy = True
# number of fancy/scalar indexes that are not consecutive
num_fancy = 0
# number of dimensions indexed by a "fancy" index
fancy_dim = 0
# NOTE: This is a funny twist (and probably OK to change).
# The boolean array has illegal indexes, but this is
# allowed if the broadcast fancy-indices are 0-sized.
# This variable is to catch that case.
error_unless_broadcast_to_empty = False
# We need to handle Ellipsis and make arrays from indices, also
# check if this is fancy indexing (set no_copy).
ndim = 0
ellipsis_pos = None # define here mostly to replace all but first.
for i, indx in enumerate(in_indices):
if indx is None:
continue
if isinstance(indx, np.ndarray) and indx.dtype == bool:
no_copy = False
if indx.ndim == 0:
raise IndexError
# boolean indices can have higher dimensions
ndim += indx.ndim
fancy_dim += indx.ndim
continue
if indx is Ellipsis:
if ellipsis_pos is None:
ellipsis_pos = i
continue # do not increment ndim counter
raise IndexError
if isinstance(indx, slice):
ndim += 1
continue
if not isinstance(indx, np.ndarray):
# This could be open for changes in numpy.
# numpy should maybe raise an error if casting to intp
# is not safe. It rejects np.array([1., 2.]) but not
                # [1., 2.] as index (same for e.g. np.take).
# (Note the importance of empty lists if changing this here)
try:
indx = np.array(indx, dtype=np.intp)
except ValueError:
raise IndexError
in_indices[i] = indx
elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
raise IndexError('arrays used as indices must be of '
'integer (or boolean) type')
if indx.ndim != 0:
no_copy = False
ndim += 1
fancy_dim += 1
if arr.ndim - ndim < 0:
            # we can't take more dimensions than we have, not even for 0-d
# arrays. since a[()] makes sense, but not a[(),]. We will
# raise an error later on, unless a broadcasting error occurs
# first.
raise IndexError
if ndim == 0 and None not in in_indices:
# Well we have no indexes or one Ellipsis. This is legal.
return arr.copy(), no_copy
if ellipsis_pos is not None:
in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] *
(arr.ndim - ndim))
for ax, indx in enumerate(in_indices):
if isinstance(indx, slice):
# convert to an index array
indx = np.arange(*indx.indices(arr.shape[ax]))
indices.append(['s', indx])
continue
elif indx is None:
# this is like taking a slice with one element from a new axis:
indices.append(['n', np.array([0], dtype=np.intp)])
arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:]))
continue
if isinstance(indx, np.ndarray) and indx.dtype == bool:
if indx.shape != arr.shape[ax:ax+indx.ndim]:
raise IndexError
try:
flat_indx = np.ravel_multi_index(np.nonzero(indx),
arr.shape[ax:ax+indx.ndim], mode='raise')
except Exception:
error_unless_broadcast_to_empty = True
# fill with 0s instead, and raise error later
flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
# concatenate axis into a single one:
if indx.ndim != 0:
arr = arr.reshape((arr.shape[:ax]
+ (np.prod(arr.shape[ax:ax+indx.ndim]),)
+ arr.shape[ax+indx.ndim:]))
indx = flat_indx
else:
# This could be changed, a 0-d boolean index can
# make sense (even outside the 0-d indexed array case)
                    # Note that originally this could be interpreted as
# integer in the full integer special case.
raise IndexError
else:
# If the index is a singleton, the bounds check is done
# before the broadcasting. This used to be different in <1.9
if indx.ndim == 0:
if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
raise IndexError
if indx.ndim == 0:
# The index is a scalar. This used to be two fold, but if
# fancy indexing was active, the check was done later,
# possibly after broadcasting it away (1.7. or earlier).
# Now it is always done.
if indx >= arr.shape[ax] or indx < - arr.shape[ax]:
raise IndexError
if (len(indices) > 0 and
indices[-1][0] == 'f' and
ax != ellipsis_pos):
# NOTE: There could still have been a 0-sized Ellipsis
# between them. Checked that with ellipsis_pos.
indices[-1].append(indx)
else:
# We have a fancy index that is not after an existing one.
# NOTE: A 0-d array triggers this as well, while one may
# expect it to not trigger it, since a scalar would not be
# considered fancy indexing.
num_fancy += 1
indices.append(['f', indx])
if num_fancy > 1 and not no_copy:
# We have to flush the fancy indexes left
new_indices = indices[:]
axes = list(range(arr.ndim))
fancy_axes = []
new_indices.insert(0, ['f'])
ni = 0
ai = 0
for indx in indices:
ni += 1
if indx[0] == 'f':
new_indices[0].extend(indx[1:])
del new_indices[ni]
ni -= 1
for ax in range(ai, ai + len(indx[1:])):
fancy_axes.append(ax)
axes.remove(ax)
ai += len(indx) - 1 # axis we are at
indices = new_indices
# and now we need to transpose arr:
arr = arr.transpose(*(fancy_axes + axes))
# We only have one 'f' index now and arr is transposed accordingly.
# Now handle newaxis by reshaping...
ax = 0
for indx in indices:
if indx[0] == 'f':
if len(indx) == 1:
continue
# First of all, reshape arr to combine fancy axes into one:
orig_shape = arr.shape
orig_slice = orig_shape[ax:ax + len(indx[1:])]
arr = arr.reshape((arr.shape[:ax]
+ (np.prod(orig_slice).astype(int),)
+ arr.shape[ax + len(indx[1:]):]))
# Check if broadcasting works
res = np.broadcast(*indx[1:])
# unfortunately the indices might be out of bounds. So check
# that first, and use mode='wrap' then. However only if
# there are any indices...
if res.size != 0:
if error_unless_broadcast_to_empty:
raise IndexError
for _indx, _size in zip(indx[1:], orig_slice):
if _indx.size == 0:
continue
if np.any(_indx >= _size) or np.any(_indx < -_size):
raise IndexError
if len(indx[1:]) == len(orig_slice):
if np.product(orig_slice) == 0:
# Work around for a crash or IndexError with 'wrap'
# in some 0-sized cases.
try:
mi = np.ravel_multi_index(indx[1:], orig_slice,
mode='raise')
except Exception:
# This happens with 0-sized orig_slice (sometimes?)
# here it is a ValueError, but indexing gives a:
raise IndexError('invalid index into 0-sized')
else:
mi = np.ravel_multi_index(indx[1:], orig_slice,
mode='wrap')
else:
# Maybe never happens...
raise ValueError
arr = arr.take(mi.ravel(), axis=ax)
try:
arr = arr.reshape((arr.shape[:ax]
+ mi.shape
+ arr.shape[ax+1:]))
except ValueError:
# too many dimensions, probably
raise IndexError
ax += mi.ndim
continue
# If we are here, we have a 1D array for take:
arr = arr.take(indx[1], axis=ax)
ax += 1
return arr, no_copy
def _check_multi_index(self, arr, index):
"""Check a multi index item getting and simple setting.
Parameters
----------
arr : ndarray
Array to be indexed, must be a reshaped arange.
index : tuple of indexing objects
Index being tested.
"""
# Test item getting
try:
mimic_get, no_copy = self._get_multi_index(arr, index)
except Exception as e:
if HAS_REFCOUNT:
prev_refcount = sys.getrefcount(arr)
assert_raises(type(e), arr.__getitem__, index)
assert_raises(type(e), arr.__setitem__, index, 0)
if HAS_REFCOUNT:
assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
def _check_single_index(self, arr, index):
"""Check a single index item getting and simple setting.
Parameters
----------
arr : ndarray
Array to be indexed, must be an arange.
index : indexing object
Index being tested. Must be a single index and not a tuple
of indexing objects (see also `_check_multi_index`).
"""
try:
mimic_get, no_copy = self._get_multi_index(arr, (index,))
except Exception as e:
if HAS_REFCOUNT:
prev_refcount = sys.getrefcount(arr)
assert_raises(type(e), arr.__getitem__, index)
assert_raises(type(e), arr.__setitem__, index, 0)
if HAS_REFCOUNT:
assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
def _compare_index_result(self, arr, index, mimic_get, no_copy):
"""Compare mimicked result to indexing result.
"""
arr = arr.copy()
indexed_arr = arr[index]
assert_array_equal(indexed_arr, mimic_get)
        # Check if we got a view, unless it's a 0-sized or 0-d array.
        # (then it's not a view, and that does not matter)
if indexed_arr.size != 0 and indexed_arr.ndim != 0:
assert_(np.may_share_memory(indexed_arr, arr) == no_copy)
# Check reference count of the original array
if HAS_REFCOUNT:
if no_copy:
# refcount increases by one:
assert_equal(sys.getrefcount(arr), 3)
else:
assert_equal(sys.getrefcount(arr), 2)
# Test non-broadcast setitem:
b = arr.copy()
b[index] = mimic_get + 1000
if b.size == 0:
return # nothing to compare here...
if no_copy and indexed_arr.ndim != 0:
# change indexed_arr in-place to manipulate original:
indexed_arr += 1000
assert_array_equal(arr, b)
return
# Use the fact that the array is originally an arange:
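        # (the values of an arange equal their own flat indices, so the fetched
        #  values identify exactly which positions the index selected)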
arr.flat[indexed_arr.ravel()] += 1000
assert_array_equal(arr, b)
def test_boolean(self):
a = np.array(5)
assert_equal(a[np.array(True)], 5)
a[np.array(True)] = 1
assert_equal(a, 1)
# NOTE: This is different from normal broadcasting, as
# arr[boolean_array] works like in a multi index. Which means
# it is aligned to the left. This is probably correct for
        # consistency with arr[boolean_array,]; also, no broadcasting
# is done at all
self._check_multi_index(
self.a, (np.zeros_like(self.a, dtype=bool),))
self._check_multi_index(
self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))
self._check_multi_index(
self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))
def test_multidim(self):
# Automatically test combinations with complex indexes on 2nd (or 1st)
# spot and the simple ones in one other spot.
with warnings.catch_warnings():
# This is so that np.array(True) is not accepted in a full integer
# index, when running the file separately.
warnings.filterwarnings('error', '', DeprecationWarning)
warnings.filterwarnings('error', '', np.VisibleDeprecationWarning)
def isskip(idx):
return isinstance(idx, str) and idx == "skip"
for simple_pos in [0, 2, 3]:
tocheck = [self.fill_indices, self.complex_indices,
self.fill_indices, self.fill_indices]
tocheck[simple_pos] = self.simple_indices
for index in product(*tocheck):
index = tuple(i for i in index if not isskip(i))
self._check_multi_index(self.a, index)
self._check_multi_index(self.b, index)
# Check very simple item getting:
self._check_multi_index(self.a, (0, 0, 0, 0))
self._check_multi_index(self.b, (0, 0, 0, 0))
# Also check (simple cases of) too many indices:
assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0))
assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0)
assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0))
assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0)
def test_1d(self):
a = np.arange(10)
for index in self.complex_indices:
self._check_single_index(a, index)
class TestFloatNonIntegerArgument:
"""
These test that ``TypeError`` is raised when you try to use
    non-integers as arguments for indexing and slicing, e.g. ``a[0.0:5]``
and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``.
"""
def test_valid_indexing(self):
# These should raise no errors.
a = np.array([[[5]]])
a[np.array([0])]
a[[0, 0]]
a[:, [0, 0]]
a[:, 0,:]
a[:,:,:]
def test_valid_slicing(self):
# These should raise no errors.
a = np.array([[[5]]])
a[::]
a[0:]
a[:2]
a[0:2]
a[::2]
a[1::2]
a[:2:2]
a[1:2:2]
def test_non_integer_argument_errors(self):
a = np.array([[5]])
assert_raises(TypeError, np.reshape, a, (1., 1., -1))
assert_raises(TypeError, np.reshape, a, (np.array(1.), -1))
assert_raises(TypeError, np.take, a, [0], 1.)
assert_raises(TypeError, np.take, a, [0], np.float64(1.))
def test_non_integer_sequence_multiplication(self):
# NumPy scalar sequence multiply should not work with non-integers
def mult(a, b):
return a * b
assert_raises(TypeError, mult, [1], np.float_(3))
# following should be OK
mult([1], np.int_(3))
def test_reduce_axis_float_index(self):
d = np.zeros((3,3,3))
assert_raises(TypeError, np.min, d, 0.5)
assert_raises(TypeError, np.min, d, (0.5, 1))
assert_raises(TypeError, np.min, d, (1, 2.2))
assert_raises(TypeError, np.min, d, (.2, 1.2))
class TestBooleanIndexing:
# Using a boolean as integer argument/indexing is an error.
def test_bool_as_int_argument_errors(self):
a = np.array([[[1]]])
assert_raises(TypeError, np.reshape, a, (True, -1))
assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))
# Note that operator.index(np.array(True)) does not work, a boolean
# array is thus also deprecated, but not with the same message:
assert_raises(TypeError, operator.index, np.array(True))
assert_warns(DeprecationWarning, operator.index, np.True_)
assert_raises(TypeError, np.take, args=(a, [0], False))
def test_boolean_indexing_weirdness(self):
# Weird boolean indexing things
a = np.ones((2, 3, 4))
        assert_(a[False, True, ...].shape == (0, 2, 3, 4))
        assert_(a[True, [0, 1], True, True, [1], [[2]]].shape == (1, 2))
assert_raises(IndexError, lambda: a[False, [0, 1], ...])
class TestArrayToIndexDeprecation:
"""Creating an an index from array not 0-D is an error.
"""
def test_array_to_index_error(self):
# so no exception is expected. The raising is effectively tested above.
a = np.array([[[1]]])
assert_raises(TypeError, operator.index, np.array([1]))
assert_raises(TypeError, np.reshape, a, (a, -1))
assert_raises(TypeError, np.take, a, [0], a)
class TestNonIntegerArrayLike:
"""Tests that array_likes only valid if can safely cast to integer.
For instance, lists give IndexError when they cannot be safely cast to
an integer.
"""
def test_basic(self):
a = np.arange(10)
assert_raises(IndexError, a.__getitem__, [0.5, 1.5])
assert_raises(IndexError, a.__getitem__, (['1', '2'],))
# The following is valid
a.__getitem__([])
class TestMultipleEllipsisError:
"""An index can only have a single ellipsis.
"""
def test_basic(self):
a = np.arange(10)
assert_raises(IndexError, lambda: a[..., ...])
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,))
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
class TestCApiAccess:
def test_getitem(self):
subscript = functools.partial(array_indexing, 0)
# 0-d arrays don't work:
assert_raises(IndexError, subscript, np.ones(()), 0)
# Out of bound values:
assert_raises(IndexError, subscript, np.ones(10), 11)
assert_raises(IndexError, subscript, np.ones(10), -11)
assert_raises(IndexError, subscript, np.ones((10, 10)), 11)
assert_raises(IndexError, subscript, np.ones((10, 10)), -11)
a = np.arange(10)
assert_array_equal(a[4], subscript(a, 4))
a = a.reshape(5, 2)
assert_array_equal(a[-4], subscript(a, -4))
def test_setitem(self):
assign = functools.partial(array_indexing, 1)
# Deletion is impossible:
assert_raises(ValueError, assign, np.ones(10), 0)
# 0-d arrays don't work:
assert_raises(IndexError, assign, np.ones(()), 0, 0)
# Out of bound values:
assert_raises(IndexError, assign, np.ones(10), 11, 0)
assert_raises(IndexError, assign, np.ones(10), -11, 0)
assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0)
assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0)
a = np.arange(10)
assign(a, 4, 10)
assert_(a[4] == 10)
a = a.reshape(5, 2)
assign(a, 4, 10)
assert_array_equal(a[-1], [10, 10])
|
grahame/ealgis
|
refs/heads/master
|
django/ealgis/tests/__init__.py
|
12133432
| |
omniscale/cartodb-wmsproxy
|
refs/heads/master
|
wmsproxy/test/__init__.py
|
12133432
| |
rhndg/openedx
|
refs/heads/master
|
cms/djangoapps/contentstore/features/__init__.py
|
12133432
| |
Sabayon/entropy
|
refs/heads/master
|
rigo/rigo/ui/gtk3/models/preferencesliststore.py
|
6
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2012 Fabio Erculiani
Authors:
Fabio Erculiani
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 3.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
from gi.repository import Gtk, GObject
class PreferencesListStore(Gtk.ListStore):
# NoticeBoard object
COL_TYPES = (GObject.TYPE_PYOBJECT,)
ICON_SIZE = 48
__gsignals__ = {
"redraw-request" : (GObject.SignalFlags.RUN_LAST,
None,
tuple(),
),
}
def __init__(self):
Gtk.ListStore.__init__(self)
self.set_column_types(self.COL_TYPES)
self.set_default_sort_func(self._sort, user_data=None)
self.set_sort_column_id(-1, Gtk.SortType.ASCENDING)
def _sort(self, model, iter1, iter2, user_data):
conf_a = model.get_value(iter1, 0)
conf_b = model.get_value(iter2, 0)
        # Gtk sort functions expect a negative, zero or positive integer
        prio_a, prio_b = conf_a.priority(), conf_b.priority()
        return (prio_a > prio_b) - (prio_a < prio_b)
|
citrix-openstack-build/python-openstackclient
|
refs/heads/master
|
openstackclient/tests/object/v1/lib/__init__.py
|
6
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
|
moreati/django
|
refs/heads/master
|
tests/template_tests/test_logging.py
|
210
|
from __future__ import unicode_literals
import logging
from django.template import Engine, Variable, VariableDoesNotExist
from django.test import SimpleTestCase
class TestHandler(logging.Handler):
def __init__(self):
super(TestHandler, self).__init__()
self.log_record = None
def emit(self, record):
self.log_record = record
class VariableResolveLoggingTests(SimpleTestCase):
def setUp(self):
self.test_handler = TestHandler()
self.logger = logging.getLogger('django.template')
self.original_level = self.logger.level
self.logger.addHandler(self.test_handler)
self.logger.setLevel(logging.DEBUG)
def tearDown(self):
self.logger.removeHandler(self.test_handler)
self.logger.level = self.original_level
def test_log_on_variable_does_not_exist_silent(self):
class TestObject(object):
class SilentDoesNotExist(Exception):
silent_variable_failure = True
@property
def template_name(self):
return "template"
@property
def template(self):
return Engine().from_string('')
@property
def article(self):
raise TestObject.SilentDoesNotExist("Attribute does not exist.")
def __iter__(self):
return iter(attr for attr in dir(TestObject) if attr[:2] != "__")
def __getitem__(self, item):
return self.__dict__[item]
Variable('article').resolve(TestObject())
self.assertEqual(
self.test_handler.log_record.msg,
'template - Attribute does not exist.'
)
def test_log_on_variable_does_not_exist_not_silent(self):
with self.assertRaises(VariableDoesNotExist):
Variable('article.author').resolve({'article': {'section': 'News'}})
self.assertEqual(
self.test_handler.log_record.msg,
'unknown - Failed lookup for key [author] in %r' %
("{%r: %r}" % ('section', 'News'), )
)
def test_no_log_when_variable_exists(self):
Variable('article.section').resolve({'article': {'section': 'News'}})
self.assertIsNone(self.test_handler.log_record)
|
stuartdrew/plugin.video.CHSPORT
|
refs/heads/master
|
_DMsearch.py
|
35
|
import urllib
import urllib2
import xbmcvfs
import os,xbmc,xbmcaddon,xbmcgui,re,xbmcplugin,sys
import json
import datetime
import time
addon = xbmcaddon.Addon('plugin.video.SimpleKore')
profile = xbmc.translatePath(addon.getAddonInfo('profile').decode('utf-8'))
cacheDir = os.path.join(profile, 'cachedir')
headers=dict({'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; rv:32.0) Gecko/20100101 Firefox/32.0'})
if not cacheDir.startswith(('smb://', 'nfs://', 'upnp://', 'ftp://')) and not os.path.isdir(cacheDir):
os.mkdir(cacheDir)
def addLink(url,name,iconimage,fanart,description,genre,date,showcontext,duration,total):
contextMenu = []
url = 'plugin://plugin.video.dailymotion_com/?mode=playVideo&url='+url
print 'adding link'
try:
name = name.encode('utf-8')
except: pass
ok = True
mode = '12'
contextMenu.append(('[COLOR white]!!Download Currently Playing!![/COLOR]','XBMC.RunPlugin(%s?url=%s&mode=21&name=%s)'
%(sys.argv[0], urllib.quote_plus(url), urllib.quote_plus(name))))
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)
if date == '':
date = None
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo(type="Video", infoLabels={ "Title": name, "Plot": description,"Aired": date, "Genre": genre, "Duration": duration })
liz.setProperty("Fanart_Image", fanart)
liz.setProperty('IsPlayable', 'true')
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,totalItems=total)
return ok
# Thanks to AddonScriptorde
#https://github.com/AddonScriptorDE/plugin.video.dailymotion_com/blob/master/default.py#L174
def listVideos(url):
content = cache(url,int(addon.getSetting("dmotion")))
content = json.loads(content)
count = 1
for item in content['list']:
id = item['id']
title = item['title'].encode('utf-8')
desc = item['description'].encode('utf-8')
duration = item['duration']
user = item['owner.username']
date = item['taken_time']
thumb = item['thumbnail_large_url']
views = item['views_total']
duration = str(int(duration)/60+1)
try:
date = datetime.datetime.fromtimestamp(int(date)).strftime('%Y-%m-%d')
except:
date = ""
temp = ("User: "+user+" | "+str(views)+" Views | "+date).encode('utf-8')
try:
desc = temp+"\n"+desc
except:
desc = ""
if user == "hulu":
pass
elif user == "cracklemovies":
pass
else:
addLink(id, title,thumb.replace("\\", ""),'', desc, user, date,'',duration, count)
count+=1
def re_me(data, re_pattern):
    m = re.search(re_pattern, data, re.I)
    if m is not None:
        return m.group(1)
    return ''
def notification(header="", message="", sleep=3000):
""" Will display a notification dialog with the specified header and message,
in addition you can set the length of time it displays in milliseconds and a icon image.
"""
xbmc.executebuiltin("XBMC.Notification(%s,%s,%i)" % ( header, message, sleep ))
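# Example (illustrative): notification("SimpleKore", "Cache cleared", 5000)
# shows the dialog for five seconds.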
def removeNonAscii(s): return "".join(filter(lambda x: ord(x)<128, s))
def makeRequest(url, referer=None):
    # Optionally add a Referer header, then fetch the URL with urllib2
    if referer:
        headers.update({'Referer': referer})
    req = urllib2.Request(url, None, headers)
    response = urllib2.urlopen(req)
    data = response.read()
    response.close()
    return data
# from AddonScriptorde X:\plugin.video.my_music_tv\default.py
def cache(url, duration=0):
cacheFile = os.path.join(cacheDir, (''.join(c for c in unicode(url, 'utf-8') if c not in '/\\:?"*|<>')).strip())
if os.path.exists(cacheFile) and duration!=0 and (time.time()-os.path.getmtime(cacheFile) < 60*60*24*duration):
fh = xbmcvfs.File(cacheFile, 'r')
content = fh.read()
fh.close()
return content
else:
content = makeRequest(url)
fh = xbmcvfs.File(cacheFile, 'w')
fh.write(content)
fh.close()
return content
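# Illustrative call chain (the URL below is a placeholder): listVideos() fetches
# Dailymotion JSON through cache(), which keys an on-disk file by the sanitised
# URL and only re-downloads once the file is older than `duration` days (taken
# from the add-on's "dmotion" setting).
#
#     listVideos("https://api.dailymotion.com/videos?fields=id,title&limit=20")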
|
sakshaat/noclick
|
refs/heads/master
|
custom_parser.py
|
1
|
from sumy.parsers.html import HtmlParser
from sumy.utils import _HTTP_HEADERS, fetch_url
from contextlib import closing
from custom_article import CustomArticle as Article
import requests, lxml.html
class CustomParser(HtmlParser):
    ''' Custom parser that allows omitting certain keywords and tweaks the original for our use '''
@classmethod
def from_url(cls, url, tokenizer):
data = fetch_url(url)
# form the lxml tree from the data
html = lxml.html.fromstring(data)
# find and store the title in the instance
title = html.find(".//title").text
return cls(data, tokenizer, url, title)
def __init__(self, html_content, tokenizer, url, title):
super(HtmlParser, self).__init__(tokenizer)
self._article = Article(html_content, url)
self._title = title
def get_title(self):
return self._title
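# Minimal usage sketch (illustrative; the URL is a placeholder and sumy's
# Tokenizer is assumed to be importable):
#
#     from sumy.nlp.tokenizers import Tokenizer
#     parser = CustomParser.from_url("http://example.com/article", Tokenizer("english"))
#     print(parser.get_title())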
|
ldoktor/autotest
|
refs/heads/master
|
client/kernel_versions_unittest.py
|
6
|
#!/usr/bin/python
import unittest
try:
import autotest.common as common
except ImportError:
import common
from autotest.client import kernel_versions
class kernel_versions_test(unittest.TestCase):
def increases(self, kernels):
for i in xrange(len(kernels)-1):
k1 = kernels[i]
k2 = kernels[i+1]
ek1 = kernel_versions.version_encode(k1)
ek2 = kernel_versions.version_encode(k2)
self.assertTrue(ek1 < ek2,
'%s (-> %s) should sort < %s (-> %s)'
% (k1, ek1, k2, ek2) )
def test_version_encode(self):
series1 = [
'2.6',
'2.6.0',
'2.6.1-rc1',
'2.6.1-rc1_fix',
'2.6.1-rc1_patch',
'2.6.1-rc9',
'2.6.1-rc9-mm1',
'2.6.1-rc9-mm2',
'2.6.1-rc10',
'2.6.1-rc98',
'2.6.1',
'2.6.1_patch',
'2.6.9',
'2.6.10',
'2.6.99',
'2.7',
'2.9.99',
'2.10.0',
'99.99.99',
'UNKNOWN',
]
self.increases(series1)
self.increases(['pathX'+k for k in series1])
series2 = [
'2.6.18-smp-220',
'2.6.18-smp-220.0',
'2.6.18-smp-220.1_rc1',
'2.6.18-smp-220.1_rc1_fix',
'2.6.18-smp-220.1_rc1_patch',
'2.6.18-smp-220.1_rc9',
'2.6.18-smp-220.1_rc9_mm1',
'2.6.18-smp-220.1_rc9_mm2',
'2.6.18-smp-220.1_rc10',
'2.6.18-smp-220.1_rc98',
'2.6.18-smp-220.1',
'2.6.18-smp-220.1_patch',
'2.6.18-smp-220.9',
'2.6.18-smp-220.10',
'2.6.18-smp-220.99',
'2.6.18-smp-221',
'UNKNOWN',
]
self.increases(series2)
self.increases(['pathX'+k for k in series2])
releases = ['2.6.1' , '2.6.18-smp-220.0' ]
candidates = ['2.6.1-rc1' , '2.6.18-smp-220.0_rc1']
experiments = ['2.6.1-patch', '2.6.1-rc1_patch', '2.6.18-smp-220.0_patch',
'UNKNOWN']
def test_is_released_kernel(self):
for v in self.releases:
self.assertTrue(kernel_versions.is_released_kernel(v))
for v in self.candidates + self.experiments:
self.assertFalse(kernel_versions.is_released_kernel(v))
def test_is_release_candidate(self):
for v in self.releases + self.candidates:
self.assertTrue(kernel_versions.is_release_candidate(v))
for v in self.experiments:
self.assertFalse(kernel_versions.is_release_candidate(v))
if __name__ == "__main__":
unittest.main()
|
stevenhwu/googletest
|
refs/heads/master
|
test/gtest_uninitialized_test.py
|
2901
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_uninitialized_test_')
def Assert(condition):
if not condition:
raise AssertionError
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def TestExitCodeAndOutput(command):
"""Runs the given command and verifies its exit code and output."""
# Verifies that 'command' exits with code 1.
p = gtest_test_utils.Subprocess(command)
Assert(p.exited)
AssertEq(1, p.exit_code)
Assert('InitGoogleTest' in p.output)
class GTestUninitializedTest(gtest_test_utils.TestCase):
def testExitCodeAndOutput(self):
TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
gtest_test_utils.Main()
|
jayceyxc/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/django/utils/importlib.py
|
105
|
# Taken from Python 2.7 with permission from/by the original author.
import sys
from django.utils import six
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in range(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
if six.PY3:
from importlib import import_module
else:
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
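# Illustrative examples of the behaviour documented above (not part of the
# original module):
#
#     import_module('django.utils')               # absolute import
#     import_module('.utils', package='django')   # relative, resolves to 'django.utils'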
|
Jgarcia-IAS/SAT
|
refs/heads/master
|
openerp/addons/web_gantt/__openerp__.py
|
387
|
{
'name': 'Web Gantt',
'category': 'Hidden',
'description': """
OpenERP Web Gantt chart view.
=============================
""",
'version': '2.0',
'depends': ['web'],
'data' : [
'views/web_gantt.xml',
],
'qweb': [
'static/src/xml/*.xml',
],
'auto_install': True
}
|
Servir-Mekong/ecodash
|
refs/heads/master
|
lib/ee/ee_list.py
|
9
|
#!/usr/bin/env python
"""A wrapper for lists."""
import apifunction
import computedobject
import ee_exception
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
class List(computedobject.ComputedObject):
"""An object to represent lists."""
_initialized = False
def __init__(self, arg):
"""Construct a list wrapper.
    This constructor accepts the following args:
1) A bare list.
2) A ComputedObject returning a list.
Args:
arg: The list to wrap.
Raises:
ee_exception.EEException: On bad input.
"""
self.initialize()
if isinstance(arg, (list, tuple)):
super(List, self).__init__(None, None)
self._list = arg
elif isinstance(arg, computedobject.ComputedObject):
super(List, self).__init__(arg.func, arg.args, arg.varName)
self._list = None
else:
raise ee_exception.EEException(
'Invalid argument specified for ee.List(): %s' % arg)
@classmethod
def initialize(cls):
"""Imports API functions to this class."""
if not cls._initialized:
apifunction.ApiFunction.importApi(cls, 'List', 'List')
cls._initialized = True
@classmethod
def reset(cls):
"""Removes imported API functions from this class."""
apifunction.ApiFunction.clearApi(cls)
cls._initialized = False
@staticmethod
def name():
return 'List'
def encode(self, opt_encoder=None):
if isinstance(self._list, (list, tuple)):
return [opt_encoder(elem) for elem in self._list]
else:
return super(List, self).encode(opt_encoder)
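# Usage sketch (illustrative; assumes the Earth Engine client library has been
# initialised so that initialize() can import the server-side 'List' functions;
# "some_computed_object" and "encoder" are placeholders):
#
#     numbers = List([1, 2, 3])                      # wraps a bare Python list
#     computed = List(some_computed_object)          # wraps a ComputedObject
#     encoded = numbers.encode(opt_encoder=encoder)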
|
piotroxp/scibibscan
|
refs/heads/master
|
scib/lib/python3.5/site-packages/astropy/io/fits/tests/test_image.py
|
1
|
# Licensed under a 3-clause BSD style license - see PYFITS.rst
from __future__ import division, with_statement
import math
import os
import time
import warnings
import numpy as np
from ....io import fits
from ....utils.exceptions import (AstropyDeprecationWarning,
AstropyPendingDeprecationWarning)
from ....tests.helper import pytest, raises, catch_warnings
from ..hdu.compressed import SUBTRACTIVE_DITHER_1, DITHER_SEED_CHECKSUM
from .test_table import comparerecords
from . import FitsTestCase
from .util import ignore_warnings
class TestImageFunctions(FitsTestCase):
def test_constructor_name_arg(self):
"""Like the test of the same name in test_table.py"""
hdu = fits.ImageHDU()
assert hdu.name == ''
assert 'EXTNAME' not in hdu.header
hdu.name = 'FOO'
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# Passing name to constructor
hdu = fits.ImageHDU(name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# And overriding a header with a different extname
hdr = fits.Header()
hdr['EXTNAME'] = 'EVENTS'
hdu = fits.ImageHDU(header=hdr, name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
"""
ifd = fits.HDUList(fits.PrimaryHDU())
phdr = ifd[0].header
phdr['FILENAME'] = 'labq01i3q_rawtag.fits'
primary_hdu = fits.PrimaryHDU(header=phdr)
ofd = fits.HDUList(primary_hdu)
ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits'
# Original header should be unchanged
assert phdr['FILENAME'] == 'labq01i3q_rawtag.fits'
@raises(ValueError)
def test_open(self):
# The function "open" reads a FITS file into an HDUList object. There
# are three modes to open: "readonly" (the default), "append", and
# "update".
# Open a file read-only (the default mode), the content of the FITS
# file are read into memory.
r = fits.open(self.data('test0.fits')) # readonly
        # data parts are lazily instantiated, so if we close the HDUList
        # without touching the data, the data cannot be accessed.
r.close()
r[1].data[:2, :2]
def test_open_2(self):
r = fits.open(self.data('test0.fits'))
info = ([(0, 'PRIMARY', 'PrimaryHDU', 138, (), '', '')] +
[(x, 'SCI', 'ImageHDU', 61, (40, 40), 'int16', '')
for x in range(1, 5)])
try:
assert r.info(output=False) == info
finally:
r.close()
def test_primary_with_extname(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/151
Tests that the EXTNAME keyword works with Primary HDUs as well, and
interacts properly with the .name attribute. For convenience
hdulist['PRIMARY'] will still refer to the first HDU even if it has an
EXTNAME not equal to 'PRIMARY'.
"""
prihdr = fits.Header([('EXTNAME', 'XPRIMARY'), ('EXTVER', 1)])
hdul = fits.HDUList([fits.PrimaryHDU(header=prihdr)])
assert 'EXTNAME' in hdul[0].header
assert hdul[0].name == 'XPRIMARY'
assert hdul[0].name == hdul[0].header['EXTNAME']
info = [(0, 'XPRIMARY', 'PrimaryHDU', 5, (), '', '')]
assert hdul.info(output=False) == info
assert hdul['PRIMARY'] is hdul['XPRIMARY']
assert hdul['PRIMARY'] is hdul[('XPRIMARY', 1)]
hdul[0].name = 'XPRIMARY2'
assert hdul[0].header['EXTNAME'] == 'XPRIMARY2'
hdul.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[0].name == 'XPRIMARY2'
@ignore_warnings(AstropyDeprecationWarning)
def test_io_manipulation(self):
# This legacy test also tests numerous deprecated interfaces for
# backwards compatibility
# Get a keyword value. An extension can be referred by name or by
# number. Both extension and keyword names are case insensitive.
with fits.open(self.data('test0.fits')) as r:
assert r['primary'].header['naxis'] == 0
assert r[0].header['naxis'] == 0
# If there are more than one extension with the same EXTNAME value,
# the EXTVER can be used (as the second argument) to distinguish
# the extension.
assert r['sci', 1].header['detector'] == 1
# append (using "update()") a new card
r[0].header['xxx'] = 1.234e56
assert (str(r[0].header.ascard[-3:]) ==
"EXPFLAG = 'NORMAL ' / Exposure interruption indicator \n"
"FILENAME= 'vtest3.fits' / File name \n"
"XXX = 1.234E+56 ")
# rename a keyword
r[0].header.rename_key('filename', 'fname')
pytest.raises(ValueError, r[0].header.rename_key, 'fname',
'history')
pytest.raises(ValueError, r[0].header.rename_key, 'fname',
'simple')
r[0].header.rename_key('fname', 'filename')
# get a subsection of data
assert np.array_equal(r[2].data[:3, :3],
np.array([[349, 349, 348],
[349, 349, 347],
[347, 350, 349]], dtype=np.int16))
# We can create a new FITS file by opening a new file with "append"
# mode.
with fits.open(self.temp('test_new.fits'), mode='append') as n:
# Append the primary header and the 2nd extension to the new
# file.
n.append(r[0])
n.append(r[2])
# The flush method will write the current HDUList object back
# to the newly created file on disk. The HDUList is still open
# and can be further operated.
n.flush()
assert n[1].data[1, 1] == 349
# modify a data point
n[1].data[1, 1] = 99
# When the file is closed, the most recent additions of
# extension(s) since last flush() will be appended, but any HDU
# already existed at the last flush will not be modified
del n
# If an existing file is opened with "append" mode, like the
# readonly mode, the HDU's will be read into the HDUList which can
# be modified in memory but can not be written back to the original
# file. A file opened with append mode can only add new HDU's.
os.rename(self.temp('test_new.fits'),
self.temp('test_append.fits'))
with fits.open(self.temp('test_append.fits'), mode='append') as a:
# The above change did not take effect since this was made
# after the flush().
assert a[1].data[1, 1] == 349
a.append(r[1])
del a
# When changes are made to an HDUList which was opened with
# "update" mode, they will be written back to the original file
# when a flush/close is called.
os.rename(self.temp('test_append.fits'),
self.temp('test_update.fits'))
with fits.open(self.temp('test_update.fits'), mode='update') as u:
# When the changes do not alter the size structures of the
# original (or since last flush) HDUList, the changes are
# written back "in place".
assert u[0].header['rootname'] == 'U2EQ0201T'
u[0].header['rootname'] = 'abc'
assert u[1].data[1, 1] == 349
u[1].data[1, 1] = 99
u.flush()
# If the changes affect the size structure, e.g. adding or
# deleting HDU(s), header was expanded or reduced beyond
# existing number of blocks (2880 bytes in each block), or
# change the data size, the HDUList is written to a temporary
# file, the original file is deleted, and the temporary file is
# renamed to the original file name and reopened in the update
# mode. To a user, these two kinds of updating writeback seem
# to be the same, unless the optional argument in flush or
# close is set to 1.
del u[2]
u.flush()
# the write method in HDUList class writes the current HDUList,
# with all changes made up to now, to a new file. This method
# works the same disregard the mode the HDUList was opened
# with.
u.append(r[3])
u.writeto(self.temp('test_new.fits'))
del u
# Another useful new HDUList method is readall. It will "touch" the
# data parts in all HDUs, so even if the HDUList is closed, we can
# still operate on the data.
with fits.open(self.data('test0.fits')) as r:
r.readall()
assert r[1].data[1, 1] == 315
# create an HDU with data only
data = np.ones((3, 5), dtype=np.float32)
hdu = fits.ImageHDU(data=data, name='SCI')
assert np.array_equal(hdu.data,
np.array([[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.]],
dtype=np.float32))
# create an HDU with header and data
# notice that the header has the right NAXIS's since it is constructed
# with ImageHDU
hdu2 = fits.ImageHDU(header=r[1].header, data=np.array([1, 2],
dtype='int32'))
assert (str(hdu2.header.ascard[1:5]) ==
"BITPIX = 32 / array data type \n"
"NAXIS = 1 / number of array dimensions \n"
"NAXIS1 = 2 \n"
"PCOUNT = 0 / number of parameters ")
def test_memory_mapping(self):
# memory mapping
f1 = fits.open(self.data('test0.fits'), memmap=1)
f1.close()
def test_verification_on_output(self):
# verification on output
# make a defect HDUList first
x = fits.ImageHDU()
hdu = fits.HDUList(x) # HDUList can take a list or one single HDU
with catch_warnings() as w:
hdu.verify()
text = "HDUList's 0th element is not a primary HDU."
assert len(w) == 3
assert text in str(w[1].message)
with catch_warnings() as w:
hdu.writeto(self.temp('test_new2.fits'), 'fix')
text = ("HDUList's 0th element is not a primary HDU. "
"Fixed by inserting one as 0th HDU.")
assert len(w) == 3
assert text in str(w[1].message)
def test_section(self):
# section testing
fs = fits.open(self.data('arange.fits'))
assert np.array_equal(fs[0].section[3, 2, 5], 357)
assert np.array_equal(
fs[0].section[3, 2, :],
np.array([352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362]))
assert np.array_equal(fs[0].section[3, 2, 4:],
np.array([356, 357, 358, 359, 360, 361, 362]))
assert np.array_equal(fs[0].section[3, 2, :8],
np.array([352, 353, 354, 355, 356, 357, 358, 359]))
assert np.array_equal(fs[0].section[3, 2, -8:8],
np.array([355, 356, 357, 358, 359]))
assert np.array_equal(
fs[0].section[3, 2:5, :],
np.array([[352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362],
[363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373],
[374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384]]))
assert np.array_equal(fs[0].section[3, :, :][:3, :3],
np.array([[330, 331, 332],
[341, 342, 343],
[352, 353, 354]]))
dat = fs[0].data
assert np.array_equal(fs[0].section[3, 2:5, :8], dat[3, 2:5, :8])
assert np.array_equal(fs[0].section[3, 2:5, 3], dat[3, 2:5, 3])
assert np.array_equal(fs[0].section[3:6, :, :][:3, :3, :3],
np.array([[[330, 331, 332],
[341, 342, 343],
[352, 353, 354]],
[[440, 441, 442],
[451, 452, 453],
[462, 463, 464]],
[[550, 551, 552],
[561, 562, 563],
[572, 573, 574]]]))
assert np.array_equal(fs[0].section[:, :, :][:3, :2, :2],
np.array([[[0, 1],
[11, 12]],
[[110, 111],
[121, 122]],
[[220, 221],
[231, 232]]]))
assert np.array_equal(fs[0].section[:, 2, :], dat[:, 2, :])
assert np.array_equal(fs[0].section[:, 2:5, :], dat[:, 2:5, :])
assert np.array_equal(fs[0].section[3:6, 3, :], dat[3:6, 3, :])
assert np.array_equal(fs[0].section[3:6, 3:7, :], dat[3:6, 3:7, :])
assert np.array_equal(fs[0].section[:, ::2], dat[:, ::2])
assert np.array_equal(fs[0].section[:, [1, 2, 4], 3],
dat[:, [1, 2, 4], 3])
assert np.array_equal(
fs[0].section[:, np.array([True, False, True]), :],
dat[:, np.array([True, False, True]), :])
assert np.array_equal(
fs[0].section[3:6, 3, :, ...], dat[3:6, 3, :, ...])
assert np.array_equal(fs[0].section[..., ::2], dat[..., ::2])
assert np.array_equal(fs[0].section[..., [1, 2, 4], 3],
dat[..., [1, 2, 4], 3])
def test_section_data_single(self):
a = np.array([1])
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
sec = hdul[0].section
dat = hdul[0].data
assert np.array_equal(sec[0], dat[0])
assert np.array_equal(sec[...], dat[...])
assert np.array_equal(sec[..., 0], dat[..., 0])
assert np.array_equal(sec[0, ...], dat[0, ...])
def test_section_data_square(self):
a = np.arange(4).reshape((2, 2))
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
def test_section_data_cube(self):
a = np.arange(18).reshape((2, 3, 3))
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
        # TODO: Generate these permutations instead of having them all written
        # out, yeesh!
assert (d.section[:, :, :] == dat[:, :, :]).all()
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[:] == dat[:]).all()
assert (d.section[0, :, :] == dat[0, :, :]).all()
assert (d.section[1, :, :] == dat[1, :, :]).all()
assert (d.section[0, 0, :] == dat[0, 0, :]).all()
assert (d.section[0, 1, :] == dat[0, 1, :]).all()
assert (d.section[0, 2, :] == dat[0, 2, :]).all()
assert (d.section[1, 0, :] == dat[1, 0, :]).all()
assert (d.section[1, 1, :] == dat[1, 1, :]).all()
assert (d.section[1, 2, :] == dat[1, 2, :]).all()
assert (d.section[0, 0, 0] == dat[0, 0, 0]).all()
assert (d.section[0, 0, 1] == dat[0, 0, 1]).all()
assert (d.section[0, 0, 2] == dat[0, 0, 2]).all()
assert (d.section[0, 1, 0] == dat[0, 1, 0]).all()
assert (d.section[0, 1, 1] == dat[0, 1, 1]).all()
assert (d.section[0, 1, 2] == dat[0, 1, 2]).all()
assert (d.section[0, 2, 0] == dat[0, 2, 0]).all()
assert (d.section[0, 2, 1] == dat[0, 2, 1]).all()
assert (d.section[0, 2, 2] == dat[0, 2, 2]).all()
assert (d.section[1, 0, 0] == dat[1, 0, 0]).all()
assert (d.section[1, 0, 1] == dat[1, 0, 1]).all()
assert (d.section[1, 0, 2] == dat[1, 0, 2]).all()
assert (d.section[1, 1, 0] == dat[1, 1, 0]).all()
assert (d.section[1, 1, 1] == dat[1, 1, 1]).all()
assert (d.section[1, 1, 2] == dat[1, 1, 2]).all()
assert (d.section[1, 2, 0] == dat[1, 2, 0]).all()
assert (d.section[1, 2, 1] == dat[1, 2, 1]).all()
assert (d.section[1, 2, 2] == dat[1, 2, 2]).all()
assert (d.section[:, 0, 0] == dat[:, 0, 0]).all()
assert (d.section[:, 0, 1] == dat[:, 0, 1]).all()
assert (d.section[:, 0, 2] == dat[:, 0, 2]).all()
assert (d.section[:, 1, 0] == dat[:, 1, 0]).all()
assert (d.section[:, 1, 1] == dat[:, 1, 1]).all()
assert (d.section[:, 1, 2] == dat[:, 1, 2]).all()
assert (d.section[:, 2, 0] == dat[:, 2, 0]).all()
assert (d.section[:, 2, 1] == dat[:, 2, 1]).all()
assert (d.section[:, 2, 2] == dat[:, 2, 2]).all()
assert (d.section[0, :, 0] == dat[0, :, 0]).all()
assert (d.section[0, :, 1] == dat[0, :, 1]).all()
assert (d.section[0, :, 2] == dat[0, :, 2]).all()
assert (d.section[1, :, 0] == dat[1, :, 0]).all()
assert (d.section[1, :, 1] == dat[1, :, 1]).all()
assert (d.section[1, :, 2] == dat[1, :, 2]).all()
assert (d.section[:, :, 0] == dat[:, :, 0]).all()
assert (d.section[:, :, 1] == dat[:, :, 1]).all()
assert (d.section[:, :, 2] == dat[:, :, 2]).all()
assert (d.section[:, 0, :] == dat[:, 0, :]).all()
assert (d.section[:, 1, :] == dat[:, 1, :]).all()
assert (d.section[:, 2, :] == dat[:, 2, :]).all()
assert (d.section[:, :, 0:1] == dat[:, :, 0:1]).all()
assert (d.section[:, :, 0:2] == dat[:, :, 0:2]).all()
assert (d.section[:, :, 0:3] == dat[:, :, 0:3]).all()
assert (d.section[:, :, 1:2] == dat[:, :, 1:2]).all()
assert (d.section[:, :, 1:3] == dat[:, :, 1:3]).all()
assert (d.section[:, :, 2:3] == dat[:, :, 2:3]).all()
assert (d.section[0:1, 0:1, 0:1] == dat[0:1, 0:1, 0:1]).all()
assert (d.section[0:1, 0:1, 0:2] == dat[0:1, 0:1, 0:2]).all()
assert (d.section[0:1, 0:1, 0:3] == dat[0:1, 0:1, 0:3]).all()
assert (d.section[0:1, 0:1, 1:2] == dat[0:1, 0:1, 1:2]).all()
assert (d.section[0:1, 0:1, 1:3] == dat[0:1, 0:1, 1:3]).all()
assert (d.section[0:1, 0:1, 2:3] == dat[0:1, 0:1, 2:3]).all()
assert (d.section[0:1, 0:2, 0:1] == dat[0:1, 0:2, 0:1]).all()
assert (d.section[0:1, 0:2, 0:2] == dat[0:1, 0:2, 0:2]).all()
assert (d.section[0:1, 0:2, 0:3] == dat[0:1, 0:2, 0:3]).all()
assert (d.section[0:1, 0:2, 1:2] == dat[0:1, 0:2, 1:2]).all()
assert (d.section[0:1, 0:2, 1:3] == dat[0:1, 0:2, 1:3]).all()
assert (d.section[0:1, 0:2, 2:3] == dat[0:1, 0:2, 2:3]).all()
assert (d.section[0:1, 0:3, 0:1] == dat[0:1, 0:3, 0:1]).all()
assert (d.section[0:1, 0:3, 0:2] == dat[0:1, 0:3, 0:2]).all()
assert (d.section[0:1, 0:3, 0:3] == dat[0:1, 0:3, 0:3]).all()
assert (d.section[0:1, 0:3, 1:2] == dat[0:1, 0:3, 1:2]).all()
assert (d.section[0:1, 0:3, 1:3] == dat[0:1, 0:3, 1:3]).all()
assert (d.section[0:1, 0:3, 2:3] == dat[0:1, 0:3, 2:3]).all()
assert (d.section[0:1, 1:2, 0:1] == dat[0:1, 1:2, 0:1]).all()
assert (d.section[0:1, 1:2, 0:2] == dat[0:1, 1:2, 0:2]).all()
assert (d.section[0:1, 1:2, 0:3] == dat[0:1, 1:2, 0:3]).all()
assert (d.section[0:1, 1:2, 1:2] == dat[0:1, 1:2, 1:2]).all()
assert (d.section[0:1, 1:2, 1:3] == dat[0:1, 1:2, 1:3]).all()
assert (d.section[0:1, 1:2, 2:3] == dat[0:1, 1:2, 2:3]).all()
assert (d.section[0:1, 1:3, 0:1] == dat[0:1, 1:3, 0:1]).all()
assert (d.section[0:1, 1:3, 0:2] == dat[0:1, 1:3, 0:2]).all()
assert (d.section[0:1, 1:3, 0:3] == dat[0:1, 1:3, 0:3]).all()
assert (d.section[0:1, 1:3, 1:2] == dat[0:1, 1:3, 1:2]).all()
assert (d.section[0:1, 1:3, 1:3] == dat[0:1, 1:3, 1:3]).all()
assert (d.section[0:1, 1:3, 2:3] == dat[0:1, 1:3, 2:3]).all()
assert (d.section[1:2, 0:1, 0:1] == dat[1:2, 0:1, 0:1]).all()
assert (d.section[1:2, 0:1, 0:2] == dat[1:2, 0:1, 0:2]).all()
assert (d.section[1:2, 0:1, 0:3] == dat[1:2, 0:1, 0:3]).all()
assert (d.section[1:2, 0:1, 1:2] == dat[1:2, 0:1, 1:2]).all()
assert (d.section[1:2, 0:1, 1:3] == dat[1:2, 0:1, 1:3]).all()
assert (d.section[1:2, 0:1, 2:3] == dat[1:2, 0:1, 2:3]).all()
assert (d.section[1:2, 0:2, 0:1] == dat[1:2, 0:2, 0:1]).all()
assert (d.section[1:2, 0:2, 0:2] == dat[1:2, 0:2, 0:2]).all()
assert (d.section[1:2, 0:2, 0:3] == dat[1:2, 0:2, 0:3]).all()
assert (d.section[1:2, 0:2, 1:2] == dat[1:2, 0:2, 1:2]).all()
assert (d.section[1:2, 0:2, 1:3] == dat[1:2, 0:2, 1:3]).all()
assert (d.section[1:2, 0:2, 2:3] == dat[1:2, 0:2, 2:3]).all()
assert (d.section[1:2, 0:3, 0:1] == dat[1:2, 0:3, 0:1]).all()
assert (d.section[1:2, 0:3, 0:2] == dat[1:2, 0:3, 0:2]).all()
assert (d.section[1:2, 0:3, 0:3] == dat[1:2, 0:3, 0:3]).all()
assert (d.section[1:2, 0:3, 1:2] == dat[1:2, 0:3, 1:2]).all()
assert (d.section[1:2, 0:3, 1:3] == dat[1:2, 0:3, 1:3]).all()
assert (d.section[1:2, 0:3, 2:3] == dat[1:2, 0:3, 2:3]).all()
assert (d.section[1:2, 1:2, 0:1] == dat[1:2, 1:2, 0:1]).all()
assert (d.section[1:2, 1:2, 0:2] == dat[1:2, 1:2, 0:2]).all()
assert (d.section[1:2, 1:2, 0:3] == dat[1:2, 1:2, 0:3]).all()
assert (d.section[1:2, 1:2, 1:2] == dat[1:2, 1:2, 1:2]).all()
assert (d.section[1:2, 1:2, 1:3] == dat[1:2, 1:2, 1:3]).all()
assert (d.section[1:2, 1:2, 2:3] == dat[1:2, 1:2, 2:3]).all()
assert (d.section[1:2, 1:3, 0:1] == dat[1:2, 1:3, 0:1]).all()
assert (d.section[1:2, 1:3, 0:2] == dat[1:2, 1:3, 0:2]).all()
assert (d.section[1:2, 1:3, 0:3] == dat[1:2, 1:3, 0:3]).all()
assert (d.section[1:2, 1:3, 1:2] == dat[1:2, 1:3, 1:2]).all()
assert (d.section[1:2, 1:3, 1:3] == dat[1:2, 1:3, 1:3]).all()
assert (d.section[1:2, 1:3, 2:3] == dat[1:2, 1:3, 2:3]).all()
def test_section_data_four(self):
a = np.arange(256).reshape((4, 4, 4, 4))
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :, :, :] == dat[:, :, :, :]).all()
assert (d.section[:, :, :] == dat[:, :, :]).all()
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[:] == dat[:]).all()
assert (d.section[0, :, :, :] == dat[0, :, :, :]).all()
assert (d.section[0, :, 0, :] == dat[0, :, 0, :]).all()
assert (d.section[:, :, 0, :] == dat[:, :, 0, :]).all()
assert (d.section[:, 1, 0, :] == dat[:, 1, 0, :]).all()
assert (d.section[:, :, :, 1] == dat[:, :, :, 1]).all()
def test_section_data_scaled(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/143
This is like test_section_data_square but uses a file containing scaled
image data, to test that sections can work correctly with scaled data.
"""
hdul = fits.open(self.data('scale.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
# Test without having accessed the full data first
hdul = fits.open(self.data('scale.fits'))
d = hdul[0]
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
assert not d._data_loaded
def test_do_not_scale_image_data(self):
hdul = fits.open(self.data('scale.fits'), do_not_scale_image_data=True)
assert hdul[0].data.dtype == np.dtype('>i2')
hdul = fits.open(self.data('scale.fits'))
assert hdul[0].data.dtype == np.dtype('float32')
def test_append_uint_data(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/56
(BZERO and BSCALE added in the wrong location when appending scaled
data)
"""
fits.writeto(self.temp('test_new.fits'), data=np.array([],
dtype='uint8'))
d = np.zeros([100, 100]).astype('uint16')
fits.append(self.temp('test_new.fits'), data=d)
f = fits.open(self.temp('test_new.fits'), uint=True)
assert f[1].data.dtype == 'uint16'
def test_uint_header_consistency(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2305
This ensures that an HDU containing unsigned integer data always has
        the appropriate BZERO value in its header.
"""
for int_size in (16, 32, 64):
# Just make an array of some unsigned ints that wouldn't fit in a
# signed int array of the same bit width
max_uint = (2 ** int_size) - 1
if int_size == 64:
# Otherwise may get an overflow error, at least on Python 2
                max_uint = np.uint64(max_uint)
dtype = 'uint%d' % int_size
arr = np.empty(100, dtype=dtype)
arr.fill(max_uint)
arr -= np.arange(100, dtype=dtype)
uint_hdu = fits.PrimaryHDU(data=arr)
assert np.all(uint_hdu.data == arr)
assert uint_hdu.data.dtype.name == 'uint%d' % int_size
assert 'BZERO' in uint_hdu.header
assert uint_hdu.header['BZERO'] == (2 ** (int_size - 1))
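            # (FITS has no native unsigned integer types: the data are stored as
            # signed integers offset by BZERO, e.g. uint16 becomes int16 with
            # BZERO = 32768.)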
filename = 'uint%d.fits' % int_size
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename), uint=True) as hdul:
new_uint_hdu = hdul[0]
assert np.all(new_uint_hdu.data == arr)
assert new_uint_hdu.data.dtype.name == 'uint%d' % int_size
assert 'BZERO' in new_uint_hdu.header
assert new_uint_hdu.header['BZERO'] == (2 ** (int_size - 1))
def test_blanks(self):
"""Test image data with blank spots in it (which should show up as
        NaNs in the data array).
"""
arr = np.zeros((10, 10), dtype=np.int32)
# One row will be blanks
arr[1] = 999
hdu = fits.ImageHDU(data=arr)
hdu.header['BLANK'] = 999
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
assert np.isnan(hdul[1].data[1]).all()
def test_invalid_blanks(self):
"""
Test that invalid use of the BLANK keyword leads to an appropriate
warning, and that the BLANK keyword is ignored when returning the
HDU data.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
arr = np.arange(5, dtype=np.float64)
hdu = fits.PrimaryHDU(data=arr)
hdu.header['BLANK'] = 2
with catch_warnings() as w:
hdu.writeto(self.temp('test_new.fits'))
# Allow the HDU to be written, but there should be a warning
            # when writing a header with BLANK when the data is not
# int
assert len(w) == 1
assert "Invalid 'BLANK' keyword in header" in str(w[0].message)
# Should also get a warning when opening the file, and the BLANK
# value should not be applied
with catch_warnings() as w:
with fits.open(self.temp('test_new.fits')) as h:
assert len(w) == 1
assert "Invalid 'BLANK' keyword in header" in str(w[0].message)
assert np.all(arr == h[0].data)
def test_scale_back_with_blanks(self):
"""
Test that when auto-rescaling integer data with "blank" values (where
the blanks are replaced by NaN in the float data), that the "BLANK"
keyword is removed from the header.
Further, test that when using the ``scale_back=True`` option the blank
values are restored properly.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
# Make the sample file
arr = np.arange(5, dtype=np.int32)
hdu = fits.PrimaryHDU(data=arr)
hdu.scale('int16', bscale=1.23)
# Creating data that uses BLANK is currently kludgy--a separate issue
# TODO: Rewrite this test when scaling with blank support is better
# supported
# Let's just add a value to the data that should be converted to NaN
# when it is read back in:
hdu.data[0] = 9999
hdu.header['BLANK'] = 9999
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
data = hdul[0].data
assert np.isnan(data[0])
hdul.writeto(self.temp('test2.fits'))
# Now reopen the newly written file. It should not have a 'BLANK'
# keyword
with catch_warnings() as w:
with fits.open(self.temp('test2.fits')) as hdul2:
assert len(w) == 0
assert 'BLANK' not in hdul2[0].header
data = hdul2[0].data
assert np.isnan(data[0])
# Finally, test that scale_back keeps the BLANKs correctly
with fits.open(self.temp('test.fits'), scale_back=True,
mode='update') as hdul3:
data = hdul3[0].data
assert np.isnan(data[0])
with fits.open(self.temp('test.fits'),
do_not_scale_image_data=True) as hdul4:
assert hdul4[0].header['BLANK'] == 9999
assert hdul4[0].header['BSCALE'] == 1.23
assert hdul4[0].data[0] == 9999
def test_bzero_with_floats(self):
"""Test use of the BZERO keyword in an image HDU containing float
data.
"""
arr = np.zeros((10, 10)) - 1
hdu = fits.ImageHDU(data=arr)
hdu.header['BZERO'] = 1.0
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
arr += 1
assert (hdul[1].data == arr).all()
def test_rewriting_large_scaled_image(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 and
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/101
"""
hdul = fits.open(self.data('fixed-1890.fits'))
orig_data = hdul[0].data
with ignore_warnings():
hdul.writeto(self.temp('test_new.fits'), clobber=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.data('fixed-1890.fits'))
with ignore_warnings():
hdul.writeto(self.temp('test_new.fits'), clobber=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.data('fixed-1890.fits'),
do_not_scale_image_data=True)
hdul.writeto(self.temp('test_new.fits'), clobber=True,
output_verify='silentfix')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
orig_data = hdul[0].data
hdul.close()
hdul = fits.open(self.temp('test_new.fits'), mode='update')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul = fits.open(self.temp('test_new.fits'))
hdul.close()
def test_image_update_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/105
        Replacing the original header of an image HDU and saving should update
the NAXISn keywords appropriately and save the image data correctly.
"""
# Copy the original file before saving to it
self.copy_file('test0.fits')
with fits.open(self.temp('test0.fits'), mode='update') as hdul:
orig_data = hdul[1].data.copy()
hdr_copy = hdul[1].header.copy()
del hdr_copy['NAXIS*']
hdul[1].header = hdr_copy
with fits.open(self.temp('test0.fits')) as hdul:
assert (orig_data == hdul[1].data).all()
def test_open_scaled_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/119
(Don't update scaled image data if the data is not read)
This ensures that merely opening and closing a file containing scaled
image data does not cause any change to the data (or the header).
Changes should only occur if the data is accessed.
"""
# Copy the original file before making any possible changes to it
self.copy_file('scale.fits')
mtime = os.stat(self.temp('scale.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('scale.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('scale.fits')).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp('scale.fits'), 'update')
orig_data = hdul[0].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp('scale.fits')).st_mtime
hdul = fits.open(self.temp('scale.fits'), mode='update')
assert hdul[0].data.dtype == np.dtype('>f4')
assert hdul[0].header['BITPIX'] == -32
assert 'BZERO' not in hdul[0].header
assert 'BSCALE' not in hdul[0].header
assert (orig_data == hdul[0].data).all()
# Try reshaping the data, then closing and reopening the file; let's
        # see if all the changes are preserved properly
hdul[0].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp('scale.fits'))
assert hdul[0].shape == (42, 10)
assert hdul[0].data.dtype == np.dtype('>f4')
assert hdul[0].header['BITPIX'] == -32
assert 'BZERO' not in hdul[0].header
assert 'BSCALE' not in hdul[0].header
def test_scale_back(self):
"""A simple test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/120
The scale_back feature for image HDUs.
"""
self.copy_file('scale.fits')
with fits.open(self.temp('scale.fits'), mode='update',
scale_back=True) as hdul:
orig_bitpix = hdul[0].header['BITPIX']
orig_bzero = hdul[0].header['BZERO']
orig_bscale = hdul[0].header['BSCALE']
orig_data = hdul[0].data.copy()
hdul[0].data[0] = 0
with fits.open(self.temp('scale.fits'),
do_not_scale_image_data=True) as hdul:
assert hdul[0].header['BITPIX'] == orig_bitpix
assert hdul[0].header['BZERO'] == orig_bzero
assert hdul[0].header['BSCALE'] == orig_bscale
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[0].data[0] == zero_point).all()
with fits.open(self.temp('scale.fits')) as hdul:
assert (hdul[0].data[1:] == orig_data[1:]).all()
def test_image_none(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data('test0.fits')) as h:
h[1].data
h[1].data = None
h[1].writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].data is None
assert h[1].header['NAXIS'] == 0
assert 'NAXIS1' not in h[1].header
assert 'NAXIS2' not in h[1].header
def test_invalid_blank(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2711
If the BLANK keyword contains an invalid value it should be ignored for
any calculations (though a warning should be issued).
"""
data = np.arange(100, dtype=np.float64)
hdu = fits.PrimaryHDU(data)
hdu.header['BLANK'] = 'nan'
hdu.writeto(self.temp('test.fits'))
with catch_warnings() as w:
with fits.open(self.temp('test.fits')) as hdul:
assert np.all(hdul[0].data == data)
assert len(w) == 2
msg = "Invalid value for 'BLANK' keyword in header"
assert msg in str(w[0].message)
msg = "Invalid 'BLANK' keyword"
assert msg in str(w[1].message)
def test_scaled_image_fromfile(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2710
"""
# Make some sample data
a = np.arange(100, dtype=np.float32)
hdu = fits.PrimaryHDU(data=a.copy())
hdu.scale(bscale=1.1)
hdu.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
file_data = f.read()
hdul = fits.HDUList.fromstring(file_data)
assert np.allclose(hdul[0].data, a)
class TestCompressedImage(FitsTestCase):
def test_empty(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2595
"""
hdu = fits.CompImageHDU()
assert hdu.data is None
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode='update') as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert hdul[1].data is None
# Now test replacing the empty data with an array and see what
# happens
hdul[1].data = np.arange(100, dtype=np.int32)
with fits.open(self.temp('test.fits')) as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert np.all(hdul[1].data == np.arange(100, dtype=np.int32))
@pytest.mark.parametrize(
('data', 'compression_type', 'quantize_level', 'byte_order'),
sum([[(np.zeros((2, 10, 10), dtype=np.float32), 'RICE_1', 16, bo),
(np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_1', -0.01, bo),
(np.zeros((100, 100)) + 1, 'HCOMPRESS_1', 16, bo)]
for bo in ('<', '>')], []))
def test_comp_image(self, data, compression_type, quantize_level,
byte_order):
data = data.newbyteorder(byte_order)
primary_hdu = fits.PrimaryHDU()
ofd = fits.HDUList(primary_hdu)
chdu = fits.CompImageHDU(data, name='SCI',
compressionType=compression_type,
quantizeLevel=quantize_level)
ofd.append(chdu)
ofd.writeto(self.temp('test_new.fits'), clobber=True)
ofd.close()
with fits.open(self.temp('test_new.fits')) as fd:
assert (fd[1].data == data).all()
assert fd[1].header['NAXIS'] == chdu.header['NAXIS']
assert fd[1].header['NAXIS1'] == chdu.header['NAXIS1']
assert fd[1].header['NAXIS2'] == chdu.header['NAXIS2']
assert fd[1].header['BITPIX'] == chdu.header['BITPIX']
@ignore_warnings(AstropyPendingDeprecationWarning)
def test_comp_image_hcompression_1_invalid_data(self):
"""
Tests compression with the HCOMPRESS_1 algorithm with data that is
not 2D and has a non-2D tile size.
"""
pytest.raises(ValueError, fits.CompImageHDU,
np.zeros((2, 10, 10), dtype=np.float32), name='SCI',
compressionType='HCOMPRESS_1', quantizeLevel=16,
tileSize=[2, 10, 10])
@ignore_warnings(AstropyPendingDeprecationWarning)
def test_comp_image_hcompress_image_stack(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/171
Tests that data containing more than two dimensions can be
compressed with HCOMPRESS_1 so long as the user-supplied tile size can
be flattened to two dimensions.
"""
cube = np.arange(300, dtype=np.float32).reshape((3, 10, 10))
hdu = fits.CompImageHDU(data=cube, name='SCI',
compressionType='HCOMPRESS_1',
quantizeLevel=16, tileSize=[5, 5, 1])
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert (hdul['SCI'].data == cube).all()
def test_subtractive_dither_seed(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/32
Ensure that when floating point data is compressed with the
SUBTRACTIVE_DITHER_1 quantization method that the correct ZDITHER0 seed
is added to the header, and that the data can be correctly
decompressed.
"""
array = np.arange(100.0).reshape(10, 10)
csum = (array[0].view('uint8').sum() % 10000) + 1
hdu = fits.CompImageHDU(data=array,
quantize_method=SUBTRACTIVE_DITHER_1,
dither_seed=DITHER_SEED_CHECKSUM)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
assert 'ZQUANTIZ' in hdul[1]._header
assert hdul[1]._header['ZQUANTIZ'] == 'SUBTRACTIVE_DITHER_1'
assert 'ZDITHER0' in hdul[1]._header
assert hdul[1]._header['ZDITHER0'] == csum
assert np.all(hdul[1].data == array)
def test_disable_image_compression(self):
with catch_warnings():
# No warnings should be displayed in this case
warnings.simplefilter('error')
with fits.open(self.data('comp.fits'),
disable_image_compression=True) as hdul:
# The compressed image HDU should show up as a BinTableHDU, but
# *not* a CompImageHDU
assert isinstance(hdul[1], fits.BinTableHDU)
assert not isinstance(hdul[1], fits.CompImageHDU)
with fits.open(self.data('comp.fits')) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
def test_open_comp_image_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/167
Similar to test_open_scaled_in_update_mode(), but specifically for
compressed images.
"""
# Copy the original file before making any possible changes to it
self.copy_file('comp.fits')
mtime = os.stat(self.temp('comp.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('comp.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('comp.fits')).st_mtime
def test_open_scaled_in_update_mode_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 2
Identical to test_open_scaled_in_update_mode() but with a compressed
version of the scaled image.
"""
# Copy+compress the original file before making any possible changes to
# it
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('scale.fits'))
mtime = os.stat(self.temp('scale.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('scale.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('scale.fits')).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp('scale.fits'), 'update')
hdul[1].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp('scale.fits')).st_mtime
hdul = fits.open(self.temp('scale.fits'), mode='update')
assert hdul[1].data.dtype == np.dtype('float32')
assert hdul[1].header['BITPIX'] == -32
assert 'BZERO' not in hdul[1].header
assert 'BSCALE' not in hdul[1].header
# Try reshaping the data, then closing and reopening the file; let's
        # see if all the changes are preserved properly
hdul[1].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp('scale.fits'))
assert hdul[1].shape == (42, 10)
assert hdul[1].data.dtype == np.dtype('float32')
assert hdul[1].header['BITPIX'] == -32
assert 'BZERO' not in hdul[1].header
assert 'BSCALE' not in hdul[1].header
def test_write_comp_hdu_direct_from_existing(self):
with fits.open(self.data('comp.fits')) as hdul:
hdul[1].writeto(self.temp('test.fits'))
with fits.open(self.data('comp.fits')) as hdul1:
with fits.open(self.temp('test.fits')) as hdul2:
assert np.all(hdul1[1].data == hdul2[1].data)
assert comparerecords(hdul1[1].compressed_data,
hdul2[1].compressed_data)
def test_rewriting_large_scaled_image_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 1
Identical to test_rewriting_large_scaled_image() but with a compressed
image.
"""
with fits.open(self.data('fixed-1890.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('fixed-1890-z.fits'))
hdul = fits.open(self.temp('fixed-1890-z.fits'))
orig_data = hdul[1].data
with ignore_warnings():
hdul.writeto(self.temp('test_new.fits'), clobber=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.temp('fixed-1890-z.fits'))
with ignore_warnings():
hdul.writeto(self.temp('test_new.fits'), clobber=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.temp('fixed-1890-z.fits'),
do_not_scale_image_data=True)
hdul.writeto(self.temp('test_new.fits'), clobber=True,
output_verify='silentfix')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
orig_data = hdul[1].data
hdul.close()
hdul = fits.open(self.temp('test_new.fits'), mode='update')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul = fits.open(self.temp('test_new.fits'))
hdul.close()
def test_scale_back_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 3
Identical to test_scale_back() but uses a compressed image.
"""
# Create a compressed version of the scaled image
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('scale.fits'))
with fits.open(self.temp('scale.fits'), mode='update',
scale_back=True) as hdul:
orig_bitpix = hdul[1].header['BITPIX']
orig_bzero = hdul[1].header['BZERO']
orig_bscale = hdul[1].header['BSCALE']
orig_data = hdul[1].data.copy()
hdul[1].data[0] = 0
with fits.open(self.temp('scale.fits'),
do_not_scale_image_data=True) as hdul:
assert hdul[1].header['BITPIX'] == orig_bitpix
assert hdul[1].header['BZERO'] == orig_bzero
assert hdul[1].header['BSCALE'] == orig_bscale
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[1].data[0] == zero_point).all()
with fits.open(self.temp('scale.fits')) as hdul:
assert (hdul[1].data[1:] == orig_data[1:]).all()
# Extra test to ensure that after everything the data is still the
# same as in the original uncompressed version of the image
with fits.open(self.data('scale.fits')) as hdul2:
# Recall we made the same modification to the data in hdul
# above
hdul2[0].data[0] = 0
assert (hdul[1].data == hdul2[0].data).all()
def test_lossless_gzip_compression(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/198"""
noise = np.random.normal(size=(1000, 1000))
chdu1 = fits.CompImageHDU(data=noise, compressionType='GZIP_1')
# First make a test image with lossy compression and make sure it
# wasn't compressed perfectly. This shouldn't happen ever, but just to
        # make sure the test is non-trivial.
chdu1.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert np.abs(noise - h[1].data).max() > 0.0
del h
chdu2 = fits.CompImageHDU(data=noise, compressionType='GZIP_1',
quantizeLevel=0.0) # No quantization
with ignore_warnings():
chdu2.writeto(self.temp('test.fits'), clobber=True)
with fits.open(self.temp('test.fits')) as h:
assert (noise == h[1].data).all()
def test_compression_column_tforms(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/199"""
# Some interestingly tiled data so that some of it is quantized and
# some of it ends up just getting gzip-compressed
data2 = ((np.arange(1, 8, dtype=np.float32) * 10)[:, np.newaxis] +
np.arange(1, 7))
np.random.seed(1337)
data1 = np.random.uniform(size=(6 * 4, 7 * 4))
data1[:data2.shape[0], :data2.shape[1]] = data2
chdu = fits.CompImageHDU(data1, compressionType='RICE_1',
tileSize=(6, 7))
chdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'),
disable_image_compression=True) as h:
assert h[1].header['TFORM1'] == '1PB(30)'
assert h[1].header['TFORM2'] == '1PB(359)'
def test_compression_update_header(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/23
"""
self.copy_file('comp.fits')
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
hdul[1].header['test1'] = 'test'
hdul[1]._header['test2'] = 'test2'
with fits.open(self.temp('comp.fits')) as hdul:
assert 'test1' in hdul[1].header
assert hdul[1].header['test1'] == 'test'
assert 'test2' in hdul[1].header
assert hdul[1].header['test2'] == 'test2'
# Test update via index now:
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
hdr[hdr.index('TEST1')] = 'foo'
with fits.open(self.temp('comp.fits')) as hdul:
assert hdul[1].header['TEST1'] == 'foo'
# Test slice updates
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdul[1].header['TEST*'] = 'qux'
with fits.open(self.temp('comp.fits')) as hdul:
assert list(hdul[1].header['TEST*'].values()) == ['qux', 'qux']
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
idx = hdr.index('TEST1')
hdr[idx:idx + 2] = 'bar'
with fits.open(self.temp('comp.fits')) as hdul:
assert list(hdul[1].header['TEST*'].values()) == ['bar', 'bar']
# Test updating a specific COMMENT card duplicate
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdul[1].header[('COMMENT', 1)] = 'I am fire. I am death!'
with fits.open(self.temp('comp.fits')) as hdul:
assert hdul[1].header['COMMENT'][1] == 'I am fire. I am death!'
assert hdul[1]._header['COMMENT'][1] == 'I am fire. I am death!'
# Test deleting by keyword and by slice
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
del hdr['COMMENT']
idx = hdr.index('TEST1')
del hdr[idx:idx + 2]
with fits.open(self.temp('comp.fits')) as hdul:
assert 'COMMENT' not in hdul[1].header
assert 'COMMENT' not in hdul[1]._header
assert 'TEST1' not in hdul[1].header
assert 'TEST1' not in hdul[1]._header
assert 'TEST2' not in hdul[1].header
assert 'TEST2' not in hdul[1]._header
def test_compression_update_header_with_reserved(self):
"""
Ensure that setting reserved keywords related to the table data
structure on CompImageHDU image headers fails.
"""
def test_set_keyword(hdr, keyword, value):
with catch_warnings() as w:
hdr[keyword] = value
assert len(w) == 1
assert str(w[0].message).startswith(
"Keyword %r is reserved" % keyword)
assert keyword not in hdr
with fits.open(self.data('comp.fits')) as hdul:
hdr = hdul[1].header
test_set_keyword(hdr, 'TFIELDS', 8)
test_set_keyword(hdr, 'TTYPE1', 'Foo')
test_set_keyword(hdr, 'ZCMPTYPE', 'ASDF')
test_set_keyword(hdr, 'ZVAL1', 'Foo')
def test_compression_header_append(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with catch_warnings() as w:
imghdr.append('TFIELDS')
assert len(w) == 1
assert 'TFIELDS' not in imghdr
imghdr.append(('FOO', 'bar', 'qux'), end=True)
assert 'FOO' in imghdr
assert imghdr[-1] == 'bar'
assert 'FOO' in tblhdr
assert tblhdr[-1] == 'bar'
imghdr.append(('CHECKSUM', 'abcd1234'))
assert 'CHECKSUM' in imghdr
assert imghdr['CHECKSUM'] == 'abcd1234'
assert 'CHECKSUM' not in tblhdr
assert 'ZHECKSUM' in tblhdr
assert tblhdr['ZHECKSUM'] == 'abcd1234'
def test_compression_header_insert(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
# First try inserting a restricted keyword
with catch_warnings() as w:
imghdr.insert(1000, 'TFIELDS')
assert len(w) == 1
assert 'TFIELDS' not in imghdr
assert tblhdr.count('TFIELDS') == 1
# Now try a keyword-relative insert
imghdr.insert('TELESCOP', ('OBSERVER', 'Phil Plait'))
assert 'OBSERVER' in imghdr
assert imghdr.index('OBSERVER') == imghdr.index('TELESCOP') - 1
assert 'OBSERVER' in tblhdr
assert tblhdr.index('OBSERVER') == tblhdr.index('TELESCOP') - 1
# Next let's see if an index-relative insert winds up being
# sensible
idx = imghdr.index('OBSERVER')
imghdr.insert('OBSERVER', ('FOO',))
assert 'FOO' in imghdr
assert imghdr.index('FOO') == idx
assert 'FOO' in tblhdr
assert tblhdr.index('FOO') == tblhdr.index('OBSERVER') - 1
def test_compression_header_set_before_after(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with catch_warnings() as w:
imghdr.set('ZBITPIX', 77, 'asdf', after='XTENSION')
assert len(w) == 1
assert 'ZBITPIX' not in imghdr
assert tblhdr.count('ZBITPIX') == 1
assert tblhdr['ZBITPIX'] != 77
# Move GCOUNT before PCOUNT (not that there's any reason you'd
# *want* to do that, but it's just a test...)
imghdr.set('GCOUNT', 99, before='PCOUNT')
assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') - 1
assert imghdr['GCOUNT'] == 99
assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') - 1
assert tblhdr['ZGCOUNT'] == 99
assert tblhdr.index('PCOUNT') == 5
assert tblhdr.index('GCOUNT') == 6
assert tblhdr['GCOUNT'] == 1
imghdr.set('GCOUNT', 2, after='PCOUNT')
assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') + 1
assert imghdr['GCOUNT'] == 2
assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') + 1
assert tblhdr['ZGCOUNT'] == 2
assert tblhdr.index('PCOUNT') == 5
assert tblhdr.index('GCOUNT') == 6
assert tblhdr['GCOUNT'] == 1
def test_compression_header_append_commentary(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2363
"""
hdu = fits.CompImageHDU(np.array([0], dtype=np.int32))
hdu.header['COMMENT'] = 'hello world'
assert hdu.header['COMMENT'] == ['hello world']
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].header['COMMENT'] == ['hello world']
def test_compression_with_gzip_column(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/71
"""
arr = np.zeros((2, 7000), dtype='float32')
# The first row (which will be the first compressed tile) has a very
# wide range of values that will be difficult to quantize, and should
# result in use of a GZIP_COMPRESSED_DATA column
arr[0] = np.linspace(0, 1, 7000)
arr[1] = np.random.normal(size=7000)
hdu = fits.CompImageHDU(data=arr)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
comp_hdu = hdul[1]
# GZIP-compressed tile should compare exactly
assert np.all(comp_hdu.data[0] == arr[0])
# The second tile uses lossy compression and may be somewhat off,
# so we don't bother comparing it exactly
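# (Illustrative) the fallback could be confirmed by reopening the file
# with disable_image_compression=True and inspecting the TFORMn values,
# as done in test_compression_column_tforms above.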
def test_duplicate_compression_header_keywords(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2750
Tests that the fake header (for the compressed image) can still be read
even if the real header contained a duplicate ZTENSION keyword (the
issue applies to any keyword specific to the compression convention,
however).
"""
arr = np.arange(100, dtype=np.int32)
hdu = fits.CompImageHDU(data=arr)
header = hdu._header
# append the duplicate keyword
hdu._header.append(('ZTENSION', 'IMAGE'))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert header == hdul[1]._header
# There's no good reason to have a duplicate keyword, but
# technically it isn't invalid either :/
assert hdul[1]._header.count('ZTENSION') == 2
|
sivasankariit/linux-rl
|
refs/heads/rl
|
tools/perf/scripts/python/net_dropmonitor.py
|
4235
|
# Monitor the system for dropped packets and produce a report of drop locations and counts
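# Typical invocation (illustrative; exact options may vary):
#   perf record -e skb:kfree_skb -a -- sleep 10
#   perf script -s net_dropmonitor.py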
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
linecount = 0
for line in f:
linecount = linecount+1
f.seek(0)
except IOError:
return
j = 0
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
j = j +1
if ((j % 100) == 0):
print "\r" + str(j) + "/" + str(linecount),
kallsyms.append({ 'loc': loc, 'name' : name})
print "\r" + str(j) + "/" + str(linecount)
kallsyms.sort()
return
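# get_sym maps a raw kernel address to the nearest kallsyms entry at or
# above it, returning (name, distance to that entry), or (None, 0) if no
# entry qualifies.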
def get_sym(sloc):
loc = int(sloc)
for i in kallsyms:
if (i['loc'] >= loc):
return (i['name'], i['loc']-loc)
return (None, 0)
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym is None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except KeyError:
drop_log[slocation] = 1
|
zouyapeng/horizon
|
refs/heads/stable/juno
|
openstack_dashboard/dashboards/project/data_processing/data_sources/__init__.py
|
12133432
| |
AndrewGrossman/django
|
refs/heads/master
|
django/core/management/commands/check.py
|
316
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import apps
from django.core import checks
from django.core.checks.registry import registry
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = "Checks the entire Django project for potential problems."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='*')
parser.add_argument('--tag', '-t', action='append', dest='tags',
help='Run only checks labeled with given tag.')
parser.add_argument('--list-tags', action='store_true', dest='list_tags',
help='List available tags.')
parser.add_argument('--deploy', action='store_true', dest='deploy',
help='Check deployment settings.')
def handle(self, *app_labels, **options):
include_deployment_checks = options['deploy']
if options.get('list_tags'):
self.stdout.write('\n'.join(sorted(registry.tags_available(include_deployment_checks))))
return
if app_labels:
app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
else:
app_configs = None
tags = options.get('tags')
if tags:
try:
invalid_tag = next(
tag for tag in tags if not checks.tag_exists(tag, include_deployment_checks)
)
except StopIteration:
# no invalid tags
pass
else:
raise CommandError('There is no system check with the "%s" tag.' % invalid_tag)
self.check(
app_configs=app_configs,
tags=tags,
display_num_errors=True,
include_deployment_checks=include_deployment_checks,
)
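# Illustrative usage ("myapp" and the tag name are example values):
#   python manage.py check                      # check the whole project
#   python manage.py check myapp --tag models   # restrict to one app and tag
#   python manage.py check --deploy             # include deployment checks
#   python manage.py check --list-tags          # list available tags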
|
ribag/ganeti-experiments
|
refs/heads/topic-cli-quote
|
qa/qa_logging.py
|
3
|
#
#
# Copyright (C) 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
""" Handles the logging of messages with appropriate coloring.
"""
import sys
_INFO_SEQ = None
_WARNING_SEQ = None
_ERROR_SEQ = None
_RESET_SEQ = None
def _SetupColours():
"""Initializes the colour constants.
"""
# pylint: disable=W0603
# due to global usage
global _INFO_SEQ, _WARNING_SEQ, _ERROR_SEQ, _RESET_SEQ
# Don't use colours if stdout isn't a terminal
if not sys.stdout.isatty():
return
try:
import curses
except ImportError:
# Don't use colours if curses module can't be imported
return
curses.setupterm()
_RESET_SEQ = curses.tigetstr("op")
setaf = curses.tigetstr("setaf")
_INFO_SEQ = curses.tparm(setaf, curses.COLOR_GREEN)
_WARNING_SEQ = curses.tparm(setaf, curses.COLOR_YELLOW)
_ERROR_SEQ = curses.tparm(setaf, curses.COLOR_RED)
_SetupColours()
def _FormatWithColor(text, seq):
if not seq:
return text
return "%s%s%s" % (seq, text, _RESET_SEQ)
FormatWarning = lambda text: _FormatWithColor(text, _WARNING_SEQ)
FormatError = lambda text: _FormatWithColor(text, _ERROR_SEQ)
FormatInfo = lambda text: _FormatWithColor(text, _INFO_SEQ)
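# Illustrative usage (not part of the original module):
#   print(FormatWarning("disk space is low"))  # yellow when stdout is a tty
#   print(FormatError("command failed"))       # red
#   print(FormatInfo("cluster verified"))      # green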
|
CSC301H-Fall2013/JuakStore
|
refs/heads/master
|
site-packages/tests/regressiontests/comment_tests/tests/comment_view_tests.py
|
44
|
from __future__ import absolute_import, unicode_literals
import re
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.comments import signals
from django.contrib.comments.models import Comment
from . import CommentTestCase
from ..models import Article, Book
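# The regex below matches the default "comment posted" redirect, e.g.
# http://testserver/posted/?c=123 (an illustrative pk), capturing the new
# comment's pk in the "pk" group.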
post_redirect_re = re.compile(r'^http://testserver/posted/\?c=(?P<pk>\d+$)')
class CommentViewTests(CommentTestCase):
def testPostCommentHTTPMethods(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
response = self.client.get("/post/", data)
self.assertEqual(response.status_code, 405)
self.assertEqual(response["Allow"], "POST")
def testPostCommentMissingCtype(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
del data["content_type"]
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testPostCommentBadCtype(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["content_type"] = "Nobody expects the Spanish Inquisition!"
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testPostCommentMissingObjectPK(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
del data["object_pk"]
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testPostCommentBadObjectPK(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["object_pk"] = "14"
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testPostInvalidIntegerPK(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["comment"] = "This is another comment"
data["object_pk"] = '\ufffd'
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testPostInvalidDecimalPK(self):
b = Book.objects.get(pk='12.34')
data = self.getValidData(b)
data["comment"] = "This is another comment"
data["object_pk"] = 'cookies'
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testCommentPreview(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["preview"] = "Preview"
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "comments/preview.html")
def testHashTampering(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["security_hash"] = "Nobody expects the Spanish Inquisition!"
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testDebugCommentErrors(self):
"""The debug error template should be shown only if DEBUG is True"""
olddebug = settings.DEBUG
settings.DEBUG = True
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["security_hash"] = "Nobody expects the Spanish Inquisition!"
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
self.assertTemplateUsed(response, "comments/400-debug.html")
settings.DEBUG = False
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
self.assertTemplateNotUsed(response, "comments/400-debug.html")
settings.DEBUG = olddebug
def testCreateValidComment(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
self.response = self.client.post("/post/", data, REMOTE_ADDR="1.2.3.4")
self.assertEqual(self.response.status_code, 302)
self.assertEqual(Comment.objects.count(), 1)
c = Comment.objects.all()[0]
self.assertEqual(c.ip_address, "1.2.3.4")
self.assertEqual(c.comment, "This is my comment")
def testPostAsAuthenticatedUser(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data['name'] = data['email'] = ''
self.client.login(username="normaluser", password="normaluser")
self.response = self.client.post("/post/", data, REMOTE_ADDR="1.2.3.4")
self.assertEqual(self.response.status_code, 302)
self.assertEqual(Comment.objects.count(), 1)
c = Comment.objects.all()[0]
self.assertEqual(c.ip_address, "1.2.3.4")
u = User.objects.get(username='normaluser')
self.assertEqual(c.user, u)
self.assertEqual(c.user_name, u.get_full_name())
self.assertEqual(c.user_email, u.email)
def testPostAsAuthenticatedUserWithoutFullname(self):
"""
Check that the user's name in the comment is populated for
authenticated users without first_name and last_name.
"""
user = User.objects.create_user(username='jane_other',
email='jane@example.com', password='jane_other')
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data['name'] = data['email'] = ''
self.client.login(username="jane_other", password="jane_other")
self.response = self.client.post("/post/", data, REMOTE_ADDR="1.2.3.4")
c = Comment.objects.get(user=user)
self.assertEqual(c.ip_address, "1.2.3.4")
self.assertEqual(c.user_name, 'jane_other')
user.delete()
def testPreventDuplicateComments(self):
"""Prevent posting the exact same comment twice"""
a = Article.objects.get(pk=1)
data = self.getValidData(a)
self.client.post("/post/", data)
self.client.post("/post/", data)
self.assertEqual(Comment.objects.count(), 1)
# This should not trigger the duplicate prevention
self.client.post("/post/", dict(data, comment="My second comment."))
self.assertEqual(Comment.objects.count(), 2)
def testCommentSignals(self):
"""Test signals emitted by the comment posting view"""
# callback
def receive(sender, **kwargs):
self.assertEqual(kwargs['comment'].comment, "This is my comment")
self.assertTrue('request' in kwargs)
received_signals.append(kwargs.get('signal'))
# Connect signals and keep track of handled ones
received_signals = []
expected_signals = [
signals.comment_will_be_posted, signals.comment_was_posted
]
for signal in expected_signals:
signal.connect(receive)
# Post a comment and check the signals
self.testCreateValidComment()
self.assertEqual(received_signals, expected_signals)
for signal in expected_signals:
signal.disconnect(receive)
def testWillBePostedSignal(self):
"""
Test that the comment_will_be_posted signal can prevent the comment from
actually getting saved
"""
def receive(sender, **kwargs): return False
signals.comment_will_be_posted.connect(receive, dispatch_uid="comment-test")
a = Article.objects.get(pk=1)
data = self.getValidData(a)
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
self.assertEqual(Comment.objects.count(), 0)
signals.comment_will_be_posted.disconnect(dispatch_uid="comment-test")
def testWillBePostedSignalModifyComment(self):
"""
Test that the comment_will_be_posted signal can modify a comment before
it gets posted
"""
def receive(sender, **kwargs):
# a bad but effective spam filter :)...
kwargs['comment'].is_public = False
signals.comment_will_be_posted.connect(receive)
self.testCreateValidComment()
c = Comment.objects.all()[0]
self.assertFalse(c.is_public)
def testCommentNext(self):
"""Test the different "next" actions the comment view can take"""
a = Article.objects.get(pk=1)
data = self.getValidData(a)
response = self.client.post("/post/", data)
location = response["Location"]
match = post_redirect_re.match(location)
self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
data["next"] = "/somewhere/else/"
data["comment"] = "This is another comment"
response = self.client.post("/post/", data)
location = response["Location"]
match = re.search(r"^http://testserver/somewhere/else/\?c=\d+$", location)
self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
data["next"] = "http://badserver/somewhere/else/"
data["comment"] = "This is another comment with an unsafe next url"
response = self.client.post("/post/", data)
location = response["Location"]
match = post_redirect_re.match(location)
self.assertTrue(match != None, "Unsafe redirection to: %s" % location)
def testCommentDoneView(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
response = self.client.post("/post/", data)
location = response["Location"]
match = post_redirect_re.match(location)
self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
pk = int(match.group('pk'))
response = self.client.get(location)
self.assertTemplateUsed(response, "comments/posted.html")
self.assertEqual(response.context[0]["comment"], Comment.objects.get(pk=pk))
def testCommentNextWithQueryString(self):
"""
The `next` key needs to handle already having a query string (#10585)
"""
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["next"] = "/somewhere/else/?foo=bar"
data["comment"] = "This is another comment"
response = self.client.post("/post/", data)
location = response["Location"]
match = re.search(r"^http://testserver/somewhere/else/\?foo=bar&c=\d+$", location)
self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
def testCommentPostRedirectWithInvalidIntegerPK(self):
"""
Tests that retrieving the location given in the post redirect does not
cause a server error, even after some invalid data is appended to the
query string it ends with.
"""
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["comment"] = "This is another comment"
response = self.client.post("/post/", data)
location = response["Location"]
broken_location = location + "\ufffd"
response = self.client.get(broken_location)
self.assertEqual(response.status_code, 200)
def testCommentNextWithQueryStringAndAnchor(self):
"""
The `next` key needs to handle already having an anchor. Refs #13411.
"""
# With a query string also.
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["next"] = "/somewhere/else/?foo=bar#baz"
data["comment"] = "This is another comment"
response = self.client.post("/post/", data)
location = response["Location"]
match = re.search(r"^http://testserver/somewhere/else/\?foo=bar&c=\d+#baz$", location)
self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
# Without a query string
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["next"] = "/somewhere/else/#baz"
data["comment"] = "This is another comment"
response = self.client.post("/post/", data)
location = response["Location"]
match = re.search(r"^http://testserver/somewhere/else/\?c=\d+#baz$", location)
self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
|