repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
bencarle/odl-learning-labs | src/settings/pod2-5.py | 1 | 2262 | # Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from __future__ import print_function
# Hostname/IP of the OpenDaylight controller for this lab pod.
odl_server_hostname = '172.17.11.5'
# RESTCONF API root on the controller (ODL RESTCONF listens on port 8181).
odl_server_url_prefix = "http://%s:8181/restconf/" % odl_server_hostname

# Settings consumed by the lab scripts: the ODL RESTCONF endpoint plus the
# NETCONF connection details (port 830) for each IOS XRv device in the pod.
config = {'odl_server' : {
              'url_prefix' : odl_server_url_prefix,
              'username' : 'admin',
              'password' : 'admin'
          },
          'network_device': {
              'iosxrv-1': {
                  'address': '172.17.11.51',
                  'port': 830,
                  'password': 'cisco',
                  'username': 'cisco'
              },
              'iosxrv-2': {
                  'address': '172.17.11.52',
                  'port': 830,
                  'password': 'cisco',
                  'username': 'cisco'
              },
              'iosxrv-3': {
                  'address': '172.17.11.53',
                  'port': 830,
                  'password': 'cisco',
                  'username': 'cisco'
              },
              'iosxrv-4': {
                  'address': '172.17.11.54',
                  'port': 830,
                  'password': 'cisco',
                  'username': 'cisco'
              },
          }
         }
| apache-2.0 |
aljscott/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/openbugs_unittest.py | 124 | 2412 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.commands.commandtest import CommandsTest
from webkitpy.tool.commands.openbugs import OpenBugs
class OpenBugsTest(CommandsTest):
    """Tests for the open-bugs command: bug-number scraping and execution."""

    # Pairs of [input string, bug numbers expected to be extracted from it].
    find_bugs_in_string_expectations = [
        ["123", []],
        ["1234", ["1234"]],
        ["12345", ["12345"]],
        ["123456", ["123456"]],
        ["1234567", []],
        [" 123456 234567", ["123456", "234567"]],
    ]

    def test_find_bugs_in_string(self):
        command = OpenBugs()
        for input_string, expected_bugs in self.find_bugs_in_string_expectations:
            self.assertEqual(command._find_bugs_in_string(input_string), expected_bugs)

    def test_args_parsing(self):
        expected_logs = "2 bugs found in input.\nMOCK: user.open_url: http://example.com/12345\nMOCK: user.open_url: http://example.com/23456\n"
        self.assert_execute_outputs(OpenBugs(), ["12345\n23456"], expected_logs=expected_logs)
| bsd-3-clause |
jacobsenanaizabel/shoop | shoop/admin/modules/categories/__init__.py | 4 | 1967 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.db.models import Q
from shoop.admin.base import AdminModule, MenuEntry, SearchResult
from django.utils.translation import ugettext_lazy as _
from shoop.admin.utils.urls import get_model_url, derive_model_url, get_edit_and_list_urls
from shoop.core.models import Category
import six
class CategoryModule(AdminModule):
    """Admin module wiring up category management: URLs, menu entry, search."""

    name = _("Products")
    category = name
    breadcrumbs_menu_entry = MenuEntry(text=name, url="shoop_admin:category.list")

    def get_urls(self):
        # Standard edit/list URL set mounted under ^categories.
        return get_edit_and_list_urls(
            url_prefix="^categories",
            view_template="shoop.admin.modules.categories.views.Category%sView",
            name_template="category.%s"
        )

    def get_menu_entries(self, request):
        # Single "Categories" entry placed in the shared "Products" category.
        return [
            MenuEntry(
                text=_("Categories"), icon="fa fa-sitemap",
                url="shoop_admin:category.list", category=self.category
            )
        ]

    def get_search_results(self, request, query):
        """Yield SearchResults for up to 10 categories matching *query*.

        Matches against the translated name or the identifier; queries
        shorter than 3 characters yield nothing.
        """
        minimum_query_length = 3
        if len(query) >= minimum_query_length:
            categories = Category.objects.filter(
                Q(translations__name__icontains=query) |
                Q(identifier__icontains=query)
            ).distinct().order_by("tree_id", "lft")
            for i, category in enumerate(categories[:10]):
                # Earlier matches (tree order) rank higher.
                relevance = 100 - i
                yield SearchResult(
                    text=six.text_type(category),
                    url=get_model_url(category),
                    category=self.category,
                    relevance=relevance
                )

    def get_model_url(self, object, kind):
        return derive_model_url(Category, "shoop_admin:category", object, kind)
| agpl-3.0 |
braingram/pizco | tests/test_protocol.py | 3 | 1663 |
import unittest
from pizco.protocol import Protocol
class TestProtocol(unittest.TestCase):
    """Unit tests for pizco's wire Protocol: format/parse round-trips and
    the explicit msgid/sender/signature checks."""

    def test_protocol(self):
        # Unsigned protocol (no hmac_key).
        prot = Protocol()
        # Parsing something that is not a well-formed message must fail.
        self.assertRaises(ValueError, prot.parse, [])
        msg = prot.format('friend', 'bla', 'here goes the content')
        sender, topic, content, msgid = prot.parse(msg)
        self.assertEqual(sender, 'friend')
        self.assertEqual(topic, 'bla')
        self.assertEqual(content, 'here goes the content')
        # Tamper with the message id to exercise the explicit-check paths.
        real_id = msg[1]
        msg[1] = 'newid'.encode('utf-8')
        self.assertRaises(ValueError, prot.parse, msg, check_msgid='wrong id')
        self.assertRaises(ValueError, prot.parse, msg, check_sender='another')
        msg[-1] = 'fake signature'.encode('utf-8')
        msg[1] = real_id
        # NOTE(review): the three assertions below only re-check local
        # variables captured BEFORE the signature was tampered with above --
        # the tampered message is never re-parsed here (contrast
        # test_protocol_key, which ends with assertRaises on parse).
        # Presumably a parse call was intended; confirm before changing.
        self.assertEqual(sender, 'friend')
        self.assertEqual(topic, 'bla')
        self.assertEqual(content, 'here goes the content')

    def test_protocol_key(self):
        # HMAC-signed protocol: a forged signature must be rejected by parse.
        prot = Protocol(hmac_key='have a key')
        msg = prot.format('friend', 'bla', 'here goes the content')
        sender, topic, content, msgid = prot.parse(msg)
        self.assertEqual(sender, 'friend')
        self.assertEqual(topic, 'bla')
        self.assertEqual(content, 'here goes the content')
        real_id = msg[1]
        msg[1] = 'newid'.encode('utf-8')
        self.assertRaises(ValueError, prot.parse, msg, check_msgid='wrong id')
        self.assertRaises(ValueError, prot.parse, msg, check_sender='another')
        msg[-1] = 'fake signature'.encode('utf-8')
        msg[1] = real_id
        self.assertRaises(ValueError, prot.parse, msg)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| bsd-3-clause |
sangoma/aiosip | aiosip/dialog.py | 1 | 14964 | import asyncio
import enum
import logging
from collections import defaultdict
from multidict import CIMultiDict
from async_timeout import timeout as Timeout
from . import utils
from .message import Request, Response
from .transaction import UnreliableTransaction, ProxyTransaction
from .auth import Auth
LOG = logging.getLogger(__name__)
class CallState(enum.Enum):
    """States of an INVITE call setup, used by InviteDialog's state machine."""
    Calling = enum.auto()      # INVITE sent, no response yet
    Proceeding = enum.auto()   # provisional (1xx) response received
    Completed = enum.auto()    # final non-2xx (3xx-6xx) response received
    Terminated = enum.auto()   # 200 OK received (call established)
class DialogBase:
    """Base class for SIP dialogs.

    Holds the dialog identity (from/to/contact details plus Call-ID), the
    CSeq counter and the per-method transaction tables, and implements the
    request/response plumbing shared by :class:`Dialog` and
    :class:`InviteDialog`.
    """

    def __init__(self,
                 app,
                 method,
                 from_details,
                 to_details,
                 call_id,
                 peer,
                 contact_details,
                 *,
                 headers=None,
                 payload=None,
                 password=None,
                 cseq=0,
                 inbound=False):
        self.app = app
        self.from_details = from_details
        self.to_details = to_details
        self.contact_details = contact_details
        self.call_id = call_id
        self.peer = peer
        self.password = password
        self.cseq = cseq
        self.inbound = inbound
        # {method: {cseq: transaction}} table of in-flight transactions.
        self.transactions = defaultdict(dict)

        # TODO: Needs to be last because we need the above attributes set
        self.original_msg = self._prepare_request(method, headers=headers, payload=payload)
        self._closed = False
        self._closing = None  # pending delayed-close task scheduled by close_later()

    @property
    def dialog_id(self):
        """Hashable identity of this dialog (to/from details plus Call-ID)."""
        return frozenset((self.original_msg.to_details.details,
                          self.original_msg.from_details.details,
                          self.call_id))

    def _receive_response(self, msg):
        # Route the response to the transaction that issued the request.
        try:
            transaction = self.transactions[msg.method][msg.cseq]
            transaction._incoming(msg)
        except KeyError:
            if msg.method != 'ACK':
                # TODO: Hack to suppress warning on ACK messages,
                # since we don't quite handle them correctly. They're
                # ignored, for now...
                LOG.debug('Response without Request. The Transaction may already be closed. \n%s', msg)

    def _prepare_request(self, method, contact_details=None, headers=None, payload=None, cseq=None, to_details=None):
        """Build a Request within this dialog, bumping CSeq unless one is given."""
        self.from_details.add_tag()
        if not cseq:
            self.cseq += 1
        if contact_details:
            self.contact_details = contact_details

        headers = CIMultiDict(headers or {})
        if 'User-Agent' not in headers:
            headers['User-Agent'] = self.app.defaults['user_agent']
        headers['Call-ID'] = self.call_id

        msg = Request(
            method=method,
            cseq=cseq or self.cseq,
            from_details=self.from_details,
            to_details=to_details or self.to_details,
            contact_details=self.contact_details,
            headers=headers,
            payload=payload,
        )
        return msg

    async def start(self, *, expires=None):
        """Send the dialog's original request, optionally with an Expires header."""
        # TODO: this is a hack
        headers = self.original_msg.headers
        if expires is not None:
            headers['Expires'] = expires
        return await self.request(self.original_msg.method, headers=headers, payload=self.original_msg.payload)

    def ack(self, msg, headers=None, *args, **kwargs):
        """ACK a response, echoing its Via and reusing its CSeq/To details."""
        headers = CIMultiDict(headers or {})
        headers['Via'] = msg.headers['Via']
        ack = self._prepare_request('ACK', cseq=msg.cseq, to_details=msg.to_details, headers=headers, *args, **kwargs)
        self.peer.send_message(ack)

    async def unauthorized(self, msg):
        """Reply 401 with a fresh md5 digest challenge."""
        self._nonce = utils.gen_str(10)
        headers = CIMultiDict()
        headers['WWW-Authenticate'] = str(Auth(nonce=self._nonce, algorithm='md5', realm='sip'))
        await self.reply(msg, status_code=401, headers=headers)

    def validate_auth(self, msg, password):
        """Check the message's digest credentials against our challenge nonce."""
        if msg.auth and msg.auth.validate(password, self._nonce):
            self._nonce = None  # nonce is single-use
            return True
        elif msg.method == 'CANCEL':
            # CANCEL is accepted without credentials.
            return True
        else:
            return False

    def close_later(self, delay=None):
        """Schedule close() after *delay* seconds (app default when None)."""
        if delay is None:
            delay = self.app.defaults['dialog_closing_delay']
        if self._closing:
            self._closing.cancel()

        async def closure():
            await asyncio.sleep(delay)
            await self.close()

        self._closing = asyncio.ensure_future(closure())

    def _maybe_close(self, msg):
        # Outbound REGISTER/SUBSCRIBE dialogs live until their Expires lapses
        # (with 10% slack); NOTIFY keeps the dialog open; everything else
        # closes after the default delay.
        if msg.method in ('REGISTER', 'SUBSCRIBE') and not self.inbound:
            expire = int(msg.headers.get('Expires', 0))
            delay = int(expire * 1.1) if expire else None
            self.close_later(delay)
        elif msg.method == 'NOTIFY':
            pass
        else:
            self.close_later()

    def _close(self):
        LOG.debug('Closing: %s', self)
        if self._closing:
            self._closing.cancel()
        for transactions in self.transactions.values():
            for transaction in transactions.values():
                transaction.close()

        # Should not be necessary once dialog are correctly tracked
        try:
            del self.app._dialogs[self.dialog_id]
        except KeyError:
            pass

    def _connection_lost(self):
        # Fail every pending transaction when the transport drops.
        for transactions in self.transactions.values():
            for transaction in transactions.values():
                transaction._error(ConnectionError)

    async def start_unreliable_transaction(self, msg, method=None):
        """Register *msg* in the transaction table and await its final response."""
        transaction = UnreliableTransaction(self, original_msg=msg, loop=self.app.loop)
        self.transactions[method or msg.method][msg.cseq] = transaction
        return await transaction.start()

    async def start_proxy_transaction(self, msg, timeout=5):
        """Yield responses for a proxied request; retransmit on duplicate CSeq."""
        if msg.cseq not in self.transactions[msg.method]:
            transaction = ProxyTransaction(dialog=self, original_msg=msg, loop=self.app.loop, timeout=timeout)
            self.transactions[msg.method][msg.cseq] = transaction
            async for response in transaction.start():
                yield response
        else:
            LOG.debug('Message already transmitted: %s %s, %s', msg.cseq, msg.method, msg.headers['Call-ID'])
            self.transactions[msg.method][msg.cseq].retransmit()
            return

    def end_transaction(self, transaction):
        """Close *transaction* and drop every table entry pointing at it."""
        to_delete = list()
        for method, values in self.transactions.items():
            for cseq, t in values.items():
                if transaction is t:
                    transaction.close()
                    to_delete.append((method, cseq))

        for item in to_delete:
            del self.transactions[item[0]][item[1]]

    async def request(self, method, contact_details=None, headers=None, payload=None, timeout=None):
        """Send a request and await its final response; ACK is fire-and-forget."""
        msg = self._prepare_request(method, contact_details, headers, payload)
        if msg.method != 'ACK':
            async with Timeout(timeout):
                return await self.start_unreliable_transaction(msg)
        else:
            self.peer.send_message(msg)

    async def reply(self, request, status_code, status_message=None, payload=None, headers=None, contact_details=None):
        """Build and send a response to *request* within this dialog."""
        msg = self._prepare_response(request, status_code, status_message, payload, headers, contact_details)
        self.peer.send_message(msg)

    def _prepare_response(self, request, status_code, status_message=None, payload=None, headers=None,
                          contact_details=None):
        """Build a Response to *request*: from/to are swapped, Via is echoed."""
        self.from_details.add_tag()
        if contact_details:
            self.contact_details = contact_details

        headers = CIMultiDict(headers or {})
        if 'User-Agent' not in headers:
            headers['User-Agent'] = self.app.defaults['user_agent']
        headers['Call-ID'] = self.call_id
        headers['Via'] = request.headers['Via']

        msg = Response(
            status_code=status_code,
            status_message=status_message,
            headers=headers,
            from_details=self.to_details,
            to_details=self.from_details,
            contact_details=self.contact_details,
            payload=payload,
            cseq=request.cseq,
            method=request.method
        )
        return msg

    def __repr__(self):
        return f'<{self.__class__.__name__} call_id={self.call_id}, peer={self.peer}>'

    async def __aenter__(self):
        return self

    async def __aexit__(self, *exc_info):
        await self.close()

    def __aiter__(self):
        # BUGFIX: __aiter__ must return the async iterator synchronously.
        # It was declared ``async def``, which returns a coroutine; since
        # Python 3.8 ``async for`` raises TypeError on such objects
        # (returning an awaitable from __aiter__ was deprecated in 3.5.2).
        return self

    async def __anext__(self):
        # recv() is implemented by subclasses.
        return await self.recv()
class Dialog(DialogBase):
    """Non-INVITE dialog: queues incoming requests and supports auth challenges.

    Incoming requests are consumed via :meth:`recv` (also backing the
    async-iterator protocol inherited from DialogBase).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Nonce issued by the last 401 challenge; cleared once validated.
        self._nonce = None
        # FIFO of incoming Request objects, consumed by recv().
        self._incoming = asyncio.Queue()

    async def receive_message(self, msg):
        """Dispatch an incoming message to the response or request path."""
        # Any traffic on the dialog cancels a pending delayed close.
        if self._closing:
            self._closing.cancel()

        # Keep our CSeq monotonically >= the peer's.
        if self.cseq < msg.cseq:
            self.cseq = msg.cseq

        if isinstance(msg, Response) or msg.method == 'ACK':
            return self._receive_response(msg)
        else:
            return await self._receive_request(msg)

    async def _receive_request(self, msg):
        # Mirror the peer's from-tag so our responses stay within the dialog.
        if 'tag' in msg.from_details['params']:
            self.to_details['params']['tag'] = msg.from_details['params']['tag']

        await self._incoming.put(msg)
        self._maybe_close(msg)

    async def refresh(self, headers=None, expires=1800, *args, **kwargs):
        """Re-send the dialog's original request (e.g. REGISTER) to refresh it."""
        headers = CIMultiDict(headers or {})
        if 'Expires' not in headers:
            headers['Expires'] = int(expires)
        return await self.request(self.original_msg.method, headers=headers, *args, **kwargs)

    async def close(self, headers=None, *args, **kwargs):
        """Close the dialog; outbound REGISTER/SUBSCRIBE send Expires: 0 first.

        Returns the final response of the farewell request, or None.
        """
        if not self._closed:
            self._closed = True
            result = None
            if not self.inbound and self.original_msg.method in ('REGISTER', 'SUBSCRIBE'):
                headers = CIMultiDict(headers or {})
                if 'Expires' not in headers:
                    headers['Expires'] = 0
                try:
                    result = await self.request(self.original_msg.method, headers=headers, *args, **kwargs)
                finally:
                    self._close()
            self._close()
            return result

    async def notify(self, *args, headers=None, **kwargs):
        """Send a NOTIFY, filling in dialog-event defaults when absent."""
        headers = CIMultiDict(headers or {})
        if 'Event' not in headers:
            headers['Event'] = 'dialog'
        if 'Content-Type' not in headers:
            headers['Content-Type'] = 'application/dialog-info+xml'
        if 'Subscription-State' not in headers:
            headers['Subscription-State'] = 'active'
        return await self.request('NOTIFY', *args, headers=headers, **kwargs)

    def cancel(self, *args, **kwargs):
        # Fire-and-forget: no transaction is created for CANCEL here.
        cancel = self._prepare_request('CANCEL', *args, **kwargs)
        self.peer.send_message(cancel)

    async def recv(self):
        """Return the next queued incoming request (awaits until one arrives)."""
        return await self._incoming.get()
class InviteDialog(DialogBase):
    """Dialog driving an INVITE through its call-setup state machine.

    State transitions: Calling -> Proceeding on 1xx, -> Terminated on 200,
    -> Completed on 3xx-6xx.  The final response resolves :meth:`ready`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(method="INVITE", *args, **kwargs)
        # Raw incoming messages, consumed by recv()/wait_for_terminate().
        self._queue = asyncio.Queue()
        self._state = CallState.Calling
        # Resolved with the final response to the INVITE.
        self._waiter = asyncio.Future()

    async def receive_message(self, msg):  # noqa: C901
        async def set_result(msg):
            # ACK the final response and resolve the waiter exactly once.
            self.ack(msg)
            if not self._waiter.done():
                self._waiter.set_result(msg)

        async def handle_calling_state(msg):
            if 100 <= msg.status_code < 200:
                self._state = CallState.Proceeding
            elif msg.status_code == 200:
                self._state = CallState.Terminated
                await set_result(msg)
            elif 300 <= msg.status_code < 700:
                self._state = CallState.Completed
                await set_result(msg)

        async def handle_proceeding_state(msg):
            if 100 <= msg.status_code < 200:
                # Further provisional responses keep us in Proceeding.
                pass
            elif msg.status_code == 200:
                self._state = CallState.Terminated
                await set_result(msg)
            elif 300 <= msg.status_code < 700:
                self._state = CallState.Completed
                await set_result(msg)

        async def handle_completed_state(msg):
            # Any additional messages in this state MUST be acked but
            # are NOT to be passed up
            self.ack(msg)

        await self._queue.put(msg)

        # TODO: sip timers and flip to Terminated after timeout
        if self._state == CallState.Calling:
            await handle_calling_state(msg)
        elif self._state == CallState.Proceeding:
            await handle_proceeding_state(msg)
        elif self._state == CallState.Completed:
            await handle_completed_state(msg)
        elif self._state == CallState.Terminated:
            if isinstance(msg, Response) or msg.method == 'ACK':
                return self._receive_response(msg)
            else:
                return await self._receive_request(msg)

    async def _receive_request(self, msg):
        # Mirror the peer's from-tag so our responses stay within the dialog.
        if 'tag' in msg.from_details['params']:
            self.to_details['params']['tag'] = msg.from_details['params']['tag']

        if msg.method == 'BYE':
            # Remote hangup ends the dialog.
            self._closed = True
        self._maybe_close(msg)

    @property
    def state(self):
        """Current CallState of the INVITE."""
        return self._state

    async def start(self, *, expires=None):
        # TODO: this is a hack
        # NOTE(review): *expires* is accepted but unused here -- confirm
        # whether it should be applied as in DialogBase.start().
        self.peer.send_message(self.original_msg)

    async def recv(self):
        """Return the next raw message received on this dialog."""
        return await self._queue.get()

    async def wait_for_terminate(self):
        # Async generator: yield incoming messages until the final
        # response resolves the waiter.
        while not self._waiter.done():
            yield await self._queue.get()

    async def ready(self):
        """Wait for the final response; raise if the INVITE was not accepted."""
        msg = await self._waiter
        if msg.status_code != 200:
            raise RuntimeError("INVITE failed with {}".format(msg.status_code))

    def end_transaction(self, transaction):
        # NOTE(review): byte-for-byte duplicate of
        # DialogBase.end_transaction -- candidate for removal.
        to_delete = list()
        for method, values in self.transactions.items():
            for cseq, t in values.items():
                if transaction is t:
                    transaction.close()
                    to_delete.append((method, cseq))

        for item in to_delete:
            del self.transactions[item[0]][item[1]]

    async def close(self, timeout=None):
        """Terminate the call: BYE if answered, CANCEL if still pending."""
        if not self._closed:
            self._closed = True
            msg = None
            if self._state == CallState.Terminated:
                msg = self._prepare_request('BYE')
            elif self._state != CallState.Completed:
                msg = self._prepare_request('CANCEL')

            if msg:
                transaction = UnreliableTransaction(self, original_msg=msg, loop=self.app.loop)
                self.transactions[msg.method][msg.cseq] = transaction
                try:
                    async with Timeout(timeout):
                        await transaction.start()
                finally:
                    self._close()
            self._close()
| apache-2.0 |
nrgaway/qubes-core-admin | qubes/vm/templatevm.py | 1 | 4118 | #
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2014-2016 Wojtek Porczyk <woju@invisiblethingslab.com>
# Copyright (C) 2016 Marek Marczykowski <marmarek@invisiblethingslab.com>)
# Copyright (C) 2016 Bahtiar `kalkin-` Gadimov <bahtiar@gadimov.de>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
#
''' This module contains the TemplateVM implementation '''
import qubes
import qubes.config
import qubes.vm.qubesvm
import qubes.vm.mix.net
from qubes.config import defaults
from qubes.vm.qubesvm import QubesVM
class TemplateVM(QubesVM):
    '''Template for AppVM'''

    # Templates are stored under the dedicated templates directory.
    dir_path_prefix = qubes.config.system_path['qubes_templates_dir']

    @property
    def appvms(self):
        ''' Returns a generator containing all domains based on the current
            TemplateVM.
        '''
        for vm in self.app.domains:
            if hasattr(vm, 'template') and vm.template is self:
                yield vm

    netvm = qubes.VMProperty('netvm', load_stage=4, allow_none=True,
        default=None,
        # pylint: disable=protected-access
        setter=qubes.vm.qubesvm.QubesVM.netvm._setter,
        doc='VM that provides network connection to this domain. When '
            '`None`, machine is disconnected.')

    def __init__(self, *args, **kwargs):
        assert 'template' not in kwargs, "A TemplateVM can not have a template"
        # Storage layout of a template: persistent root and private volumes,
        # scratch volatile volume, and a read-only kernel volume.
        self.volume_config = {
            'root': {
                'name': 'root',
                'snap_on_start': False,
                'save_on_stop': True,
                'rw': True,
                'source': None,
                'size': defaults['root_img_size'],
            },
            'private': {
                'name': 'private',
                'snap_on_start': False,
                'save_on_stop': True,
                'rw': True,
                'source': None,
                'size': defaults['private_img_size'],
                'revisions_to_keep': 0,
            },
            'volatile': {
                'name': 'volatile',
                # NOTE(review): volatile volume sized from root_img_size
                # (not a dedicated volatile size default) -- confirm intended.
                'size': defaults['root_img_size'],
                'snap_on_start': False,
                'save_on_stop': False,
                'rw': True,
            },
            'kernel': {
                'name': 'kernel',
                'snap_on_start': False,
                'save_on_stop': False,
                'rw': False
            }
        }
        super(TemplateVM, self).__init__(*args, **kwargs)

    @qubes.events.handler('property-set:default_user',
        'property-set:kernel',
        'property-set:kernelopts',
        'property-set:vcpus',
        'property-set:memory',
        'property-set:maxmem',
        'property-set:qrexec_timeout',
        'property-set:shutdown_timeout',
        'property-set:management_dispvm')
    def on_property_set_child(self, _event, name, newvalue, oldvalue=None):
        """Send event about default value change to child VMs
        (which use default inherited from the template).

        This handler is supposed to be set for properties using
        `_default_with_template()` function for the default value.
        """
        if newvalue == oldvalue:
            return

        for vm in self.appvms:
            if not vm.property_is_default(name):
                continue
            # Child inherits this default: tell it the value was reset.
            vm.fire_event('property-reset:' + name, name=name)
| gpl-2.0 |
tasoc/photometry | notes/tests_time_offset_ffis_ecsv.py | 1 | 4345 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Create input data for tests of time offset of FFIs.
.. codeauthor:: Rasmus Handberg <rasmush@phys.au.dk>
"""
import sys
import os.path
import numpy as np
from astropy.io import fits
from astropy.table import Table
import itertools
import gzip
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from photometry import utilities
#--------------------------------------------------------------------------------------------------
def s20_lc():
    """Extract TIME columns from the two Sector 20 data releases of the same
    target and store them side by side in a gzipped ECSV file."""
    fname_v1 = '/aadc/tasoc/archive/S20_DR27/spoc_lightcurves/tess2019357164649-s0020-0000000004164018-0165-s_lc.fits.gz'
    fname_v2 = '/aadc/tasoc/archive/S20_DR27v2/spoc_lightcurves/tess2019357164649-s0020-0000000004164018-0165-s_lc.fits.gz'

    with fits.open(fname_v1) as hdu:
        primary_header = hdu[0].header
        time_original = hdu[1].data['TIME']
    with fits.open(fname_v2) as hdu:
        time_corrected = hdu[1].data['TIME']

    tab = Table(
        data=np.column_stack((time_original, time_corrected)),
        names=('time1', 'time2'),
        dtype=('float64', 'float64'))
    tab.meta = dict(primary_header)
    tab.write('time_offset_s20.ecsv', delimiter=',', format='ascii.ecsv')
    print(tab)

    # Compress the ECSV and drop the uncompressed intermediate.
    with open('time_offset_s20.ecsv', 'rb') as src, gzip.open('time_offset_s20.ecsv.gz', 'wb') as dst:
        dst.writelines(src)
    os.remove('time_offset_s20.ecsv')
#--------------------------------------------------------------------------------------------------
def s21_lc():
    """Extract TIME columns from the two Sector 21 data releases of the same
    target and store them side by side in a gzipped ECSV file."""
    fname_v1 = '/aadc/tasoc/archive/S21_DR29/spoc_lightcurves/tess2020020091053-s0021-0000000001096672-0167-s_lc.fits.gz'
    fname_v2 = '/aadc/tasoc/archive/S21_DR29v2/spoc_lightcurves/tess2020020091053-s0021-0000000001096672-0167-s_lc.fits.gz'

    with fits.open(fname_v1) as hdu:
        primary_header = hdu[0].header
        time_original = hdu[1].data['TIME']
    with fits.open(fname_v2) as hdu:
        time_corrected = hdu[1].data['TIME']

    tab = Table(
        data=np.column_stack((time_original, time_corrected)),
        names=('time1', 'time2'),
        dtype=('float64', 'float64'))
    tab.meta = dict(primary_header)
    tab.write('time_offset_s21.ecsv', delimiter=',', format='ascii.ecsv')
    print(tab)

    # Compress the ECSV and drop the uncompressed intermediate.
    with open('time_offset_s21.ecsv', 'rb') as src, gzip.open('time_offset_s21.ecsv.gz', 'wb') as dst:
        dst.writelines(src)
    os.remove('time_offset_s21.ecsv')
#--------------------------------------------------------------------------------------------------
def ffis():
    """Sample FFI headers from original and corrected data releases and
    collect start/mid/stop times (barycentric correction removed) in ffis.ecsv."""
    # Fixed seed so the random file sample is reproducible.
    np.random.seed(42)

    # (sector, original FFI directory, corrected FFI directory)
    folders = [
        (20, '/aadc/tasoc/archive/S20_DR27/ffi/', '/aadc/tasoc/archive/S20_DR27v2/ffi/'),
        (21, '/aadc/tasoc/archive/S21_DR29/ffi/', '/aadc/tasoc/archive/S21_DR29v2/ffi/'),
        (14, '/aadc/tasoc/archive/S14_DR19/ffi/', '/aadc/tasoc/archive/S14_DR30/ffi/'),
        (15, '/aadc/tasoc/archive/S15_DR21/ffi/', '/aadc/tasoc/archive/S15_DR30/ffi/'),
        (16, '/aadc/tasoc/archive/S16_DR22/ffi/', '/aadc/tasoc/archive/S16_DR30/ffi/')
    ]
    print(folders)

    ffis = []
    for (sector, dpath, dpath_corr), camera, ccd in itertools.product(folders, (1,2,3,4), (1,2,3,4)):
        # Find all the files from this CCD:
        files = utilities.find_ffi_files(dpath, sector=sector, camera=camera, ccd=ccd)

        # Pick 2 random files from this CCD:
        pick = np.random.choice(files, 2, replace=False)
        for fpath in pick:
            # Corresponding file in the corrected release (same relative path).
            fpath_corrected = os.path.join(dpath_corr, os.path.relpath(fpath, dpath))

            # Spacecraft (non-barycentric) start/stop times from the corrected release:
            with fits.open(fpath_corrected, mode='readonly', memmap=True) as hdu:
                time2_start = hdu[1].header['TSTART'] - hdu[1].header['BARYCORR']
                time2_stop = hdu[1].header['TSTOP'] - hdu[1].header['BARYCORR']

            # ... and from the original release:
            with fits.open(fpath, mode='readonly', memmap=True) as hdu:
                time1_start = hdu[1].header['TSTART'] - hdu[1].header['BARYCORR']
                time1_stop = hdu[1].header['TSTOP'] - hdu[1].header['BARYCORR']

                ffis.append(dict(
                    sector=sector,
                    data_rel=hdu[0].header['DATA_REL'],
                    procver=hdu[0].header['PROCVER'],
                    ffiindex=hdu[0].header['FFIINDEX'],
                    camera=hdu[1].header['CAMERA'],
                    ccd=hdu[1].header['CCD'],
                    time_start=time1_start,
                    time_stop=time1_stop,
                    time_start_corrected=time2_start,
                    time_mid_corrected=0.5*(time2_start + time2_stop),
                    time_stop_corrected=time2_stop
                ))

    # Create Astropy Table and save it as ECSV file:
    tab = Table(rows=ffis)
    tab.write('ffis.ecsv', format='ascii.ecsv', delimiter=',', overwrite=True)
    print(tab)
#--------------------------------------------------------------------------------------------------
if __name__ == '__main__':
    # Regenerate all test input data when run as a script.
    s20_lc()
    s21_lc()
    ffis()
| gpl-3.0 |
diagramsoftware/odoo | addons/l10n_ca/__openerp__.py | 260 | 3087 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest for the Canadian localization.
{
    'name': 'Canada - Accounting',
    'version': '1.2',
    'author': 'Savoir-faire Linux',
    'website': 'http://www.savoirfairelinux.com',
    'category': 'Localization/Account Charts',
    'description': """
This is the module to manage the English and French - Canadian accounting chart in OpenERP.
===========================================================================================
Canadian accounting charts and localizations.
Fiscal positions
----------------
When considering taxes to be applied, it is the province where the delivery occurs that matters.
Therefore we decided to implement the most common case in the fiscal positions: delivery is the
responsibility of the supplier and done at the customer location.
Some examples:
1) You have a customer from another province and you deliver to his location.
On the customer, set the fiscal position to his province.
2) You have a customer from another province. However this customer comes to your location
with their truck to pick up products. On the customer, do not set any fiscal position.
3) An international supplier doesn't charge you any tax. Taxes are charged at customs
by the customs broker. On the supplier, set the fiscal position to International.
4) An international supplier charge you your provincial tax. They are registered with your
provincial government and remit taxes themselves. On the supplier, do not set any fiscal
position.
""",
    # Modules that must be installed before this one.
    'depends': [
        'base',
        'account',
        'base_iban',
        'base_vat',
        'account_chart',
        'account_anglo_saxon'
    ],
    # Data files loaded on install/update (English and French charts).
    'data': [
        'account_chart_en.xml',
        'account_tax_code_en.xml',
        'account_chart_template_en.xml',
        'account_tax_en.xml',
        'fiscal_templates_en.xml',
        'account_chart_fr.xml',
        'account_tax_code_fr.xml',
        'account_chart_template_fr.xml',
        'account_tax_fr.xml',
        'fiscal_templates_fr.xml',
        'l10n_ca_wizard.xml'
    ],
    'demo': [],
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
objmagic/heron | third_party/pex/pex/resolver_options.py | 52 | 5775 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import print_function
from pkg_resources import safe_name
from .crawler import Crawler
from .fetcher import Fetcher, PyPIFetcher
from .http import Context
from .installer import EggInstaller, WheelInstaller
from .iterator import Iterator
from .package import EggPackage, SourcePackage, WheelPackage
from .sorter import Sorter
from .translator import ChainedTranslator, EggTranslator, SourceTranslator, WheelTranslator
class ResolverOptionsInterface(object):
  """Abstract interface describing the accessors a resolver-options object
  must provide (context, crawler, sorter, translator and iterator factories)."""

  # BUGFIX: these methods previously did ``raise NotImplemented``.
  # ``NotImplemented`` is a sentinel constant, not an exception, so raising
  # it is a TypeError in Python 3; ``NotImplementedError`` is the correct
  # abstract-method signal.

  def get_context(self):
    raise NotImplementedError

  def get_crawler(self):
    raise NotImplementedError

  def get_sorter(self):
    raise NotImplementedError

  def get_translator(self, interpreter, platform):
    raise NotImplementedError

  def get_iterator(self):
    raise NotImplementedError
class ResolverOptionsBuilder(object):
  """A helper that processes options into a ResolverOptions object.

  Used by command-line and requirements.txt processors to configure a resolver.
  """

  def __init__(self,
               fetchers=None,
               allow_all_external=False,
               allow_external=None,
               allow_unverified=None,
               precedence=None,
               context=None):
    # Default to fetching from PyPI when no fetchers are supplied.
    self._fetchers = fetchers if fetchers is not None else [PyPIFetcher()]
    self._allow_all_external = allow_all_external
    # Sets of project keys (safe_name()-normalized, lowercased) with relaxed
    # external / unverified policies.
    self._allow_external = allow_external if allow_external is not None else set()
    self._allow_unverified = allow_unverified if allow_unverified is not None else set()
    # Package-type preference order (e.g. wheel before egg before source).
    self._precedence = precedence if precedence is not None else Sorter.DEFAULT_PACKAGE_PRECEDENCE
    self._context = context or Context.get()

  def clone(self):
    """Return an independent copy of this builder (containers are copied)."""
    return ResolverOptionsBuilder(
        fetchers=self._fetchers[:],
        allow_all_external=self._allow_all_external,
        allow_external=self._allow_external.copy(),
        allow_unverified=self._allow_unverified.copy(),
        precedence=self._precedence[:],
        context=self._context,
    )

  def add_index(self, index):
    """Append a PyPI-style index URL if not already present.  Returns self."""
    fetcher = PyPIFetcher(index)
    if fetcher not in self._fetchers:
      self._fetchers.append(fetcher)
    return self

  def set_index(self, index):
    """Replace all fetchers with a single PyPI-style index.  Returns self."""
    self._fetchers = [PyPIFetcher(index)]
    return self

  def add_repository(self, repo):
    """Append a plain repository URL if not already present.  Returns self."""
    fetcher = Fetcher([repo])
    if fetcher not in self._fetchers:
      self._fetchers.append(fetcher)
    return self

  def clear_indices(self):
    """Drop all PyPI-style index fetchers, keeping plain repositories."""
    self._fetchers = [fetcher for fetcher in self._fetchers if not isinstance(fetcher, PyPIFetcher)]
    return self

  def allow_all_external(self):
    """Allow externally-hosted distributions for every project."""
    self._allow_all_external = True
    return self

  def allow_external(self, key):
    """Allow externally-hosted distributions for the project named *key*."""
    self._allow_external.add(safe_name(key).lower())
    return self

  def allow_unverified(self, key):
    """Allow unverified distributions for the project named *key*."""
    self._allow_unverified.add(safe_name(key).lower())
    return self

  def use_wheel(self):
    """Put wheels at the front of the package precedence order."""
    if WheelPackage not in self._precedence:
      self._precedence = (WheelPackage,) + self._precedence
    return self

  def no_use_wheel(self):
    """Remove wheels from the package precedence order."""
    self._precedence = tuple(
        [precedent for precedent in self._precedence if precedent is not WheelPackage])
    return self

  def allow_builds(self):
    """Append source packages to the package precedence order."""
    if SourcePackage not in self._precedence:
      self._precedence = self._precedence + (SourcePackage,)
    return self

  def no_allow_builds(self):
    """Remove source packages from the package precedence order."""
    self._precedence = tuple(
        [precedent for precedent in self._precedence if precedent is not SourcePackage])
    return self

  def build(self, key):
    """Freeze the builder into a ResolverOptions for the project named *key*."""
    return ResolverOptions(
        fetchers=self._fetchers,
        allow_external=self._allow_all_external or key in self._allow_external,
        allow_unverified=key in self._allow_unverified,
        precedence=self._precedence,
        context=self._context,
    )
class ResolverOptions(ResolverOptionsInterface):
  """Resolved, per-package view of resolver configuration, built by
  ResolverOptionsBuilder.build()."""

  def __init__(self,
               fetchers=None,
               allow_external=False,
               allow_unverified=False,
               precedence=None,
               context=None):
    # Defaults mirror ResolverOptionsBuilder: PyPI fetcher, default precedence.
    self._fetchers = fetchers if fetchers is not None else [PyPIFetcher()]
    self._allow_external = allow_external
    self._allow_unverified = allow_unverified
    self._precedence = precedence if precedence is not None else Sorter.DEFAULT_PACKAGE_PRECEDENCE
    self._context = context or Context.get()

  # TODO(wickman) Revisit with Github #58
  def get_context(self):
    return self._context

  def get_crawler(self):
    return Crawler(self.get_context())

  # get_sorter and get_translator are arguably options that should be global,
  # except that --no-use-wheel complicates that.  hm.
  def get_sorter(self):
    return Sorter(self._precedence)

  def get_translator(self, interpreter, platform):
    """Build a ChainedTranslator whose member order follows the configured
    package-type precedence."""
    translators = []

    # TODO(wickman) This is not ideal -- consider an explicit link between a Package
    # and its Installer type rather than mapping this here, precluding the ability to
    # easily add new package types (or we just forego that forever.)
    for package in self._precedence:
      if package is WheelPackage:
        translators.append(WheelTranslator(interpreter=interpreter, platform=platform))
      elif package is EggPackage:
        translators.append(EggTranslator(interpreter=interpreter, platform=platform))
      elif package is SourcePackage:
        # Source builds install via wheel only when wheels are allowed at all.
        installer_impl = WheelInstaller if WheelPackage in self._precedence else EggInstaller
        translators.append(SourceTranslator(installer_impl=installer_impl, interpreter=interpreter))

    return ChainedTranslator(*translators)

  def get_iterator(self):
    # follow_links mirrors allow_external: off-index links are only crawled
    # when externals are permitted for this package.
    return Iterator(
        fetchers=self._fetchers,
        crawler=self.get_crawler(),
        follow_links=self._allow_external,
    )
| apache-2.0 |
slohse/ansible | test/units/modules/network/f5/test_bigip_routedomain.py | 27 | 3915 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_routedomain import ApiParameters
from library.modules.bigip_routedomain import ModuleParameters
from library.modules.bigip_routedomain import ModuleManager
from library.modules.bigip_routedomain import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_routedomain import ApiParameters
from ansible.modules.network.f5.bigip_routedomain import ModuleParameters
from ansible.modules.network.f5.bigip_routedomain import ModuleManager
from ansible.modules.network.f5.bigip_routedomain import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
# Directory holding canned REST responses, plus a process-wide memo cache.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Return the contents of fixture *name*, JSON-decoded when possible.

    Results are memoized by absolute path, so each fixture file is read
    from disk at most once per test run.
    """
    full_path = os.path.join(fixture_path, name)
    if full_path in fixture_data:
        return fixture_data[full_path]
    with open(full_path) as handle:
        contents = handle.read()
    try:
        contents = json.loads(contents)
    except Exception:
        # Not JSON -- keep the raw text as-is.
        pass
    fixture_data[full_path] = contents
    return contents
class TestParameters(unittest.TestCase):
    """Checks for the module-side and API-side parameter adapter classes."""

    def test_module_parameters(self):
        # Values as a playbook author would supply them.
        args = dict(
            name='foo',
            id='1234',
            description='my description',
            strict=True,
            parent='parent1',
            vlans=['vlan1', 'vlan2'],
            routing_protocol=['BFD', 'BGP'],
            bwc_policy='bwc1',
            connection_limit=200,
            flow_eviction_policy='evict1',
            service_policy='service1'
        )
        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        # 'id' is supplied as a string but must be exposed as an int.
        assert p.id == 1234
        assert p.description == 'my description'
        assert p.strict is True
        assert p.connection_limit == 200

    def test_api_parameters(self):
        # Device-facing representation, loaded from a canned REST response.
        args = load_fixture('load_net_route_domain_1.json')
        p = ApiParameters(params=args)
        assert len(p.vlans) == 5
        assert p.id == 0
        assert p.strict is True
        assert p.connection_limit == 0
class TestManager(unittest.TestCase):
    """Exercises ModuleManager.exec_module() with the device layer mocked out."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create(self, *args):
        # Minimal module arguments for creating a new route domain.
        set_module_args(dict(
            name='foo',
            id=1234,
            password='password',
            server='localhost',
            user='admin'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)

        # Override methods to force specific logic in the module to happen
        mm.exists = Mock(return_value=False)
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()

        # A non-existing domain must be created and reported as a change.
        assert results['changed'] is True
        assert results['id'] == 1234
| gpl-3.0 |
trhd/meson | mesonbuild/modules/qt.py | 2 | 8978 | # Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from .. import mlog
from .. import build
from ..mesonlib import MesonException, Popen_safe, extract_as_list
from ..dependencies import Qt4Dependency, Qt5Dependency
import xml.etree.ElementTree as ET
from . import ModuleReturnValue, get_include_args
from ..interpreterbase import permittedKwargs
# Map the supported Qt major versions to their Meson dependency classes.
_QT_DEPS_LUT = {
    4: Qt4Dependency,
    5: Qt5Dependency
}
class QtBaseModule:
    """Shared implementation for the qt4/qt5 Meson modules: tool detection
    plus the preprocess() and compile_translations() module methods."""

    # Tool detection only has to run once per module instance.
    tools_detected = False

    def __init__(self, qt_version=5):
        self.qt_version = qt_version

    def _detect_tools(self, env, method):
        """Locate moc/uic/rcc/lrelease for the configured Qt major version
        and verify each found tool actually belongs to that version."""
        if self.tools_detected:
            return
        mlog.log('Detecting Qt{version} tools'.format(version=self.qt_version))
        # FIXME: We currently require QtX to exist while importing the module.
        # We should make it gracefully degrade and not create any targets if
        # the import is marked as 'optional' (not implemented yet)
        kwargs = {'required': 'true', 'modules': 'Core', 'silent': 'true', 'method': method}
        qt = _QT_DEPS_LUT[self.qt_version](env, kwargs)
        # Get all tools and then make sure that they are the right version
        self.moc, self.uic, self.rcc, self.lrelease = qt.compilers_detect()
        # Moc, uic and rcc write their version strings to stderr, and moc and
        # rcc even exit non-zero when doing so, hence the permissive probing.
        for compiler, compiler_name in ((self.moc, "Moc"), (self.uic, "Uic"), (self.rcc, "Rcc"), (self.lrelease, "lrelease")):
            if compiler.found():
                # Workaround since there is no easy way to know which tool/version support which flag
                for flag in ['-v', '-version']:
                    p, stdout, stderr = Popen_safe(compiler.get_command() + [flag])[0:3]
                    if p.returncode == 0:
                        break
                stdout = stdout.strip()
                stderr = stderr.strip()
                if 'Qt {}'.format(self.qt_version) in stderr:
                    compiler_ver = stderr
                elif 'version {}.'.format(self.qt_version) in stderr:
                    compiler_ver = stderr
                elif ' {}.'.format(self.qt_version) in stdout:
                    compiler_ver = stdout
                else:
                    raise MesonException('{name} preprocessor is not for Qt {version}. Output:\n{stdo}\n{stderr}'.format(
                        name=compiler_name, version=self.qt_version, stdo=stdout, stderr=stderr))
                mlog.log(' {}:'.format(compiler_name.lower()), mlog.green('YES'), '({path}, {version})'.format(
                    path=compiler.get_path(), version=compiler_ver.split()[-1]))
            else:
                mlog.log(' {}:'.format(compiler_name.lower()), mlog.red('NO'))
        self.tools_detected = True

    def parse_qrc(self, state, fname):
        """Return the source-relative paths listed in a .qrc resource file,
        or an empty list when the file cannot be parsed."""
        abspath = os.path.join(state.environment.source_dir, state.subdir, fname)
        relative_part = os.path.split(fname)[0]
        try:
            tree = ET.parse(abspath)
            root = tree.getroot()
            result = []
            for child in root[0]:
                if child.tag != 'file':
                    mlog.warning("malformed rcc file: ", os.path.join(state.subdir, fname))
                    break
                else:
                    result.append(os.path.join(state.subdir, relative_part, child.text))
            return result
        except Exception:
            # Malformed or unreadable qrc: fall back to no extra dependencies.
            return []

    @permittedKwargs({'moc_headers', 'moc_sources', 'moc_extra_arguments', 'include_directories', 'ui_files', 'qresources', 'method'})
    def preprocess(self, state, args, kwargs):
        """Create rcc/uic/moc targets and generators for the given sources
        and return them (plus passthrough sources) as a ModuleReturnValue."""
        rcc_files, ui_files, moc_headers, moc_sources, moc_extra_arguments, sources, include_directories \
            = extract_as_list(kwargs, 'qresources', 'ui_files', 'moc_headers', 'moc_sources', 'moc_extra_arguments', 'sources', 'include_directories', pop = True)
        sources += args[1:]
        method = kwargs.get('method', 'auto')
        self._detect_tools(state.environment, method)
        err_msg = "{0} sources specified and couldn't find {1}, " \
                  "please check your qt{2} installation"
        if len(moc_headers) + len(moc_sources) > 0 and not self.moc.found():
            raise MesonException(err_msg.format('MOC', 'moc-qt{}'.format(self.qt_version), self.qt_version))
        if len(rcc_files) > 0:
            if not self.rcc.found():
                raise MesonException(err_msg.format('RCC', 'rcc-qt{}'.format(self.qt_version), self.qt_version))
            # Track the files each qrc references so targets rebuild on change.
            qrc_deps = []
            for i in rcc_files:
                qrc_deps += self.parse_qrc(state, i)
            # custom output name set? -> one output file, multiple otherwise
            if len(args) > 0:
                name = args[0]
                rcc_kwargs = {'input': rcc_files,
                              'output': name + '.cpp',
                              'command': [self.rcc, '-name', name, '-o', '@OUTPUT@', '@INPUT@'],
                              'depend_files': qrc_deps}
                res_target = build.CustomTarget(name, state.subdir, state.subproject, rcc_kwargs)
                sources.append(res_target)
            else:
                for rcc_file in rcc_files:
                    basename = os.path.split(rcc_file)[1]
                    name = 'qt' + str(self.qt_version) + '-' + basename.replace('.', '_')
                    rcc_kwargs = {'input': rcc_file,
                                  'output': name + '.cpp',
                                  'command': [self.rcc, '-name', '@BASENAME@', '-o', '@OUTPUT@', '@INPUT@'],
                                  'depend_files': qrc_deps}
                    res_target = build.CustomTarget(name, state.subdir, state.subproject, rcc_kwargs)
                    sources.append(res_target)
        if len(ui_files) > 0:
            if not self.uic.found():
                # BUG FIX: the original did err_msg.format('UIC', 'uic-qt' + self.qt_version),
                # which concatenates str with the int qt_version (TypeError) and
                # also omits the {2} argument of err_msg (IndexError).  Mirror
                # the MOC/RCC error paths above instead.
                raise MesonException(err_msg.format('UIC', 'uic-qt{}'.format(self.qt_version), self.qt_version))
            ui_kwargs = {'output': 'ui_@BASENAME@.h',
                         'arguments': ['-o', '@OUTPUT@', '@INPUT@']}
            ui_gen = build.Generator([self.uic], ui_kwargs)
            ui_output = ui_gen.process_files('Qt{} ui'.format(self.qt_version), ui_files, state)
            sources.append(ui_output)
        inc = get_include_args(include_dirs=include_directories)
        if len(moc_headers) > 0:
            arguments = moc_extra_arguments + inc + ['@INPUT@', '-o', '@OUTPUT@']
            moc_kwargs = {'output': 'moc_@BASENAME@.cpp',
                          'arguments': arguments}
            moc_gen = build.Generator([self.moc], moc_kwargs)
            moc_output = moc_gen.process_files('Qt{} moc header'.format(self.qt_version), moc_headers, state)
            sources.append(moc_output)
        if len(moc_sources) > 0:
            arguments = moc_extra_arguments + inc + ['@INPUT@', '-o', '@OUTPUT@']
            moc_kwargs = {'output': '@BASENAME@.moc',
                          'arguments': arguments}
            moc_gen = build.Generator([self.moc], moc_kwargs)
            moc_output = moc_gen.process_files('Qt{} moc source'.format(self.qt_version), moc_sources, state)
            sources.append(moc_output)
        return ModuleReturnValue(sources, sources)

    @permittedKwargs({'ts_files', 'install', 'install_dir', 'build_by_default', 'method'})
    def compile_translations(self, state, args, kwargs):
        """Turn .ts translation sources into .qm files via lrelease."""
        ts_files, install_dir = extract_as_list(kwargs, 'ts_files', 'install_dir', pop=True)
        self._detect_tools(state.environment, kwargs.get('method', 'auto'))
        translations = []
        for ts in ts_files:
            cmd = [self.lrelease, '@INPUT@', '-qm', '@OUTPUT@']
            lrelease_kwargs = {'output': '@BASENAME@.qm',
                               'input': ts,
                               'install': kwargs.get('install', False),
                               'build_by_default': kwargs.get('build_by_default', False),
                               'command': cmd}
            if install_dir is not None:
                lrelease_kwargs['install_dir'] = install_dir
            lrelease_target = build.CustomTarget('qt{}-compile-{}'.format(self.qt_version, ts), state.subdir, state.subproject, lrelease_kwargs)
            translations.append(lrelease_target)
        return ModuleReturnValue(translations, translations)
| apache-2.0 |
wfxiang08/ansible | lib/ansible/plugins/inventory/ini.py | 90 | 2220 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from . import InventoryParser
class InventoryIniParser(InventoryParser):
    """Collects INI-style inventory files from a directory, skipping backup,
    hidden and variable-directory entries.

    BUG FIXES vs. the original: the base class is the imported
    InventoryParser (InventoryAggregateParser was never imported), the
    filtered list is appended to and passed on under one name
    (`filtered_names`; the original appended to an undefined `new_names`),
    and both super() calls now name this class instead of the nonexistent
    InventoryDirectoryParser.
    """

    def __init__(self, inven_directory):
        directory = inven_directory
        names = os.listdir(inven_directory)
        filtered_names = []

        # Clean up the list of filenames
        for filename in names:
            # Skip files that end with certain extensions or characters
            if any(filename.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")):
                continue
            # Skip hidden files
            if filename.startswith('.') and not filename.startswith('.{0}'.format(os.path.sep)):
                continue
            # These are things inside of an inventory basedir
            if filename in ("host_vars", "group_vars", "vars_plugins"):
                continue
            fullpath = os.path.join(directory, filename)
            filtered_names.append(fullpath)

        super(InventoryIniParser, self).__init__(filtered_names)

    def parse(self):
        return super(InventoryIniParser, self).parse()

    def _before_comment(self, msg):
        ''' what's the part of a string before a comment? '''
        # Protect escaped '#' so it survives the split, then restore it.
        msg = msg.replace("\\#", "**NOT_A_COMMENT**")
        msg = msg.split("#")[0]
        msg = msg.replace("**NOT_A_COMMENT**", "#")
        return msg
| gpl-3.0 |
mverwe/diall | test/PYTHIA_2760GeV_GEN_SIM_PU_cfg.py | 2 | 14472 | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: Pyquen_ZeemumuJets_pt10_2760GeV_cfi.py --conditions MCHI2_75_V2 -s GEN,SIM --pileup_input das:/Hydjet_Quenched_MinBias_5020GeV/HiFall14-START71_V1-v2/GEN-SIM -n 10 --eventcontent FEVTDEBUG --scenario HeavyIons --pileup HiMixGEN --datatier GEN-SIM --beamspot MatchHI --customise SLHCUpgradeSimulations/Configuration/postLS1Customs.customisePostLS1_HI --magField 38T_PostLS1 --no_exec
import FWCore.ParameterSet.Config as cms
process = cms.Process('SIM')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.EventContent.EventContentHeavyIons_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.Geometry.GeometrySimDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('GeneratorInterface.HiGenCommon.VtxSmearedMatchHI_cff')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.SimIdeal_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# Parse command-line arguments (cmsRun config.py hardProc=... jobSeed=...).
from FWCore.ParameterSet.VarParsing import VarParsing
options = VarParsing('analysis')
options.register ('hardProc',
                  'pythiaTTbar',
                  VarParsing.multiplicity.singleton,
                  VarParsing.varType.string,
                  "Hard process to simulate with PYTHIA6 : pythiaTTbar/pythiaZjets")
options.register ('jobSeed',
                  1,
                  VarParsing.multiplicity.singleton,
                  VarParsing.varType.int,
                  "Random seed to use for the job")
options.parseArguments()
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(options.maxEvents)
)
# Input source
process.source = cms.Source("EmptySource")
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('PYTHIA at sqrt(s) = 2.76TeV'),
name = cms.untracked.string('$Source: /local/projects/CMSSW/rep/CMSSW/Configuration/Generator/python/Pyquen_ZeemumuJets_pt10_2760GeV_cfi.py,v $'),
version = cms.untracked.string('$Revision: 1.3 $')
)
# Output definition
process.FEVTDEBUGoutput = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN-SIM'),
filterName = cms.untracked.string('')
),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
fileName = cms.untracked.string('Events_%d.root' % options.jobSeed),
outputCommands = process.FEVTDEBUGEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
# Other statements
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'MCHI2_75_V2', '')
process.generator = cms.EDFilter("Pythia6GeneratorFilter",
ExternalDecays = cms.PSet(Tauola = cms.untracked.PSet(UseTauolaPolarization = cms.bool(True),
InputCards = cms.PSet(mdtau = cms.int32(0),
pjak2 = cms.int32(0),
pjak1 = cms.int32(0)
)
),
parameterSets = cms.vstring('Tauola')
),
UseExternalGenerators = cms.untracked.bool(True),
pythiaPylistVerbosity = cms.untracked.int32(1),
pythiaHepMCVerbosity = cms.untracked.bool(True),
comEnergy = cms.double(2760.0),
maxEventsToPrint = cms.untracked.int32(0),
crossSection = cms.untracked.double(1),
PythiaParameters = cms.PSet(processParameters = cms.vstring('MSEL=0 ! User defined processes',
'MSUB(81) = 1 ! qqbar to QQbar',
'MSUB(82) = 1 ! gg to QQbar',
'MSTP(7) = 6 ! flavor = top',
'PMAS(5,1)=4.8 ! b quark mass',
'PMAS(6,1)=172.5 ! t quark mass',
'MSTJ(1)=1 ! Fragmentation/hadronization on or off',
'MSTP(61)=1 ! Parton showering on or off'),
kinematics = cms.vstring('CKIN(3)=10',
'CKIN(4)=9999',
'CKIN(7)=-2.',
'CKIN(8)=2.'),
parameterSets = cms.vstring('pythiaUESettings',
'%s' % options.hardProc,
'pythiaWtoLeptons',
'pythiaZtoLeptons',
'kinematics'),
ppDefault = cms.vstring('MSEL=1 ! QCD hight pT processes (only jets)',
'CKIN(3)=6.',
'MSTP(81)=0'),
ppJets = cms.vstring('MSEL=1 ! QCD hight pT processes'),
pythiaBottomoniumNRQCD = cms.vstring('MSUB(461) = 1',
'MSUB(462) = 1',
'MSUB(463) = 1',
'MSUB(464) = 1',
'MSUB(465) = 1',
'MSUB(466) = 1',
'MSUB(467) = 1',
'MSUB(468) = 1',
'MSUB(469) = 1',
'MSUB(470) = 1',
'MSUB(471) = 1',
'MSUB(472) = 1',
'MSUB(473) = 1',
'MSUB(474) = 1',
'MSUB(475) = 1',
'MSUB(476) = 1',
'MSUB(477) = 1',
'MSUB(478) = 1',
'MSUB(479) = 1'),
pythiaCharmoniumNRQCD = cms.vstring('MSUB(421) = 1',
'MSUB(422) = 1',
'MSUB(423) = 1',
'MSUB(424) = 1',
'MSUB(425) = 1',
'MSUB(426) = 1',
'MSUB(427) = 1',
'MSUB(428) = 1',
'MSUB(429) = 1',
'MSUB(430) = 1',
'MSUB(431) = 1',
'MSUB(432) = 1',
'MSUB(433) = 1',
'MSUB(434) = 1',
'MSUB(435) = 1',
'MSUB(436) = 1',
'MSUB(437) = 1',
'MSUB(438) = 1',
'MSUB(439) = 1'),
pythiaHirootDefault = cms.vstring('MSEL=0',
'MSTU(21)=1',
'PARU(14)=1.',
'MSTP(81)=0',
'PMAS(5,1)=4.8',
'PMAS(6,1)=175.0',
'CKIN(3)=7.',
'MSTJ(22)=2',
'PARJ(71)=10.',
'PARP(67)=1.',
'PARP(82)=1.9',
'PARP(85)=0.33',
'PARP(86)=0.66',
'PARP(89)=1000.',
'PARP(91)=1.0',
'MSTJ(11)=3',
'MSTJ(22)=2'),
pythiaJets = cms.vstring('MSUB(11)=1',
'MSUB(12)=1',
'MSUB(13)=1',
'MSUB(28)=1',
'MSUB(53)=1',
'MSUB(68)=1'),
pythiaJpsiToMuons = cms.vstring('BRAT(858) = 0 ',
'BRAT(859) = 1 ',
'BRAT(860) = 0 ',
'MDME(858,1) = 0 ',
'MDME(859,1) = 1 ',
'MDME(860,1) = 0 '),
pythiaMuonCandidates = cms.vstring('CKIN(3)=20',
'MSTJ(22)=2',
'PARJ(71)=40.'),
pythiaPromptPhotons = cms.vstring('MSUB(14)=1',
'MSUB(18)=1',
'MSUB(29)=1',
'MSUB(114)=1',
'MSUB(115)=1'),
pythiaQuarkoniaSettings = cms.vstring('PARP(141)=1.16',
'PARP(142)=0.0119',
'PARP(143)=0.01',
'PARP(144)=0.01',
'PARP(145)=0.05',
'PARP(146)=9.28',
'PARP(147)=0.15',
'PARP(148)=0.02',
'PARP(149)=0.02',
'PARP(150)=0.085',
'PARJ(13)=0.60',
'PARJ(14)=0.162',
'PARJ(15)=0.018',
'PARJ(16)=0.054',
'MSTP(145)=0',
'MSTP(146)=0',
'MSTP(147)=0',
'MSTP(148)=1',
'MSTP(149)=1',
'BRAT(861)=0.202',
'BRAT(862)=0.798',
'BRAT(1501)=0.013',
'BRAT(1502)=0.987',
'BRAT(1555)=0.356',
'BRAT(1556)=0.644'),
pythiaUESettings = cms.vstring('MSTJ(11)=3 ! Choice of the fragmentation function',
'MSTJ(22)=2 ! Decay those unstable particles',
'PARJ(71)=10 . ! for which ctau 10 mm',
'MSTP(2)=1 ! which order running alphaS',
'MSTP(33)=0 ! no K factors in hard cross sections',
'MSTP(51)=10042 ! structure function chosen (external PDF CTEQ6L1)',
'MSTP(52)=2 ! work with LHAPDF',
'MSTP(81)=1 ! multiple parton interactions 1 is Pythia default',
'MSTP(82)=4 ! Defines the multi-parton model',
'MSTU(21)=1 ! Check on possible errors during program execution',
'PARP(82)=1.8387 ! pt cutoff for multiparton interactions',
'PARP(89)=1960. ! sqrts for which PARP82 is set',
'PARP(83)=0.5 ! Multiple interactions: matter distrbn parameter',
'PARP(84)=0.4 ! Multiple interactions: matter distribution parameter',
'PARP(90)=0.16 ! Multiple interactions: rescaling power',
'PARP(67)=2.5 ! amount of initial-state radiation',
'PARP(85)=1.0 ! gluon prod. mechanism in MI',
'PARP(86)=1.0 ! gluon prod. mechanism in MI',
'PARP(62)=1.25 ! ',
'PARP(64)=0.2 ! ',
'MSTP(91)=1 !',
'PARP(91)=2.1 ! kt distribution',
'PARP(93)=15.0 ! '),
pythiaUpsilonToMuons = cms.vstring('BRAT(1034) = 0 ',
'BRAT(1035) = 1 ',
'BRAT(1036) = 0 ',
'BRAT(1037) = 0 ',
'BRAT(1038) = 0 ',
'BRAT(1039) = 0 ',
'BRAT(1040) = 0 ',
'BRAT(1041) = 0 ',
'BRAT(1042) = 0 ',
'MDME(1034,1) = 0 ',
'MDME(1035,1) = 1 ',
'MDME(1036,1) = 0 ',
'MDME(1037,1) = 0 ',
'MDME(1038,1) = 0 ',
'MDME(1039,1) = 0 ',
'MDME(1040,1) = 0 ',
'MDME(1041,1) = 0 ',
'MDME(1042,1) = 0 '),
pythiaWeakBosons = cms.vstring('MSUB(1)=1',
'MSUB(2)=1'),
pythiaZjets = cms.vstring('MSUB(15)=1',
'MSUB(30)=1'),
pythiaTTbar = cms.vstring('MSUB(81) = 1 ! qqbar to QQbar',
'MSUB(82) = 1 ! gg to QQbar',
'MSTP(7) = 6 ! flavor = top',
'PMAS(6,1) = 172.5 ! top quark mass'),
pythiaZtoLeptons = cms.vstring('MDME(174,1)=0',
'MDME(175,1)=0',
'MDME(176,1)=0',
'MDME(177,1)=0',
'MDME(178,1)=0',
'MDME(179,1)=0',
'MDME(182,1)=1',
'MDME(183,1)=0',
'MDME(184,1)=1',
'MDME(185,1)=0',
'MDME(186,1)=1',
'MDME(187,1)=0'),
pythiaWtoLeptons = cms.vstring('MDME(190,1) = 0 !W decay into dbar u',
'MDME(191,1) = 0 !W decay into dbar c',
'MDME(192,1) = 0 !W decay into dbar t',
'MDME(194,1) = 0 !W decay into sbar u',
'MDME(195,1) = 0 !W decay into sbar c',
'MDME(196,1) = 0 !W decay into sbar t',
'MDME(198,1) = 0 !W decay into bbar u',
'MDME(199,1) = 0 !W decay into bbar c',
'MDME(200,1) = 0 !W decay into bbar t',
'MDME(206,1) = 1 !W decay into e+ nu_e',
'MDME(207,1) = 1 !W decay into mu+ nu_mu',
'MDME(208,1) = 0 !W decay into tau+ nu_tau')
)
)
process.ProductionFilterSequence = cms.Sequence(process.generator)
# Path and EndPath definitions
process.generation_step = cms.Path(process.pgen)
process.simulation_step = cms.Path(process.psim)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.FEVTDEBUGoutput_step = cms.EndPath(process.FEVTDEBUGoutput)
# Schedule definition
process.schedule = cms.Schedule(process.generation_step,process.genfiltersummary_step,process.simulation_step,process.endjob_step,process.FEVTDEBUGoutput_step)
# filter all path with the production filter sequence
for path in process.paths:
    getattr(process,path)._seq = process.ProductionFilterSequence * getattr(process,path)._seq
# customisation of the process.
# Automatic addition of the customisation function from SLHCUpgradeSimulations.Configuration.postLS1Customs
from SLHCUpgradeSimulations.Configuration.postLS1Customs import customisePostLS1_HI
#call to customisation function customisePostLS1_HI imported from SLHCUpgradeSimulations.Configuration.postLS1Customs
process = customisePostLS1_HI(process)
# Seed the generator RNG from the per-job command-line option so that
# parallel jobs produce statistically independent events.
process.RandomNumberGeneratorService.generator.initialSeed=cms.untracked.uint32(options.jobSeed)
# Throttle framework progress reports to one line every 5000 events.
process.MessageLogger.cerr.FwkReport.reportEvery = 5000
# End of customisation functions
| cc0-1.0 |
TNosredna/CouchPotatoServer | couchpotato/core/downloaders/pneumatic/main.py | 6 | 1955 | from __future__ import with_statement
from couchpotato.core.downloaders.base import Downloader
from couchpotato.core.logger import CPLog
import os
import traceback
log = CPLog(__name__)
class Pneumatic(Downloader):
    """Downloader that writes the .nzb plus an XBMC Pneumatic .strm stub
    into a watched directory."""

    type = ['nzb']
    strm_syntax = 'plugin://plugin.program.pneumatic/?mode=strm&type=add_file&nzb=%s&nzbname=%s'

    def download(self, data = None, movie = None, filedata = None):
        """Write the nzb and matching .strm file; return True on success.

        `data`/`movie` default to None instead of the original mutable `{}`
        defaults, which were shared between calls.
        """
        if data is None:
            data = {}
        if movie is None:
            movie = {}

        directory = self.conf('directory')
        if not directory or not os.path.isdir(directory):
            log.error('No directory set for .strm downloads.')
        else:
            try:
                if not filedata or len(filedata) < 50:
                    log.error('No nzb available!')
                    return False

                fullPath = os.path.join(directory, self.createFileName(data, filedata, movie))

                try:
                    if not os.path.isfile(fullPath):
                        log.info('Downloading %s to %s.', (data.get('type'), fullPath))
                        with open(fullPath, 'wb') as f:
                            f.write(filedata)

                        nzb_name = self.createNzbName(data, movie)
                        strm_path = os.path.join(directory, nzb_name)
                        strmContent = self.strm_syntax % (fullPath, nzb_name)
                        # Context manager closes the handle even if write fails
                        # (the original leaked it on exception).
                        with open(strm_path + '.strm', 'wb') as strm_file:
                            strm_file.write(strmContent)

                        return True
                    else:
                        log.info('File %s already exists.', fullPath)
                        return True
                except Exception:
                    # Best-effort: log and fall through to the failure return.
                    log.error('Failed to download .strm: %s', traceback.format_exc())
            except Exception:
                log.info('Failed to download file %s: %s', (data.get('name'), traceback.format_exc()))

            return False

        return False
| gpl-3.0 |
noba3/KoTos | addons/plugin.video.youtube/resources/lib/kodion/constants/const_sort_methods.py | 7 | 2196 | __author__ = 'bromix'
# True when running inside Kodi/XBMC, i.e. xbmcplugin is importable.
_xbmc = True
try:
    # Pulls the SORT_METHOD_* constants into this module's globals.
    from xbmcplugin import *
except ImportError:
    # Outside Kodi the constants do not exist; hand out synthetic values
    # from a counter instead.  (Original used a bare `except:`, which would
    # also swallow SystemExit/KeyboardInterrupt.)
    _xbmc = False
    _count = 0


def _const(name):
    """Return the value of the named xbmcplugin constant, or a unique
    synthetic integer when xbmcplugin is unavailable."""
    if _xbmc:
        # `name` refers to a constant star-imported from xbmcplugin above.
        return eval(name)
    else:
        global _count
        _count += 1
        return _count
ALBUM = _const('SORT_METHOD_ALBUM')
ALBUM_IGNORE_THE = _const('SORT_METHOD_ALBUM_IGNORE_THE')
ARTIST = _const('SORT_METHOD_ARTIST')
ARTIST_IGNORE_THE = _const('SORT_METHOD_ARTIST_IGNORE_THE')
BIT_RATE = _const('SORT_METHOD_BITRATE')
# CHANNEL = _const('SORT_METHOD_CHANNEL')
# COUNTRY = _const('SORT_METHOD_COUNTRY')
DATE = _const('SORT_METHOD_DATE')
# DATE_ADDED = _const('SORT_METHOD_DATEADDED')
# DATE_TAKEN = _const('SORT_METHOD_DATE_TAKEN')
DRIVE_TYPE = _const('SORT_METHOD_DRIVE_TYPE')
DURATION = _const('SORT_METHOD_DURATION')
EPISODE = _const('SORT_METHOD_EPISODE')
FILE = _const('SORT_METHOD_FILE')
# FULL_PATH = _const('SORT_METHOD_FULLPATH')
GENRE = _const('SORT_METHOD_GENRE')
LABEL = _const('SORT_METHOD_LABEL')
# LABEL_IGNORE_FOLDERS = _const('SORT_METHOD_LABEL_IGNORE_FOLDERS')
LABEL_IGNORE_THE = _const('SORT_METHOD_LABEL_IGNORE_THE')
# LAST_PLAYED = _const('SORT_METHOD_LASTPLAYED')
LISTENERS = _const('SORT_METHOD_LISTENERS')
MPAA_RATING = _const('SORT_METHOD_MPAA_RATING')
NONE = _const('SORT_METHOD_NONE')
# PLAY_COUNT = _const('SORT_METHOD_PLAYCOUNT')
PLAYLIST_ORDER = _const('SORT_METHOD_PLAYLIST_ORDER')
PRODUCTION_CODE = _const('SORT_METHOD_PRODUCTIONCODE')
PROGRAM_COUNT = _const('SORT_METHOD_PROGRAM_COUNT')
SIZE = _const('SORT_METHOD_SIZE')
SONG_RATING = _const('SORT_METHOD_SONG_RATING')
STUDIO = _const('SORT_METHOD_STUDIO')
STUDIO_IGNORE_THE = _const('SORT_METHOD_STUDIO_IGNORE_THE')
TITLE = _const('SORT_METHOD_TITLE')
TITLE_IGNORE_THE = _const('SORT_METHOD_TITLE_IGNORE_THE')
TRACK_NUMBER = _const('SORT_METHOD_TRACKNUM')
UNSORTED = _const('SORT_METHOD_UNSORTED')
VIDEO_RATING = _const('SORT_METHOD_VIDEO_RATING')
VIDEO_RUNTIME = _const('SORT_METHOD_VIDEO_RUNTIME')
VIDEO_SORT_TITLE = _const('SORT_METHOD_VIDEO_SORT_TITLE')
VIDEO_SORT_TITLE_IGNORE_THE = _const('SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE')
VIDEO_TITLE = _const('SORT_METHOD_VIDEO_TITLE')
VIDEO_YEAR = _const('SORT_METHOD_VIDEO_YEAR')
| gpl-2.0 |
jck/myhdl | myhdl/test/core/test_always_seq.py | 5 | 1187 | import myhdl
from myhdl import *
from myhdl import Signal
from myhdl._always_seq import AlwaysSeqError, _error, always_seq
from helpers import raises_kind
def test_clock():
    """ check the edge parameter """
    clock = Signal(bool(0))
    reset = ResetSignal(0, active=0, isasync=True)

    # A plain Signal (not an edge) must be rejected with an EdgeType error.
    with raises_kind(AlwaysSeqError, _error.EdgeType):
        @always_seq(clock, reset=reset)
        def logic1():
            pass

    # A valid edge specifier must be accepted.  Decorate directly instead of
    # the original `try/except: assert False`, so an unexpected failure
    # surfaces with its real traceback instead of a bare AssertionError.
    clock = Signal(bool(0))

    @always_seq(clock.posedge, reset=reset)
    def logic2():
        pass
def test_reset():
    """ check the reset parameter """
    clock = Signal(bool(0))

    # A plain Signal is not a valid reset; expect a ResetType error.
    reset = Signal(bool(0))
    with raises_kind(AlwaysSeqError, _error.ResetType):
        @always_seq(clock.posedge, reset=reset)
        def logic():
            pass

    # A proper ResetSignal must be accepted.  Decorate directly instead of
    # the original `try/except: assert False`, so an unexpected failure
    # surfaces with its real traceback instead of a bare AssertionError.
    reset = ResetSignal(0, active=0, isasync=True)

    @always_seq(clock.posedge, reset=reset)
    def logic2():
        pass
| lgpl-2.1 |
KaranToor/MA450 | google-cloud-sdk/.install/.backup/platform/gsutil/third_party/boto/tests/unit/ec2/test_instancestatus.py | 114 | 1811 | #!/usr/bin/env python
from tests.compat import mock, unittest
from boto.ec2.connection import EC2Connection
INSTANCE_STATUS_RESPONSE = br"""<?xml version="1.0" encoding="UTF-8"?>
<DescribeInstanceStatusResponse xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
<requestId>3be1508e-c444-4fef-89cc-0b1223c4f02fEXAMPLE</requestId>
<nextToken>page-2</nextToken>
<instanceStatusSet />
</DescribeInstanceStatusResponse>
"""
class TestInstanceStatusResponseParsing(unittest.TestCase):
    """Verifies request parameters and response parsing for
    EC2Connection.get_all_instance_status()."""

    def test_next_token(self):
        ec2 = EC2Connection(aws_access_key_id='aws_access_key_id',
                            aws_secret_access_key='aws_secret_access_key')
        # Stub out the HTTP layer with the canned XML response above.
        mock_response = mock.Mock()
        mock_response.read.return_value = INSTANCE_STATUS_RESPONSE
        mock_response.status = 200
        ec2.make_request = mock.Mock(return_value=mock_response)
        all_statuses = ec2.get_all_instance_status()
        # Without include_all_instances the flag must not be sent...
        self.assertNotIn('IncludeAllInstances', ec2.make_request.call_args[0][1])
        # ...and the paging token from <nextToken> must be surfaced.
        self.assertEqual(all_statuses.next_token, 'page-2')

    def test_include_all_instances(self):
        ec2 = EC2Connection(aws_access_key_id='aws_access_key_id',
                            aws_secret_access_key='aws_secret_access_key')
        mock_response = mock.Mock()
        mock_response.read.return_value = INSTANCE_STATUS_RESPONSE
        mock_response.status = 200
        ec2.make_request = mock.Mock(return_value=mock_response)
        all_statuses = ec2.get_all_instance_status(include_all_instances=True)
        # The flag must be serialized as the string 'true' in request params.
        self.assertIn('IncludeAllInstances', ec2.make_request.call_args[0][1])
        self.assertEqual('true', ec2.make_request.call_args[0][1]['IncludeAllInstances'])
        self.assertEqual(all_statuses.next_token, 'page-2')
| apache-2.0 |
mateon1/servo | etc/ci/performance/submit_to_perfherder.py | 55 | 11981 | #!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
from functools import partial, reduce
import json
import operator
import os
import random
import string
from thclient import (TreeherderClient, TreeherderResultSetCollection,
TreeherderJobCollection)
import time
from runner import format_result_summary
def geometric_mean(iterable):
    """Return the geometric mean of the positive values in *iterable*.

    Non-positive values are dropped before computing the mean, so a single
    failed measurement (recorded as a negative delta) cannot zero out or
    invert the product.

    Raises:
        ValueError: if *iterable* contains no positive values. (The original
            code surfaced this case as an opaque ZeroDivisionError from the
            ``1.0 / len(...)`` term, or a TypeError from ``reduce`` on an
            empty sequence.)
    """
    positives = [x for x in iterable if x > 0]
    if not positives:
        raise ValueError("geometric_mean() requires at least one positive value")
    return reduce(operator.mul, positives) ** (1.0 / len(positives))
def format_testcase_name(name):
    """Reduce a test-page URL to a short display name.

    Strips the known local-server URL prefixes, keeps only the first path
    component, and truncates to 80 characters (Perfherder's display limit).
    """
    stripped = name
    for prefix in ('http://localhost:8000/page_load_test/',
                   'http://localhost:8000/tp6/'):
        stripped = stripped.replace(prefix, '')
    return stripped.split('/')[0][:80]
def format_perf_data(perf_json, engine='servo'):
    """Convert raw runner timing dicts into a Perfherder performance blob.

    Each entry of *perf_json* is a dict of navigation-timing marks; the
    reported metric is the delta from navigationStart to domComplete.
    """
    measurement = "domComplete"  # Change this to an array when we have more

    def elapsed_since_nav_start(timings):
        """Delta from navigationStart to the tracked measurement."""
        return timings[measurement] - timings['navigationStart']

    # Gecko runs get a distinct suite name so the two engines chart separately.
    suite_name = 'gecko.{}'.format(measurement) if engine == 'gecko' else measurement

    subtests = []
    for testcase in perf_json:
        elapsed = elapsed_since_nav_start(testcase)
        # A negative delta means the event never fired (likely a timeout);
        # flag it with -1 rather than reporting a bogus duration.
        subtests.append({
            "name": format_testcase_name(testcase["testcase"]),
            "value": -1 if elapsed < 0 else elapsed
        })

    suite = {
        "name": suite_name,
        # geometric_mean ignores the negative (timed-out) values.
        "value": geometric_mean(elapsed_since_nav_start(t) for t in perf_json),
        "subtests": subtests
    }

    return {
        "performance_data": {
            # https://bugzilla.mozilla.org/show_bug.cgi?id=1271472
            "framework": {"name": "servo-perf"},
            "suites": [suite]
        }
    }
def create_resultset_collection(dataset):
    """Build a TreeherderResultSetCollection from plain push dicts."""
    print("[DEBUG] ResultSet Collection:")
    print(dataset)
    collection = TreeherderResultSetCollection()
    for push in dataset:
        resultset = collection.get_resultset()
        resultset.add_push_timestamp(push['push_timestamp'])
        resultset.add_revision(push['revision'])
        resultset.add_author(push['author'])
        # TODO: figure out where type is used
        # resultset.add_type(push['type'])

        revisions = []
        for rev in push['revisions']:
            revision = resultset.get_revision()
            revision.add_revision(rev['revision'])
            revision.add_author(rev['author'])
            revision.add_comment(rev['comment'])
            revision.add_repository(rev['repository'])
            revisions.append(revision)
        resultset.add_revisions(revisions)

        collection.add(resultset)
    return collection
def create_job_collection(dataset):
    """Build a TreeherderJobCollection from plain job dicts.

    Each entry of *dataset* must match the shape produced by submit() below:
    a top-level 'revision'/'project' pair plus a nested 'job' dict whose keys
    map one-to-one onto the thclient setter calls made here.
    """
    print("[DEBUG] Job Collection:")
    print(dataset)

    tjc = TreeherderJobCollection()
    for data in dataset:
        tj = tjc.get_job()
        # Identity / grouping fields shown in the Treeherder UI.
        tj.add_revision(data['revision'])
        tj.add_project(data['project'])
        tj.add_coalesced_guid(data['job']['coalesced'])
        tj.add_job_guid(data['job']['job_guid'])
        tj.add_job_name(data['job']['name'])
        tj.add_job_symbol(data['job']['job_symbol'])
        tj.add_group_name(data['job']['group_name'])
        tj.add_group_symbol(data['job']['group_symbol'])
        tj.add_description(data['job']['desc'])
        tj.add_product_name(data['job']['product_name'])
        # Lifecycle / outcome fields.
        tj.add_state(data['job']['state'])
        tj.add_result(data['job']['result'])
        tj.add_reason(data['job']['reason'])
        tj.add_who(data['job']['who'])
        tj.add_tier(data['job']['tier'])
        tj.add_submit_timestamp(data['job']['submit_timestamp'])
        tj.add_start_timestamp(data['job']['start_timestamp'])
        tj.add_end_timestamp(data['job']['end_timestamp'])
        tj.add_machine(data['job']['machine'])
        # Build vs. machine platform are reported separately by Treeherder.
        tj.add_build_info(
            data['job']['build_platform']['os_name'],
            data['job']['build_platform']['platform'],
            data['job']['build_platform']['architecture']
        )
        tj.add_machine_info(
            data['job']['machine_platform']['os_name'],
            data['job']['machine_platform']['platform'],
            data['job']['machine_platform']['architecture']
        )
        tj.add_option_collection(data['job']['option_collection'])
        # Attach the perf blob and any other structured artifacts.
        for artifact_data in data['job']['artifacts']:
            tj.add_artifact(
                artifact_data['name'],
                artifact_data['type'],
                artifact_data['blob']
            )
        tjc.add(tj)
    return tjc
# TODO: refactor this big function to smaller chunks
def submit(perf_data, failures, revision, summary, engine):
    """Build Treeherder resultset + job collections and POST them.

    Args:
        perf_data: the performance_data blob from format_perf_data().
        failures: list of testcase dicts whose domComplete never fired.
        revision: parsed servo revision metadata (commit, author, subject).
        summary: HTML result summary attached as a "Job Info" artifact.
        engine: 'servo' or 'gecko'; selects the job/group symbols.

    Side effects: reads TREEHERDER_CLIENT_ID / TREEHERDER_CLIENT_SECRET from
    the environment and performs network calls to treeherder.mozilla.org.
    """
    print("[DEBUG] failures:")
    print(list(map(lambda x: x['testcase'], failures)))

    author = "{} <{}>".format(revision['author']['name'],
                              revision['author']['email'])

    dataset = [
        {
            # The top-most revision in the list of commits for a push.
            'revision': revision['commit'],
            'author': author,
            'push_timestamp': int(revision['author']['timestamp']),
            'type': 'push',
            # a list of revisions associated with the resultset. There should
            # be at least one.
            'revisions': [
                {
                    'comment': revision['subject'],
                    'revision': revision['commit'],
                    'repository': 'servo',
                    'author': author
                }
            ]
        }
    ]

    trsc = create_resultset_collection(dataset)

    result = "success"
    # TODO: verify a failed test won't affect Perfherder visualization
    # if len(failures) > 0:
    #     result = "testfailed"

    # Treeherder requires a unique GUID per job; a random string the same
    # length as the commit hash is sufficient.
    hashlen = len(revision['commit'])
    job_guid = ''.join(
        random.choice(string.ascii_letters + string.digits) for i in range(hashlen)
    )

    if (engine == "gecko"):
        project = "servo"
        job_symbol = 'PLG'
        group_symbol = 'SPG'
        group_name = 'Servo Perf on Gecko'
    else:
        project = "servo"
        job_symbol = 'PL'
        group_symbol = 'SP'
        group_name = 'Servo Perf'

    dataset = [
        {
            'project': project,
            'revision': revision['commit'],
            'job': {
                'job_guid': job_guid,
                'product_name': project,
                'reason': 'scheduler',
                # TODO: What is `who` for?
                'who': 'Servo',
                'desc': 'Servo Page Load Time Tests',
                'name': 'Servo Page Load Time',
                # The symbol representing the job displayed in
                # treeherder.allizom.org
                'job_symbol': job_symbol,
                # The symbol representing the job group in
                # treeherder.allizom.org
                'group_symbol': group_symbol,
                'group_name': group_name,
                # TODO: get the real timing from the test runner
                'submit_timestamp': str(int(time.time())),
                'start_timestamp': str(int(time.time())),
                'end_timestamp': str(int(time.time())),
                'state': 'completed',
                'result': result,  # "success" or "testfailed"
                'machine': 'local-machine',
                # TODO: read platform from test result
                'build_platform': {
                    'platform': 'linux64',
                    'os_name': 'linux',
                    'architecture': 'x86_64'
                },
                'machine_platform': {
                    'platform': 'linux64',
                    'os_name': 'linux',
                    'architecture': 'x86_64'
                },
                'option_collection': {'opt': True},
                # jobs can belong to different tiers
                # setting the tier here will determine which tier the job
                # belongs to. However, if a job is set as Tier of 1, but
                # belongs to the Tier 2 profile on the server, it will still
                # be saved as Tier 2.
                'tier': 1,
                # the ``name`` of the log can be the default of "buildbot_text"
                # however, you can use a custom name. See below.
                # TODO: point this to the log when we have them uploaded to S3
                'log_references': [
                    {
                        'url': 'TBD',
                        'name': 'test log'
                    }
                ],
                # The artifact can contain any kind of structured data
                # associated with a test.
                'artifacts': [
                    {
                        'type': 'json',
                        'name': 'performance_data',
                        # TODO: include the job_guid when the runner actually
                        # generates one
                        # 'job_guid': job_guid,
                        'blob': perf_data
                    },
                    {
                        'type': 'json',
                        'name': 'Job Info',
                        # 'job_guid': job_guid,
                        "blob": {
                            "job_details": [
                                {
                                    "content_type": "raw_html",
                                    "title": "Result Summary",
                                    "value": summary
                                }
                            ]
                        }
                    }
                ],
                # List of job guids that were coalesced to this job
                'coalesced': []
            }
        }
    ]

    tjc = create_job_collection(dataset)

    # TODO: extract this read credential code out of this function.
    # NOTE(review): raises KeyError if the env vars are unset — presumably
    # intentional fail-fast behavior; confirm before hardening.
    cred = {
        'client_id': os.environ['TREEHERDER_CLIENT_ID'],
        'secret': os.environ['TREEHERDER_CLIENT_SECRET']
    }

    client = TreeherderClient(server_url='https://treeherder.mozilla.org',
                              client_id=cred['client_id'],
                              secret=cred['secret'])

    # data structure validation is automatically performed here, if validation
    # fails a TreeherderClientError is raised
    client.post_collection('servo', trsc)
    client.post_collection('servo', tjc)
def main():
    """Parse CLI arguments, load the runner output, and push it to Perfherder."""
    parser = argparse.ArgumentParser(
        description=("Submit Servo performance data to Perfherder. "
                     "Remember to set your Treeherder credential as environment"
                     " variable \'TREEHERDER_CLIENT_ID\' and "
                     "\'TREEHERDER_CLIENT_SECRET\'"))
    parser.add_argument("perf_json",
                        help="the output json from runner")
    parser.add_argument("revision_json",
                        help="the json containing the servo revision data")
    parser.add_argument("--engine",
                        type=str,
                        default='servo',
                        help=("The engine to run the tests on. Currently only"
                              " servo and gecko are supported."))

    def load_json(path):
        """Read and decode one JSON input file."""
        with open(path, 'r') as handle:
            return json.load(handle)

    args = parser.parse_args()
    result_json = load_json(args.perf_json)
    revision = load_json(args.revision_json)

    perf_data = format_perf_data(result_json, args.engine)
    # domComplete == -1 marks testcases that never finished loading.
    failures = [case for case in result_json if case['domComplete'] == -1]
    summary = format_result_summary(result_json).replace('\n', '<br/>')

    submit(perf_data, failures, revision, summary, args.engine)
    print("Done!")
| mpl-2.0 |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/pywebsocket/src/mod_pywebsocket/__init__.py | 552 | 8263 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket extension for Apache HTTP Server.
mod_pywebsocket is a WebSocket extension for Apache HTTP Server
intended for testing or experimental purposes. mod_python is required.
Installation
============
0. Prepare an Apache HTTP Server for which mod_python is enabled.
1. Specify the following Apache HTTP Server directives to suit your
configuration.
If mod_pywebsocket is not in the Python path, specify the following.
<websock_lib> is the directory where mod_pywebsocket is installed.
PythonPath "sys.path+['<websock_lib>']"
Always specify the following. <websock_handlers> is the directory where
user-written WebSocket handlers are placed.
PythonOption mod_pywebsocket.handler_root <websock_handlers>
PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
To limit the search for WebSocket handlers to a directory <scan_dir>
under <websock_handlers>, configure as follows:
PythonOption mod_pywebsocket.handler_scan <scan_dir>
<scan_dir> is useful in saving scan time when <websock_handlers>
contains many non-WebSocket handler files.
If you want to allow handlers whose canonical path is not under the root
directory (i.e. symbolic link is in root directory but its target is not),
configure as follows:
PythonOption mod_pywebsocket.allow_handlers_outside_root_dir On
Example snippet of httpd.conf:
(mod_pywebsocket is in /websock_lib, WebSocket handlers are in
/websock_handlers, port is 80 for ws, 443 for wss.)
<IfModule python_module>
PythonPath "sys.path+['/websock_lib']"
PythonOption mod_pywebsocket.handler_root /websock_handlers
PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
</IfModule>
2. Tune Apache parameters for serving WebSocket. We'd like to note that at
least TimeOut directive from core features and RequestReadTimeout
directive from mod_reqtimeout should be modified not to kill connections
in only a few seconds of idle time.
3. Verify installation. You can use example/console.html to poke the server.
Writing WebSocket handlers
==========================
When a WebSocket request comes in, the resource name
specified in the handshake is considered as if it is a file path under
<websock_handlers> and the handler defined in
<websock_handlers>/<resource_name>_wsh.py is invoked.
For example, if the resource name is /example/chat, the handler defined in
<websock_handlers>/example/chat_wsh.py is invoked.
A WebSocket handler is composed of the following three functions:
web_socket_do_extra_handshake(request)
web_socket_transfer_data(request)
web_socket_passive_closing_handshake(request)
where:
request: mod_python request.
web_socket_do_extra_handshake is called during the handshake after the
headers are successfully parsed and WebSocket properties (ws_location,
ws_origin, and ws_resource) are added to request. A handler
can reject the request by raising an exception.
A request object has the following properties that you can use during the
extra handshake (web_socket_do_extra_handshake):
- ws_resource
- ws_origin
- ws_version
- ws_location (HyBi 00 only)
- ws_extensions (HyBi 06 and later)
- ws_deflate (HyBi 06 and later)
- ws_protocol
- ws_requested_protocols (HyBi 06 and later)
The last two are a bit tricky. See the next subsection.
Subprotocol Negotiation
-----------------------
For HyBi 06 and later, ws_protocol is always set to None when
web_socket_do_extra_handshake is called. If ws_requested_protocols is not
None, you must choose one subprotocol from this list and set it to
ws_protocol.
For HyBi 00, when web_socket_do_extra_handshake is called,
ws_protocol is set to the value given by the client in
Sec-WebSocket-Protocol header or None if
such header was not found in the opening handshake request. Finish extra
handshake with ws_protocol untouched to accept the request subprotocol.
Then, Sec-WebSocket-Protocol header will be sent to
the client in response with the same value as requested. Raise an exception
in web_socket_do_extra_handshake to reject the requested subprotocol.
Data Transfer
-------------
web_socket_transfer_data is called after the handshake completed
successfully. A handler can receive/send messages from/to the client
using request. mod_pywebsocket.msgutil module provides utilities
for data transfer.
You can receive a message by the following statement.
message = request.ws_stream.receive_message()
This call blocks until any complete text frame arrives, and the payload data
of the incoming frame will be stored into message. When you're using IETF
HyBi 00 or later protocol, receive_message() will return None on receiving
client-initiated closing handshake. When any error occurs, receive_message()
will raise some exception.
You can send a message by the following statement.
request.ws_stream.send_message(message)
Closing Connection
------------------
Executing the following statement or just return-ing from
web_socket_transfer_data cause connection close.
request.ws_stream.close_connection()
close_connection will wait
for closing handshake acknowledgement coming from the client. When it
couldn't receive a valid acknowledgement, raises an exception.
web_socket_passive_closing_handshake is called after the server receives
incoming closing frame from the client peer immediately. You can specify
code and reason by return values. They are sent as a outgoing closing frame
from the server. A request object has the following properties that you can
use in web_socket_passive_closing_handshake.
- ws_close_code
- ws_close_reason
Threading
---------
A WebSocket handler must be thread-safe if the server (Apache or
standalone.py) is configured to use threads.
Configuring WebSocket Extension Processors
------------------------------------------
See extensions.py for supported WebSocket extensions. Note that they are
unstable and their APIs are subject to change substantially.
A request object has these extension processing related attributes.
- ws_requested_extensions:
A list of common.ExtensionParameter instances representing extension
parameters received from the client in the client's opening handshake.
You shouldn't modify it manually.
- ws_extensions:
A list of common.ExtensionParameter instances representing extension
parameters to send back to the client in the server's opening handshake.
You shouldn't touch it directly. Instead, call methods on extension
processors.
- ws_extension_processors:
A list of loaded extension processors. Find the processor for the
extension you want to configure from it, and call its methods.
"""
# vi:sts=4 sw=4 et tw=72
| mit |
chdecultot/erpnext | erpnext/accounts/doctype/c_form/c_form.py | 49 | 2722 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
from frappe import _
from frappe.model.document import Document
class CForm(Document):
	# A C-Form groups submitted sales invoices; each invoice may be tagged by
	# at most one C-Form (enforced in validate / set_cform_in_sales_invoices).
	def validate(self):
		"""Validate invoice that c-form is applicable
		and no other c-form is received for that"""

		for d in self.get('invoices'):
			if d.invoice_no:
				# Only submitted invoices (docstatus = 1) are eligible.
				inv = frappe.db.sql("""select c_form_applicable, c_form_no from
					`tabSales Invoice` where name = %s and docstatus = 1""", d.invoice_no)

				if inv and inv[0][0] != 'Yes':
					frappe.throw(_("C-form is not applicable for Invoice: {0}".format(d.invoice_no)))

				elif inv and inv[0][1] and inv[0][1] != self.name:
					# Invoice is already claimed by a different C-Form.
					frappe.throw(_("""Invoice {0} is tagged in another C-form: {1}.
						If you want to change C-form no for this invoice,
						please remove invoice no from the previous c-form and then try again"""\
						.format(d.invoice_no, inv[0][1])))

				elif not inv:
					frappe.throw(_("Row {0}: Invoice {1} is invalid, it might be cancelled / does not exist. \
						Please enter a valid Invoice".format(d.idx, d.invoice_no)))

	def on_update(self):
		""" Update C-Form No on invoices"""
		self.set_total_invoiced_amount()

	def on_submit(self):
		# On submit, stamp every listed invoice with this C-Form's name.
		self.set_cform_in_sales_invoices()

	def before_cancel(self):
		# remove cform reference
		frappe.db.sql("""update `tabSales Invoice` set c_form_no=null where c_form_no=%s""", self.name)

	def set_cform_in_sales_invoices(self):
		"""Tag the listed invoices with this C-Form and untag removed ones."""
		inv = [d.invoice_no for d in self.get('invoices')]
		if inv:
			# Tag every invoice currently listed in the child table.
			frappe.db.sql("""update `tabSales Invoice` set c_form_no=%s, modified=%s where name in (%s)""" %
				('%s', '%s', ', '.join(['%s'] * len(inv))), tuple([self.name, self.modified] + inv))

			# Clear the reference from invoices that were removed from the table.
			frappe.db.sql("""update `tabSales Invoice` set c_form_no = null, modified = %s
				where name not in (%s) and ifnull(c_form_no, '') = %s""" %
				('%s', ', '.join(['%s']*len(inv)), '%s'), tuple([self.modified] + inv + [self.name]))
		else:
			frappe.throw(_("Please enter atleast 1 invoice in the table"))

	def set_total_invoiced_amount(self):
		# Sum of grand totals across all child invoice rows.
		total = sum([flt(d.grand_total) for d in self.get('invoices')])
		frappe.db.set(self, 'total_invoiced_amount', total)

	def get_invoice_details(self, invoice_no):
		""" Pull details from invoices for reference """
		if invoice_no:
			# NOTE: implicitly returns None when invoice_no is falsy.
			inv = frappe.db.get_value("Sales Invoice", invoice_no,
				["posting_date", "territory", "base_net_total", "base_grand_total"], as_dict=True)
			return {
				'invoice_date' : inv.posting_date,
				'territory' : inv.territory,
				'net_total' : inv.base_net_total,
				'grand_total' : inv.base_grand_total
			}
| gpl-3.0 |
ProjectSWGCore/NGECore2 | scripts/mobiles/generic/faction/imperial/imp_invasion_at_at.py | 2 | 1389 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from resources.datatables import FactionStatus
from java.util import Vector
def addTemplate(core):
	"""Register the Imperial-invasion AT-AT boss mobile template with the spawn service."""
	mobileTemplate = MobileTemplate()

	mobileTemplate.setCreatureName('at_at')
	mobileTemplate.setLevel(90)
	mobileTemplate.setDifficulty(Difficulty.BOSS)
	# Spawn 4-8 m from the chosen spawn point.
	mobileTemplate.setMinSpawnDistance(4)
	mobileTemplate.setMaxSpawnDistance(8)
	mobileTemplate.setDeathblow(True)
	mobileTemplate.setScale(1)
	mobileTemplate.setSocialGroup("imperial")
	# Does not call for help and does not stalk players.
	mobileTemplate.setAssistRange(0)
	mobileTemplate.setStalker(False)
	mobileTemplate.setFaction("imperial")
	mobileTemplate.setFactionStatus(FactionStatus.Combatant)

	# Visual appearance template(s) for the spawned object.
	templates = Vector()
	templates.add('object/mobile/shared_atat.iff')
	mobileTemplate.setTemplates(templates)

	# Ranged energy weapon (reuses the AT-ST vehicle weapon asset).
	weaponTemplates = Vector()
	weapontemplate = WeaponTemplate('object/weapon/ranged/vehicle/shared_vehicle_atst_ranged.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
	weaponTemplates.add(weapontemplate)
	mobileTemplate.setWeaponTemplateVector(weaponTemplates)

	# No special attacks; falls back to the default ranged shot.
	attacks = Vector()
	mobileTemplate.setDefaultAttack('rangedShot')
	mobileTemplate.setAttacks(attacks)

	core.spawnService.addMobileTemplate('imp_invasion_at_at', mobileTemplate)
return | lgpl-3.0 |
unicri/edx-platform | common/djangoapps/third_party_auth/tests/test_pipeline_integration.py | 78 | 8194 | """Integration tests for pipeline.py."""
import unittest
from django.conf import settings
from django import test
from django.contrib.auth import models
from third_party_auth import pipeline, provider
from third_party_auth.tests import testutil
from social.apps.django_app.default import models as social_models
# Resolve the Django User model through python-social-auth's storage layer so
# these tests use the same user model the auth pipeline reads and writes.
# Evaluated once at import time; not a type constant, hence the pylint override.
User = social_models.DjangoStorage.user.user_model()  # pylint: disable-msg=invalid-name
class TestCase(testutil.TestCase, test.TestCase):
    """Base test case."""

    def setUp(self):
        super(TestCase, self).setUp()
        # Enable exactly one provider (Google OAuth2) so subclasses have a
        # known-good enabled provider to exercise against.
        self.enabled_provider_name = provider.GoogleOauth2.NAME
        provider.Registry.configure_once([self.enabled_provider_name])
        self.enabled_provider = provider.Registry.get(self.enabled_provider_name)
@unittest.skipUnless(
    testutil.AUTH_FEATURES_KEY in settings.FEATURES, testutil.AUTH_FEATURES_KEY + ' not in settings.FEATURES')
class GetAuthenticatedUserTestCase(TestCase):
    """Tests for get_authenticated_user."""

    def setUp(self):
        super(GetAuthenticatedUserTestCase, self).setUp()
        self.user = social_models.DjangoStorage.user.create_user(username='username', password='password')

    def get_by_username(self, username):
        """Gets a User by username."""
        user_model = social_models.DjangoStorage.user.user_model()
        return user_model.objects.get(username=username)

    def test_raises_does_not_exist_if_user_missing(self):
        unknown_username = 'new_' + self.user.username
        with self.assertRaises(models.User.DoesNotExist):
            pipeline.get_authenticated_user(unknown_username, 'backend')

    def test_raises_does_not_exist_if_user_found_but_no_association(self):
        # The user exists and no provider is registered for this backend name,
        # so the lookup must still fail.
        backend_name = 'backend'
        self.assertIsNotNone(self.get_by_username(self.user.username))
        self.assertIsNone(provider.Registry.get_by_backend_name(backend_name))
        with self.assertRaises(models.User.DoesNotExist):
            pipeline.get_authenticated_user(self.user.username, backend_name)

    def test_raises_does_not_exist_if_user_and_association_found_but_no_match(self):
        # An association for a *different* backend must not satisfy the lookup.
        self.assertIsNotNone(self.get_by_username(self.user.username))
        mismatched_backend = 'other_' + self.enabled_provider.BACKEND_CLASS.name
        social_models.DjangoStorage.user.create_social_auth(self.user, 'uid', mismatched_backend)
        with self.assertRaises(models.User.DoesNotExist):
            pipeline.get_authenticated_user(self.user.username, self.enabled_provider.BACKEND_CLASS.name)

    def test_returns_user_with_is_authenticated_and_backend_set_if_match(self):
        backend_name = self.enabled_provider.BACKEND_CLASS.name
        social_models.DjangoStorage.user.create_social_auth(self.user, 'uid', backend_name)
        authenticated = pipeline.get_authenticated_user(self.user.username, backend_name)
        self.assertEqual(self.user, authenticated)
        self.assertEqual(self.enabled_provider.get_authentication_backend(), authenticated.backend)
@unittest.skipUnless(
    testutil.AUTH_FEATURES_KEY in settings.FEATURES, testutil.AUTH_FEATURES_KEY + ' not in settings.FEATURES')
class GetProviderUserStatesTestCase(testutil.TestCase, test.TestCase):
    """Tests generation of ProviderUserStates."""

    def setUp(self):
        super(GetProviderUserStatesTestCase, self).setUp()
        self.user = social_models.DjangoStorage.user.create_user(username='username', password='password')

    def test_returns_empty_list_if_no_enabled_providers(self):
        provider.Registry.configure_once([])
        self.assertEquals([], pipeline.get_provider_user_states(self.user))

    def test_state_not_returned_for_disabled_provider(self):
        # An association with a disabled provider must be invisible in the
        # returned states; only the enabled provider's state appears.
        disabled_provider = provider.GoogleOauth2
        enabled_provider = provider.LinkedInOauth2
        provider.Registry.configure_once([enabled_provider.NAME])
        social_models.DjangoStorage.user.create_social_auth(self.user, 'uid', disabled_provider.BACKEND_CLASS.name)
        states = pipeline.get_provider_user_states(self.user)
        self.assertEqual(1, len(states))
        self.assertNotIn(disabled_provider, (state.provider for state in states))

    def test_states_for_enabled_providers_user_has_accounts_associated_with(self):
        # Both providers enabled and both associated: each state reports
        # has_account == True for this user.
        provider.Registry.configure_once([provider.GoogleOauth2.NAME, provider.LinkedInOauth2.NAME])
        social_models.DjangoStorage.user.create_social_auth(self.user, 'uid', provider.GoogleOauth2.BACKEND_CLASS.name)
        social_models.DjangoStorage.user.create_social_auth(
            self.user, 'uid', provider.LinkedInOauth2.BACKEND_CLASS.name)
        states = pipeline.get_provider_user_states(self.user)
        self.assertEqual(2, len(states))
        google_state = [state for state in states if state.provider == provider.GoogleOauth2][0]
        linkedin_state = [state for state in states if state.provider == provider.LinkedInOauth2][0]
        self.assertTrue(google_state.has_account)
        self.assertEqual(provider.GoogleOauth2, google_state.provider)
        self.assertEqual(self.user, google_state.user)
        self.assertTrue(linkedin_state.has_account)
        self.assertEqual(provider.LinkedInOauth2, linkedin_state.provider)
        self.assertEqual(self.user, linkedin_state.user)

    def test_states_for_enabled_providers_user_has_no_account_associated_with(self):
        # Enabled providers with no associations still yield states, but with
        # has_account == False.
        provider.Registry.configure_once([provider.GoogleOauth2.NAME, provider.LinkedInOauth2.NAME])
        states = pipeline.get_provider_user_states(self.user)
        self.assertEqual([], [x for x in social_models.DjangoStorage.user.objects.all()])
        self.assertEqual(2, len(states))
        google_state = [state for state in states if state.provider == provider.GoogleOauth2][0]
        linkedin_state = [state for state in states if state.provider == provider.LinkedInOauth2][0]
        self.assertFalse(google_state.has_account)
        self.assertEqual(provider.GoogleOauth2, google_state.provider)
        self.assertEqual(self.user, google_state.user)
        self.assertFalse(linkedin_state.has_account)
        self.assertEqual(provider.LinkedInOauth2, linkedin_state.provider)
        self.assertEqual(self.user, linkedin_state.user)
@unittest.skipUnless(
    testutil.AUTH_FEATURES_KEY in settings.FEATURES, testutil.AUTH_FEATURES_KEY + ' not in settings.FEATURES')
class UrlFormationTestCase(TestCase):
    """Tests formation of URLs for pipeline hook points."""

    def _assert_rejects_disabled_provider(self, make_url):
        """Shared check: every URL helper raises ValueError for an unregistered provider."""
        provider_name = 'not_enabled'
        self.assertIsNone(provider.Registry.get(provider_name))
        with self.assertRaises(ValueError):
            make_url(provider_name)

    def test_complete_url_raises_value_error_if_provider_not_enabled(self):
        self._assert_rejects_disabled_provider(pipeline.get_complete_url)

    def test_complete_url_returns_expected_format(self):
        complete_url = pipeline.get_complete_url(self.enabled_provider.BACKEND_CLASS.name)
        self.assertTrue(complete_url.startswith('/auth/complete'))
        self.assertIn(self.enabled_provider.BACKEND_CLASS.name, complete_url)

    def test_disconnect_url_raises_value_error_if_provider_not_enabled(self):
        self._assert_rejects_disabled_provider(pipeline.get_disconnect_url)

    def test_disconnect_url_returns_expected_format(self):
        disconnect_url = pipeline.get_disconnect_url(self.enabled_provider.NAME)
        self.assertTrue(disconnect_url.startswith('/auth/disconnect'))
        self.assertIn(self.enabled_provider.BACKEND_CLASS.name, disconnect_url)

    def test_login_url_raises_value_error_if_provider_not_enabled(self):
        self._assert_rejects_disabled_provider(
            lambda name: pipeline.get_login_url(name, pipeline.AUTH_ENTRY_LOGIN))

    def test_login_url_returns_expected_format(self):
        login_url = pipeline.get_login_url(self.enabled_provider.NAME, pipeline.AUTH_ENTRY_LOGIN)
        self.assertTrue(login_url.startswith('/auth/login'))
        self.assertIn(self.enabled_provider.BACKEND_CLASS.name, login_url)
        self.assertTrue(login_url.endswith(pipeline.AUTH_ENTRY_LOGIN))
| agpl-3.0 |
fpadoan/metasyntactic | protobuf-2.2.0/python/google/protobuf/internal/message_listener.py | 17 | 2635 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Defines a listener interface for observing certain
state transitions on Message objects.
Also defines a null implementation of this interface.
"""
__author__ = 'robinson@google.com (Will Robinson)'
class MessageListener(object):
  """Listens for transitions to nonempty and for invalidations of cached
  byte sizes. Meant to be registered via Message._SetListener().
  """

  def TransitionToNonempty(self):
    """Called the *first* time that this message becomes nonempty.
    Implementations are free (but not required) to call this method multiple
    times after the message has become nonempty.
    """
    # Abstract hook: concrete listeners must override.
    raise NotImplementedError

  def ByteSizeDirty(self):
    """Called *every* time the cached byte size value
    for this object is invalidated (transitions from being
    "clean" to "dirty").
    """
    # Abstract hook: concrete listeners must override.
    raise NotImplementedError
class NullMessageListener(object):
  """No-op MessageListener implementation."""

  def TransitionToNonempty(self):
    # Intentionally ignored: used where no listener behavior is needed.
    pass

  def ByteSizeDirty(self):
    # Intentionally ignored.
    pass
| apache-2.0 |
globau/servo | components/script/dom/bindings/codegen/parser/tests/test_global_extended_attr.py | 107 | 2977 | def WebIDLTest(parser, harness):
parser.parse("""
[Global]
interface Foo : Bar {
getter any(DOMString name);
};
interface Bar {};
""")
results = parser.finish()
harness.ok(results[0].isOnGlobalProtoChain(),
"[Global] interface should be on global's proto chain")
harness.ok(results[1].isOnGlobalProtoChain(),
"[Global] interface should be on global's proto chain")
parser = parser.reset()
threw = False
try:
parser.parse("""
[Global]
interface Foo {
getter any(DOMString name);
setter void(DOMString name, any arg);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown for [Global] used on an interface with a "
"named setter")
parser = parser.reset()
threw = False
try:
parser.parse("""
[Global]
interface Foo {
getter any(DOMString name);
creator void(DOMString name, any arg);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown for [Global] used on an interface with a "
"named creator")
parser = parser.reset()
threw = False
try:
parser.parse("""
[Global]
interface Foo {
getter any(DOMString name);
deleter void(DOMString name);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown for [Global] used on an interface with a "
"named deleter")
parser = parser.reset()
threw = False
try:
parser.parse("""
[Global, OverrideBuiltins]
interface Foo {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown for [Global] used on an interface with a "
"[OverrideBuiltins]")
parser = parser.reset()
threw = False
try:
parser.parse("""
[Global]
interface Foo : Bar {
};
[OverrideBuiltins]
interface Bar {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown for [Global] used on an interface with an "
"[OverrideBuiltins] ancestor")
parser = parser.reset()
threw = False
try:
parser.parse("""
[Global]
interface Foo {
};
interface Bar : Foo {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown for [Global] used on an interface with a "
"descendant")
| mpl-2.0 |
keithhamilton/blackmaas | lib/python2.7/site-packages/pip/_vendor/requests/auth.py | 294 | 6173 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
import logging
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header
log = logging.getLogger(__name__)
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
return 'Basic ' + b64encode(('%s:%s' % (username, password)).encode('latin1')).strip().decode('latin1')
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
self.last_nonce = ''
self.nonce_count = 0
self.chal = {}
self.pos = None
def build_digest_header(self, method, url):
realm = self.chal['realm']
nonce = self.chal['nonce']
qop = self.chal.get('qop')
algorithm = self.chal.get('algorithm')
opaque = self.chal.get('opaque')
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
path = p_parsed.path
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if qop is None:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed."""
if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self.pos)
num_401_calls = getattr(self, 'num_401_calls', 1)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and num_401_calls < 2:
setattr(self, 'num_401_calls', num_401_calls + 1)
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.raw.release_conn()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
setattr(self, 'num_401_calls', 1)
return r
def __call__(self, r):
# If we have a saved nonce, skip the 401
if self.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self.pos = r.body.tell()
except AttributeError:
pass
r.register_hook('response', self.handle_401)
return r
| bsd-3-clause |
mattcaldwell/boto | boto/ec2/elb/healthcheck.py | 185 | 3775 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class HealthCheck(object):
    """
    An EC2 Elastic Load Balancer health-check configuration.  See
    :ref:`elb-configuring-a-health-check` for a walkthrough on configuring
    load balancer health checks.
    """

    def __init__(self, access_point=None, interval=30, target=None,
                 healthy_threshold=3, timeout=5, unhealthy_threshold=5):
        """
        :ivar str access_point: The name of the load balancer this
            health check is associated with.
        :ivar int interval: Specifies how many seconds there are between
            health checks.
        :ivar str target: Determines what to check on an instance. See the
            Amazon HealthCheck_ documentation for possible Target values.

        .. _HealthCheck: http://docs.amazonwebservices.com/ElasticLoadBalancing/latest/APIReference/API_HealthCheck.html
        """
        self.access_point = access_point
        self.interval = interval
        self.target = target
        self.healthy_threshold = healthy_threshold
        self.timeout = timeout
        self.unhealthy_threshold = unhealthy_threshold

    def __repr__(self):
        return 'HealthCheck:{0}'.format(self.target)

    def startElement(self, name, attrs, connection):
        # Nothing nested to parse for a health check element.
        return None

    def endElement(self, name, value, connection):
        # Integer-valued response fields, mapped to their attribute names.
        int_fields = {'Interval': 'interval',
                      'HealthyThreshold': 'healthy_threshold',
                      'Timeout': 'timeout',
                      'UnhealthyThreshold': 'unhealthy_threshold'}
        if name in int_fields:
            setattr(self, int_fields[name], int(value))
        elif name == 'Target':
            self.target = value
        else:
            # Unknown fields are stored verbatim.
            setattr(self, name, value)

    def update(self):
        """
        In the case where you have accessed an existing health check on a
        load balancer, this method applies this instance's health check
        values to the load balancer it is attached to.

        .. note:: This method will not do anything if the :py:attr:`access_point`
            attribute isn't set, as is the case with a newly instantiated
            HealthCheck instance.
        """
        if not self.access_point:
            return
        refreshed = self.connection.configure_health_check(self.access_point,
                                                           self)
        # Mirror the values the service actually accepted.
        for attr in ('interval', 'target', 'healthy_threshold',
                     'unhealthy_threshold', 'timeout'):
            setattr(self, attr, getattr(refreshed, attr))
| mit |
2013Commons/HUE-SHARK | desktop/core/ext-py/Django-1.2.3/build/lib.linux-i686-2.7/django/contrib/gis/gdal/field.py | 264 | 6059 | from ctypes import byref, c_int
from datetime import date, datetime, time
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException
from django.contrib.gis.gdal.prototypes import ds as capi
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Fld_* routines are relevant here.
class Field(GDALBase):
    "A class that wraps an OGR Field, needs to be instantiated from a Feature object."
    #### Python 'magic' routines ####
    def __init__(self, feat, index):
        """
        Initializes on the feature pointer and the integer index of
        the field within the feature.
        """
        # Setting the feature pointer and index.
        self._feat = feat
        self._index = index
        # Getting the pointer for this field.
        fld_ptr = capi.get_feat_field_defn(feat, index)
        if not fld_ptr:
            raise OGRException('Cannot create OGR Field, invalid pointer given.')
        self.ptr = fld_ptr
        # Setting the class depending upon the OGR Field Type (OFT)
        # NOTE: the instance is re-tagged with the concrete subclass from
        # OGRFieldTypes so `value` and friends dispatch by field type.
        self.__class__ = OGRFieldTypes[self.type]
        # OFTReal with no precision should be an OFTInteger.
        if isinstance(self, OFTReal) and self.precision == 0:
            self.__class__ = OFTInteger
    def __str__(self):
        "Returns the string representation of the Field."
        return str(self.value).strip()
    #### Field Methods ####
    def as_double(self):
        "Retrieves the Field's value as a double (float)."
        return capi.get_field_as_double(self._feat, self._index)
    def as_int(self):
        "Retrieves the Field's value as an integer."
        return capi.get_field_as_integer(self._feat, self._index)
    def as_string(self):
        "Retrieves the Field's value as a string."
        return capi.get_field_as_string(self._feat, self._index)
    def as_datetime(self):
        "Retrieves the Field's value as a tuple of date & time components."
        # The components come back as ctypes c_int objects (callers must
        # read `.value`), filled in by OGR through the byref() pointers.
        yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]
        status = capi.get_field_as_datetime(self._feat, self._index, byref(yy), byref(mm), byref(dd),
                                            byref(hh), byref(mn), byref(ss), byref(tz))
        if status:
            return (yy, mm, dd, hh, mn, ss, tz)
        else:
            raise OGRException('Unable to retrieve date & time information from the field.')
    #### Field Properties ####
    @property
    def name(self):
        "Returns the name of this Field."
        return capi.get_field_name(self.ptr)
    @property
    def precision(self):
        "Returns the precision of this Field."
        return capi.get_field_precision(self.ptr)
    @property
    def type(self):
        "Returns the OGR type of this Field."
        return capi.get_field_type(self.ptr)
    @property
    def type_name(self):
        "Return the OGR field type name for this Field."
        return capi.get_field_type_name(self.type)
    @property
    def value(self):
        "Returns the value of this Field."
        # Default is to get the field as a string; subclasses override.
        return self.as_string()
    @property
    def width(self):
        "Returns the width of this Field."
        return capi.get_field_width(self.ptr)
### The Field sub-classes for each OGR Field type. ###
class OFTInteger(Field):
    # Integer-typed OGR field.
    @property
    def value(self):
        "Returns an integer contained in this field."
        return self.as_int()
    @property
    def type(self):
        """
        GDAL uses OFTReals to represent OFTIntegers in created
        shapefiles -- forcing the type here since the underlying field
        type may actually be OFTReal.
        """
        return 0
class OFTReal(Field):
    # Floating-point OGR field.
    @property
    def value(self):
        "Returns a float contained in this field."
        return self.as_double()
# String & Binary fields, just subclasses
class OFTString(Field): pass
class OFTWideString(Field): pass
class OFTBinary(Field): pass
# OFTDate, OFTTime, OFTDateTime fields.
# These unpack the ctypes c_int components from Field.as_datetime(),
# returning None when OGR reports the value as unreadable/invalid.
class OFTDate(Field):
    @property
    def value(self):
        "Returns a Python `date` object for the OFTDate field."
        try:
            yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
            return date(yy.value, mm.value, dd.value)
        except (ValueError, OGRException):
            return None
class OFTDateTime(Field):
    @property
    def value(self):
        "Returns a Python `datetime` object for this OFTDateTime field."
        # TODO: Adapt timezone information.
        #  See http://lists.maptools.org/pipermail/gdal-dev/2006-February/007990.html
        #  The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous),
        #  100=GMT, 104=GMT+1, 80=GMT-5, etc.
        try:
            yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
            return datetime(yy.value, mm.value, dd.value, hh.value, mn.value, ss.value)
        except (ValueError, OGRException):
            return None
class OFTTime(Field):
    @property
    def value(self):
        "Returns a Python `time` object for this OFTTime field."
        try:
            yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
            return time(hh.value, mn.value, ss.value)
        except (ValueError, OGRException):
            return None
# List fields are also just subclasses
class OFTIntegerList(Field): pass
class OFTRealList(Field): pass
class OFTStringList(Field): pass
class OFTWideStringList(Field): pass
# Class mapping dictionary for OFT Types and reverse mapping.
# Keys are the integer OGR field-type codes (see ogr__api_8h.html).
OGRFieldTypes = { 0 : OFTInteger,
                  1 : OFTIntegerList,
                  2 : OFTReal,
                  3 : OFTRealList,
                  4 : OFTString,
                  5 : OFTStringList,
                  6 : OFTWideString,
                  7 : OFTWideStringList,
                  8 : OFTBinary,
                  9 : OFTDate,
                  10 : OFTTime,
                  11 : OFTDateTime,
                  }
ROGRFieldTypes = dict([(cls, num) for num, cls in OGRFieldTypes.items()])
| apache-2.0 |
ujenmr/ansible | lib/ansible/modules/network/fortios/fortios_log_memory_setting.py | 23 | 7514 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_memory_setting
short_description: Settings for memory buffer in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify log_memory feature and setting category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
log_memory_setting:
description:
- Settings for memory buffer.
default: null
suboptions:
diskfull:
description:
- Action to take when memory is full.
choices:
- overwrite
status:
description:
- Enable/disable logging to the FortiGate's memory.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Settings for memory buffer.
fortios_log_memory_setting:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_memory_setting:
diskfull: "overwrite"
status: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
    """Open a session on the FortiGate described by *data*.

    Enables API debugging, selects HTTP vs HTTPS (HTTPS unless the
    ``https`` key is present and falsy), then logs in with the supplied
    credentials.  Uses the module-level ``fos`` client.
    """
    fos.debug('on')
    if not data.get('https', True):
        fos.https('off')
    else:
        fos.https('on')
    fos.login(data['host'], data['username'], data['password'])
def filter_log_memory_setting_data(json):
    """Return a dict containing only this module's known, non-null options.

    Keys outside the log.memory/setting schema and keys whose value is
    ``None`` are dropped.
    """
    option_list = ['diskfull', 'status']
    return dict((key, json[key]) for key in option_list
                if key in json and json[key] is not None)
def flatten_multilists_attributes(data):
    """Collapse multi-value list attributes into space-separated strings.

    This module declares no such attributes, so ``multilist_attrs`` is empty
    and the loop is a no-op kept for structural consistency with the other
    fortios_* modules.  *data* is modified (if at all) and returned in place.
    """
    multilist_attrs = []
    for attr_path in multilist_attrs:
        try:
            # Walk down to the parent container, then join the leaf list.
            # (Replaces the original eval()/exec() string construction,
            # which was fragile and unnecessarily unsafe.)
            parent = data
            for key in attr_path[:-1]:
                parent = parent[key]
            parent[attr_path[-1]] = ' '.join(elem for elem in parent[attr_path[-1]])
        except BaseException:
            # Best-effort, matching the original behavior: a missing or
            # malformed attribute is silently left untouched.
            pass
    return data
def log_memory_setting(data, fos):
    """Apply the log.memory/setting configuration through the *fos* client.

    Flattens and filters the user-supplied option mapping, then issues the
    ``set`` call against the requested vdom.
    """
    prepared = filter_log_memory_setting_data(
        flatten_multilists_attributes(data['log_memory_setting']))
    return fos.set('log.memory',
                   'setting',
                   data=prepared,
                   vdom=data['vdom'])
def fortios_log_memory(data, fos):
    """Log in, apply the log.memory configuration, log out.

    Returns the ``(is_error, has_changed, result)`` triple consumed by
    ``main()``.
    NOTE(review): if ``data['log_memory_setting']`` is empty/falsy, ``resp``
    is never bound and a NameError is raised -- presumably the argument
    spec always supplies it; confirm upstream before relying on that path.
    """
    login(data)
    if data['log_memory_setting']:
        resp = log_memory_setting(data, fos)
    fos.logout()
    succeeded = resp['status'] == "success"
    return not succeeded, succeeded, resp
def main():
    """Ansible module entry point: parse arguments, run, report."""
    # Argument spec mirrors the DOCUMENTATION block above.
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "log_memory_setting": {
            "required": False, "type": "dict",
            "options": {
                "diskfull": {"required": False, "type": "str",
                             "choices": ["overwrite"]},
                "status": {"required": False, "type": "str",
                           "choices": ["enable", "disable"]}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # fortiosapi is an optional third-party dependency; fail cleanly
    # through Ansible's reporting machinery when it is absent.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")
    # login() and friends reach the client through this module-level global.
    global fos
    fos = FortiOSAPI()
    is_error, has_changed, result = fortios_log_memory(module.params, fos)
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
    main()
| gpl-3.0 |
chriscrosscutler/scikit-image | skimage/draw/draw.py | 15 | 5252 | # coding: utf-8
import numpy as np
from ._draw import _coords_inside_image
def _ellipse_in_shape(shape, center, radiuses):
"""Generate coordinates of points within ellipse bounded by shape."""
y, x = np.ogrid[0:float(shape[0]), 0:float(shape[1])]
cy, cx = center
ry, rx = radiuses
distances = ((y - cy) / ry) ** 2 + ((x - cx) / rx) ** 2
return np.nonzero(distances < 1)
def ellipse(cy, cx, yradius, xradius, shape=None):
"""Generate coordinates of pixels within ellipse.
Parameters
----------
cy, cx : double
Centre coordinate of ellipse.
yradius, xradius : double
Minor and major semi-axes. ``(x/xradius)**2 + (y/yradius)**2 = 1``.
shape : tuple, optional
Image shape which is used to determine the maximum extent of output pixel
coordinates. This is useful for ellipses which exceed the image size.
By default the full extent of the ellipse are used.
Returns
-------
rr, cc : ndarray of int
Pixel coordinates of ellipse.
May be used to directly index into an array, e.g.
``img[rr, cc] = 1``.
Examples
--------
>>> from skimage.draw import ellipse
>>> img = np.zeros((10, 10), dtype=np.uint8)
>>> rr, cc = ellipse(5, 5, 3, 4)
>>> img[rr, cc] = 1
>>> img
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
"""
center = np.array([cy, cx])
radiuses = np.array([yradius, xradius])
# The upper_left and lower_right corners of the
# smallest rectangle containing the ellipse.
upper_left = np.ceil(center - radiuses).astype(int)
lower_right = np.floor(center + radiuses).astype(int)
if shape is not None:
# Constrain upper_left and lower_right by shape boundary.
upper_left = np.maximum(upper_left, np.array([0, 0]))
lower_right = np.minimum(lower_right, np.array(shape[:2]) - 1)
shifted_center = center - upper_left
bounding_shape = lower_right - upper_left + 1
rr, cc = _ellipse_in_shape(bounding_shape, shifted_center, radiuses)
rr.flags.writeable = True
cc.flags.writeable = True
rr += upper_left[0]
cc += upper_left[1]
return rr, cc
def circle(cy, cx, radius, shape=None):
"""Generate coordinates of pixels within circle.
Parameters
----------
cy, cx : double
Centre coordinate of circle.
radius: double
Radius of circle.
shape : tuple, optional
Image shape which is used to determine the maximum extent of output pixel
coordinates. This is useful for circles which exceed the image size.
By default the full extent of the circle are used.
Returns
-------
rr, cc : ndarray of int
Pixel coordinates of circle.
May be used to directly index into an array, e.g.
``img[rr, cc] = 1``.
Notes
-----
This function is a wrapper for skimage.draw.ellipse()
Examples
--------
>>> from skimage.draw import circle
>>> img = np.zeros((10, 10), dtype=np.uint8)
>>> rr, cc = circle(4, 4, 5)
>>> img[rr, cc] = 1
>>> img
array([[0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
"""
return ellipse(cy, cx, radius, radius, shape)
def set_color(img, coords, color):
    """Set pixel color in the image at the given coordinates.

    The image is modified IN PLACE; nothing is returned.  Coordinates that
    exceed the shape of the image are silently ignored.

    Parameters
    ----------
    img : (M, N, D) ndarray
        Image
    coords : ((P,) ndarray, (P,) ndarray)
        Coordinates of pixels to be colored.
    color : (D,) ndarray
        Color to be assigned to coordinates in the image.

    Examples
    --------
    >>> from skimage.draw import line, set_color
    >>> img = np.zeros((10, 10), dtype=np.uint8)
    >>> rr, cc = line(1, 1, 20, 20)
    >>> set_color(img, (rr, cc), 1)
    >>> img
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype=uint8)
    """
    rr, cc = coords
    # Drop any coordinates that fall outside the image bounds.
    rr, cc = _coords_inside_image(rr, cc, img.shape)
    img[rr, cc] = color
| bsd-3-clause |
lebabouin/CouchPotatoServer-develop | libs/tornado/gen.py | 2 | 20329 | """``tornado.gen`` is a generator-based interface to make it easier to
work in an asynchronous environment. Code using the ``gen`` module
is technically asynchronous, but it is written as a single generator
instead of a collection of separate functions.
For example, the following asynchronous handler::
class AsyncHandler(RequestHandler):
@asynchronous
def get(self):
http_client = AsyncHTTPClient()
http_client.fetch("http://example.com",
callback=self.on_fetch)
def on_fetch(self, response):
do_something_with_response(response)
self.render("template.html")
could be written with ``gen`` as::
class GenAsyncHandler(RequestHandler):
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response = yield http_client.fetch("http://example.com")
do_something_with_response(response)
self.render("template.html")
Most asynchronous functions in Tornado return a `.Future`;
yielding this object returns its `~.Future.result`.
For functions that do not return ``Futures``, `Task` works with any
function that takes a ``callback`` keyword argument (most Tornado functions
can be used in either style, although the ``Future`` style is preferred
since it is both shorter and provides better exception handling)::
@gen.coroutine
def get(self):
yield gen.Task(AsyncHTTPClient().fetch, "http://example.com")
You can also yield a list or dict of ``Futures`` and/or ``Tasks``, which will be
started at the same time and run in parallel; a list or dict of results will
be returned when they are all finished::
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response1, response2 = yield [http_client.fetch(url1),
http_client.fetch(url2)]
response_dict = yield dict(response3=http_client.fetch(url3),
response4=http_client.fetch(url4))
response3 = response_dict['response3']
response4 = response_dict['response4']
.. versionchanged:: 3.2
Dict support added.
For more complicated interfaces, `Task` can be split into two parts:
`Callback` and `Wait`::
class GenAsyncHandler2(RequestHandler):
@asynchronous
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
http_client.fetch("http://example.com",
callback=(yield gen.Callback("key"))
response = yield gen.Wait("key")
do_something_with_response(response)
self.render("template.html")
The ``key`` argument to `Callback` and `Wait` allows for multiple
asynchronous operations to be started at different times and proceed
in parallel: yield several callbacks with different keys, then wait
for them once all the async operations have started.
The result of a `Wait` or `Task` yield expression depends on how the callback
was run. If it was called with no arguments, the result is ``None``. If
it was called with one argument, the result is that argument. If it was
called with more than one argument or any keyword arguments, the result
is an `Arguments` object, which is a named tuple ``(args, kwargs)``.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import functools
import itertools
import sys
import types
from tornado.concurrent import Future, TracebackFuture
from tornado.ioloop import IOLoop
from tornado.stack_context import ExceptionStackContext, wrap
class KeyReuseError(Exception):
    # Key-management error for the Callback/Wait machinery (raised by the
    # Runner elsewhere in this module): a Callback key was used twice.
    pass
class UnknownKeyError(Exception):
    # Wait was given a key for which no Callback exists.
    pass
class LeakedCallbackError(Exception):
    # The generator finished while Callbacks were still pending.
    pass
class BadYieldError(Exception):
    # The generator yielded an object the runner does not know how to handle.
    pass
class ReturnValueIgnoredError(Exception):
    # A decorated function returned a value that would be silently lost
    # (see the checks in `engine`, below).
    pass
def engine(func):
    """Callback-oriented decorator for asynchronous generators.
    This is an older interface; for new code that does not need to be
    compatible with versions of Tornado older than 3.0 the
    `coroutine` decorator is recommended instead.
    This decorator is similar to `coroutine`, except it does not
    return a `.Future` and the ``callback`` argument is not treated
    specially.
    In most cases, functions decorated with `engine` should take
    a ``callback`` argument and invoke it with their result when
    they are finished. One notable exception is the
    `~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
    which use ``self.finish()`` in place of a callback argument.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        runner = None
        def handle_exception(typ, value, tb):
            # if the function throws an exception before its first "yield"
            # (or is not a generator at all), the Runner won't exist yet.
            # However, in that case we haven't reached anything asynchronous
            # yet, so we can just let the exception propagate.
            if runner is not None:
                return runner.handle_exception(typ, value, tb)
            return False
        with ExceptionStackContext(handle_exception) as deactivate:
            try:
                result = func(*args, **kwargs)
            except (Return, StopIteration) as e:
                # Treat an early `raise Return(x)` (or plain return) as the
                # function's result; Return carries it in `.value`.
                result = getattr(e, 'value', None)
            else:
                if isinstance(result, types.GeneratorType):
                    # The function is a real generator: drive it with a
                    # Runner.  The final callback enforces that @gen.engine
                    # functions never produce a value (use callbacks instead).
                    def final_callback(value):
                        if value is not None:
                            raise ReturnValueIgnoredError(
                                "@gen.engine functions cannot return values: "
                                "%r" % (value,))
                        assert value is None
                        deactivate()
                    runner = Runner(result, final_callback)
                    runner.run()
                    return
            # Non-generator path: a plain return value would be silently
            # lost, so reject it explicitly.
            if result is not None:
                raise ReturnValueIgnoredError(
                    "@gen.engine functions cannot return values: %r" %
                    (result,))
            deactivate()
            # no yield, so we're done
    return wrapper
def coroutine(func):
    """Decorator for asynchronous generators.
    Any generator that yields objects from this module must be wrapped
    in either this decorator or `engine`.
    Coroutines may "return" by raising the special exception
    `Return(value) <Return>`. In Python 3.3+, it is also possible for
    the function to simply use the ``return value`` statement (prior to
    Python 3.3 generators were not allowed to also return values).
    In all versions of Python a coroutine that simply wishes to exit
    early may use the ``return`` statement without a value.
    Functions with this decorator return a `.Future`. Additionally,
    they may be called with a ``callback`` keyword argument, which
    will be invoked with the future's result when it resolves. If the
    coroutine fails, the callback will not be run and an exception
    will be raised into the surrounding `.StackContext`. The
    ``callback`` argument is not visible inside the decorated
    function; it is handled by the decorator itself.
    From the caller's perspective, ``@gen.coroutine`` is similar to
    the combination of ``@return_future`` and ``@gen.engine``.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        runner = None
        future = TracebackFuture()
        # An optional `callback` kwarg is consumed here (never passed to
        # the wrapped function) and chained to the future's result.
        if 'callback' in kwargs:
            callback = kwargs.pop('callback')
            IOLoop.current().add_future(
                future, lambda future: callback(future.result()))
        def handle_exception(typ, value, tb):
            # Give the Runner first crack at the exception; if it declines
            # (or fails while handling it), record it on the future.
            try:
                if runner is not None and runner.handle_exception(typ, value, tb):
                    return True
            except Exception:
                typ, value, tb = sys.exc_info()
            future.set_exc_info((typ, value, tb))
            return True
        with ExceptionStackContext(handle_exception) as deactivate:
            try:
                result = func(*args, **kwargs)
            except (Return, StopIteration) as e:
                # Early `raise Return(x)` / bare return: the result (if any)
                # is carried in the exception's `.value`.
                result = getattr(e, 'value', None)
            except Exception:
                deactivate()
                future.set_exc_info(sys.exc_info())
                return future
            else:
                if isinstance(result, types.GeneratorType):
                    # Real generator: drive it with a Runner and resolve the
                    # future with whatever the generator ultimately returns.
                    def final_callback(value):
                        deactivate()
                        future.set_result(value)
                    runner = Runner(result, final_callback)
                    runner.run()
                    return future
            # Non-generator path: resolve immediately with the return value.
            deactivate()
            future.set_result(result)
            return future
    return wrapper
class Return(Exception):
    """Exception used by a `coroutine` to deliver its result.

    Raising ``Return(value)`` inside a decorated generator terminates it
    and makes ``value`` the result of the coroutine::

        @gen.coroutine
        def fetch_json(url):
            response = yield AsyncHTTPClient().fetch(url)
            raise gen.Return(json_decode(response.body))

    On Python 3.3 and later a plain ``return value`` statement has the
    same effect; this class exists because earlier interpreters do not
    allow ``return`` with a value inside a generator.  The argument is
    optional: by analogy with a bare ``return`` statement, omitting it
    yields a result of ``None``, and ``raise gen.Return()`` is never
    required.
    """
    def __init__(self, value=None):
        # Initialize the Exception machinery first, then attach the payload.
        super(Return, self).__init__()
        self.value = value
class YieldPoint(object):
    """Abstract interface for objects a generator may yield.

    Application code rarely needs this class directly; subclass it to
    implement custom yielding behavior for the generator runner.
    """
    def start(self, runner):
        """Invoked by the runner once this object has been yielded.

        Guaranteed to be the first method called on the instance.
        """
        raise NotImplementedError()
    def is_ready(self):
        """Report whether the generator may be resumed.

        Returns a boolean; the runner may poll this more than once.
        """
        raise NotImplementedError()
    def get_result(self):
        """Produce the value of the yield expression.

        Called exactly once, and only after `is_ready` has returned true.
        """
        raise NotImplementedError()
class Callback(YieldPoint):
    """Produces a callable that lets the matching `Wait` proceed.

    Keys may be any hashable value and pair a ``Callback`` with its
    corresponding ``Wait``.  A key must be unique among outstanding
    callbacks within a single run of the generator, but the same
    constant may be reused across separate runs of the function.

    The resulting callable accepts zero or one positional arguments;
    when one is supplied it becomes the value returned by `Wait`.
    """
    def __init__(self, key):
        self.key = key
    def start(self, runner):
        # Remember the runner and announce the pending key to it.
        self.runner = runner
        runner.register_callback(self.key)
    def is_ready(self):
        # Registration completes immediately, so this yield never blocks.
        return True
    def get_result(self):
        # Hand back the callable that will later deliver the result.
        return self.runner.result_callback(self.key)
class Wait(YieldPoint):
    """Yields the value that was delivered to the matching `Callback`."""
    def __init__(self, key):
        self.key = key
    def start(self, runner):
        self.runner = runner
    def is_ready(self):
        # Ready as soon as the paired callback has fired.
        return self.runner.is_ready(self.key)
    def get_result(self):
        # Consume (and unregister) the stored result for this key.
        return self.runner.pop_result(self.key)
class WaitAll(YieldPoint):
    """Collects the results of several earlier `Callbacks <Callback>`.

    Constructed from a sequence of `Callback` keys; the result is the
    list of corresponding values in the same order.  Yielding
    ``WaitAll(keys)`` is equivalent to yielding a list of `Wait` objects.
    """
    def __init__(self, keys):
        self.keys = keys
    def start(self, runner):
        self.runner = runner
    def is_ready(self):
        # Every key must have fired before the generator resumes.
        return all(self.runner.is_ready(k) for k in self.keys)
    def get_result(self):
        # Pop results in key order so the output lines up with the input.
        return [self.runner.pop_result(k) for k in self.keys]
class Task(YieldPoint):
    """Adapts one callback-style asynchronous call for ``yield``.

    Wraps a function plus optional positional/keyword arguments; the
    function is invoked with those arguments and an extra ``callback``
    keyword.  Whatever is passed to that callback becomes the value of
    the yield expression.  ``Task`` behaves like a `Callback`/`Wait`
    pair whose key is generated automatically::

        result = yield gen.Task(func, args)

        func(args, callback=(yield gen.Callback(key)))
        result = yield gen.Wait(key)
    """
    def __init__(self, func, *args, **kwargs):
        # The wrapped call must not supply its own callback.
        assert "callback" not in kwargs
        self.args = args
        self.kwargs = kwargs
        self.func = func
    def start(self, runner):
        self.runner = runner
        # A fresh object() is unique by identity, so keys cannot collide.
        self.key = object()
        runner.register_callback(self.key)
        self.kwargs["callback"] = runner.result_callback(self.key)
        self.func(*self.args, **self.kwargs)
    def is_ready(self):
        return self.runner.is_ready(self.key)
    def get_result(self):
        return self.runner.pop_result(self.key)
class YieldFuture(YieldPoint):
    """Bridges a `Future` into the `YieldPoint` protocol.

    When the wrapped future resolves, its result (or exception) becomes
    the value of the yield expression.
    """
    def __init__(self, future, io_loop=None):
        self.future = future
        # Fall back to the current loop when none is supplied.
        self.io_loop = io_loop or IOLoop.current()
    def start(self, runner):
        self.runner = runner
        self.key = object()
        runner.register_callback(self.key)
        # Let the IOLoop hand the completed future to the runner.
        self.io_loop.add_future(self.future, runner.result_callback(self.key))
    def is_ready(self):
        return self.runner.is_ready(self.key)
    def get_result(self):
        # .result() re-raises the future's exception if it failed.
        return self.runner.pop_result(self.key).result()
class Multi(YieldPoint):
    """Waits on several asynchronous operations in parallel.

    Accepts a list (or dict) of ``Tasks``/``YieldPoints``/``Futures``
    and produces their results as a list (or a dict keyed like the
    input).  Explicit use is optional: the engine wraps a yielded list
    of ``YieldPoints`` in a ``Multi`` automatically.
    """
    def __init__(self, children):
        self.keys = None
        if isinstance(children, dict):
            # Remember key order so results can be re-associated later.
            self.keys = list(children.keys())
            children = children.values()
        self.children = []
        for child in children:
            # Wrap bare futures so every child speaks the YieldPoint protocol.
            if isinstance(child, Future):
                child = YieldFuture(child)
            self.children.append(child)
        assert all(isinstance(child, YieldPoint) for child in self.children)
        self.unfinished_children = set(self.children)
    def start(self, runner):
        for child in self.children:
            child.start(runner)
    def is_ready(self):
        # Drain the leading run of finished children; takewhile stops at
        # the first one that is still pending.
        done = list(itertools.takewhile(
            lambda child: child.is_ready(), self.unfinished_children))
        self.unfinished_children.difference_update(done)
        return not self.unfinished_children
    def get_result(self):
        results = (child.get_result() for child in self.children)
        if self.keys is not None:
            return dict(zip(self.keys, results))
        return list(results)
class _NullYieldPoint(YieldPoint):
    """Placeholder yield point: always ready, always yields ``None``."""
    def start(self, runner):
        pass
    def is_ready(self):
        return True
    def get_result(self):
        return None
# Shared singleton; the null point is stateless, so one instance suffices.
_null_yield_point = _NullYieldPoint()
class Runner(object):
    """Internal implementation of `tornado.gen.engine`.
    Maintains information about pending callbacks and their results.
    ``final_callback`` is run after the generator exits.
    """
    def __init__(self, gen, final_callback):
        self.gen = gen
        self.final_callback = final_callback
        # Sentinel yield point that is always ready; replaced as soon as
        # the generator yields a real YieldPoint.
        self.yield_point = _null_yield_point
        self.pending_callbacks = set()
        self.results = {}
        self.running = False
        self.finished = False
        self.exc_info = None
        self.had_exception = False
    def register_callback(self, key):
        """Adds ``key`` to the list of callbacks."""
        if key in self.pending_callbacks:
            raise KeyReuseError("key %r is already pending" % (key,))
        self.pending_callbacks.add(key)
    def is_ready(self, key):
        """Returns true if a result is available for ``key``."""
        if key not in self.pending_callbacks:
            raise UnknownKeyError("key %r is not pending" % (key,))
        return key in self.results
    def set_result(self, key, result):
        """Sets the result for ``key`` and attempts to resume the generator."""
        self.results[key] = result
        self.run()
    def pop_result(self, key):
        """Returns the result for ``key`` and unregisters it."""
        self.pending_callbacks.remove(key)
        return self.results.pop(key)
    def run(self):
        """Starts or resumes the generator, running until it reaches a
        yield point that is not ready.
        """
        # Re-entrant calls (e.g. a callback firing synchronously from
        # inside the loop below) and calls after completion are no-ops.
        if self.running or self.finished:
            return
        try:
            self.running = True
            while True:
                if self.exc_info is None:
                    try:
                        # Suspend here if the current yield point has no
                        # result yet; a later set_result() resumes us.
                        if not self.yield_point.is_ready():
                            return
                        next = self.yield_point.get_result()
                        self.yield_point = None
                    except Exception:
                        self.exc_info = sys.exc_info()
                try:
                    if self.exc_info is not None:
                        self.had_exception = True
                        exc_info = self.exc_info
                        self.exc_info = None
                        # Raise the pending exception inside the generator.
                        yielded = self.gen.throw(*exc_info)
                    else:
                        yielded = self.gen.send(next)
                except (StopIteration, Return) as e:
                    self.finished = True
                    self.yield_point = _null_yield_point
                    if self.pending_callbacks and not self.had_exception:
                        # If we ran cleanly without waiting on all callbacks
                        # raise an error (really more of a warning). If we
                        # had an exception then some callbacks may have been
                        # orphaned, so skip the check in that case.
                        raise LeakedCallbackError(
                            "finished without waiting for callbacks %r" %
                            self.pending_callbacks)
                    self.final_callback(getattr(e, 'value', None))
                    self.final_callback = None
                    return
                except Exception:
                    self.finished = True
                    self.yield_point = _null_yield_point
                    raise
                # Normalize whatever the generator yielded into a YieldPoint.
                if isinstance(yielded, (list, dict)):
                    yielded = Multi(yielded)
                elif isinstance(yielded, Future):
                    yielded = YieldFuture(yielded)
                if isinstance(yielded, YieldPoint):
                    self.yield_point = yielded
                    try:
                        self.yield_point.start(self)
                    except Exception:
                        # A failing start() is delivered back into the
                        # generator on the next loop iteration.
                        self.exc_info = sys.exc_info()
                else:
                    self.exc_info = (BadYieldError(
                        "yielded unknown object %r" % (yielded,)),)
        finally:
            self.running = False
    def result_callback(self, key):
        # Build the callable handed out for ``key``: multiple arguments are
        # packed into an Arguments tuple, one is passed through, none is None.
        def inner(*args, **kwargs):
            if kwargs or len(args) > 1:
                result = Arguments(args, kwargs)
            elif args:
                result = args[0]
            else:
                result = None
            self.set_result(key, result)
        return wrap(inner)
    def handle_exception(self, typ, value, tb):
        # Accept the exception only while suspended at a yield point;
        # run() will then throw it into the generator.
        if not self.running and not self.finished:
            self.exc_info = (typ, value, tb)
            self.run()
            return True
        else:
            return False
Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
| gpl-3.0 |
ESOedX/edx-platform | cms/djangoapps/contentstore/views/tests/test_group_configurations.py | 1 | 48420 | #-*- coding: utf-8 -*-
"""
Group Configuration Tests.
"""
from __future__ import absolute_import
import json
from operator import itemgetter
import ddt
import six
from mock import patch
from six.moves import range
from contentstore.course_group_config import CONTENT_GROUP_CONFIGURATION_NAME, ENROLLMENT_SCHEME, GroupConfiguration
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url, reverse_usage_url
from openedx.features.content_type_gating.helpers import CONTENT_GATING_PARTITION_ID
from openedx.features.content_type_gating.partitions import CONTENT_TYPE_GATING_SCHEME
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import ENROLLMENT_TRACK_PARTITION_ID, Group, UserPartition
from xmodule.validation import StudioValidation, StudioValidationMessage
# Canonical payload used by the tests below when POSTing/PUTting a
# "random" scheme group configuration to the group configuration handlers.
GROUP_CONFIGURATION_JSON = {
    u'name': u'Test name',
    u'scheme': u'random',
    u'description': u'Test description',
    u'version': UserPartition.VERSION,
    u'groups': [
        {
            u'name': u'Group A',
            u'version': 1,
        }, {
            u'name': u'Group B',
            u'version': 1,
        },
    ],
}
# pylint: disable=no-member
class HelperMethods(object):
    """
    Mixin that provides useful methods for Group Configuration tests.

    Expects to be mixed into a test case that provides ``self.course``,
    ``self.client``, ``self.save_course`` (e.g. ``CourseTestCase``).
    """
    def _create_content_experiment(self, cid=-1, group_id=None, cid_for_problem=None,
                                   name_suffix='', special_characters=''):
        """
        Create content experiment.
        Assign Group Configuration to the experiment if cid is provided.
        Assigns a problem to the first group in the split test if group_id and cid_for_problem is provided.
        """
        sequential = ItemFactory.create(
            category='sequential',
            parent_location=self.course.location,
            display_name=u'Test Subsection {}'.format(name_suffix)
        )
        vertical = ItemFactory.create(
            category='vertical',
            parent_location=sequential.location,
            display_name=u'Test Unit {}'.format(name_suffix)
        )
        # Pre-compute the three condition verticals' usage keys so the
        # split_test can reference its children before they exist.
        c0_url = self.course.id.make_usage_key("vertical", "split_test_cond0")
        c1_url = self.course.id.make_usage_key("vertical", "split_test_cond1")
        c2_url = self.course.id.make_usage_key("vertical", "split_test_cond2")
        split_test = ItemFactory.create(
            category='split_test',
            parent_location=vertical.location,
            user_partition_id=cid,
            display_name=u"Test Content Experiment {}{}".format(name_suffix, special_characters),
            group_id_to_child={"0": c0_url, "1": c1_url, "2": c2_url}
        )
        ItemFactory.create(
            parent_location=split_test.location,
            category="vertical",
            display_name="Condition 0 vertical",
            location=c0_url,
        )
        c1_vertical = ItemFactory.create(
            parent_location=split_test.location,
            category="vertical",
            display_name="Condition 1 vertical",
            location=c1_url,
        )
        ItemFactory.create(
            parent_location=split_test.location,
            category="vertical",
            display_name="Condition 2 vertical",
            location=c2_url,
        )
        problem = None
        if group_id and cid_for_problem:
            # Optionally attach a problem, group-restricted via group_access,
            # inside the condition-1 vertical.
            problem = ItemFactory.create(
                category='problem',
                parent_location=c1_vertical.location,
                display_name=u"Test Problem"
            )
            self.client.ajax_post(
                reverse_usage_url("xblock_handler", problem.location),
                data={'metadata': {'group_access': {cid_for_problem: [group_id]}}}
            )
            c1_vertical.children.append(problem.location)
        partitions_json = [p.to_json() for p in self.course.user_partitions]
        self.client.ajax_post(
            reverse_usage_url("xblock_handler", split_test.location),
            data={'metadata': {'user_partitions': partitions_json}}
        )
        self.save_course()
        return vertical, split_test, problem
    def _create_problem_with_content_group(self, cid, group_id, name_suffix='', special_characters='', orphan=False):
        """
        Create a problem
        Assign content group to the problem.

        When ``orphan`` is true the vertical is parented directly to the
        course root without registering it as a child, producing an orphan.
        """
        vertical_parent_location = self.course.location
        if not orphan:
            subsection = ItemFactory.create(
                category='sequential',
                parent_location=self.course.location,
                display_name=u"Test Subsection {}".format(name_suffix)
            )
            vertical_parent_location = subsection.location
        vertical = ItemFactory.create(
            category='vertical',
            parent_location=vertical_parent_location,
            display_name=u"Test Unit {}".format(name_suffix)
        )
        problem = ItemFactory.create(
            category='problem',
            parent_location=vertical.location,
            display_name=u"Test Problem {}{}".format(name_suffix, special_characters)
        )
        # Restrict the problem to the given group of partition ``cid``.
        group_access_content = {'group_access': {cid: [group_id]}}
        self.client.ajax_post(
            reverse_usage_url("xblock_handler", problem.location),
            data={'metadata': group_access_content}
        )
        if not orphan:
            self.course.children.append(subsection.location)
        self.save_course()
        return vertical, problem
    def _add_user_partitions(self, count=1, scheme_id="random"):
        """
        Create user partitions for the course.

        Each partition gets three groups (A, B, C) and ids 0..count-1.
        """
        partitions = [
            UserPartition(
                i, 'Name ' + str(i), 'Description ' + str(i),
                [Group(0, 'Group A'), Group(1, 'Group B'), Group(2, 'Group C')],
                scheme=None, scheme_id=scheme_id
            ) for i in range(count)
        ]
        self.course.user_partitions = partitions
        self.save_course()
# pylint: disable=no-member
class GroupConfigurationsBaseTestCase(object):
    """
    Mixin with base test cases for the group configurations.

    Subclasses must implement ``_url()`` returning the handler endpoint.
    """
    def _remove_ids(self, content):
        """
        Remove ids from the response. We cannot predict IDs, because they're
        generated randomly.
        We use this method to clean up response when creating new group configurations.
        Returns a tuple that contains removed group configuration ID and group IDs.
        """
        configuration_id = content.pop("id")
        group_ids = [group.pop("id") for group in content["groups"]]
        return (configuration_id, group_ids)
    def test_required_fields_are_absent(self):
        """
        Test required fields are absent.
        """
        bad_jsons = [
            # must have name of the configuration
            {
                u'description': 'Test description',
                u'groups': [
                    {u'name': u'Group A'},
                    {u'name': u'Group B'},
                ],
            },
            # must have at least one group
            {
                u'name': u'Test name',
                u'description': u'Test description',
                u'groups': [],
            },
            # an empty json
            {},
        ]
        for bad_json in bad_jsons:
            response = self.client.post(
                self._url(),
                data=json.dumps(bad_json),
                content_type="application/json",
                HTTP_ACCEPT="application/json",
                HTTP_X_REQUESTED_WITH="XMLHttpRequest",
            )
            # Each invalid payload must be rejected with a 400 and an
            # error body, and must not create anything (no Location header).
            self.assertEqual(response.status_code, 400)
            self.assertNotIn("Location", response)
            content = json.loads(response.content.decode('utf-8'))
            self.assertIn("error", content)
    def test_invalid_json(self):
        """
        Test invalid json handling.
        """
        # No property name.
        invalid_json = u"{u'name': 'Test Name', []}"
        response = self.client.post(
            self._url(),
            data=invalid_json,
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(response.status_code, 400)
        self.assertNotIn("Location", response)
        content = json.loads(response.content.decode('utf-8'))
        self.assertIn("error", content)
@ddt.ddt
class GroupConfigurationsListHandlerTestCase(CourseTestCase, GroupConfigurationsBaseTestCase, HelperMethods):
    """
    Test cases for group_configurations_list_handler.
    """
    def _url(self):
        """
        Return url for the handler.
        """
        return reverse_course_url('group_configurations_list_handler', self.course.id)
    def test_view_index_ok(self):
        """
        Basic check that the groups configuration page responds correctly.
        """
        # This creates a random UserPartition.
        self.course.user_partitions = [
            UserPartition(0, 'First name', 'First description', [Group(0, 'Group A'), Group(1, 'Group B'), Group(2, 'Group C')]),
        ]
        self.save_course()
        if 'split_test' not in self.course.advanced_modules:
            self.course.advanced_modules.append('split_test')
        self.store.update_item(self.course, self.user.id)
        response = self.client.get(self._url())
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'First name', count=1)
        self.assertContains(response, 'Group C')
        self.assertContains(response, CONTENT_GROUP_CONFIGURATION_NAME)
    def test_unsupported_http_accept_header(self):
        """
        Test if not allowed header present in request.
        """
        # A non-HTML/non-JSON Accept header is rejected with 406 Not Acceptable.
        response = self.client.get(
            self._url(),
            HTTP_ACCEPT="text/plain",
        )
        self.assertEqual(response.status_code, 406)
    def test_can_create_group_configuration(self):
        """
        Test that you can create a group configuration.
        """
        expected = {
            u'description': u'Test description',
            u'name': u'Test name',
            u'scheme': u'random',
            u'version': UserPartition.VERSION,
            u'groups': [
                {u'name': u'Group A', u'version': 1},
                {u'name': u'Group B', u'version': 1},
            ],
            u'parameters': {},
            u'active': True
        }
        response = self.client.ajax_post(
            self._url(),
            data=GROUP_CONFIGURATION_JSON
        )
        self.assertEqual(response.status_code, 201)
        self.assertIn("Location", response)
        content = json.loads(response.content.decode('utf-8'))
        configuration_id, group_ids = self._remove_ids(content)  # pylint: disable=unused-variable
        self.assertEqual(content, expected)
        # IDs are unique
        self.assertEqual(len(group_ids), len(set(group_ids)))
        self.assertEqual(len(group_ids), 2)
        self.reload_course()
        # Verify that user_partitions in the course contains the new group configuration.
        user_partititons = self.course.user_partitions
        self.assertEqual(len(user_partititons), 1)
        self.assertEqual(user_partititons[0].name, u'Test name')
        self.assertEqual(len(user_partititons[0].groups), 2)
        self.assertEqual(user_partititons[0].groups[0].name, u'Group A')
        self.assertEqual(user_partititons[0].groups[1].name, u'Group B')
        self.assertEqual(user_partititons[0].parameters, {})
    def test_lazily_creates_cohort_configuration(self):
        """
        Test that a cohort schemed user partition is NOT created by
        default for the user.
        """
        # Simply viewing the page must not add any partitions.
        self.assertEqual(len(self.course.user_partitions), 0)
        self.client.get(self._url())
        self.reload_course()
        self.assertEqual(len(self.course.user_partitions), 0)
    @ddt.data('content_type_gate', 'enrollment_track')
    def test_cannot_create_restricted_group_configuration(self, scheme_id):
        """
        Test that you cannot create a restricted group configuration.
        """
        group_config = dict(GROUP_CONFIGURATION_JSON)
        group_config['scheme'] = scheme_id
        group_config.setdefault('parameters', {})['course_id'] = six.text_type(self.course.id)
        response = self.client.ajax_post(
            self._url(),
            data=group_config
        )
        self.assertEqual(response.status_code, 400)
@ddt.ddt
class GroupConfigurationsDetailHandlerTestCase(CourseTestCase, GroupConfigurationsBaseTestCase, HelperMethods):
    """
    Test cases for group_configurations_detail_handler.
    """
    # Default group configuration id used by _url() when no cid is supplied.
    ID = 0
    def _url(self, cid=-1):
        """
        Return url for the handler.
        """
        cid = cid if cid > 0 else self.ID
        return reverse_course_url(
            'group_configurations_detail_handler',
            self.course.id,
            kwargs={'group_configuration_id': cid},
        )
    def test_can_create_new_content_group_if_it_does_not_exist(self):
        """
        PUT new content group.
        """
        expected = {
            u'id': 666,
            u'name': u'Test name',
            u'scheme': u'cohort',
            u'description': u'Test description',
            u'version': UserPartition.VERSION,
            u'groups': [
                {u'id': 0, u'name': u'Group A', u'version': 1, u'usage': []},
                {u'id': 1, u'name': u'Group B', u'version': 1, u'usage': []},
            ],
            u'parameters': {},
            u'active': True,
        }
        response = self.client.put(
            self._url(cid=666),
            data=json.dumps(expected),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        content = json.loads(response.content.decode('utf-8'))
        self.assertEqual(content, expected)
        self.reload_course()
        # Verify that user_partitions in the course contains the new group configuration.
        user_partitions = self.course.user_partitions
        self.assertEqual(len(user_partitions), 1)
        self.assertEqual(user_partitions[0].name, u'Test name')
        self.assertEqual(len(user_partitions[0].groups), 2)
        self.assertEqual(user_partitions[0].groups[0].name, u'Group A')
        self.assertEqual(user_partitions[0].groups[1].name, u'Group B')
        self.assertEqual(user_partitions[0].parameters, {})
    def test_can_edit_content_group(self):
        """
        Edit content group and check its id and modified fields.
        """
        self._add_user_partitions(scheme_id='cohort')
        self.save_course()
        expected = {
            u'id': self.ID,
            u'name': u'New Test name',
            u'scheme': u'cohort',
            u'description': u'New Test description',
            u'version': UserPartition.VERSION,
            u'groups': [
                {u'id': 0, u'name': u'New Group Name', u'version': 1, u'usage': []},
                {u'id': 2, u'name': u'Group C', u'version': 1, u'usage': []},
            ],
            u'parameters': {},
            u'active': True,
        }
        response = self.client.put(
            self._url(),
            data=json.dumps(expected),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        content = json.loads(response.content.decode('utf-8'))
        self.assertEqual(content, expected)
        self.reload_course()
        # Verify that user_partitions is properly updated in the course.
        user_partititons = self.course.user_partitions
        self.assertEqual(len(user_partititons), 1)
        self.assertEqual(user_partititons[0].name, u'New Test name')
        self.assertEqual(len(user_partititons[0].groups), 2)
        self.assertEqual(user_partititons[0].groups[0].name, u'New Group Name')
        self.assertEqual(user_partititons[0].groups[1].name, u'Group C')
        self.assertEqual(user_partititons[0].parameters, {})
    def test_can_delete_content_group(self):
        """
        Delete content group and check user partitions.
        """
        self._add_user_partitions(count=1, scheme_id='cohort')
        self.save_course()
        # Appending '/<group_id>' targets a single group within the partition.
        details_url_with_group_id = self._url(cid=0) + '/1'
        response = self.client.delete(
            details_url_with_group_id,
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(response.status_code, 204)
        self.reload_course()
        # Verify that group and partition is properly updated in the course.
        user_partititons = self.course.user_partitions
        self.assertEqual(len(user_partititons), 1)
        self.assertEqual(user_partititons[0].name, 'Name 0')
        self.assertEqual(len(user_partititons[0].groups), 2)
        self.assertEqual(user_partititons[0].groups[1].name, 'Group C')
    def test_cannot_delete_used_content_group(self):
        """
        Cannot delete content group if it is in use.
        """
        self._add_user_partitions(count=1, scheme_id='cohort')
        # A problem referencing group 1 makes that group "in use".
        self._create_problem_with_content_group(cid=0, group_id=1)
        details_url_with_group_id = self._url(cid=0) + '/1'
        response = self.client.delete(
            details_url_with_group_id,
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(response.status_code, 400)
        content = json.loads(response.content.decode('utf-8'))
        self.assertTrue(content['error'])
        self.reload_course()
        # Verify that user_partitions and groups are still the same.
        user_partititons = self.course.user_partitions
        self.assertEqual(len(user_partititons), 1)
        self.assertEqual(len(user_partititons[0].groups), 3)
        self.assertEqual(user_partititons[0].groups[1].name, 'Group B')
    def test_cannot_delete_non_existent_content_group(self):
        """
        Cannot delete content group if it is doesn't exist.
        """
        self._add_user_partitions(count=1, scheme_id='cohort')
        details_url_with_group_id = self._url(cid=0) + '/90'
        response = self.client.delete(
            details_url_with_group_id,
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(response.status_code, 404)
        # Verify that user_partitions is still the same.
        user_partititons = self.course.user_partitions
        self.assertEqual(len(user_partititons), 1)
        self.assertEqual(len(user_partititons[0].groups), 3)
    def test_can_create_new_group_configuration_if_it_does_not_exist(self):
        """
        PUT new group configuration when no configurations exist in the course.
        """
        expected = {
            u'id': 999,
            u'name': u'Test name',
            u'scheme': u'random',
            u'description': u'Test description',
            u'version': UserPartition.VERSION,
            u'groups': [
                {u'id': 0, u'name': u'Group A', u'version': 1},
                {u'id': 1, u'name': u'Group B', u'version': 1},
            ],
            u'usage': [],
            u'parameters': {},
            u'active': True,
        }
        response = self.client.put(
            self._url(cid=999),
            data=json.dumps(expected),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        content = json.loads(response.content.decode('utf-8'))
        self.assertEqual(content, expected)
        self.reload_course()
        # Verify that user_partitions in the course contains the new group configuration.
        user_partitions = self.course.user_partitions
        self.assertEqual(len(user_partitions), 1)
        self.assertEqual(user_partitions[0].name, u'Test name')
        self.assertEqual(len(user_partitions[0].groups), 2)
        self.assertEqual(user_partitions[0].groups[0].name, u'Group A')
        self.assertEqual(user_partitions[0].groups[1].name, u'Group B')
        self.assertEqual(user_partitions[0].parameters, {})
    def test_can_edit_group_configuration(self):
        """
        Edit group configuration and check its id and modified fields.
        """
        self._add_user_partitions()
        self.save_course()
        expected = {
            u'id': self.ID,
            u'name': u'New Test name',
            u'scheme': u'random',
            u'description': u'New Test description',
            u'version': UserPartition.VERSION,
            u'groups': [
                {u'id': 0, u'name': u'New Group Name', u'version': 1},
                {u'id': 2, u'name': u'Group C', u'version': 1},
            ],
            u'usage': [],
            u'parameters': {},
            u'active': True,
        }
        response = self.client.put(
            self._url(),
            data=json.dumps(expected),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        content = json.loads(response.content.decode('utf-8'))
        self.assertEqual(content, expected)
        self.reload_course()
        # Verify that user_partitions is properly updated in the course.
        user_partititons = self.course.user_partitions
        self.assertEqual(len(user_partititons), 1)
        self.assertEqual(user_partititons[0].name, u'New Test name')
        self.assertEqual(len(user_partititons[0].groups), 2)
        self.assertEqual(user_partititons[0].groups[0].name, u'New Group Name')
        self.assertEqual(user_partititons[0].groups[1].name, u'Group C')
        self.assertEqual(user_partititons[0].parameters, {})
    def test_can_delete_group_configuration(self):
        """
        Delete group configuration and check user partitions.
        """
        self._add_user_partitions(count=2)
        self.save_course()
        response = self.client.delete(
            self._url(cid=0),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(response.status_code, 204)
        self.reload_course()
        # Verify that user_partitions is properly updated in the course.
        user_partititons = self.course.user_partitions
        self.assertEqual(len(user_partititons), 1)
        self.assertEqual(user_partititons[0].name, 'Name 1')
    def test_cannot_delete_used_group_configuration(self):
        """
        Cannot delete group configuration if it is in use.
        """
        self._add_user_partitions(count=2)
        # A content experiment bound to partition 0 makes it "in use".
        self._create_content_experiment(cid=0)
        response = self.client.delete(
            self._url(cid=0),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(response.status_code, 400)
        content = json.loads(response.content.decode('utf-8'))
        self.assertTrue(content['error'])
        self.reload_course()
        # Verify that user_partitions is still the same.
        user_partititons = self.course.user_partitions
        self.assertEqual(len(user_partititons), 2)
        self.assertEqual(user_partititons[0].name, 'Name 0')
    def test_cannot_delete_non_existent_group_configuration(self):
        """
        Cannot delete group configuration if it is doesn't exist.
        """
        self._add_user_partitions(count=2)
        response = self.client.delete(
            self._url(cid=999),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(response.status_code, 404)
        # Verify that user_partitions is still the same.
        user_partititons = self.course.user_partitions
        self.assertEqual(len(user_partititons), 2)
        self.assertEqual(user_partititons[0].name, 'Name 0')
    @ddt.data(CONTENT_TYPE_GATING_SCHEME, ENROLLMENT_SCHEME)
    def test_cannot_create_restricted_group_configuration(self, scheme_id):
        """
        Test that you cannot create a restricted group configuration.
        """
        group_config = dict(GROUP_CONFIGURATION_JSON)
        group_config['scheme'] = scheme_id
        group_config.setdefault('parameters', {})['course_id'] = six.text_type(self.course.id)
        response = self.client.ajax_post(
            self._url(),
            data=group_config
        )
        self.assertEqual(response.status_code, 400)
    @ddt.data(
        (CONTENT_TYPE_GATING_SCHEME, CONTENT_GATING_PARTITION_ID),
        (ENROLLMENT_SCHEME, ENROLLMENT_TRACK_PARTITION_ID),
    )
    @ddt.unpack
    def test_cannot_edit_restricted_group_configuration(self, scheme_id, partition_id):
        """
        Test that you cannot edit a restricted group configuration.
        """
        group_config = dict(GROUP_CONFIGURATION_JSON)
        group_config['scheme'] = scheme_id
        group_config.setdefault('parameters', {})['course_id'] = six.text_type(self.course.id)
        response = self.client.put(
            self._url(cid=partition_id),
            data=json.dumps(group_config),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(response.status_code, 400)
@ddt.ddt
class GroupConfigurationsUsageInfoTestCase(CourseTestCase, HelperMethods):
"""
Tests for usage information of configurations and content groups.
"""
def _get_user_partition(self, scheme):
"""
Returns the first user partition with the specified scheme.
"""
for group in GroupConfiguration.get_all_user_partition_details(self.store, self.course):
if group['scheme'] == scheme:
return group
return None
def _get_expected_content_group(self, usage_for_group):
"""
Returns the expected configuration with particular usage.
"""
return {
'id': 0,
'name': 'Name 0',
'scheme': 'cohort',
'description': 'Description 0',
'version': UserPartition.VERSION,
'groups': [
{'id': 0, 'name': 'Group A', 'version': 1, 'usage': []},
{'id': 1, 'name': 'Group B', 'version': 1, 'usage': usage_for_group},
{'id': 2, 'name': 'Group C', 'version': 1, 'usage': []},
],
u'parameters': {},
u'active': True,
}
    def test_content_group_not_used(self):
        """
        Test that right data structure will be created if content group is not used.
        """
        self._add_user_partitions(scheme_id='cohort')
        actual = self._get_user_partition('cohort')
        # With no content referencing the groups, usage must be empty.
        expected = self._get_expected_content_group(usage_for_group=[])
        self.assertEqual(actual, expected)
    def test_can_get_correct_usage_info_when_special_characters_are_in_content(self):
        """
        Test if content group json updated successfully with usage information
        when the problem's display name contains non-ASCII characters.
        """
        self._add_user_partitions(count=1, scheme_id='cohort')
        vertical, __ = self._create_problem_with_content_group(
            cid=0, group_id=1, name_suffix='0', special_characters=u"JOSÉ ANDRÉS"
        )
        actual = self._get_user_partition('cohort')
        expected = self._get_expected_content_group(
            usage_for_group=[
                {
                    'url': u"/container/{}".format(vertical.location),
                    'label': u"Test Unit 0 / Test Problem 0JOSÉ ANDRÉS"
                }
            ]
        )
        self.assertEqual(actual, expected)
def test_can_get_correct_usage_info_for_content_groups(self):
"""
Test if content group json updated successfully with usage information.
"""
self._add_user_partitions(count=1, scheme_id='cohort')
vertical, __ = self._create_problem_with_content_group(cid=0, group_id=1, name_suffix='0')
actual = self._get_user_partition('cohort')
expected = self._get_expected_content_group(usage_for_group=[
{
'url': '/container/{}'.format(vertical.location),
'label': 'Test Unit 0 / Test Problem 0'
}
])
self.assertEqual(actual, expected)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_can_get_correct_usage_info_with_orphan(self, module_store_type):
"""
Test if content group json updated successfully with usage information
even if there is an orphan in content group.
"""
self.course = CourseFactory.create(default_store=module_store_type)
self._add_user_partitions(count=1, scheme_id='cohort')
vertical, __ = self._create_problem_with_content_group(cid=0, group_id=1, name_suffix='0', orphan=True)
# Assert that there is an orphan in the course, and that it's the vertical
self.assertEqual(len(self.store.get_orphans(self.course.id)), 1)
self.assertIn(vertical.location, self.store.get_orphans(self.course.id))
# Get the expected content group information based on module store.
if module_store_type == ModuleStoreEnum.Type.mongo:
expected = self._get_expected_content_group(usage_for_group=[
{
'url': '/container/{}'.format(vertical.location),
'label': 'Test Unit 0 / Test Problem 0'
}
])
else:
expected = self._get_expected_content_group(usage_for_group=[])
# Get the actual content group information
actual = self._get_user_partition('cohort')
# Assert that actual content group information is same as expected one.
self.assertEqual(actual, expected)
def test_can_use_one_content_group_in_multiple_problems(self):
"""
Test if multiple problems are present in usage info when they use same
content group.
"""
self._add_user_partitions(scheme_id='cohort')
vertical, __ = self._create_problem_with_content_group(cid=0, group_id=1, name_suffix='0')
vertical1, __ = self._create_problem_with_content_group(cid=0, group_id=1, name_suffix='1')
actual = self._get_user_partition('cohort')
expected = self._get_expected_content_group(usage_for_group=[
{
'url': '/container/{}'.format(vertical1.location),
'label': 'Test Unit 1 / Test Problem 1'
},
{
'url': '/container/{}'.format(vertical.location),
'label': 'Test Unit 0 / Test Problem 0'
}
])
self.assertEqual(actual, expected)
def test_group_configuration_not_used(self):
"""
Test that right data structure will be created if group configuration is not used.
"""
self._add_user_partitions()
actual = GroupConfiguration.get_split_test_partitions_with_usage(self.store, self.course)
expected = [{
'id': 0,
'name': 'Name 0',
'scheme': 'random',
'description': 'Description 0',
'version': UserPartition.VERSION,
'groups': [
{'id': 0, 'name': 'Group A', 'version': 1},
{'id': 1, 'name': 'Group B', 'version': 1},
{'id': 2, 'name': 'Group C', 'version': 1},
],
'usage': [],
'parameters': {},
'active': True,
}]
self.assertEqual(actual, expected)
def test_can_get_correct_usage_info_for_split_test(self):
"""
When a split test is created and content group access is set for a problem within a group,
the usage info should return a url to the split test, not to the group.
"""
# Create user partition for groups in the split test,
# and another partition to set group access for the problem within the split test.
self._add_user_partitions(count=1)
self.course.user_partitions += [
UserPartition(
id=1,
name='Cohort User Partition',
scheme=UserPartition.get_scheme('cohort'),
description='Cohort User Partition',
groups=[
Group(id=3, name="Problem Group")
],
),
]
self.store.update_item(self.course, ModuleStoreEnum.UserID.test)
__, split_test, problem = self._create_content_experiment(cid=0, name_suffix='0', group_id=3, cid_for_problem=1)
expected = {
'id': 1,
'name': 'Cohort User Partition',
'scheme': 'cohort',
'description': 'Cohort User Partition',
'version': UserPartition.VERSION,
'groups': [
{'id': 3, 'name': 'Problem Group', 'version': 1, 'usage': [
{
'url': '/container/{}'.format(split_test.location),
'label': 'Condition 1 vertical / Test Problem'
}
]},
],
u'parameters': {},
u'active': True,
}
actual = self._get_user_partition('cohort')
self.assertEqual(actual, expected)
def test_can_get_correct_usage_info_for_unit(self):
"""
When group access is set on the unit level, the usage info should return a url to the unit, not
the sequential parent of the unit.
"""
self.course.user_partitions = [
UserPartition(
id=0,
name='User Partition',
scheme=UserPartition.get_scheme('cohort'),
description='User Partition',
groups=[
Group(id=0, name="Group")
],
),
]
vertical, __ = self._create_problem_with_content_group(
cid=0, group_id=0, name_suffix='0'
)
self.client.ajax_post(
reverse_usage_url("xblock_handler", vertical.location),
data={'metadata': {'group_access': {0: [0]}}}
)
actual = self._get_user_partition('cohort')
# order of usage list is arbitrary, sort for reliable comparison
actual['groups'][0]['usage'].sort(key=itemgetter('label'))
expected = {
'id': 0,
'name': 'User Partition',
'scheme': 'cohort',
'description': 'User Partition',
'version': UserPartition.VERSION,
'groups': [
{'id': 0, 'name': 'Group', 'version': 1, 'usage': [
{
'url': u"/container/{}".format(vertical.location),
'label': u"Test Subsection 0 / Test Unit 0"
},
{
'url': u"/container/{}".format(vertical.location),
'label': u"Test Unit 0 / Test Problem 0"
}
]},
],
u'parameters': {},
u'active': True,
}
self.maxDiff = None
assert actual == expected
def test_can_get_correct_usage_info(self):
"""
Test if group configurations json updated successfully with usage information.
"""
self._add_user_partitions(count=2)
__, split_test, __ = self._create_content_experiment(cid=0, name_suffix='0')
self._create_content_experiment(name_suffix='1')
actual = GroupConfiguration.get_split_test_partitions_with_usage(self.store, self.course)
expected = [{
'id': 0,
'name': 'Name 0',
'scheme': 'random',
'description': 'Description 0',
'version': UserPartition.VERSION,
'groups': [
{'id': 0, 'name': 'Group A', 'version': 1},
{'id': 1, 'name': 'Group B', 'version': 1},
{'id': 2, 'name': 'Group C', 'version': 1},
],
'usage': [{
'url': '/container/{}'.format(split_test.location),
'label': 'Test Unit 0 / Test Content Experiment 0',
'validation': None,
}],
'parameters': {},
'active': True,
}, {
'id': 1,
'name': 'Name 1',
'scheme': 'random',
'description': 'Description 1',
'version': UserPartition.VERSION,
'groups': [
{'id': 0, 'name': 'Group A', 'version': 1},
{'id': 1, 'name': 'Group B', 'version': 1},
{'id': 2, 'name': 'Group C', 'version': 1},
],
'usage': [],
'parameters': {},
'active': True,
}]
self.assertEqual(actual, expected)
def test_can_get_usage_info_when_special_characters_are_used(self):
"""
Test if group configurations json updated successfully when special
characters are being used in content experiment
"""
self._add_user_partitions(count=1)
__, split_test, __ = self._create_content_experiment(cid=0, name_suffix='0', special_characters=u"JOSÉ ANDRÉS")
actual = GroupConfiguration.get_split_test_partitions_with_usage(self.store, self.course, )
expected = [{
'id': 0,
'name': 'Name 0',
'scheme': 'random',
'description': 'Description 0',
'version': UserPartition.VERSION,
'groups': [
{'id': 0, 'name': 'Group A', 'version': 1},
{'id': 1, 'name': 'Group B', 'version': 1},
{'id': 2, 'name': 'Group C', 'version': 1},
],
'usage': [{
'url': reverse_usage_url("container_handler", split_test.location),
'label': u"Test Unit 0 / Test Content Experiment 0JOSÉ ANDRÉS",
'validation': None,
}],
'parameters': {},
'active': True,
}]
self.assertEqual(actual, expected)
def test_can_use_one_configuration_in_multiple_experiments(self):
"""
Test if multiple experiments are present in usage info when they use same
group configuration.
"""
self._add_user_partitions()
__, split_test, __ = self._create_content_experiment(cid=0, name_suffix='0')
__, split_test1, __ = self._create_content_experiment(cid=0, name_suffix='1')
actual = GroupConfiguration.get_split_test_partitions_with_usage(self.store, self.course)
expected = [{
'id': 0,
'name': 'Name 0',
'scheme': 'random',
'description': 'Description 0',
'version': UserPartition.VERSION,
'groups': [
{'id': 0, 'name': 'Group A', 'version': 1},
{'id': 1, 'name': 'Group B', 'version': 1},
{'id': 2, 'name': 'Group C', 'version': 1},
],
'usage': [{
'url': '/container/{}'.format(split_test.location),
'label': 'Test Unit 0 / Test Content Experiment 0',
'validation': None,
}, {
'url': '/container/{}'.format(split_test1.location),
'label': 'Test Unit 1 / Test Content Experiment 1',
'validation': None,
}],
'parameters': {},
'active': True,
}]
self.assertEqual(actual, expected)
def test_can_handle_without_parent(self):
"""
Test if it possible to handle case when split_test has no parent.
"""
self._add_user_partitions()
# Create split test without parent.
with modulestore().branch_setting(ModuleStoreEnum.Branch.published_only):
orphan = modulestore().create_item(
ModuleStoreEnum.UserID.test,
self.course.id, 'split_test',
)
orphan.user_partition_id = 0
orphan.display_name = 'Test Content Experiment'
modulestore().update_item(orphan, ModuleStoreEnum.UserID.test)
self.save_course()
actual = GroupConfiguration.get_content_experiment_usage_info(self.store, self.course)
self.assertEqual(actual, {0: []})
def test_can_handle_multiple_partitions(self):
# Create the user partitions
self.course.user_partitions = [
UserPartition(
id=0,
name='Cohort user partition',
scheme=UserPartition.get_scheme('cohort'),
description='Cohorted user partition',
groups=[
Group(id=0, name="Group A"),
Group(id=1, name="Group B"),
],
),
UserPartition(
id=1,
name='Random user partition',
scheme=UserPartition.get_scheme('random'),
description='Random user partition',
groups=[
Group(id=0, name="Group A"),
Group(id=1, name="Group B"),
],
),
]
self.store.update_item(self.course, ModuleStoreEnum.UserID.test)
# Assign group access rules for multiple partitions, one of which is a cohorted partition
__, problem = self._create_problem_with_content_group(0, 1)
problem.group_access = {
0: [0],
1: [1],
}
self.store.update_item(problem, ModuleStoreEnum.UserID.test)
# This used to cause an exception since the code assumed that
# only one partition would be available.
actual = GroupConfiguration.get_partitions_usage_info(self.store, self.course)
self.assertEqual(list(actual.keys()), [0])
actual = GroupConfiguration.get_content_groups_items_usage_info(self.store, self.course)
self.assertEqual(list(actual.keys()), [0])
def test_can_handle_duplicate_group_ids(self):
# Create the user partitions
self.course.user_partitions = [
UserPartition(
id=0,
name='Cohort user partition 1',
scheme=UserPartition.get_scheme('cohort'),
description='Cohorted user partition',
groups=[
Group(id=2, name="Group 1A"),
Group(id=3, name="Group 1B"),
],
),
UserPartition(
id=1,
name='Cohort user partition 2',
scheme=UserPartition.get_scheme('cohort'),
description='Random user partition',
groups=[
Group(id=2, name="Group 2A"),
Group(id=3, name="Group 2B"),
],
),
]
self.store.update_item(self.course, ModuleStoreEnum.UserID.test)
# Assign group access rules for multiple partitions, one of which is a cohorted partition
self._create_problem_with_content_group(0, 2, name_suffix='0')
self._create_problem_with_content_group(1, 3, name_suffix='1')
# This used to cause an exception since the code assumed that
# only one partition would be available.
actual = GroupConfiguration.get_partitions_usage_info(self.store, self.course)
self.assertEqual(list(actual.keys()), [0, 1])
self.assertEqual(list(actual[0].keys()), [2])
self.assertEqual(list(actual[1].keys()), [3])
actual = GroupConfiguration.get_content_groups_items_usage_info(self.store, self.course)
self.assertEqual(list(actual.keys()), [0, 1])
self.assertEqual(list(actual[0].keys()), [2])
self.assertEqual(list(actual[1].keys()), [3])
class GroupConfigurationsValidationTestCase(CourseTestCase, HelperMethods):
    """
    Tests for validation in Group Configurations.
    """
    # NOTE: @patch appends the mock as the *last* positional argument
    # (mocked_validation_messages), so call sites pass one argument fewer
    # than the signature shows -- hence the pylint no-value-for-parameter
    # disables below.
    @patch('xmodule.split_test_module.SplitTestDescriptor.validate_split_test')
    def verify_validation_add_usage_info(self, expected_result, mocked_message, mocked_validation_messages):
        """
        Helper method for testing validation information present after add_usage_info.

        Stubs the split-test validator to return ``mocked_message`` and then
        asserts the usage entry carries ``expected_result`` as its validation.
        """
        self._add_user_partitions()
        split_test = self._create_content_experiment(cid=0, name_suffix='0')[1]
        validation = StudioValidation(split_test.location)
        validation.add(mocked_message)
        mocked_validation_messages.return_value = validation
        group_configuration = GroupConfiguration.get_split_test_partitions_with_usage(self.store, self.course)[0]
        self.assertEqual(expected_result.to_json(), group_configuration['usage'][0]['validation'])
    def test_error_message_present(self):
        """
        Tests if validation message is present (error case).
        """
        mocked_message = StudioValidationMessage(StudioValidationMessage.ERROR, u"Validation message")
        expected_result = StudioValidationMessage(
            StudioValidationMessage.ERROR, u"This content experiment has issues that affect content visibility."
        )
        self.verify_validation_add_usage_info(expected_result, mocked_message)  # pylint: disable=no-value-for-parameter
    def test_warning_message_present(self):
        """
        Tests if validation message is present (warning case).
        """
        mocked_message = StudioValidationMessage(StudioValidationMessage.WARNING, u"Validation message")
        expected_result = StudioValidationMessage(
            StudioValidationMessage.WARNING, u"This content experiment has issues that affect content visibility."
        )
        self.verify_validation_add_usage_info(expected_result, mocked_message)  # pylint: disable=no-value-for-parameter
    @patch('xmodule.split_test_module.SplitTestDescriptor.validate_split_test')
    def verify_validation_update_usage_info(self, expected_result, mocked_message, mocked_validation_messages):
        """
        Helper method for testing validation information present after update_usage_info.

        Unlike the add_usage_info helper, ``mocked_message`` may be None, in
        which case the usage entry's validation is expected to be None too.
        """
        self._add_user_partitions()
        split_test = self._create_content_experiment(cid=0, name_suffix='0')[1]
        validation = StudioValidation(split_test.location)
        if mocked_message is not None:
            validation.add(mocked_message)
        mocked_validation_messages.return_value = validation
        group_configuration = GroupConfiguration.update_usage_info(
            self.store, self.course, self.course.user_partitions[0]
        )
        self.assertEqual(
            expected_result.to_json() if expected_result is not None else None,
            group_configuration['usage'][0]['validation']
        )
    def test_update_usage_info(self):
        """
        Tests if validation message is present when updating usage info.
        """
        mocked_message = StudioValidationMessage(StudioValidationMessage.WARNING, u"Validation message")
        expected_result = StudioValidationMessage(
            StudioValidationMessage.WARNING, u"This content experiment has issues that affect content visibility."
        )
        # pylint: disable=no-value-for-parameter
        self.verify_validation_update_usage_info(expected_result, mocked_message)
    def test_update_usage_info_no_message(self):
        """
        Tests if validation message is not present when updating usage info.
        """
        self.verify_validation_update_usage_info(None, None)  # pylint: disable=no-value-for-parameter
| agpl-3.0 |
40223229/2015cdb_g9 | static/Brython3.1.1-20150328-091302/Lib/contextlib.py | 737 | 8788 | """Utilities for with-statement contexts. See PEP 343."""
import sys
from collections import deque
from functools import wraps
__all__ = ["contextmanager", "closing", "ContextDecorator", "ExitStack"]
class ContextDecorator(object):
    """Mixin that lets a context manager also act as a function decorator.

    Subclasses supply ``__enter__``/``__exit__``; applying an instance as a
    decorator then runs every call of the target function inside a ``with``
    block using that instance.
    """
    def _recreate_cm(self):
        """Return the context manager to use for a decorated function call.

        The default simply reuses ``self``.  One-shot managers such as
        _GeneratorContextManager override this so each invocation of the
        decorated function gets a fresh manager (this private hook exists
        for that class; see issue #11647 for details).
        """
        return self
    def __call__(self, func):
        def decorated(*args, **kwargs):
            with self._recreate_cm():
                return func(*args, **kwargs)
        return wraps(func)(decorated)
class _GeneratorContextManager(ContextDecorator):
    """Helper for @contextmanager decorator.

    Wraps a generator function: advancing it to its first ``yield`` is
    __enter__, and resuming (or throwing into) it is __exit__.
    """
    def __init__(self, func, *args, **kwds):
        # Create the generator now; it runs up to (not including) its first
        # yield when __enter__ calls next() on it.
        self.gen = func(*args, **kwds)
        # Keep the factory arguments so _recreate_cm() can build a fresh
        # generator for each decorated call.
        self.func, self.args, self.kwds = func, args, kwds
    def _recreate_cm(self):
        # _GCM instances are one-shot context managers, so the
        # CM must be recreated each time a decorated function is
        # called
        return self.__class__(self.func, *self.args, **self.kwds)
    def __enter__(self):
        """Run the generator to its first yield and return the yielded value."""
        try:
            return next(self.gen)
        except StopIteration:
            # The generator returned without yielding at all.
            raise RuntimeError("generator didn't yield")
    def __exit__(self, type, value, traceback):
        """Finish the generator, re-raising or suppressing as appropriate."""
        if type is None:
            # Normal exit path: the generator must yield exactly once.
            try:
                next(self.gen)
            except StopIteration:
                return
            else:
                raise RuntimeError("generator didn't stop")
        else:
            if value is None:
                # Need to force instantiation so we can reliably
                # tell if we get the same exception back
                value = type()
            try:
                self.gen.throw(type, value, traceback)
                raise RuntimeError("generator didn't stop after throw()")
            except StopIteration as exc:
                # Suppress the exception *unless* it's the same exception that
                # was passed to throw(). This prevents a StopIteration
                # raised inside the "with" statement from being suppressed
                return exc is not value
            except:
                # only re-raise if it's *not* the exception that was
                # passed to throw(), because __exit__() must not raise
                # an exception unless __exit__() itself failed. But throw()
                # has to raise the exception to signal propagation, so this
                # fixes the impedance mismatch between the throw() protocol
                # and the __exit__() protocol.
                #
                if sys.exc_info()[1] is not value:
                    raise
def contextmanager(func):
    """@contextmanager decorator.

    Turns a generator function into a context-manager factory::

        @contextmanager
        def managed(<arguments>):
            <setup>
            try:
                yield <value>
            finally:
                <cleanup>

    so that ``with managed(<arguments>) as <variable>:`` runs <setup>, binds
    <variable> to the yielded <value>, executes the with-body, and always
    runs <cleanup> afterwards -- exactly like writing the try/finally
    out by hand around the block.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        return _GeneratorContextManager(func, *args, **kwargs)
    return wrapper
class closing(object):
    """Context manager that calls ``close()`` on a resource at block exit.

    ``with closing(<module>.open(<arguments>)) as f: <block>`` behaves like::

        f = <module>.open(<arguments>)
        try:
            <block>
        finally:
            f.close()

    i.e. the wrapped object itself is bound by the ``as`` clause and its
    ``close()`` method is guaranteed to run, even on error.
    """
    def __init__(self, thing):
        # Keep the wrapped resource under the documented attribute name.
        self.thing = thing
    def __enter__(self):
        return self.thing
    def __exit__(self, exc_type, exc_value, exc_tb):
        self.thing.close()
# Inspired by discussions on http://bugs.python.org/issue13585
class ExitStack(object):
    """Context manager for dynamic management of a stack of exit callbacks
    For example:
        with ExitStack() as stack:
            files = [stack.enter_context(open(fname)) for fname in filenames]
            # All opened files will automatically be closed at the end of
            # the with statement, even if attempts to open files later
            # in the list raise an exception
    """
    def __init__(self):
        # LIFO stack of callbacks, each with an __exit__-style signature.
        self._exit_callbacks = deque()
    def pop_all(self):
        """Preserve the context stack by transferring it to a new instance"""
        new_stack = type(self)()
        new_stack._exit_callbacks = self._exit_callbacks
        self._exit_callbacks = deque()
        return new_stack
    def _push_cm_exit(self, cm, cm_exit):
        """Helper to correctly register callbacks to __exit__ methods"""
        def _exit_wrapper(*exc_details):
            return cm_exit(cm, *exc_details)
        # Make the wrapper look like a bound method for introspection.
        _exit_wrapper.__self__ = cm
        self.push(_exit_wrapper)
    def push(self, exit):
        """Registers a callback with the standard __exit__ method signature
        Can suppress exceptions the same way __exit__ methods can.
        Also accepts any object with an __exit__ method (registering a call
        to the method instead of the object itself)
        """
        # We use an unbound method rather than a bound method to follow
        # the standard lookup behaviour for special methods
        _cb_type = type(exit)
        try:
            exit_method = _cb_type.__exit__
        except AttributeError:
            # Not a context manager, so assume it's a callable
            self._exit_callbacks.append(exit)
        else:
            self._push_cm_exit(exit, exit_method)
        return exit # Allow use as a decorator
    def callback(self, callback, *args, **kwds):
        """Registers an arbitrary callback and arguments.
        Cannot suppress exceptions.
        """
        def _exit_wrapper(exc_type, exc, tb):
            callback(*args, **kwds)
        # We changed the signature, so using @wraps is not appropriate, but
        # setting __wrapped__ may still help with introspection
        _exit_wrapper.__wrapped__ = callback
        self.push(_exit_wrapper)
        return callback # Allow use as a decorator
    def enter_context(self, cm):
        """Enters the supplied context manager
        If successful, also pushes its __exit__ method as a callback and
        returns the result of the __enter__ method.
        """
        # We look up the special methods on the type to match the with statement
        _cm_type = type(cm)
        _exit = _cm_type.__exit__
        result = _cm_type.__enter__(cm)
        self._push_cm_exit(cm, _exit)
        return result
    def close(self):
        """Immediately unwind the context stack"""
        self.__exit__(None, None, None)
    def __enter__(self):
        return self
    def __exit__(self, *exc_details):
        received_exc = exc_details[0] is not None
        # We manipulate the exception state so it behaves as though
        # we were actually nesting multiple with statements
        frame_exc = sys.exc_info()[1]
        def _fix_exception_context(new_exc, old_exc):
            # Walk new_exc's __context__ chain and graft old_exc onto its
            # end, stopping early at the exception already active in this
            # frame to avoid creating a context cycle.
            while 1:
                exc_context = new_exc.__context__
                if exc_context in (None, frame_exc):
                    break
                new_exc = exc_context
            new_exc.__context__ = old_exc
        # Callbacks are invoked in LIFO order to match the behaviour of
        # nested context managers
        suppressed_exc = False
        pending_raise = False
        while self._exit_callbacks:
            cb = self._exit_callbacks.pop()
            try:
                if cb(*exc_details):
                    # A true return suppresses the current exception, just
                    # like a true-returning __exit__ in a real with block.
                    suppressed_exc = True
                    pending_raise = False
                    exc_details = (None, None, None)
            except:
                new_exc_details = sys.exc_info()
                # simulate the stack of exceptions by setting the context
                _fix_exception_context(new_exc_details[1], exc_details[1])
                pending_raise = True
                exc_details = new_exc_details
        if pending_raise:
            try:
                # bare "raise exc_details[1]" replaces our carefully
                # set-up context
                fixed_ctx = exc_details[1].__context__
                raise exc_details[1]
            except BaseException:
                exc_details[1].__context__ = fixed_ctx
                raise
        return received_exc and suppressed_exc
| gpl-2.0 |
tersmitten/ansible | lib/ansible/modules/network/cloudengine/ce_evpn_global.py | 31 | 6897 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_evpn_global
version_added: "2.4"
short_description: Manages global configuration of EVPN on HUAWEI CloudEngine switches.
description:
- Manages global configuration of EVPN on HUAWEI CloudEngine switches.
author: Zhijin Zhou (@QijunPan)
notes:
- Before configuring evpn_overlay_enable=disable, delete other EVPN configurations.
options:
evpn_overlay_enable:
description:
- Configure EVPN as the VXLAN control plane.
required: true
choices: ['enable','disable']
'''
EXAMPLES = '''
- name: evpn global module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
  - name: Configure EVPN as the VXLAN control plane
ce_evpn_global:
evpn_overlay_enable: enable
provider: "{{ cli }}"
  - name: Undo EVPN as the VXLAN control plane
ce_evpn_global:
evpn_overlay_enable: disable
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {
"evpn_overlay_enable": "enable"
}
existing:
description: k/v pairs of existing attributes on the device
returned: always
type: dict
sample: {
"evpn_overlay_enable": "disable"
}
end_state:
description: k/v pairs of end attributes on the interface
returned: always
type: dict
sample: {
"evpn_overlay_enable": "enable"
}
updates:
description: command list sent to the device
returned: always
type: list
sample: [
"evpn-overlay enable",
]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec
class EvpnGlobal(object):
    """Manage the global EVPN configuration on HUAWEI CloudEngine switches.

    Reads the current state of the ``evpn-overlay enable`` CLI knob, compares
    it with the requested state, applies the change if needed, and reports
    proposed/existing/end_state facts back through the Ansible module.
    """
    def __init__(self, argument_spec, ):
        self.spec = argument_spec
        self.module = None
        self.init_module()
        # EVPN global configuration parameters
        self.overlay_enable = self.module.params['evpn_overlay_enable']
        self.commands = list()
        self.global_info = dict()
        self.conf_exist = False
        # state
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()
    def init_module(self):
        """Instantiate the AnsibleModule (check mode supported)."""
        self.module = AnsibleModule(
            argument_spec=self.spec, supports_check_mode=True)
    def cli_load_config(self, commands):
        """Push CLI commands to the device (skipped in check mode)."""
        if not self.module.check_mode:
            load_config(self.module, commands)
    def cli_add_command(self, command, undo=False):
        """Add a command to self.updates_cmd and self.commands.

        With undo=True the command is prefixed with "undo" unless it is a
        mode-navigation command ("quit"/"return").
        """
        if undo and command.lower() not in ["quit", "return"]:
            cmd = "undo " + command
        else:
            cmd = command
        self.commands.append(cmd)  # set to device
        if command.lower() not in ["quit", "return"]:
            self.updates_cmd.append(cmd)  # show updates result
    def get_evpn_global_info(self):
        """Read the current EVPN global configuration from the device.

        Sets global_info['evpnOverLay'] to 'enable' only when the running
        config contains the "evpn-overlay enable" line.
        """
        self.global_info['evpnOverLay'] = 'disable'
        flags = list()
        exp = " | include evpn-overlay enable"
        flags.append(exp)
        config = get_config(self.module, flags)
        if config:
            self.global_info['evpnOverLay'] = 'enable'
    def get_existing(self):
        """Record the existing (pre-change) configuration."""
        self.existing = dict(
            evpn_overlay_enable=self.global_info['evpnOverLay'])
    def get_proposed(self):
        """Record the proposed (requested) configuration."""
        self.proposed = dict(evpn_overlay_enable=self.overlay_enable)
    def get_end_state(self):
        """Re-read the device and record the post-change configuration."""
        self.get_evpn_global_info()
        self.end_state = dict(
            evpn_overlay_enable=self.global_info['evpnOverLay'])
    def show_result(self):
        """Assemble the result dict and exit the module with it."""
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()
        self.module.exit_json(**self.results)
    def judge_if_config_exist(self):
        """Return True if the device already matches the requested state."""
        if self.overlay_enable == self.global_info['evpnOverLay']:
            return True
        return False
    def config_evnp_global(self):
        """Apply the global EVPN configuration if it is not already present."""
        if not self.conf_exist:
            if self.overlay_enable == 'enable':
                self.cli_add_command('evpn-overlay enable')
            else:
                # Requested 'disable': remove the knob with "undo".
                self.cli_add_command('evpn-overlay enable', True)
            if self.commands:
                self.cli_load_config(self.commands)
                self.changed = True
    def work(self):
        """Execute the task: gather state, apply the change, report results."""
        self.get_evpn_global_info()
        self.get_existing()
        self.get_proposed()
        self.conf_exist = self.judge_if_config_exist()
        self.config_evnp_global()
        self.get_end_state()
        self.show_result()
def main():
    """Module entry point: build the argument spec and run the EVPN task."""
    argument_spec = dict(
        evpn_overlay_enable=dict(
            required=True, type='str', choices=['enable', 'disable']),
    )
    # Merge in the common CloudEngine connection/provider arguments.
    argument_spec.update(ce_argument_spec)
    evpn_global = EvpnGlobal(argument_spec)
    evpn_global.work()
main()
| gpl-3.0 |
javelinanddart/Canuck | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect distutils' build output into the directories chosen by the
    # perf Makefile (passed in via the PYTHON_EXTBUILD_* environment
    # variables read at module level).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install straight from the Makefile-provided build directory instead of
    # distutils' default build location.
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Compiler flags: silence warnings triggered by the perf C sources, then
# append whatever CFLAGS the Makefile exported.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
# Build directories are dictated by the perf Makefile via environment.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
# Source list lives in util/python-ext-sources; skip blank lines and
# '#' comments.  NOTE: file() is the Python 2 builtin -- this script is
# Python 2 only (see the #!/usr/bin/python2 shebang).
ext_sources = [f.strip() for f in file('util/python-ext-sources')
                if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
		  sources = ext_sources,
		  include_dirs = ['util/include'],
		  extra_compile_args = cflags,
                 )
setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
GhostThrone/django | tests/foreign_object/tests.py | 5 | 18904 | import datetime
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db import models
from django.db.models.fields.related import ForeignObject
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.utils import translation
from .models import (
Article, ArticleIdea, ArticleTag, ArticleTranslation, Country, Friendship,
Group, Membership, NewsArticle, Person,
)
# Note that these tests are testing internal implementation details.
# ForeignObject is not part of public API.
class MultiColumnFKTests(TestCase):
def setUp(self):
# Creating countries
self.usa = Country.objects.create(name="United States of America")
self.soviet_union = Country.objects.create(name="Soviet Union")
Person()
# Creating People
self.bob = Person()
self.bob.name = 'Bob'
self.bob.person_country = self.usa
self.bob.save()
self.jim = Person.objects.create(name='Jim', person_country=self.usa)
self.george = Person.objects.create(name='George', person_country=self.usa)
self.jane = Person.objects.create(name='Jane', person_country=self.soviet_union)
self.mark = Person.objects.create(name='Mark', person_country=self.soviet_union)
self.sam = Person.objects.create(name='Sam', person_country=self.soviet_union)
# Creating Groups
self.kgb = Group.objects.create(name='KGB', group_country=self.soviet_union)
self.cia = Group.objects.create(name='CIA', group_country=self.usa)
self.republican = Group.objects.create(name='Republican', group_country=self.usa)
self.democrat = Group.objects.create(name='Democrat', group_country=self.usa)
def test_get_succeeds_on_multicolumn_match(self):
# Membership objects have access to their related Person if both
# country_ids match between them
membership = Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
person = membership.person
self.assertEqual((person.id, person.name), (self.bob.id, "Bob"))
def test_get_fails_on_multicolumn_mismatch(self):
# Membership objects returns DoesNotExist error when the there is no
# Person with the same id and country_id
membership = Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jane.id, group_id=self.cia.id)
self.assertRaises(Person.DoesNotExist, getattr, membership, 'person')
def test_reverse_query_returns_correct_result(self):
# Creating a valid membership because it has the same country has the person
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
# Creating an invalid membership because it has a different country has the person
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.bob.id,
group_id=self.republican.id)
self.assertQuerysetEqual(
self.bob.membership_set.all(), [
self.cia.id
],
attrgetter("group_id")
)
    def test_query_filters_correctly(self):
        """Filtering through the forward relation honours every join column."""
        # Create two valid memberships
        Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
        Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.jim.id,
            group_id=self.cia.id)
        # Creating an invalid membership (George's country doesn't match)
        Membership.objects.create(membership_country_id=self.soviet_union.id,
                                  person_id=self.george.id, group_id=self.cia.id)
        # Only Bob matches: Jim has no 'o', George's membership is invalid.
        self.assertQuerysetEqual(
            Membership.objects.filter(person__name__contains='o'), [
                self.bob.id
            ],
            attrgetter("person_id")
        )
    def test_reverse_query_filters_correctly(self):
        """Filtering through the reverse relation honours every join column."""
        # Naive UTC reference time; date_joined values straddle it by a day.
        timemark = datetime.datetime.utcnow()
        timedelta = datetime.timedelta(days=1)
        # Create two valid memberships (Bob joined in the past, Jim in the future)
        Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.bob.id,
            group_id=self.cia.id, date_joined=timemark - timedelta)
        Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.jim.id,
            group_id=self.cia.id, date_joined=timemark + timedelta)
        # Creating an invalid membership (George's country doesn't match)
        Membership.objects.create(
            membership_country_id=self.soviet_union.id, person_id=self.george.id,
            group_id=self.cia.id, date_joined=timemark + timedelta)
        self.assertQuerysetEqual(
            Person.objects.filter(membership__date_joined__gte=timemark), [
                'Jim'
            ],
            attrgetter('name')
        )
    def test_forward_in_lookup_filters_correctly(self):
        """``person__in`` works with both instance lists and querysets."""
        Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
                                  group_id=self.cia.id)
        Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
                                  group_id=self.cia.id)
        # Creating an invalid membership (George's country doesn't match)
        Membership.objects.create(
            membership_country_id=self.soviet_union.id, person_id=self.george.id,
            group_id=self.cia.id)
        # George is excluded because his membership's columns don't all match.
        self.assertQuerysetEqual(
            Membership.objects.filter(person__in=[self.george, self.jim]), [
                self.jim.id,
            ],
            attrgetter('person_id')
        )
        self.assertQuerysetEqual(
            Membership.objects.filter(person__in=Person.objects.filter(name='Jim')), [
                self.jim.id,
            ],
            attrgetter('person_id')
        )
    def test_double_nested_query(self):
        """Nested ``__in`` subqueries across ForeignObjects filter/exclude correctly."""
        m1 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
                                       group_id=self.cia.id)
        m2 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
                                       group_id=self.cia.id)
        # Bob befriends Jim, so Bob is the only person with an outgoing friendship.
        Friendship.objects.create(from_friend_country_id=self.usa.id, from_friend_id=self.bob.id,
                                  to_friend_country_id=self.usa.id, to_friend_id=self.jim.id)
        self.assertQuerysetEqual(Membership.objects.filter(
            person__in=Person.objects.filter(
                from_friend__in=Friendship.objects.filter(
                    to_friend__in=Person.objects.all()))),
            [m1], lambda x: x)
        self.assertQuerysetEqual(Membership.objects.exclude(
            person__in=Person.objects.filter(
                from_friend__in=Friendship.objects.filter(
                    to_friend__in=Person.objects.all()))),
            [m2], lambda x: x)
    def test_select_related_foreignkey_forward_works(self):
        """select_related() over a composite FK resolves in a single query."""
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
        with self.assertNumQueries(1):
            people = [m.person for m in Membership.objects.select_related('person').order_by('pk')]
        # Must match the lazily-fetched equivalent.
        normal_people = [m.person for m in Membership.objects.all().order_by('pk')]
        self.assertEqual(people, normal_people)
    def test_prefetch_foreignkey_forward_works(self):
        """prefetch_related() over a composite FK resolves in two queries."""
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
        with self.assertNumQueries(2):
            people = [
                m.person for m in Membership.objects.prefetch_related('person').order_by('pk')]
        # Must match the lazily-fetched equivalent.
        normal_people = [m.person for m in Membership.objects.order_by('pk')]
        self.assertEqual(people, normal_people)
    def test_prefetch_foreignkey_reverse_works(self):
        """prefetch_related() of the reverse FK set resolves in two queries."""
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
        with self.assertNumQueries(2):
            membership_sets = [
                list(p.membership_set.all())
                for p in Person.objects.prefetch_related('membership_set').order_by('pk')]
        # Must match the lazily-fetched equivalent.
        normal_membership_sets = [list(p.membership_set.all())
                                  for p in Person.objects.order_by('pk')]
        self.assertEqual(membership_sets, normal_membership_sets)
    def test_m2m_through_forward_returns_valid_members(self):
        """Forward M2M-through traversal returns members whose columns all match."""
        # We start out by making sure that the Group 'CIA' has no members.
        self.assertQuerysetEqual(
            self.cia.members.all(),
            []
        )
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.cia)
        # Let's check to make sure that it worked.  Bob and Jim should be members of the CIA.
        self.assertQuerysetEqual(
            self.cia.members.all(), [
                'Bob',
                'Jim'
            ], attrgetter("name")
        )
    def test_m2m_through_reverse_returns_valid_members(self):
        """Reverse M2M-through traversal returns groups whose columns all match."""
        # We start out by making sure that Bob is in no groups.
        self.assertQuerysetEqual(
            self.bob.groups.all(),
            []
        )
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.bob,
                                  group=self.republican)
        # Bob should be in the CIA and a Republican
        self.assertQuerysetEqual(
            self.bob.groups.all(), [
                'CIA',
                'Republican'
            ], attrgetter("name")
        )
    def test_m2m_through_forward_ignores_invalid_members(self):
        """Forward M2M-through traversal skips rows with mismatched columns."""
        # We start out by making sure that the Group 'CIA' has no members.
        self.assertQuerysetEqual(
            self.cia.members.all(),
            []
        )
        # Something adds Jane to group CIA, but Jane is in the Soviet Union,
        # which isn't CIA's country.
        Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)
        # There should still be no members in CIA
        self.assertQuerysetEqual(
            self.cia.members.all(),
            []
        )
    def test_m2m_through_reverse_ignores_invalid_members(self):
        """Reverse M2M-through traversal skips rows with mismatched columns."""
        # We start out by making sure that Jane has no groups.
        self.assertQuerysetEqual(
            self.jane.groups.all(),
            []
        )
        # Something adds Jane to group CIA, but Jane is in the Soviet Union,
        # which isn't CIA's country.
        Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)
        # Jane should still not be in any groups
        self.assertQuerysetEqual(
            self.jane.groups.all(),
            []
        )
    def test_m2m_through_on_self_works(self):
        """A self-referential M2M through Friendship returns matching friends."""
        self.assertQuerysetEqual(
            self.jane.friends.all(),
            []
        )
        Friendship.objects.create(
            from_friend_country=self.jane.person_country, from_friend=self.jane,
            to_friend_country=self.george.person_country, to_friend=self.george)
        self.assertQuerysetEqual(
            self.jane.friends.all(),
            ['George'], attrgetter("name")
        )
    def test_m2m_through_on_self_ignores_mismatch_columns(self):
        """A self-referential M2M skips rows whose country columns mismatch."""
        self.assertQuerysetEqual(self.jane.friends.all(), [])
        # Note that we use ids instead of instances.  This is because instances
        # on ForeignObject properties will set all related fields off of the
        # given instance, which would defeat the deliberate mismatch below.
        Friendship.objects.create(
            from_friend_id=self.jane.id, to_friend_id=self.george.id,
            to_friend_country_id=self.jane.person_country_id,
            from_friend_country_id=self.george.person_country_id)
        self.assertQuerysetEqual(self.jane.friends.all(), [])
    # NOTE(review): "foward" is a typo for "forward"; kept because renaming a
    # test method changes how it is addressed by the test runner.
    def test_prefetch_related_m2m_foward_works(self):
        """prefetch_related() of a forward M2M-through resolves in two queries."""
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
        with self.assertNumQueries(2):
            members_lists = [list(g.members.all())
                             for g in Group.objects.prefetch_related('members')]
        # Must match the lazily-fetched equivalent.
        normal_members_lists = [list(g.members.all()) for g in Group.objects.all()]
        self.assertEqual(members_lists, normal_members_lists)
    def test_prefetch_related_m2m_reverse_works(self):
        """prefetch_related() of a reverse M2M-through resolves in two queries."""
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
        with self.assertNumQueries(2):
            groups_lists = [list(p.groups.all()) for p in Person.objects.prefetch_related('groups')]
        # Must match the lazily-fetched equivalent.
        normal_groups_lists = [list(p.groups.all()) for p in Person.objects.all()]
        self.assertEqual(groups_lists, normal_groups_lists)
    @translation.override('fi')
    def test_translations(self):
        """The language-dependent ``active_translation`` ForeignObject follows
        the active translation, works with select_related(), and supports
        isnull/None lookups across the join."""
        a1 = Article.objects.create(pub_date=datetime.date.today())
        at1_fi = ArticleTranslation(article=a1, lang='fi', title='Otsikko', body='Diipadaapa')
        at1_fi.save()
        at2_en = ArticleTranslation(article=a1, lang='en', title='Title', body='Lalalalala')
        at2_en.save()
        # Active language is 'fi' (decorator), so the Finnish row is returned.
        self.assertEqual(Article.objects.get(pk=a1.pk).active_translation, at1_fi)
        with self.assertNumQueries(1):
            fetched = Article.objects.select_related('active_translation').get(
                active_translation__title='Otsikko')
            self.assertEqual(fetched.active_translation.title, 'Otsikko')
        a2 = Article.objects.create(pub_date=datetime.date.today())
        at2_fi = ArticleTranslation(article=a2, lang='fi', title='Atsikko', body='Diipadaapa',
                                    abstract='dipad')
        at2_fi.save()
        a3 = Article.objects.create(pub_date=datetime.date.today())
        at3_en = ArticleTranslation(article=a3, lang='en', title='A title', body='lalalalala',
                                    abstract='lala')
        at3_en.save()
        # Test model initialization with active_translation field.
        a3 = Article(id=a3.id, pub_date=a3.pub_date, active_translation=at3_en)
        a3.save()
        # abstract=None matches both "no abstract" and "no active translation".
        self.assertEqual(
            list(Article.objects.filter(active_translation__abstract=None)),
            [a1, a3])
        # pk__isnull=False restricts to articles that do have an active translation.
        self.assertEqual(
            list(Article.objects.filter(active_translation__abstract=None,
                                        active_translation__pk__isnull=False)),
            [a1])
        # Switching the active language changes which translations are "active".
        with translation.override('en'):
            self.assertEqual(
                list(Article.objects.filter(active_translation__abstract=None)),
                [a1, a2])
    def test_foreign_key_raises_informative_does_not_exist(self):
        """Accessing an unset FK raises DoesNotExist with a descriptive message."""
        referrer = ArticleTranslation()
        with self.assertRaisesMessage(Article.DoesNotExist, 'ArticleTranslation has no article'):
            referrer.article
    def test_foreign_key_related_query_name(self):
        """Reverse FK filtering uses related_query_name; the default name errors."""
        a1 = Article.objects.create(pub_date=datetime.date.today())
        ArticleTag.objects.create(article=a1, name="foo")
        self.assertEqual(Article.objects.filter(tag__name="foo").count(), 1)
        self.assertEqual(Article.objects.filter(tag__name="bar").count(), 0)
        # 'tags' is not the related query name, so it must raise FieldError.
        with self.assertRaises(FieldError):
            Article.objects.filter(tags__name="foo")
    def test_many_to_many_related_query_name(self):
        """M2M filtering uses related_query_name; the default name errors."""
        a1 = Article.objects.create(pub_date=datetime.date.today())
        i1 = ArticleIdea.objects.create(name="idea1")
        a1.ideas.add(i1)
        self.assertEqual(Article.objects.filter(idea_things__name="idea1").count(), 1)
        self.assertEqual(Article.objects.filter(idea_things__name="idea2").count(), 0)
        # 'ideas' is not the related query name, so it must raise FieldError.
        with self.assertRaises(FieldError):
            Article.objects.filter(ideas__name="idea1")
    @translation.override('fi')
    def test_inheritance(self):
        """active_translation also works via select_related() on a subclass."""
        na = NewsArticle.objects.create(pub_date=datetime.date.today())
        ArticleTranslation.objects.create(
            article=na, lang="fi", title="foo", body="bar")
        self.assertQuerysetEqual(
            NewsArticle.objects.select_related('active_translation'),
            [na], lambda x: x
        )
        # The joined translation must be populated without a second query.
        with self.assertNumQueries(1):
            self.assertEqual(
                NewsArticle.objects.select_related(
                    'active_translation')[0].active_translation.title,
                "foo")
    @skipUnlessDBFeature('has_bulk_insert')
    def test_batch_create_foreign_object(self):
        """bulk_create() works on models with a composite ForeignObject.

        See: https://code.djangoproject.com/ticket/21566
        """
        objs = [Person(name="abcd_%s" % i, person_country=self.usa) for i in range(0, 5)]
        Person.objects.bulk_create(objs, 10)
class TestModelCheckTests(SimpleTestCase):
    """System-check tests for composite ForeignObject declarations."""
    def test_check_composite_foreign_object(self):
        """A ForeignObject targeting exactly a unique_together set passes checks."""
        class Parent(models.Model):
            a = models.PositiveIntegerField()
            b = models.PositiveIntegerField()
            class Meta:
                unique_together = (('a', 'b'),)
        class Child(models.Model):
            a = models.PositiveIntegerField()
            b = models.PositiveIntegerField()
            value = models.CharField(max_length=255)
            parent = ForeignObject(
                Parent,
                on_delete=models.SET_NULL,
                from_fields=('a', 'b'),
                to_fields=('a', 'b'),
                related_name='children',
            )
        self.assertEqual(Child._meta.get_field('parent').check(from_model=Child), [])
    def test_check_subset_composite_foreign_object(self):
        """A ForeignObject whose target fields are a superset of a
        unique_together set still passes checks (the unique subset suffices)."""
        class Parent(models.Model):
            a = models.PositiveIntegerField()
            b = models.PositiveIntegerField()
            c = models.PositiveIntegerField()
            class Meta:
                unique_together = (('a', 'b'),)
        class Child(models.Model):
            a = models.PositiveIntegerField()
            b = models.PositiveIntegerField()
            c = models.PositiveIntegerField()
            d = models.CharField(max_length=255)
            parent = ForeignObject(
                Parent,
                on_delete=models.SET_NULL,
                from_fields=('a', 'b', 'c'),
                to_fields=('a', 'b', 'c'),
                related_name='children',
            )
        self.assertEqual(Child._meta.get_field('parent').check(from_model=Child), [])
| bsd-3-clause |
macobo/Bomberman | models/Explosion.py | 1 | 1538 | # -*- coding: utf-8 -*-
from misc import *
from Objects import ExplosionTile, Tile
from Player import Player
class Explosion(Tile):
    """A non-solid tile representing a bomb blast.

    Lives for EXPLOSIONTIME milliseconds; during the first CHAINTIME
    milliseconds it does not yet trigger other bombs (chain reactions).
    """
    CHAINTIME = 100      # ms before the blast can chain-detonate other bombs
    EXPLOSIONTIME = 700  # ms the blast stays on the map
    def __init__(self, bomb, map):
        """Create the blast for *bomb*, precomputing the squares it covers."""
        Tile.__init__(self, None, solid = False)
        # Removed a dead "x, y = bomb.getPos()" here: the coordinates were
        # computed but never used; calculateAffected derives them itself.
        self.affected = calculateAffected(bomb, map)
        self.time = 0
    def affects(self, bomb):
        """Return True if *bomb* sits in the blast and chaining is enabled."""
        xy = bomb.getRoundPos()
        return xy in self.getAffected() and self.time >= self.CHAINTIME
    def getAffected(self):
        """Return the set of (x, y) grid squares covered by the blast."""
        return self.affected
    def tick(self, t):
        """Advance the blast by *t* ms; return True once it has expired."""
        self.time += t
        return self.time > self.EXPLOSIONTIME
    def __call__(self, size):
        """Render: return an ExplosionTile scaled by the blast's age."""
        # Grows from 0.4x up to a cap of 1.5x of the nominal tile size.
        mul = min(1.5, 0.4 + 0.6 * self.time / self.EXPLOSIONTIME)
        return ExplosionTile(int(round(size * mul)))
def calculateAffected(bomb, map):
    """Return the set of (x, y) grid squares hit by *bomb*'s blast.

    Rays are cast from the bomb's rounded position in each of DIRECTIONS,
    up to ``bomb.radius`` squares, clipped to the map bounds.
    """
    ax, ay = Player.round(*bomb.getPos())
    radius = bomb.radius
    affected = set()
    for dx, dy in DIRECTIONS:
        for i in range(radius+1):
            x = ax + i*dx
            y = ay + i*dy
            # Skip squares outside the map (the ray keeps going; with |dx|,|dy|<=1
            # a ray that leaves the map never re-enters, so this is equivalent
            # to stopping).
            if not (0 <= x < map.size) or not (0 <= y < map.size):
                continue
            objects = map.objectsAt((x,y))
            # A solid, non-bomb object blocks the blast...
            if objects and any(obj.solid and not isinstance(obj, bomb.__class__) for obj in objects):
                if any(obj.fragile for obj in objects):
                    # ...a fragile one is itself hit, then stops the ray.
                    affected.add((x, y))
                    break
            # NOTE(review): a solid *non-fragile* object falls through to here,
            # so the square is added and the ray continues through the wall.
            # That looks unintended (expected: stop without adding) — confirm
            # against game behavior before changing.
            affected.add((x, y))
    return affected
openpli-arm/openembedded | contrib/mtnpatch.py | 45 | 2048 | #!/usr/bin/env python
import sys, os, string, getopt, re
mtncmd = "mtn"
def main(argv = None):
if argv is None:
argv = sys.argv
opts, list = getopt.getopt(sys.argv[1:], ':R')
if len(list) < 1:
print "You must specify a file"
return 2
reverse = False
for o, a in opts:
if o == "-R":
reverse = True
if os.path.exists(list[0]):
input = open(list[0], 'r')
renameFrom = ""
cmd = ""
for line in input:
if len(line) > 0:
if line[0] == '#':
matches = re.search("#\s+(\w+)\s+\"(.*)\"", line)
if matches is not None:
cmd = matches.group(1)
fileName = matches.group(2)
if cmd == "delete":
if reverse:
print "%s add %s" % (mtncmd, fileName)
else:
print "%s drop -e %s" % (mtncmd, fileName)
elif cmd == "add" or cmd == "add_file" or cmd == "add_dir":
if reverse:
print "%s drop -e %s" % (mtncmd, fileName)
else:
print "%s add %s" % (mtncmd, fileName)
elif cmd == "rename":
renameFrom = fileName
elif cmd == "to" and renameFrom != "":
if reverse:
print "%s rename -e %s %s" % (mtncmd, fileName, renameFrom)
else:
print "%s rename -e %s %s" % (mtncmd, renameFrom, fileName)
renameFrom = ""
else:
cmd = ""
if reverse:
print "patch -R -p0 < %s" % list[0]
else:
print "patch -p0 < %s" % list[0]
if __name__ == "__main__":
sys.exit(main())
| mit |
buqing2009/MissionPlanner | Lib/imputil.py | 59 | 26489 | """
Import utilities
Exported classes:
ImportManager Manage the import process
Importer Base class for replacing standard import functions
BuiltinImporter Emulate the import mechanism for builtin and frozen modules
DynLoadSuffixImporter
"""
from warnings import warnpy3k
warnpy3k("the imputil module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
# note: avoid importing non-builtin modules
import imp ### not available in Jython?
import sys
import __builtin__
# for the DirectoryImporter
import struct
import marshal
__all__ = ["ImportManager","Importer","BuiltinImporter"]
_StringType = type('')
_ModuleType = type(sys) ### doesn't work in Jython...
class ImportManager:
    "Manage the import process."
    def install(self, namespace=vars(__builtin__)):
        "Install this ImportManager into the specified namespace."
        if isinstance(namespace, _ModuleType):
            namespace = vars(namespace)
        # Note: we have no notion of "chaining"
        # Record the previous import hook, then install our own.
        self.previous_importer = namespace['__import__']
        self.namespace = namespace
        namespace['__import__'] = self._import_hook
        ### fix this
        #namespace['reload'] = self._reload_hook
    def uninstall(self):
        "Restore the previous import mechanism."
        self.namespace['__import__'] = self.previous_importer
    def add_suffix(self, suffix, importFunc):
        "Register importFunc to handle files ending in the given suffix."
        assert hasattr(importFunc, '__call__')
        self.fs_imp.add_suffix(suffix, importFunc)
    ######################################################################
    #
    # PRIVATE METHODS
    #
    clsFilesystemImporter = None
    def __init__(self, fs_imp=None):
        "Prepare the filesystem importer and register the default suffixes."
        # we're definitely going to be importing something in the future,
        # so let's just load the OS-related facilities.
        if not _os_stat:
            _os_bootstrap()
        # This is the Importer that we use for grabbing stuff from the
        # filesystem. It defines one more method (import_from_dir) for our use.
        if fs_imp is None:
            cls = self.clsFilesystemImporter or _FilesystemImporter
            fs_imp = cls()
        self.fs_imp = fs_imp
        # Initialize the set of suffixes that we recognize and import.
        # The default will import dynamic-load modules first, followed by
        # .py files (or a .py file's cached bytecode)
        for desc in imp.get_suffixes():
            if desc[2] == imp.C_EXTENSION:
                self.add_suffix(desc[0],
                                DynLoadSuffixImporter(desc).import_file)
        self.add_suffix('.py', py_suffix_importer)
    def _import_hook(self, fqname, globals=None, locals=None, fromlist=None):
        """Python calls this hook to locate and import a module."""
        parts = fqname.split('.')
        # determine the context of this import
        parent = self._determine_import_context(globals)
        # if there is a parent, then its importer should manage this import
        if parent:
            module = parent.__importer__._do_import(parent, parts, fromlist)
            if module:
                return module
        # has the top module already been imported?
        try:
            top_module = sys.modules[parts[0]]
        except KeyError:
            # look for the topmost module
            top_module = self._import_top_module(parts[0])
            if not top_module:
                # the topmost module wasn't found at all.
                raise ImportError, 'No module named ' + fqname
        # fast-path simple imports
        if len(parts) == 1:
            if not fromlist:
                return top_module
            if not top_module.__dict__.get('__ispkg__'):
                # __ispkg__ isn't defined (the module was not imported by us),
                # or it is zero.
                #
                # In the former case, there is no way that we could import
                # sub-modules that occur in the fromlist (but we can't raise an
                # error because it may just be names) because we don't know how
                # to deal with packages that were imported by other systems.
                #
                # In the latter case (__ispkg__ == 0), there can't be any sub-
                # modules present, so we can just return.
                #
                # In both cases, since len(parts) == 1, the top_module is also
                # the "bottom" which is the defined return when a fromlist
                # exists.
                return top_module
        importer = top_module.__dict__.get('__importer__')
        if importer:
            return importer._finish_import(top_module, parts[1:], fromlist)
        # Grrr, some people "import os.path" or do "from os.path import ..."
        if len(parts) == 2 and hasattr(top_module, parts[1]):
            if fromlist:
                return getattr(top_module, parts[1])
            else:
                return top_module
        # If the importer does not exist, then we have to bail. A missing
        # importer means that something else imported the module, and we have
        # no knowledge of how to get sub-modules out of the thing.
        raise ImportError, 'No module named ' + fqname
    def _determine_import_context(self, globals):
        """Returns the context in which a module should be imported.
        The context could be a loaded (package) module and the imported module
        will be looked for within that package. The context could also be None,
        meaning there is no context -- the module should be looked for as a
        "top-level" module.
        """
        if not globals or not globals.get('__importer__'):
            # globals does not refer to one of our modules or packages. That
            # implies there is no relative import context (as far as we are
            # concerned), and it should just pick it off the standard path.
            return None
        # The globals refer to a module or package of ours. It will define
        # the context of the new import. Get the module/package fqname.
        parent_fqname = globals['__name__']
        # if a package is performing the import, then return itself (imports
        # refer to pkg contents)
        if globals['__ispkg__']:
            parent = sys.modules[parent_fqname]
            assert globals is parent.__dict__
            return parent
        i = parent_fqname.rfind('.')
        # a module outside of a package has no particular import context
        if i == -1:
            return None
        # if a module in a package is performing the import, then return the
        # package (imports refer to siblings)
        parent_fqname = parent_fqname[:i]
        parent = sys.modules[parent_fqname]
        assert parent.__name__ == parent_fqname
        return parent
    def _import_top_module(self, name):
        "Find and import a top-level module via sys.path entries."
        # scan sys.path looking for a location in the filesystem that contains
        # the module, or an Importer object that can import the module.
        for item in sys.path:
            if isinstance(item, _StringType):
                module = self.fs_imp.import_from_dir(item, name)
            else:
                module = item.import_top(name)
            if module:
                return module
        return None
    def _reload_hook(self, module):
        "Python calls this hook to reload a module."
        # reloading of a module may or may not be possible (depending on the
        # importer), but at least we can validate that it's ours to reload
        importer = module.__dict__.get('__importer__')
        if not importer:
            ### oops. now what...
            pass
        # okay. it is using the imputil system, and we must delegate it, but
        # we don't know what to do (yet)
        ### we should blast the module dict and do another get_code(). need to
        ### flesh this out and add proper docco...
        raise SystemError, "reload not yet implemented"
class Importer:
    "Base class for replacing standard import functions."
    def import_top(self, name):
        "Import a top-level module."
        return self._import_one(None, name, name)
    ######################################################################
    #
    # PRIVATE METHODS
    #
    def _finish_import(self, top, parts, fromlist):
        "Load the remaining dotted parts below top and honor the fromlist."
        # if "a.b.c" was provided, then load the ".b.c" portion down from
        # below the top-level module.
        bottom = self._load_tail(top, parts)
        # if the form is "import a.b.c", then return "a"
        if not fromlist:
            # no fromlist: return the top of the import tree
            return top
        # the top module was imported by self.
        #
        # this means that the bottom module was also imported by self (just
        # now, or in the past and we fetched it from sys.modules).
        #
        # since we imported/handled the bottom module, this means that we can
        # also handle its fromlist (and reliably use __ispkg__).
        # if the bottom node is a package, then (potentially) import some
        # modules.
        #
        # note: if it is not a package, then "fromlist" refers to names in
        #       the bottom module rather than modules.
        # note: for a mix of names and modules in the fromlist, we will
        #       import all modules and insert those into the namespace of
        #       the package module. Python will pick up all fromlist names
        #       from the bottom (package) module; some will be modules that
        #       we imported and stored in the namespace, others are expected
        #       to be present already.
        if bottom.__ispkg__:
            self._import_fromlist(bottom, fromlist)
        # if the form is "from a.b import c, d" then return "b"
        return bottom
    def _import_one(self, parent, modname, fqname):
        "Import a single module."
        # has the module already been imported?
        try:
            return sys.modules[fqname]
        except KeyError:
            pass
        # load the module's code, or fetch the module itself
        result = self.get_code(parent, modname, fqname)
        if result is None:
            return None
        module = self._process_result(result, fqname)
        # insert the module into its parent
        if parent:
            setattr(parent, modname, module)
        return module
    def _process_result(self, result, fqname):
        "Turn a get_code() result tuple into a registered module object."
        ispkg, code, values = result
        # did get_code() return an actual module? (rather than a code object)
        is_module = isinstance(code, _ModuleType)
        # use the returned module, or create a new one to exec code into
        if is_module:
            module = code
        else:
            module = imp.new_module(fqname)
        ### record packages a bit differently??
        module.__importer__ = self
        module.__ispkg__ = ispkg
        # insert additional values into the module (before executing the code)
        module.__dict__.update(values)
        # the module is almost ready... make it visible
        sys.modules[fqname] = module
        # execute the code within the module's namespace
        if not is_module:
            try:
                exec code in module.__dict__
            except:
                # clean up the partially-initialized module before re-raising
                if fqname in sys.modules:
                    del sys.modules[fqname]
                raise
            # fetch from sys.modules instead of returning module directly.
            # also make module's __name__ agree with fqname, in case
            # the "exec code in module.__dict__" played games on us.
            module = sys.modules[fqname]
            module.__name__ = fqname
        return module
    def _load_tail(self, m, parts):
        """Import the rest of the modules, down from the top-level module.
        Returns the last module in the dotted list of modules.
        """
        for part in parts:
            fqname = "%s.%s" % (m.__name__, part)
            m = self._import_one(m, part, fqname)
            if not m:
                raise ImportError, "No module named " + fqname
        return m
    def _import_fromlist(self, package, fromlist):
        'Import any sub-modules in the "from" list.'
        # if '*' is present in the fromlist, then look for the '__all__'
        # variable to find additional items (modules) to import.
        if '*' in fromlist:
            fromlist = list(fromlist) + \
                       list(package.__dict__.get('__all__', []))
        for sub in fromlist:
            # if the name is already present, then don't try to import it (it
            # might not be a module!).
            if sub != '*' and not hasattr(package, sub):
                subname = "%s.%s" % (package.__name__, sub)
                submod = self._import_one(package, sub, subname)
                if not submod:
                    raise ImportError, "cannot import name " + subname
    def _do_import(self, parent, parts, fromlist):
        """Attempt to import the module relative to parent.
        This method is used when the import context specifies that <self>
        imported the parent module.
        """
        top_name = parts[0]
        top_fqname = parent.__name__ + '.' + top_name
        top_module = self._import_one(parent, top_name, top_fqname)
        if not top_module:
            # this importer and parent could not find the module (relatively)
            return None
        return self._finish_import(top_module, parts[1:], fromlist)
    ######################################################################
    #
    # METHODS TO OVERRIDE
    #
    def get_code(self, parent, modname, fqname):
        """Find and retrieve the code for the given module.
        parent specifies a parent module to define a context for importing. It
        may be None, indicating no particular context for the search.
        modname specifies a single module (not dotted) within the parent.
        fqname specifies the fully-qualified module name. This is a
        (potentially) dotted name from the "root" of the module namespace
        down to the modname.
        If there is no parent, then modname==fqname.
        This method should return None, or a 3-tuple.
        * If the module was not found, then None should be returned.
        * The first item of the 2- or 3-tuple should be the integer 0 or 1,
            specifying whether the module that was found is a package or not.
        * The second item is the code object for the module (it will be
            executed within the new module's namespace). This item can also
            be a fully-loaded module object (e.g. loaded from a shared lib).
        * The third item is a dictionary of name/value pairs that will be
            inserted into new module before the code object is executed. This
            is provided in case the module's code expects certain values (such
            as where the module was found). When the second item is a module
            object, then these names/values will be inserted *after* the module
            has been loaded/initialized.
        """
        raise RuntimeError, "get_code not implemented"
######################################################################
#
# Some handy stuff for the Importers
#
# byte-compiled file suffix character ('c' normally, 'o' under -O)
_suffix_char = __debug__ and 'c' or 'o'
# byte-compiled file suffix
_suffix = '.py' + _suffix_char
def _compile(pathname, timestamp):
    """Compile (and cache) a Python source file.
    The file specified by <pathname> is compiled to a code object and
    returned.
    Presuming the appropriate privileges exist, the bytecodes will be
    saved back to the filesystem for future imports. The source file's
    modification timestamp must be provided as a Long value.
    """
    codestring = open(pathname, 'rU').read()
    # compile() requires source to end with a newline
    if codestring and codestring[-1] != '\n':
        codestring = codestring + '\n'
    code = __builtin__.compile(codestring, pathname, 'exec')
    # try to cache the compiled code
    try:
        f = open(pathname + _suffix_char, 'wb')
    except IOError:
        # no write permission (or similar); skip caching silently
        pass
    else:
        # write a zero magic number first so a partially-written file is
        # never mistaken for a valid cache; patch in the real magic last
        f.write('\0\0\0\0')
        f.write(struct.pack('<I', timestamp))
        marshal.dump(code, f)
        f.flush()
        f.seek(0, 0)
        f.write(imp.get_magic())
        f.close()
    return code
# Filled in lazily by _os_bootstrap(); kept None until then so callers can
# test whether bootstrapping has happened yet.
_os_stat = _os_path_join = None
def _os_bootstrap():
    "Set up 'os' module replacement functions for use during import bootstrap."
    # Use only builtin OS modules: the real 'os' may not be importable yet.
    names = sys.builtin_module_names
    join = None
    if 'posix' in names:
        sep = '/'
        from posix import stat
    elif 'nt' in names:
        sep = '\\'
        from nt import stat
    elif 'dos' in names:
        sep = '\\'
        from dos import stat
    elif 'os2' in names:
        sep = '\\'
        from os2 import stat
    else:
        raise ImportError, 'no os specific module found'
    if join is None:
        # minimal os.path.join() replacement; '/' is accepted everywhere
        def join(a, b, sep=sep):
            if a == '':
                return b
            lastchar = a[-1:]
            if lastchar == '/' or lastchar == sep:
                return a + b
            return a + sep + b
    global _os_stat
    _os_stat = stat
    global _os_path_join
    _os_path_join = join
def _os_path_isdir(pathname):
    "Local replacement for os.path.isdir()."
    try:
        s = _os_stat(pathname)
    except OSError:
        # nonexistent path: neither a dir nor a file (None, not False)
        return None
    # S_IFMT mask / S_IFDIR value, written as octal literals
    return (s.st_mode & 0170000) == 0040000
def _timestamp(pathname):
    "Return the file modification time as a Long, or None if unreadable."
    try:
        s = _os_stat(pathname)
    except OSError:
        return None
    return long(s.st_mtime)
######################################################################
#
# Emulate the import mechanism for builtin and frozen modules
#
class BuiltinImporter(Importer):
    "Emulate the import mechanism for builtin and frozen modules."
    def get_code(self, parent, modname, fqname):
        "Locate and load a builtin or frozen module (top-level only)."
        if parent:
            # these modules definitely do not occur within a package context
            return None
        # look for the module
        if imp.is_builtin(modname):
            type = imp.C_BUILTIN
        elif imp.is_frozen(modname):
            type = imp.PY_FROZEN
        else:
            # not found
            return None
        # got it. now load and return it.
        module = imp.load_module(modname, None, modname, ('', '', type))
        # never a package; no extra namespace values needed
        return 0, module, { }
######################################################################
#
# Internal importer used for importing from the filesystem
#
class _FilesystemImporter(Importer):
    "Importer that loads modules and packages from the filesystem."
    def __init__(self):
        # (suffix, importFunc) pairs, tried in registration order
        self.suffixes = [ ]
    def add_suffix(self, suffix, importFunc):
        "Register importFunc to handle files ending in the given suffix."
        assert hasattr(importFunc, '__call__')
        self.suffixes.append((suffix, importFunc))
    def import_from_dir(self, dir, fqname):
        "Import the top-level module fqname from directory dir, or return None."
        result = self._import_pathname(_os_path_join(dir, fqname), fqname)
        if result:
            return self._process_result(result, fqname)
        return None
    def get_code(self, parent, modname, fqname):
        # This importer is never used with an empty parent. Its existence is
        # private to the ImportManager. The ImportManager uses the
        # import_from_dir() method to import top-level modules/packages.
        # This method is only used when we look for a module within a package.
        assert parent
        for submodule_path in parent.__path__:
            code = self._import_pathname(_os_path_join(submodule_path, modname), fqname)
            if code is not None:
                return code
        # fall back to the package's own directory
        return self._import_pathname(_os_path_join(parent.__pkgdir__, modname),
                                     fqname)
    def _import_pathname(self, pathname, fqname):
        "Try the path as a package dir, then with each registered suffix."
        if _os_path_isdir(pathname):
            # a directory is a package if it contains an importable __init__
            result = self._import_pathname(_os_path_join(pathname, '__init__'),
                                           fqname)
            if result:
                values = result[2]
                values['__pkgdir__'] = pathname
                values['__path__'] = [ pathname ]
                return 1, result[1], values
            return None
        for suffix, importFunc in self.suffixes:
            filename = pathname + suffix
            try:
                finfo = _os_stat(filename)
            except OSError:
                pass
            else:
                return importFunc(filename, finfo, fqname)
        return None
######################################################################
#
# SUFFIX-BASED IMPORTERS
#
def py_suffix_importer(filename, finfo, fqname):
    """Import a .py file, preferring an up-to-date cached bytecode file.

    finfo is an os.stat() result for the source; finfo[8] is st_mtime.
    Returns the (ispkg, code, values) triple expected by Importer.get_code().
    """
    file = filename[:-3] + _suffix
    t_py = long(finfo[8])
    t_pyc = _timestamp(file)
    code = None
    # reuse the bytecode only if it is at least as new as the source,
    # carries the current magic number, and records the same source mtime
    if t_pyc is not None and t_pyc >= t_py:
        f = open(file, 'rb')
        if f.read(4) == imp.get_magic():
            t = struct.unpack('<I', f.read(4))[0]
            if t == t_py:
                code = marshal.load(f)
        f.close()
    if code is None:
        # stale or missing cache: recompile (and re-cache) the source
        file = filename
        code = _compile(file, t_py)
    return 0, code, { '__file__' : file }
class DynLoadSuffixImporter:
    "Import dynamically-loaded (C extension) modules for one suffix description."
    def __init__(self, desc):
        # desc is an (suffix, mode, type) triple from imp.get_suffixes()
        self.desc = desc
    def import_file(self, filename, finfo, fqname):
        "Load the extension module; returns (ispkg, module, values)."
        fp = open(filename, self.desc[1])
        module = imp.load_module(fqname, fp, filename, self.desc)
        module.__file__ = filename
        return 0, module, { }
######################################################################
def _print_importers():
    """Debug helper: list every sys.modules entry with its __importer__."""
    items = sys.modules.items()
    items.sort()
    for name, module in items:
        if module:
            print name, module.__dict__.get('__importer__', '-- no importer')
        else:
            # sys.modules entries can be None (failed relative imports).
            print name, '-- non-existent module'
def _test_revamp():
    """Install the ImportManager and put a BuiltinImporter first on sys.path."""
    ImportManager().install()
    sys.path.insert(0, BuiltinImporter())
######################################################################
#
# TODO
#
# from Finn Bock:
# type(sys) is not a module in Jython. what to use instead?
# imp.C_EXTENSION is not in Jython. same for get_suffixes and new_module
#
# given foo.py of:
# import sys
# sys.modules['foo'] = sys
#
# ---- standard import mechanism
# >>> import foo
# >>> foo
# <module 'sys' (built-in)>
#
# ---- revamped import mechanism
# >>> import imputil
# >>> imputil._test_revamp()
# >>> import foo
# >>> foo
# <module 'foo' from 'foo.py'>
#
#
# from MAL:
# should BuiltinImporter exist in sys.path or hard-wired in ImportManager?
# need __path__ processing
# performance
# move chaining to a subclass [gjs: it's been nuked]
# deinstall should be possible
# query mechanism needed: is a specific Importer installed?
# py/pyc/pyo piping hooks to filter/process these files
# wish list:
# distutils importer hooked to list of standard Internet repositories
# module->file location mapper to speed FS-based imports
# relative imports
# keep chaining so that it can play nice with other import hooks
#
# from Gordon:
# push MAL's mapper into sys.path[0] as a cache (hard-coded for apps)
#
# from Guido:
# need to change sys.* references for rexec environs
# need hook for MAL's walk-me-up import strategy, or Tim's absolute strategy
# watch out for sys.modules[...] is None
# flag to force absolute imports? (speeds _determine_import_context and
# checking for a relative module)
# insert names of archives into sys.path (see quote below)
# note: reload does NOT blast module dict
# shift import mechanisms and policies around; provide for hooks, overrides
# (see quote below)
# add get_source stuff
# get_topcode and get_subcode
# CRLF handling in _compile
# race condition in _compile
# refactoring of os.py to deal with _os_bootstrap problem
# any special handling to do for importing a module with a SyntaxError?
# (e.g. clean up the traceback)
# implement "domain" for path-type functionality using pkg namespace
# (rather than FS-names like __path__)
# don't use the word "private"... maybe "internal"
#
#
# Guido's comments on sys.path caching:
#
# We could cache this in a dictionary: the ImportManager can have a
# cache dict mapping pathnames to importer objects, and a separate
# method for coming up with an importer given a pathname that's not yet
# in the cache. The method should do a stat and/or look at the
# extension to decide which importer class to use; you can register new
# importer classes by registering a suffix or a Boolean function, plus a
# class. If you register a new importer class, the cache is zapped.
# The cache is independent from sys.path (but maintained per
# ImportManager instance) so that rearrangements of sys.path do the
# right thing. If a path is dropped from sys.path the corresponding
# cache entry is simply no longer used.
#
# My/Guido's comments on factoring ImportManager and Importer:
#
# > However, we still have a tension occurring here:
# >
# > 1) implementing policy in ImportManager assists in single-point policy
# > changes for app/rexec situations
# > 2) implementing policy in Importer assists in package-private policy
# > changes for normal, operating conditions
# >
# > I'll see if I can sort out a way to do this. Maybe the Importer class will
# > implement the methods (which can be overridden to change policy) by
# > delegating to ImportManager.
#
# Maybe also think about what kind of policies an Importer would be
# likely to want to change. I have a feeling that a lot of the code
# there is actually not so much policy but a *necessity* to get things
# working given the calling conventions for the __import__ hook: whether
# to return the head or tail of a dotted name, or when to do the "finish
# fromlist" stuff.
#
| gpl-3.0 |
shrimpboyho/git.js | emscript/python/2.7.5.1_32bit/Lib/site-packages/win32com/client/combrowse.py | 18 | 20235 | """A utility for browsing COM objects.
Usage:
Command Prompt
    Use the command *"python.exe combrowse.py"*.  This will
    display a fairly small, modal dialog.
Pythonwin
Use the "Run Script" menu item, and this will create the browser in an
MDI window. This window can be fully resized.
Details
This module allows browsing of registered Type Libraries, COM categories,
and running COM objects. The display is similar to the Pythonwin object
browser, and displays the objects in a hierarchical window.
  Note that this module requires the win32ui (ie, Pythonwin) distribution to
work.
"""
import win32con
import win32api, win32ui
import sys
import pythoncom
from win32com.client import util
from pywin.tools import browser
class HLIRoot(browser.HLIPythonObject):
    """Root of the browser tree: categories, running objects, type libraries."""
    def __init__(self, title):
        self.name = title
    def GetSubList(self):
        return [HLIHeadingCategory(), HLI_IEnumMoniker(pythoncom.GetRunningObjectTable().EnumRunning(), "Running Objects"), HLIHeadingRegisterdTypeLibs()]
    def __cmp__(self, other):
        # Python 2 comparison hook: order tree items alphabetically by name.
        return cmp(self.name, other.name)
class HLICOM(browser.HLIPythonObject):
    """Base class for all COM browser items; expandable by default."""
    def GetText(self):
        return self.name
    def CalculateIsExpandable(self):
        return 1
class HLICLSID(HLICOM):
    """Leaf item displaying a CLSID/IID, labelled with its ProgID if known."""
    def __init__(self, myobject, name=None ):
        if type(myobject)==type(''):
            # Accept a string and convert it to a real IID object.
            myobject = pythoncom.MakeIID(myobject)
        if name is None:
            try:
                name = pythoncom.ProgIDFromCLSID(myobject)
            except pythoncom.com_error:
                # No ProgID registered for this CLSID; show the raw IID.
                name = str(myobject)
            name = "IID: " + name
        HLICOM.__init__(self, myobject, name)
    def CalculateIsExpandable(self):
        return 0
    def GetSubList(self):
        return []
class HLI_Interface(HLICOM):
    """Marker base class for items wrapping a COM interface pointer."""
    pass
class HLI_Enum(HLI_Interface):
    """Base class for items wrapping a COM enumerator (IEnum*) interface.

    Fix: the original class body ended with a stray, unreachable ``pass``
    statement after the methods; it has been removed.
    """
    def GetBitmapColumn(self):
        return 0  # Always a folder.
    def CalculateIsExpandable(self):
        # Expandable only if the enumerator yields at least one item.  The
        # probe consumes an element, so Reset() restores the position.
        if self.myobject is not None:
            rc = len(self.myobject.Next(1)) > 0
            self.myobject.Reset()
        else:
            rc = 0
        return rc
class HLI_IEnumMoniker(HLI_Enum):
    """Expands an IEnumMoniker into one child item per running moniker."""
    def GetSubList(self):
        ctx = pythoncom.CreateBindCtx()
        ret = []
        for mon in util.Enumerator(self.myobject):
            # Use the moniker's display name as the item label.
            ret.append(HLI_IMoniker(mon, mon.GetDisplayName(ctx, None)))
        return ret
class HLI_IMoniker(HLI_Interface):
    """An IMoniker item: shows its hash value and any sub-monikers."""
    def GetSubList(self):
        ret = []
        ret.append(browser.MakeHLI(self.myobject.Hash(), "Hash Value"))
        subenum = self.myobject.Enum(1)
        ret.append(HLI_IEnumMoniker(subenum, "Sub Monikers"))
        return ret
class HLIHeadingCategory(HLICOM):
    "A tree heading for registered categories"
    def GetText(self):
        return "Registered Categories"
    def GetSubList(self):
        # Ask the standard component categories manager for all categories.
        catinf=pythoncom.CoCreateInstance(pythoncom.CLSID_StdComponentCategoriesMgr,None,pythoncom.CLSCTX_INPROC,pythoncom.IID_ICatInformation)
        enum=util.Enumerator(catinf.EnumCategories())
        ret = []
        try:
            for catid, lcid, desc in enum:
                ret.append(HLICategory((catid, lcid, desc)))
        except pythoncom.com_error:
            # Registered categories occasionally seem to give spurious errors.
            pass # Use what we already have.
        return ret
class HLICategory(HLICOM):
    "An actual Registered Category"
    def GetText(self):
        desc = self.myobject[2]
        if not desc: desc = "(unnamed category)"
        return desc
    def GetSubList(self):
        # Enumerating all classes of a category can be slow; show a wait cursor.
        win32ui.DoWaitCursor(1)
        catid, lcid, desc = self.myobject
        catinf=pythoncom.CoCreateInstance(pythoncom.CLSID_StdComponentCategoriesMgr,None,pythoncom.CLSCTX_INPROC,pythoncom.IID_ICatInformation)
        ret = []
        for clsid in util.Enumerator(catinf.EnumClassesOfCategories((catid,),())):
            ret.append(HLICLSID(clsid))
        win32ui.DoWaitCursor(0)
        return ret
class HLIHelpFile(HLICOM):
    """Leaf item for a type library's help file; activating it opens WinHelp.

    myobject is an (fname, ctx) pair: help file path and help context id.
    """
    def CalculateIsExpandable(self):
        return 0
    def GetText(self):
        import os
        fname, ctx = self.myobject
        base = os.path.split(fname)[1]
        return "Help reference in %s" %( base)
    def TakeDefaultAction(self):
        fname, ctx = self.myobject
        # With a context id, jump straight to that topic; else open the finder.
        if ctx:
            cmd = win32con.HELP_CONTEXT
        else:
            cmd = win32con.HELP_FINDER
        win32api.WinHelp(win32ui.GetMainFrame().GetSafeHwnd(), fname, cmd, ctx)
    def GetBitmapColumn(self):
        return 6
class HLIRegisteredTypeLibrary(HLICOM):
    """One registered type library version; myobject is (clsid, versionStr).

    Expands by walking HKCR\\TypeLib\\<clsid>\\<version> for HELPDIR, Flags
    and the per-locale/per-platform library file names.
    """
    def GetSubList(self):
        import os
        clsidstr, versionStr = self.myobject
        collected = []
        helpPath = ""
        key = win32api.RegOpenKey(win32con.HKEY_CLASSES_ROOT, "TypeLib\\%s\\%s" % (clsidstr, versionStr))
        win32ui.DoWaitCursor(1)
        try:
            num = 0
            while 1:
                try:
                    subKey = win32api.RegEnumKey(key, num)
                except win32api.error:
                    break    # no more subkeys
                hSubKey = win32api.RegOpenKey(key, subKey)
                try:
                    value, typ = win32api.RegQueryValueEx(hSubKey, None)
                    if typ == win32con.REG_EXPAND_SZ:
                        value = win32api.ExpandEnvironmentStrings(value)
                except win32api.error:
                    value = ""
                if subKey=="HELPDIR":
                    helpPath = value
                elif subKey=="Flags":
                    # NOTE(review): 'flags' is captured here but never used.
                    flags = value
                else:
                    try:
                        # Remaining numeric subkeys are locale ids (LCIDs).
                        lcid = int(subKey)
                        lcidkey = win32api.RegOpenKey(key, subKey)
                        # Enumerate the platforms
                        lcidnum = 0
                        while 1:
                            try:
                                platform = win32api.RegEnumKey(lcidkey, lcidnum)
                            except win32api.error:
                                break
                            try:
                                hplatform = win32api.RegOpenKey(lcidkey, platform)
                                fname, typ = win32api.RegQueryValueEx(hplatform, None)
                                if typ == win32con.REG_EXPAND_SZ:
                                    fname = win32api.ExpandEnvironmentStrings(fname)
                            except win32api.error:
                                fname = ""
                            collected.append((lcid, platform, fname))
                            lcidnum = lcidnum + 1
                        win32api.RegCloseKey(lcidkey)
                    except ValueError:
                        pass    # non-numeric subkey: not an LCID, ignore
                num = num + 1
        finally:
            win32ui.DoWaitCursor(0)
            win32api.RegCloseKey(key)
        # Now, loop over my collected objects, adding a TypeLib and a HelpFile
        ret = []
        # if helpPath: ret.append(browser.MakeHLI(helpPath, "Help Path"))
        ret.append(HLICLSID(clsidstr))
        for lcid, platform, fname in collected:
            extraDescs = []
            if platform!="win32":
                extraDescs.append(platform)
            if lcid:
                extraDescs.append("locale=%s"%lcid)
            extraDesc = ""
            if extraDescs: extraDesc = " (%s)" % ", ".join(extraDescs)
            ret.append(HLITypeLib(fname, "Type Library" + extraDesc))
        ret.sort()
        return ret
class HLITypeLibEntry(HLICOM):
    """Base item for one entry in a type library; myobject is (typelib, index)."""
    def GetText(self):
        tlb, index = self.myobject
        name, doc, ctx, helpFile = tlb.GetDocumentation(index)
        try:
            # Second slot of HLITypeKinds holds the human-readable kind label.
            typedesc = HLITypeKinds[tlb.GetTypeInfoType(index)][1]
        except KeyError:
            typedesc = "Unknown!"
        return name + " - " + typedesc
    def GetSubList(self):
        tlb, index = self.myobject
        name, doc, ctx, helpFile = tlb.GetDocumentation(index)
        ret = []
        if doc: ret.append(browser.HLIDocString(doc, "Doc"))
        if helpFile: ret.append(HLIHelpFile( (helpFile, ctx) ))
        return ret
class HLICoClass(HLITypeLibEntry):
    """A coclass entry: lists the interfaces it implements and their flags."""
    def GetSubList(self):
        ret = HLITypeLibEntry.GetSubList(self)
        tlb, index = self.myobject
        typeinfo = tlb.GetTypeInfo(index)
        attr = typeinfo.GetTypeAttr()
        # attr[8]: number of implemented interfaces (TYPEATTR.cImplTypes).
        for j in range(attr[8]):
            flags = typeinfo.GetImplTypeFlags(j)
            refType = typeinfo.GetRefTypeInfo(typeinfo.GetRefTypeOfImplType(j))
            refAttr = refType.GetTypeAttr()
            ret.append(browser.MakeHLI(refAttr[0], "Name=%s, Flags = %d" % (refAttr[0], flags)))
        return ret
class HLITypeLibMethod(HLITypeLibEntry):
    """An interface/dispatch entry: expands into its properties and functions."""
    def __init__(self, ob, name = None):
        self.entry_type = "Method"
        HLITypeLibEntry.__init__(self, ob, name)
    def GetSubList(self):
        ret = HLITypeLibEntry.GetSubList(self)
        tlb, index = self.myobject
        typeinfo = tlb.GetTypeInfo(index)
        attr = typeinfo.GetTypeAttr()
        # attr[7] variables/properties first, then attr[6] functions
        # (TYPEATTR.cVars and TYPEATTR.cFuncs).
        for i in range(attr[7]):
            ret.append(HLITypeLibProperty((typeinfo, i)))
        for i in range(attr[6]):
            ret.append(HLITypeLibFunction((typeinfo, i)))
        return ret
class HLITypeLibEnum(HLITypeLibEntry):
    """An enum or module entry: expands into its named constant values."""
    def __init__(self, myitem):
        typelib, index = myitem
        typeinfo = typelib.GetTypeInfo(index)
        # NOTE(review): 'index' is the typelib-level type index but is reused
        # here as a variable index within the typeinfo — looks suspicious;
        # confirm against a library with more members than types.
        self.id = typeinfo.GetVarDesc(index)[0]
        name = typeinfo.GetNames(self.id)[0]
        HLITypeLibEntry.__init__(self, myitem, name)
    def GetText(self):
        return self.name + " - Enum/Module"
    def GetSubList(self):
        ret = []
        typelib, index = self.myobject
        typeinfo = typelib.GetTypeInfo(index)
        attr = typeinfo.GetTypeAttr()
        # One child per variable: label is the constant's name, value is
        # the constant itself (vdesc[1]).
        for j in range(attr[7]):
            vdesc = typeinfo.GetVarDesc(j)
            name = typeinfo.GetNames(vdesc[0])[0]
            ret.append(browser.MakeHLI(vdesc[1], name))
        return ret
class HLITypeLibProperty(HLICOM):
    """A property of a type-library interface; myobject is (typeinfo, index)."""
    def __init__(self, myitem):
        typeinfo, index = myitem
        self.id = typeinfo.GetVarDesc(index)[0]    # the member/dispatch id
        name = typeinfo.GetNames(self.id)[0]
        HLICOM.__init__(self, myitem, name)
    def GetText(self):
        return self.name + " - Property"
    def GetSubList(self):
        ret = []
        typeinfo, index = self.myobject
        names = typeinfo.GetNames(self.id)
        if len(names)>1:
            # Any names beyond the first are the property's named parameters.
            ret.append(browser.MakeHLI(names[1:], "Named Params"))
        vd = typeinfo.GetVarDesc(index)
        ret.append(browser.MakeHLI(self.id, "Dispatch ID"))
        ret.append(browser.MakeHLI(vd[1], "Value"))
        ret.append(browser.MakeHLI(vd[2], "Elem Desc"))
        ret.append(browser.MakeHLI(vd[3], "Var Flags"))
        ret.append(browser.MakeHLI(vd[4], "Var Kind"))
        return ret
class HLITypeLibFunction(HLICOM):
    """A function of a type-library interface; myobject is (typeinfo, index).

    The class-level tables map pythoncom FUNC_*/INVOKE_*/FUNCFLAG_*/VT_*
    constants to the human-readable labels shown in the tree.
    """
    funckinds = {pythoncom.FUNC_VIRTUAL : "Virtual",
                 pythoncom.FUNC_PUREVIRTUAL : "Pure Virtual",
                 pythoncom.FUNC_STATIC : "Static",
                 pythoncom.FUNC_DISPATCH : "Dispatch",
                 }
    invokekinds = {pythoncom.INVOKE_FUNC: "Function",
                   pythoncom.INVOKE_PROPERTYGET : "Property Get",
                   pythoncom.INVOKE_PROPERTYPUT : "Property Put",
                   pythoncom.INVOKE_PROPERTYPUTREF : "Property Put by reference",
                   }
    funcflags = [(pythoncom.FUNCFLAG_FRESTRICTED, "Restricted"),
                 (pythoncom.FUNCFLAG_FSOURCE, "Source"),
                 (pythoncom.FUNCFLAG_FBINDABLE, "Bindable"),
                 (pythoncom.FUNCFLAG_FREQUESTEDIT, "Request Edit"),
                 (pythoncom.FUNCFLAG_FDISPLAYBIND, "Display Bind"),
                 (pythoncom.FUNCFLAG_FDEFAULTBIND, "Default Bind"),
                 (pythoncom.FUNCFLAG_FHIDDEN, "Hidden"),
                 (pythoncom.FUNCFLAG_FUSESGETLASTERROR, "Uses GetLastError"),
                 ]
    vartypes = {pythoncom.VT_EMPTY: "Empty",
                pythoncom.VT_NULL: "NULL",
                pythoncom.VT_I2: "Integer 2",
                pythoncom.VT_I4: "Integer 4",
                pythoncom.VT_R4: "Real 4",
                pythoncom.VT_R8: "Real 8",
                pythoncom.VT_CY: "CY",
                pythoncom.VT_DATE: "Date",
                pythoncom.VT_BSTR: "String",
                pythoncom.VT_DISPATCH: "IDispatch",
                pythoncom.VT_ERROR: "Error",
                pythoncom.VT_BOOL: "BOOL",
                pythoncom.VT_VARIANT: "Variant",
                pythoncom.VT_UNKNOWN: "IUnknown",
                pythoncom.VT_DECIMAL: "Decimal",
                pythoncom.VT_I1: "Integer 1",
                pythoncom.VT_UI1: "Unsigned integer 1",
                pythoncom.VT_UI2: "Unsigned integer 2",
                pythoncom.VT_UI4: "Unsigned integer 4",
                pythoncom.VT_I8: "Integer 8",
                pythoncom.VT_UI8: "Unsigned integer 8",
                pythoncom.VT_INT: "Integer",
                pythoncom.VT_UINT: "Unsigned integer",
                pythoncom.VT_VOID: "Void",
                pythoncom.VT_HRESULT: "HRESULT",
                pythoncom.VT_PTR: "Pointer",
                pythoncom.VT_SAFEARRAY: "SafeArray",
                pythoncom.VT_CARRAY: "C Array",
                pythoncom.VT_USERDEFINED: "User Defined",
                pythoncom.VT_LPSTR: "Pointer to string",
                pythoncom.VT_LPWSTR: "Pointer to Wide String",
                pythoncom.VT_FILETIME: "File time",
                pythoncom.VT_BLOB: "Blob",
                pythoncom.VT_STREAM: "IStream",
                pythoncom.VT_STORAGE: "IStorage",
                pythoncom.VT_STORED_OBJECT: "Stored object",
                pythoncom.VT_STREAMED_OBJECT: "Streamed object",
                pythoncom.VT_BLOB_OBJECT: "Blob object",
                pythoncom.VT_CF: "CF",
                pythoncom.VT_CLSID: "CLSID",
                }
    type_flags = [ (pythoncom.VT_VECTOR, "Vector"),
                   (pythoncom.VT_ARRAY, "Array"),
                   (pythoncom.VT_BYREF, "ByRef"),
                   (pythoncom.VT_RESERVED, "Reserved"),
                   ]
    def __init__(self, myitem):
        typeinfo, index = myitem
        self.id = typeinfo.GetFuncDesc(index)[0]    # the member/dispatch id
        name = typeinfo.GetNames(self.id)[0]
        HLICOM.__init__(self, myitem, name)
    def GetText(self):
        return self.name + " - Function"
    def MakeReturnTypeName(self, typ):
        """Return the display name of a plain VT_* type, wrapped in any
        modifier flags (Vector/Array/ByRef/Reserved) present in `typ`."""
        justtyp = typ & pythoncom.VT_TYPEMASK
        try:
            typname = self.vartypes[justtyp]
        except KeyError:
            typname = "?Bad type?"
        for (flag, desc) in self.type_flags:
            if flag & typ:
                typname = "%s(%s)" % (desc, typname)
        return typname
    def MakeReturnType(self, returnTypeDesc):
        """Recursively format a typedesc, which is either a plain VT_* value
        or a (vt, sub-typedesc) tuple for pointer/array types."""
        if type(returnTypeDesc)==type(()):
            first = returnTypeDesc[0]
            result = self.MakeReturnType(first)
            if first != pythoncom.VT_USERDEFINED:
                result = result + " " + self.MakeReturnType(returnTypeDesc[1])
            return result
        else:
            return self.MakeReturnTypeName(returnTypeDesc)
    def GetSubList(self):
        ret = []
        typeinfo, index = self.myobject
        names = typeinfo.GetNames(self.id)
        ret.append(browser.MakeHLI(self.id, "Dispatch ID"))
        if len(names)>1:
            ret.append(browser.MakeHLI(", ".join(names[1:]), "Named Params"))
        fd = typeinfo.GetFuncDesc(index)
        if fd[1]:
            ret.append(browser.MakeHLI(fd[1], "Possible result values"))
        if fd[8]:
            # fd[8] is the return value's (type, flags, default) triple.
            typ, flags, default = fd[8]
            val = self.MakeReturnType(typ)
            if flags:
                val = "%s (Flags=%d, default=%s)" % (val, flags, default)
            ret.append(browser.MakeHLI(val, "Return Type"))
        for argDesc in fd[2]:
            typ, flags, default = argDesc
            val = self.MakeReturnType(typ)
            if flags:
                val = "%s (Flags=%d)" % (val, flags)
            if default is not None:
                val = "%s (Default=%s)" % (val, default)
            ret.append(browser.MakeHLI(val, "Argument"))
        try:
            fkind = self.funckinds[fd[3]]
        except KeyError:
            fkind = "Unknown"
        ret.append(browser.MakeHLI(fkind, "Function Kind"))
        try:
            ikind = self.invokekinds[fd[4]]
        except KeyError:
            ikind = "Unknown"
        ret.append(browser.MakeHLI(ikind, "Invoke Kind"))
        # 5 = call conv
        # 5 = offset vtbl
        ret.append(browser.MakeHLI(fd[6], "Number Optional Params"))
        flagDescs = []
        for flag, desc in self.funcflags:
            if flag & fd[9]:
                flagDescs.append(desc)
        if flagDescs:
            ret.append(browser.MakeHLI(", ".join(flagDescs), "Function Flags"))
        return ret
# Maps a pythoncom.TKIND_* constant to (HLI class used to display entries of
# that kind, human-readable kind label).
HLITypeKinds = {
    pythoncom.TKIND_ENUM : (HLITypeLibEnum, 'Enumeration'),
    pythoncom.TKIND_RECORD : (HLITypeLibEntry, 'Record'),
    pythoncom.TKIND_MODULE : (HLITypeLibEnum, 'Module'),
    pythoncom.TKIND_INTERFACE : (HLITypeLibMethod, 'Interface'),
    pythoncom.TKIND_DISPATCH : (HLITypeLibMethod, 'Dispatch'),
    pythoncom.TKIND_COCLASS : (HLICoClass, 'CoClass'),
    pythoncom.TKIND_ALIAS : (HLITypeLibEntry, 'Alias'),
    pythoncom.TKIND_UNION : (HLITypeLibEntry, 'Union')
    }
class HLITypeLib(HLICOM):
    """A type library file; expands into one item per contained type info."""
    def GetSubList(self):
        ret = []
        ret.append(browser.MakeHLI(self.myobject, "Filename"))
        try:
            tlb = pythoncom.LoadTypeLib(self.myobject)
        except pythoncom.com_error:
            return [browser.MakeHLI("%s can not be loaded" % self.myobject)]
        for i in range(tlb.GetTypeInfoCount()):
            try:
                # Dispatch on the entry's TKIND to pick the right HLI class.
                ret.append(HLITypeKinds[tlb.GetTypeInfoType(i)][0]( (tlb, i) ) )
            except pythoncom.com_error:
                ret.append(browser.MakeHLI("The type info can not be loaded!"))
        ret.sort()
        return ret
class HLIHeadingRegisterdTypeLibs(HLICOM):
    "A tree heading for registered type libraries"
    def GetText(self):
        return "Registered Type Libraries"
    def GetSubList(self):
        # Explicit lookup in the registry: walk HKCR\TypeLib, and for each
        # library keep only its highest-numbered version subkey.
        ret = []
        key = win32api.RegOpenKey(win32con.HKEY_CLASSES_ROOT, "TypeLib")
        win32ui.DoWaitCursor(1)
        try:
            num = 0
            while 1:
                try:
                    keyName = win32api.RegEnumKey(key, num)
                except win32api.error:
                    break
                # Enumerate all version info for this library.
                subKey = win32api.RegOpenKey(key, keyName)
                name = None
                bestVersionStr = None
                try:
                    subNum = 0
                    bestVersion = 0.0
                    while 1:
                        try:
                            versionStr = win32api.RegEnumKey(subKey, subNum)
                        except win32api.error:
                            break
                        try:
                            versionFlt = float(versionStr)
                        except ValueError:
                            versionFlt = 0 # ????
                        if versionFlt > bestVersion:
                            bestVersion = versionFlt
                            # BUGFIX: remember the registry key name of the
                            # best version alongside its display name.  The
                            # original code passed the *last* enumerated
                            # versionStr below, which could mismatch 'name'.
                            bestVersionStr = versionStr
                            name = win32api.RegQueryValue(subKey, versionStr)
                        subNum = subNum + 1
                finally:
                    win32api.RegCloseKey(subKey)
                if name is not None:
                    ret.append(HLIRegisteredTypeLibrary((keyName, bestVersionStr), name))
                num = num + 1
        finally:
            win32api.RegCloseKey(key)
            win32ui.DoWaitCursor(0)
        ret.sort()
        return ret
def main():
    """Open the COM browser: an MDI window under Pythonwin, else a modal dialog."""
    from pywin.tools import hierlist
    root = HLIRoot("COM Browser")
    # The "app" module only exists when running inside Pythonwin.
    if "app" in sys.modules:
        # do it in a window
        browser.MakeTemplate()
        browser.template.OpenObject(root)
    else:
        # list=hierlist.HierListWithItems( root, win32ui.IDB_BROWSER_HIER )
        # dlg=hierlist.HierDialog("COM Browser",list)
        dlg = browser.dynamic_browser(root)
        dlg.DoModal()
if __name__=='__main__':
    main()
    # Sanity check at exit: any live PythonCOM interface/gateway objects
    # would indicate a reference leak somewhere in the browser.
    ni = pythoncom._GetInterfaceCount()
    ng = pythoncom._GetGatewayCount()
    if ni or ng:
        print "Warning - exiting with %d/%d objects alive" % (ni,ng)
| gpl-2.0 |
Osmose/normandy | recipe-server/normandy/recipes/utils.py | 1 | 1406 | import hashlib
def fraction_to_key(frac):
    """Map from the range [0, 1] to [0, max(sha256)]. The result is a string.

    SHA 256 digests are 64-digit hexadecimal numbers, so the upper end of
    the range is 2**256 - 1.
    """
    if frac < 0 or frac > 1:
        raise ValueError('frac must be between 0 and 1 inclusive (got {})'.format(frac))
    max_hash = 2 ** 256 - 1
    scaled = int(frac * max_hash)
    assert scaled >= 0
    # Render as lowercase hex (no "0x" prefix), left-padded to 64 digits.
    padded = '{:x}'.format(scaled).rjust(64, '0')
    # Float rounding can push frac * max_hash up to 2**256 itself, which has
    # 65 hex digits; saturate at the largest representable hash.
    if len(padded) > 64:
        return 'f' * 64
    return padded
def deterministic_sample(rate, inputs):
    """
    Deterministically choose True or False for a set of inputs.

    The `rate` is converted to a point in the sha256 hash space; the result
    is True exactly when the hash of `inputs` falls below that point.

    :param rate: The probability of returning True
    :param inputs: A list of hashable data to feed to decide True or False about
    :returns: True with probability `rate` and False otherwise
    """
    digest = hashlib.sha256()
    for item in inputs:
        digest.update(str(item).encode('utf8'))
    threshold = fraction_to_key(rate)
    input_hash = digest.hexdigest()
    assert len(threshold) == 64
    assert len(input_hash) == 64
    return input_hash < threshold
| mpl-2.0 |
immenz/pyload | module/plugins/accounts/FourSharedCom.py | 3 | 1094 | # -*- coding: utf-8 -*-
from module.plugins.Account import Account
class FourSharedCom(Account):
    """pyLoad account plugin for 4shared.com (free accounts only)."""
    __name__ = "FourSharedCom"
    __type__ = "account"
    __version__ = "0.04"
    __description__ = """FourShared.com account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
                   ("stickell", "l.stickell@yahoo.it")]
    def loadAccountInfo(self, user, req):
        # Free mode only for now
        return {"premium": False}
    def login(self, user, data, req):
        # Force English pages so the failure text below can be matched.
        req.cj.setCookie("4shared.com", "4langcookie", "en")
        res = req.load("http://www.4shared.com/web/login",
                       post={'login' : user,
                             'password' : data['password'],
                             'remember' : "on",
                             '_remember': "on",
                             'returnTo' : "http://www.4shared.com/account/home.jsp"},
                       decode=True)
        # The login page repeats this prompt when authentication failed.
        if 'Please log in to access your 4shared account' in res:
            self.wrongPassword()
p0psicles/SickRage | lib/unidecode/x053.py | 252 | 4616 | data = (
'Yun ', # 0x00
'Mwun ', # 0x01
'Nay ', # 0x02
'Gai ', # 0x03
'Gai ', # 0x04
'Bao ', # 0x05
'Cong ', # 0x06
'[?] ', # 0x07
'Xiong ', # 0x08
'Peng ', # 0x09
'Ju ', # 0x0a
'Tao ', # 0x0b
'Ge ', # 0x0c
'Pu ', # 0x0d
'An ', # 0x0e
'Pao ', # 0x0f
'Fu ', # 0x10
'Gong ', # 0x11
'Da ', # 0x12
'Jiu ', # 0x13
'Qiong ', # 0x14
'Bi ', # 0x15
'Hua ', # 0x16
'Bei ', # 0x17
'Nao ', # 0x18
'Chi ', # 0x19
'Fang ', # 0x1a
'Jiu ', # 0x1b
'Yi ', # 0x1c
'Za ', # 0x1d
'Jiang ', # 0x1e
'Kang ', # 0x1f
'Jiang ', # 0x20
'Kuang ', # 0x21
'Hu ', # 0x22
'Xia ', # 0x23
'Qu ', # 0x24
'Bian ', # 0x25
'Gui ', # 0x26
'Qie ', # 0x27
'Zang ', # 0x28
'Kuang ', # 0x29
'Fei ', # 0x2a
'Hu ', # 0x2b
'Tou ', # 0x2c
'Gui ', # 0x2d
'Gui ', # 0x2e
'Hui ', # 0x2f
'Dan ', # 0x30
'Gui ', # 0x31
'Lian ', # 0x32
'Lian ', # 0x33
'Suan ', # 0x34
'Du ', # 0x35
'Jiu ', # 0x36
'Qu ', # 0x37
'Xi ', # 0x38
'Pi ', # 0x39
'Qu ', # 0x3a
'Yi ', # 0x3b
'Qia ', # 0x3c
'Yan ', # 0x3d
'Bian ', # 0x3e
'Ni ', # 0x3f
'Qu ', # 0x40
'Shi ', # 0x41
'Xin ', # 0x42
'Qian ', # 0x43
'Nian ', # 0x44
'Sa ', # 0x45
'Zu ', # 0x46
'Sheng ', # 0x47
'Wu ', # 0x48
'Hui ', # 0x49
'Ban ', # 0x4a
'Shi ', # 0x4b
'Xi ', # 0x4c
'Wan ', # 0x4d
'Hua ', # 0x4e
'Xie ', # 0x4f
'Wan ', # 0x50
'Bei ', # 0x51
'Zu ', # 0x52
'Zhuo ', # 0x53
'Xie ', # 0x54
'Dan ', # 0x55
'Mai ', # 0x56
'Nan ', # 0x57
'Dan ', # 0x58
'Ji ', # 0x59
'Bo ', # 0x5a
'Shuai ', # 0x5b
'Bu ', # 0x5c
'Kuang ', # 0x5d
'Bian ', # 0x5e
'Bu ', # 0x5f
'Zhan ', # 0x60
'Qia ', # 0x61
'Lu ', # 0x62
'You ', # 0x63
'Lu ', # 0x64
'Xi ', # 0x65
'Gua ', # 0x66
'Wo ', # 0x67
'Xie ', # 0x68
'Jie ', # 0x69
'Jie ', # 0x6a
'Wei ', # 0x6b
'Ang ', # 0x6c
'Qiong ', # 0x6d
'Zhi ', # 0x6e
'Mao ', # 0x6f
'Yin ', # 0x70
'Wei ', # 0x71
'Shao ', # 0x72
'Ji ', # 0x73
'Que ', # 0x74
'Luan ', # 0x75
'Shi ', # 0x76
'Juan ', # 0x77
'Xie ', # 0x78
'Xu ', # 0x79
'Jin ', # 0x7a
'Que ', # 0x7b
'Wu ', # 0x7c
'Ji ', # 0x7d
'E ', # 0x7e
'Qing ', # 0x7f
'Xi ', # 0x80
'[?] ', # 0x81
'Han ', # 0x82
'Zhan ', # 0x83
'E ', # 0x84
'Ting ', # 0x85
'Li ', # 0x86
'Zhe ', # 0x87
'Han ', # 0x88
'Li ', # 0x89
'Ya ', # 0x8a
'Ya ', # 0x8b
'Yan ', # 0x8c
'She ', # 0x8d
'Zhi ', # 0x8e
'Zha ', # 0x8f
'Pang ', # 0x90
'[?] ', # 0x91
'He ', # 0x92
'Ya ', # 0x93
'Zhi ', # 0x94
'Ce ', # 0x95
'Pang ', # 0x96
'Ti ', # 0x97
'Li ', # 0x98
'She ', # 0x99
'Hou ', # 0x9a
'Ting ', # 0x9b
'Zui ', # 0x9c
'Cuo ', # 0x9d
'Fei ', # 0x9e
'Yuan ', # 0x9f
'Ce ', # 0xa0
'Yuan ', # 0xa1
'Xiang ', # 0xa2
'Yan ', # 0xa3
'Li ', # 0xa4
'Jue ', # 0xa5
'Sha ', # 0xa6
'Dian ', # 0xa7
'Chu ', # 0xa8
'Jiu ', # 0xa9
'Qin ', # 0xaa
'Ao ', # 0xab
'Gui ', # 0xac
'Yan ', # 0xad
'Si ', # 0xae
'Li ', # 0xaf
'Chang ', # 0xb0
'Lan ', # 0xb1
'Li ', # 0xb2
'Yan ', # 0xb3
'Yan ', # 0xb4
'Yuan ', # 0xb5
'Si ', # 0xb6
'Gong ', # 0xb7
'Lin ', # 0xb8
'Qiu ', # 0xb9
'Qu ', # 0xba
'Qu ', # 0xbb
'Uk ', # 0xbc
'Lei ', # 0xbd
'Du ', # 0xbe
'Xian ', # 0xbf
'Zhuan ', # 0xc0
'San ', # 0xc1
'Can ', # 0xc2
'Can ', # 0xc3
'Can ', # 0xc4
'Can ', # 0xc5
'Ai ', # 0xc6
'Dai ', # 0xc7
'You ', # 0xc8
'Cha ', # 0xc9
'Ji ', # 0xca
'You ', # 0xcb
'Shuang ', # 0xcc
'Fan ', # 0xcd
'Shou ', # 0xce
'Guai ', # 0xcf
'Ba ', # 0xd0
'Fa ', # 0xd1
'Ruo ', # 0xd2
'Shi ', # 0xd3
'Shu ', # 0xd4
'Zhuo ', # 0xd5
'Qu ', # 0xd6
'Shou ', # 0xd7
'Bian ', # 0xd8
'Xu ', # 0xd9
'Jia ', # 0xda
'Pan ', # 0xdb
'Sou ', # 0xdc
'Gao ', # 0xdd
'Wei ', # 0xde
'Sou ', # 0xdf
'Die ', # 0xe0
'Rui ', # 0xe1
'Cong ', # 0xe2
'Kou ', # 0xe3
'Gu ', # 0xe4
'Ju ', # 0xe5
'Ling ', # 0xe6
'Gua ', # 0xe7
'Tao ', # 0xe8
'Kou ', # 0xe9
'Zhi ', # 0xea
'Jiao ', # 0xeb
'Zhao ', # 0xec
'Ba ', # 0xed
'Ding ', # 0xee
'Ke ', # 0xef
'Tai ', # 0xf0
'Chi ', # 0xf1
'Shi ', # 0xf2
'You ', # 0xf3
'Qiu ', # 0xf4
'Po ', # 0xf5
'Xie ', # 0xf6
'Hao ', # 0xf7
'Si ', # 0xf8
'Tan ', # 0xf9
'Chi ', # 0xfa
'Le ', # 0xfb
'Diao ', # 0xfc
'Ji ', # 0xfd
'[?] ', # 0xfe
'Hong ', # 0xff
)
| gpl-3.0 |
Winand/pandas | pandas/tests/io/msgpack/test_case.py | 13 | 2740 | # coding: utf-8
from pandas.io.msgpack import packb, unpackb
def check(length, obj):
    """Pack `obj`, verify the packed byte length, then verify a round-trip."""
    v = packb(obj)
    assert len(v) == length, \
        "%r length should be %r but get %r" % (obj, length, len(v))
    assert unpackb(v, use_list=0) == obj
def test_1():
    """Values that encode in exactly one byte."""
    one_byte_values = [None, True, False, 0, 1, (1 << 6), (1 << 7) - 1, -1,
                       -((1 << 5) - 1), -(1 << 5)]
    for value in one_byte_values:
        check(1, value)
def test_2():
    """Values that need a two-byte encoding."""
    for value in (1 << 7, (1 << 8) - 1, -((1 << 5) + 1), -(1 << 7)):
        check(2, value)
def test_3():
    """Values that need a three-byte encoding."""
    for value in (1 << 8, (1 << 16) - 1, -((1 << 7) + 1), -(1 << 15)):
        check(3, value)
def test_5():
    """Values that need a five-byte encoding."""
    for value in (1 << 16, (1 << 32) - 1, -((1 << 15) + 1), -(1 << 31)):
        check(5, value)
def test_9():
    """Values (big ints and floats) that need a nine-byte encoding."""
    nine_byte_values = (1 << 32, (1 << 64) - 1, -((1 << 31) + 1), -(1 << 63),
                        1.0, 0.1, -0.1, -1.0)
    for value in nine_byte_values:
        check(9, value)
def check_raw(overhead, num):
    """A bytes payload of length `num` should pack into `num + overhead` bytes."""
    check(overhead + num, b" " * num)
def test_fixraw():
    """fixraw format: one byte of overhead up to 2**5 - 1 payload bytes."""
    for size in (0, (1 << 5) - 1):
        check_raw(1, size)
def test_raw16():
    """raw16 format: three bytes of overhead from 2**5 to 2**16 - 1 bytes."""
    for size in (1 << 5, (1 << 16) - 1):
        check_raw(3, size)
def test_raw32():
    """raw32 format: five bytes of overhead from 2**16 payload bytes up."""
    check_raw(5, 1 << 16)
def check_array(overhead, num):
    """A `num`-element tuple should pack into `num + overhead` bytes."""
    check(overhead + num, (None, ) * num)
def test_fixarray():
    """fixarray format: one byte of overhead up to 2**4 - 1 elements."""
    for count in (0, (1 << 4) - 1):
        check_array(1, count)
def test_array16():
    """array16 format: three bytes of overhead from 2**4 to 2**16 - 1 elements."""
    for count in (1 << 4, (1 << 16) - 1):
        check_array(3, count)
def test_array32():
    """array32 format: five bytes of overhead from 2**16 elements up."""
    check_array(5, (1 << 16))
def match(obj, buf):
    """Verify `obj` packs to exactly `buf` and `buf` unpacks back to `obj`."""
    assert packb(obj) == buf
    assert unpackb(buf, use_list=0) == obj
def test_match():
    """Table-driven check of exact wire encodings for representative values."""
    # Each row is (python value, expected msgpack byte string).
    cases = [
        (None, b'\xc0'),
        (False, b'\xc2'),
        (True, b'\xc3'),
        (0, b'\x00'),
        (127, b'\x7f'),
        (128, b'\xcc\x80'),
        (256, b'\xcd\x01\x00'),
        (-1, b'\xff'),
        (-33, b'\xd0\xdf'),
        (-129, b'\xd1\xff\x7f'),
        ({1: 1}, b'\x81\x01\x01'),
        (1.0, b"\xcb\x3f\xf0\x00\x00\x00\x00\x00\x00"),
        ((), b'\x90'),
        (tuple(range(15)), (b"\x9f\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09"
                            b"\x0a\x0b\x0c\x0d\x0e")),
        (tuple(range(16)), (b"\xdc\x00\x10\x00\x01\x02\x03\x04\x05\x06\x07"
                            b"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f")),
        ({}, b'\x80'),
        (dict([(x, x) for x in range(15)]),
         (b'\x8f\x00\x00\x01\x01\x02\x02\x03\x03\x04\x04\x05\x05\x06\x06\x07'
          b'\x07\x08\x08\t\t\n\n\x0b\x0b\x0c\x0c\r\r\x0e\x0e')),
        (dict([(x, x) for x in range(16)]),
         (b'\xde\x00\x10\x00\x00\x01\x01\x02\x02\x03\x03\x04\x04\x05\x05\x06'
          b'\x06\x07\x07\x08\x08\t\t\n\n\x0b\x0b\x0c\x0c\r\r\x0e\x0e'
          b'\x0f\x0f')),
    ]
    for v, p in cases:
        match(v, p)
def test_unicode():
    # A text string round-trips to bytes here (no raw-to-str decoding).
    assert unpackb(packb('foobar'), use_list=1) == b'foobar'
| bsd-3-clause |
jhprinz/openpathsampling | openpathsampling/engines/features/statics.py | 1 | 2708 | import numpy as np
from .shared import StaticContainerStore, StaticContainer
import mdtraj
from openpathsampling.netcdfplus import WeakLRUCache
variables = ['statics']
lazy = ['statics']
storables = ['statics']
dimensions = ['n_atoms', 'n_spatial']
def netcdfplus_init(store):
    """Attach a StaticContainerStore and a lazy 'statics' variable to `store`."""
    static_store = StaticContainerStore()
    # Cache recently-used containers; entries may be dropped under pressure.
    static_store.set_caching(WeakLRUCache(10000))
    name = store.prefix + 'statics'
    static_store.set_dimension_prefix_store(store)
    store.storage.create_store(name, static_store, False)
    # 'lazyobj.' means the container is only loaded when first accessed.
    store.create_variable(
        'statics',
        'lazyobj.' + name,
        description="the snapshot index (0..n_configuration-1) of "
                    "snapshot '{idx}'.")
@property
def coordinates(snapshot):
    """
    Returns
    -------
    coordinates: numpy.ndarray, shape=(atoms, 3), dtype=numpy.float32
        the atomic coordinates of the configuration. The coordinates are
        wrapped in a `simtk.unit.Unit`.
    """
    # Proxy to the (lazily loaded) statics container; None if unset.
    if snapshot.statics is not None:
        return snapshot.statics.coordinates
    return None
@coordinates.setter
def coordinates(self, value):
    # Replace the whole statics container, carrying over the current box
    # vectors; assigning None clears the container entirely.
    if value is not None:
        sc = StaticContainer(coordinates=value, box_vectors=self.box_vectors)
    else:
        sc = None
    self.statics = sc
@property
def box_vectors(snapshot):
    """
    Returns
    -------
    box_vectors: numpy.ndarray, shape=(3, 3), dtype=numpy.float32
        the box_vectors of the configuration. The coordinates are wrapped in a
        simtk.unit.Unit.
    """
    # Proxy to the (lazily loaded) statics container; None if unset.
    if snapshot.statics is not None:
        return snapshot.statics.box_vectors
    return None
@box_vectors.setter
def box_vectors(self, value):
    # Replace the whole statics container, carrying over the current
    # coordinates; assigning None clears the container entirely.
    if value is not None:
        sc = StaticContainer(box_vectors=value, coordinates=self.coordinates)
    else:
        sc = None
    self.statics = sc
@property
def md(snapshot):
    """
    Returns
    -------
    md : mdtraj.Trajectory
        the actual trajectory object. Can be used with all functions from mdtraj

    Notes
    -----
    Rather slow since the topology has to be made each time. Try to avoid it.
    Returns None (implicitly) when no statics container is present.
    """
    if snapshot.statics is not None:
        # Build a single-frame (1, n_atoms, 3) float32 array for mdtraj.
        n_atoms = snapshot.coordinates.shape[0]
        output = np.zeros([1, n_atoms, 3], np.float32)
        output[0, :, :] = snapshot.coordinates
        return mdtraj.Trajectory(output, snapshot.topology.mdtraj)
@property
def xyz(snapshot):
    """
    Returns
    -------
    xyz : numpy.ndarray, shape=(atoms, 3), dtype=numpy.float32
        atomic coordinates without dimensions. Be careful.
    """
    import simtk.unit as u
    coord = snapshot.coordinates
    # Strip simtk units if present.  isinstance (rather than the previous
    # exact `type(...) is u.Quantity` check) also handles Quantity
    # subclasses, which would otherwise be returned with units attached.
    if isinstance(coord, u.Quantity):
        return coord._value
    return coord
| lgpl-2.1 |
tzaffi/git-in-practice-repo | book/lib/python2.7/site-packages/setuptools/command/rotate.py | 125 | 2034 | import os
from setuptools import Command
from setuptools.compat import basestring
from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
class rotate(Command):
    """Delete older distributions"""
    description = "delete older distributions, keeping N newest files"
    user_options = [
        ('match=', 'm', "patterns to match (required)"),
        ('dist-dir=', 'd', "directory where the distributions are"),
        ('keep=', 'k', "number of matching distributions to keep"),
    ]
    boolean_options = []
    def initialize_options(self):
        self.match = None
        self.dist_dir = None
        self.keep = None
    def finalize_options(self):
        # --match and --keep are both required; --keep must parse as int.
        if self.match is None:
            raise DistutilsOptionError(
                "Must specify one or more (comma-separated) match patterns "
                "(e.g. '.zip' or '.egg')"
            )
        if self.keep is None:
            raise DistutilsOptionError("Must specify number of files to keep")
        try:
            self.keep = int(self.keep)
        except ValueError:
            raise DistutilsOptionError("--keep must be an integer")
        if isinstance(self.match, basestring):
            # Command-line form: split the comma-separated string into a
            # list of path-converted suffix patterns.
            self.match = [
                convert_path(p.strip()) for p in self.match.split(',')
            ]
        self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
    def run(self):
        self.run_command("egg_info")
        from glob import glob
        for pattern in self.match:
            pattern = self.distribution.get_name()+'*'+pattern
            files = glob(os.path.join(self.dist_dir,pattern))
            # Sort newest-first by mtime, then delete everything beyond the
            # newest `keep` entries (unless this is a dry run).
            files = [(os.path.getmtime(f),f) for f in files]
            files.sort()
            files.reverse()
            log.info("%d file(s) matching %s", len(files), pattern)
            files = files[self.keep:]
            for (t,f) in files:
                log.info("Deleting %s", f)
                if not self.dry_run:
                    os.unlink(f)
| mit |
dimara/ganeti | lib/netutils.py | 6 | 20765 | #
#
# Copyright (C) 2010, 2011, 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Ganeti network utility module.
This module holds functions that can be used in both daemons (all) and
the command line scripts.
"""
import errno
import os
import re
import socket
import struct
import IN
import logging
from ganeti import constants
from ganeti import errors
from ganeti import utils
from ganeti import vcluster
# Structure definition for getsockopt(SOL_SOCKET, SO_PEERCRED, ...):
# struct ucred { pid_t pid; uid_t uid; gid_t gid; };
#
# The GNU C Library defines gid_t and uid_t to be "unsigned int" and
# pid_t to "int".
#
# IEEE Std 1003.1-2008:
# "nlink_t, uid_t, gid_t, and id_t shall be integer types"
# "blksize_t, pid_t, and ssize_t shall be signed integer types"
# struct.pack/unpack format: one signed int (pid) and two unsigned ints
# (uid, gid), matching struct ucred above.
_STRUCT_UCRED = "iII"
_STRUCT_UCRED_SIZE = struct.calcsize(_STRUCT_UCRED)

# Workaround a bug in some linux distributions that don't define SO_PEERCRED
try:
  # pylint: disable=E1101
  _SO_PEERCRED = IN.SO_PEERCRED
except AttributeError:
  # 17 is the Linux value of SO_PEERCRED; used when the IN module does not
  # expose the constant.
  _SO_PEERCRED = 17

# Regexes used to find IP addresses in the output of ip.
_IP_RE_TEXT = r"[.:a-z0-9]+" # separate for testing purposes
_IP_FAMILY_RE = re.compile(r"(?P<family>inet6?)\s+(?P<ip>%s)/" % _IP_RE_TEXT,
                           re.IGNORECASE)

# Dict used to convert from a string representing an IP family to an IP
# version
_NAME_TO_IP_VER = {
  "inet": constants.IP4_VERSION,
  "inet6": constants.IP6_VERSION,
  }
def _GetIpAddressesFromIpOutput(ip_output):
  """Parses the output of the ip command and retrieves the IP addresses and
  version.

  @param ip_output: string containing the output of the ip command;
  @rtype: dict; (int, list)
  @return: a dict having as keys the IP versions and as values the
    corresponding list of addresses found in the IP output.

  """
  # Start with an empty address list for every known IP version.
  found = dict((version, []) for version in _NAME_TO_IP_VER.values())

  for line in ip_output.splitlines():
    match = _IP_FAMILY_RE.search(line)
    if match is None:
      continue
    ip = match.group("ip")
    # Only keep strings that actually parse as addresses.
    if IPAddress.IsValid(ip):
      found[_NAME_TO_IP_VER[match.group("family")]].append(ip)

  return found
def GetSocketCredentials(sock):
  """Returns the credentials of the foreign process connected to a socket.

  @param sock: Unix socket
  @rtype: tuple; (number, number, number)
  @return: The PID, UID and GID of the connected foreign process.

  """
  # SO_PEERCRED yields a packed "struct ucred"; decode it into its three
  # integer fields.
  raw = sock.getsockopt(socket.SOL_SOCKET, _SO_PEERCRED, _STRUCT_UCRED_SIZE)
  return struct.unpack(_STRUCT_UCRED, raw)
def IsValidInterface(ifname):
  """Validate an interface name.

  @type ifname: string
  @param ifname: Name of the network interface
  @return: boolean indicating whether the interface name is valid or not.

  """
  # An interface exists iff the kernel exposes it under sysfs.
  sysfs_path = utils.PathJoin("/sys/class/net", ifname)
  return os.path.exists(sysfs_path)
def GetInterfaceIpAddresses(ifname):
  """Returns the IP addresses associated to the interface.

  @type ifname: string
  @param ifname: Name of the network interface
  @return: A dict having for keys the IP version (either
    L{constants.IP4_VERSION} or L{constants.IP6_VERSION}) and for
    values the lists of IP addresses of the respective version
    associated to the interface; None when running ip fails

  """
  cmd_result = utils.RunCmd([constants.IP_COMMAND_PATH, "-o", "addr", "show",
                             ifname])
  if not cmd_result.failed:
    return _GetIpAddressesFromIpOutput(cmd_result.output)

  # Command failure: log it and signal the caller with None.
  logging.error("Error running the ip command while getting the IP"
                " addresses of %s", ifname)
  return None
def GetHostname(name=None, family=None):
  """Returns a Hostname object.

  @type name: str
  @param name: hostname or None
  @type family: int
  @param family: AF_INET | AF_INET6 | None
  @rtype: L{Hostname}
  @return: Hostname object
  @raise errors.OpPrereqError: in case of errors in resolving

  """
  try:
    return Hostname(name=name, family=family)
  except errors.ResolverError, err:
    # ResolverError args are (hostname, error-code, description); surface the
    # offending name (err[0]) and the human-readable reason (err[2]).
    raise errors.OpPrereqError("The given name (%s) does not resolve: %s" %
                               (err[0], err[2]), errors.ECODE_RESOLVER)
class Hostname(object):
  """Class implementing resolver and hostname functionality.

  """
  # Relaxed hostname pattern: lowercase alphanumerics plus '.', '_' and '-'
  # (underscores are deliberately allowed, see GetNormalizedName).
  _VALID_NAME_RE = re.compile("^[a-z0-9._-]{1,255}$")

  def __init__(self, name=None, family=None):
    """Initialize the host name object.

    If the name argument is None, it will use this system's name.

    @type family: int
    @param family: AF_INET | AF_INET6 | None
    @type name: str
    @param name: hostname or None

    """
    # Resolve the FQDN first, then its IP address for the requested family.
    self.name = self.GetFqdn(name)
    self.ip = self.GetIP(self.name, family=family)

  @classmethod
  def GetSysName(cls):
    """Legacy method the get the current system's name.

    """
    return cls.GetFqdn()

  @classmethod
  def GetFqdn(cls, hostname=None):
    """Return fqdn.

    If hostname is None the system's fqdn is returned.

    @type hostname: str
    @param hostname: name to be fqdn'ed
    @rtype: str
    @return: fqdn of given name, if it exists, unmodified name otherwise

    """
    if hostname is None:
      # Virtual clusters override the system hostname.
      virtfqdn = vcluster.GetVirtualHostname()
      if virtfqdn:
        result = virtfqdn
      else:
        result = socket.getfqdn()
    else:
      result = socket.getfqdn(hostname)

    return cls.GetNormalizedName(result)

  @staticmethod
  def GetIP(hostname, family=None):
    """Return IP address of given hostname.

    Supports both IPv4 and IPv6.

    @type hostname: str
    @param hostname: hostname to look up
    @type family: int
    @param family: AF_INET | AF_INET6 | None
    @rtype: str
    @return: IP address
    @raise errors.ResolverError: in case of errors in resolving

    """
    try:
      if family in (socket.AF_INET, socket.AF_INET6):
        result = socket.getaddrinfo(hostname, None, family)
      else:
        result = socket.getaddrinfo(hostname, None)
    except (socket.gaierror, socket.herror, socket.error), err:
      # hostname not found in DNS, or other socket exception in the
      # (code, description) format
      raise errors.ResolverError(hostname, err.args[0], err.args[1])

    # getaddrinfo() returns a list of 5-tuples (family, socktype, proto,
    # canonname, sockaddr). We return the first tuple's first address in
    # sockaddr
    try:
      return result[0][4][0]
    except IndexError, err:
      # we don't have here an actual error code, it's just that the
      # data type returned by getaddrinfo is not what we expected;
      # let's keep the same format in the exception arguments with a
      # dummy error code
      raise errors.ResolverError(hostname, 0,
                                 "Unknown error in getaddrinfo(): %s" % err)

  @classmethod
  def GetNormalizedName(cls, hostname):
    """Validate and normalize the given hostname.

    @attention: the validation is a bit more relaxed than the standards
        require; most importantly, we allow underscores in names
    @raise errors.OpPrereqError: when the name is not valid

    """
    hostname = hostname.lower()
    if (not cls._VALID_NAME_RE.match(hostname) or
        # double-dots, meaning empty label
        ".." in hostname or
        # empty initial label
        hostname.startswith(".")):
      raise errors.OpPrereqError("Invalid hostname '%s'" % hostname,
                                 errors.ECODE_INVAL)
    # A single trailing dot (absolute FQDN form) is stripped.
    if hostname.endswith("."):
      hostname = hostname.rstrip(".")
    return hostname
def ValidatePortNumber(port):
  """Returns the validated integer port number if it is valid.

  @param port: the port number to be validated (int or numeric string)
  @raise ValueError: if the port is not valid
  @rtype: int
  @return: the validated value.

  """
  try:
    value = int(port)
  except TypeError:
    # Non-numeric types (e.g. None, lists) are a caller bug, not bad input.
    raise errors.ProgrammerError("ValidatePortNumber called with non-numeric"
                                 " type %s." % port.__class__.__name__)
  except ValueError:
    raise ValueError("Invalid port value: '%s'" % port)

  # Valid TCP/UDP ports are 1..65535.
  if value <= 0 or value >= 2 ** 16:
    raise ValueError("Invalid port value: '%d'" % value)

  return value
def TcpPing(target, port, timeout=10, live_port_needed=False, source=None):
  """Simple ping implementation using TCP connect(2).

  Check if the given IP is reachable by doing attempting a TCP connect
  to it.

  @type target: str
  @param target: the IP to ping
  @type port: int
  @param port: the port to connect to
  @type timeout: int
  @param timeout: the timeout on the connection attempt
  @type live_port_needed: boolean
  @param live_port_needed: whether a closed port will cause the
      function to return failure, as if there was a timeout
  @type source: str or None
  @param source: if specified, will cause the connect to be made
      from this specific source address; failures to bind other
      than C{EADDRNOTAVAIL} will be ignored
  @rtype: boolean
  @return: True if the target was reachable (subject to live_port_needed)

  """
  logging.debug("Attempting to reach TCP port %s on target %s with a timeout"
                " of %s seconds", port, target, timeout)

  try:
    family = IPAddress.GetAddressFamily(target)
  except errors.IPAddressError, err:
    raise errors.ProgrammerError("Family of IP address given in parameter"
                                 " 'target' can't be determined: %s" % err)

  sock = socket.socket(family, socket.SOCK_STREAM)
  success = False

  if source is not None:
    try:
      sock.bind((source, 0))
    except socket.error, err:
      # NOTE(review): only EADDRNOTAVAIL flips the flag here, and the code
      # still proceeds to the connect attempt below (which overwrites
      # ``success``) — confirm this matches the documented intent of
      # ignoring other bind failures.
      if err[0] == errno.EADDRNOTAVAIL:
        success = False

  sock.settimeout(timeout)

  try:
    sock.connect((target, port))
    sock.close()
    success = True
  except socket.timeout:
    success = False
  except socket.error, err:
    # A refused connection means the host is up but the port is closed;
    # that counts as success unless the caller needs a live port.
    success = (not live_port_needed) and (err[0] == errno.ECONNREFUSED)

  return success
def GetDaemonPort(daemon_name):
  """Get the daemon port for this cluster.

  Note that this routine does not read a ganeti-specific file, but
  instead uses C{socket.getservbyname} to allow pre-customization of
  this parameter outside of Ganeti.

  @type daemon_name: string
  @param daemon_name: daemon name (in constants.DAEMONS_PORTS)
  @rtype: int

  """
  if daemon_name not in constants.DAEMONS_PORTS:
    raise errors.ProgrammerError("Unknown daemon: %s" % daemon_name)

  (proto, default_port) = constants.DAEMONS_PORTS[daemon_name]
  try:
    # A services-database entry overrides the built-in default.
    return socket.getservbyname(daemon_name, proto)
  except socket.error:
    return default_port
class IPAddress(object):
  """Class that represents an IP address.

  Base class for L{IP4Address} and L{IP6Address}; the class attributes
  below are overridden by the concrete subclasses.

  """
  # Address length in bits (32 for IPv4, 128 for IPv6).
  iplen = 0
  # socket address family (AF_INET / AF_INET6); None means "either".
  family = None
  # CIDR of the loopback network for this family.
  loopback_cidr = None

  @staticmethod
  def _GetIPIntFromString(address):
    """Abstract method to please pylint.

    """
    raise NotImplementedError

  @classmethod
  def IsValid(cls, address):
    """Validate a IP address.

    @type address: str
    @param address: IP address to be checked
    @rtype: bool
    @return: True if valid, False otherwise

    """
    if cls.family is None:
      # Called on the base class: auto-detect the family first.
      try:
        family = cls.GetAddressFamily(address)
      except errors.IPAddressError:
        return False
    else:
      family = cls.family

    try:
      socket.inet_pton(family, address)
      return True
    except socket.error:
      return False

  @classmethod
  def ValidateNetmask(cls, netmask):
    """Validate a netmask suffix in CIDR notation.

    @type netmask: int
    @param netmask: netmask suffix to validate
    @rtype: bool
    @return: True if valid, False otherwise

    """
    assert (isinstance(netmask, (int, long)))
    return 0 < netmask <= cls.iplen

  @classmethod
  def Own(cls, address):
    """Check if the current host has the given IP address.

    This is done by trying to bind the given address. We return True if we
    succeed or false if a socket.error is raised.

    @type address: str
    @param address: IP address to be checked
    @rtype: bool
    @return: True if we own the address, False otherwise

    """
    if cls.family is None:
      try:
        family = cls.GetAddressFamily(address)
      except errors.IPAddressError:
        return False
    else:
      family = cls.family

    # UDP socket: no connection is made, binding alone answers the question.
    s = socket.socket(family, socket.SOCK_DGRAM)
    success = False
    try:
      try:
        s.bind((address, 0))
        success = True
      except socket.error:
        success = False
    finally:
      s.close()
    return success

  @classmethod
  def InNetwork(cls, cidr, address):
    """Determine whether an address is within a network.

    @type cidr: string
    @param cidr: Network in CIDR notation, e.g. '192.0.2.0/24', '2001:db8::/64'
    @type address: str
    @param address: IP address
    @rtype: bool
    @return: True if address is in cidr, False otherwise

    """
    address_int = cls._GetIPIntFromString(address)
    subnet = cidr.split("/")
    assert len(subnet) == 2
    try:
      prefix = int(subnet[1])
    except ValueError:
      return False

    assert 0 <= prefix <= cls.iplen
    target_int = cls._GetIPIntFromString(subnet[0])
    # Convert prefix netmask to integer value of netmask
    # (arithmetic binds tighter than '^' and '>>', so this reads as
    # ((2**iplen)-1) ^ (((2**iplen)-1) >> prefix)).
    netmask_int = (2 ** cls.iplen) - 1 ^ ((2 ** cls.iplen) - 1 >> prefix)
    # Calculate hostmask
    hostmask_int = netmask_int ^ (2 ** cls.iplen) - 1
    # Calculate network address by and'ing netmask
    network_int = target_int & netmask_int
    # Calculate broadcast address by or'ing hostmask
    broadcast_int = target_int | hostmask_int

    return network_int <= address_int <= broadcast_int

  @staticmethod
  def GetAddressFamily(address):
    """Get the address family of the given address.

    @type address: str
    @param address: ip address whose family will be returned
    @rtype: int
    @return: C{socket.AF_INET} or C{socket.AF_INET6}
    @raise errors.GenericError: for invalid addresses

    """
    # Try IPv4 first, then IPv6; the constructors validate for us.
    try:
      return IP4Address(address).family
    except errors.IPAddressError:
      pass

    try:
      return IP6Address(address).family
    except errors.IPAddressError:
      pass

    raise errors.IPAddressError("Invalid address '%s'" % address)

  @staticmethod
  def GetVersionFromAddressFamily(family):
    """Convert an IP address family to the corresponding IP version.

    @type family: int
    @param family: IP address family, one of socket.AF_INET or socket.AF_INET6
    @return: an int containing the IP version, one of L{constants.IP4_VERSION}
             or L{constants.IP6_VERSION}
    @raise errors.ProgrammerError: for unknown families

    """
    if family == socket.AF_INET:
      return constants.IP4_VERSION
    elif family == socket.AF_INET6:
      return constants.IP6_VERSION

    raise errors.ProgrammerError("%s is not a valid IP address family" % family)

  @staticmethod
  def GetAddressFamilyFromVersion(version):
    """Convert an IP version to the corresponding IP address family.

    @type version: int
    @param version: IP version, one of L{constants.IP4_VERSION} or
                    L{constants.IP6_VERSION}
    @return: an int containing the IP address family, one of C{socket.AF_INET}
             or C{socket.AF_INET6}
    @raise errors.ProgrammerError: for unknown IP versions

    """
    if version == constants.IP4_VERSION:
      return socket.AF_INET
    elif version == constants.IP6_VERSION:
      return socket.AF_INET6

    raise errors.ProgrammerError("%s is not a valid IP version" % version)

  @staticmethod
  def GetClassFromIpVersion(version):
    """Return the IPAddress subclass for the given IP version.

    @type version: int
    @param version: IP version, one of L{constants.IP4_VERSION} or
                    L{constants.IP6_VERSION}
    @return: a subclass of L{netutils.IPAddress}
    @raise errors.ProgrammerError: for unknown IP versions

    """
    if version == constants.IP4_VERSION:
      return IP4Address
    elif version == constants.IP6_VERSION:
      return IP6Address

    raise errors.ProgrammerError("%s is not a valid IP version" % version)

  @staticmethod
  def GetClassFromIpFamily(family):
    """Return the IPAddress subclass for the given IP family.

    @param family: IP family (one of C{socket.AF_INET} or C{socket.AF_INET6})
    @return: a subclass of L{netutils.IPAddress}
    @raise errors.ProgrammerError: for unknown IP versions

    """
    return IPAddress.GetClassFromIpVersion(
        IPAddress.GetVersionFromAddressFamily(family))

  @classmethod
  def IsLoopback(cls, address):
    """Determine whether it is a loopback address.

    @type address: str
    @param address: IP address to be checked
    @rtype: bool
    @return: True if loopback, False otherwise

    """
    try:
      return cls.InNetwork(cls.loopback_cidr, address)
    except errors.IPAddressError:
      return False
class IP4Address(IPAddress):
  """IPv4 address class.

  """
  iplen = 32
  family = socket.AF_INET
  loopback_cidr = "127.0.0.0/8"

  def __init__(self, address):
    """Constructor for IPv4 address.

    @type address: str
    @param address: IP address
    @raises errors.IPAddressError: if address invalid

    """
    IPAddress.__init__(self)
    if not self.IsValid(address):
      raise errors.IPAddressError("IPv4 Address %s invalid" % address)

    self.address = address

  @staticmethod
  def _GetIPIntFromString(address):
    """Get integer value of IPv4 address.

    @type address: str
    @param address: IPv4 address in dotted-quad notation
    @rtype: int
    @return: integer value of given IP address

    """
    octets = address.split(".")
    assert len(octets) == 4
    # Fold the four octets into one 32-bit integer, most significant first.
    value = 0
    for octet in octets:
      value = (value << 8) | int(octet)
    return value
class IP6Address(IPAddress):
  """IPv6 address class.

  """
  iplen = 128
  family = socket.AF_INET6
  loopback_cidr = "::1/128"

  def __init__(self, address):
    """Constructor for IPv6 address.

    @type address: str
    @param address: IP address
    @raises errors.IPAddressError: if address invalid

    """
    IPAddress.__init__(self)
    if not self.IsValid(address):
      raise errors.IPAddressError("IPv6 Address [%s] invalid" % address)
    self.address = address

  @staticmethod
  def _GetIPIntFromString(address):
    """Get integer value of IPv6 address.

    @type address: str
    @param address: IPv6 address in colon-hex notation
    @rtype: int
    @return: integer value of given IP address

    """
    doublecolons = address.count("::")
    # At most one "::" shorthand is legal in an IPv6 address.
    assert not doublecolons > 1
    if doublecolons == 1:
      # We have a shorthand address, expand it
      parts = []
      twoparts = address.split("::")
      # Number of explicit groups on both sides of the "::".
      sep = len(twoparts[0].split(":")) + len(twoparts[1].split(":"))
      parts = twoparts[0].split(":")
      # Insert as many zero groups as the shorthand elided.
      parts.extend(["0"] * (8 - sep))
      parts += twoparts[1].split(":")
    else:
      parts = address.split(":")

    address_int = 0
    for part in parts:
      # Empty strings (from a leading/trailing "::") count as zero groups.
      # NOTE(review): an embedded dotted-quad form such as "::ffff:192.0.2.1"
      # would make int(part, 16) raise ValueError here — confirm callers only
      # pass pure colon-hex addresses.
      address_int = (address_int << 16) + int(part or "0", 16)
    return address_int
def FormatAddress(address, family=None):
  """Format a socket address

  @type address: family specific (usually tuple)
  @param address: address, as reported by this class
  @type family: integer
  @param family: socket family (one of socket.AF_*) or None; when None the
      family is detected from the host part of the address
  @raise errors.ParameterError: when the address does not match the family

  """
  if family is None:
    try:
      family = IPAddress.GetAddressFamily(address[0])
    except errors.IPAddressError:
      raise errors.ParameterError(address)

  # Unix sockets carry peer credentials as a (pid, uid, gid) triple.
  if family == socket.AF_UNIX and len(address) == 3:
    return "pid=%s, uid=%s, gid=%s" % address

  if family in (socket.AF_INET, socket.AF_INET6) and len(address) == 2:
    host, port = address
    # IPv6 hosts need brackets so the port separator is unambiguous.
    text = "[%s]" % host if family == socket.AF_INET6 else host
    return text if port is None else "%s:%s" % (text, port)

  raise errors.ParameterError(family, address)
| bsd-2-clause |
Krossom/python-for-android | python3-alpha/python3-src/Lib/idlelib/Bindings.py | 130 | 3295 | """Define the menu contents, hotkeys, and event bindings.
There is additional configuration information in the EditorWindow class (and
subclasses): the menus are created there based on the menu_specs (class)
variable, and menus not created are silently skipped in the code here. This
makes it possible, for example, to define a Debug menu which is only present in
the PythonShell window, and a Format menu which is only present in the Editor
windows.
"""
import sys
from idlelib.configHandler import idleConf
from idlelib import macosxSupport
# Menu layout: a list of (menu-name, item-list) pairs. Each item is either
# None (a separator) or (label, virtual-event) where an underscore in the
# label prefixes the shortcut character and a leading '!' marks a checkable
# item.
menudefs = [
 # underscore prefixes character to underscore
 ('file', [
   ('_New Window', '<<open-new-window>>'),
   ('_Open...', '<<open-window-from-file>>'),
   ('Open _Module...', '<<open-module>>'),
   ('Class _Browser', '<<open-class-browser>>'),
   ('_Path Browser', '<<open-path-browser>>'),
   None,
   ('_Save', '<<save-window>>'),
   ('Save _As...', '<<save-window-as-file>>'),
   ('Save Cop_y As...', '<<save-copy-of-window-as-file>>'),
   None,
   ('Prin_t Window', '<<print-window>>'),
   None,
   ('_Close', '<<close-window>>'),
   ('E_xit', '<<close-all-windows>>'),
  ]),
 ('edit', [
   ('_Undo', '<<undo>>'),
   ('_Redo', '<<redo>>'),
   None,
   ('Cu_t', '<<cut>>'),
   ('_Copy', '<<copy>>'),
   ('_Paste', '<<paste>>'),
   ('Select _All', '<<select-all>>'),
   None,
   ('_Find...', '<<find>>'),
   ('Find A_gain', '<<find-again>>'),
   ('Find _Selection', '<<find-selection>>'),
   ('Find in Files...', '<<find-in-files>>'),
   ('R_eplace...', '<<replace>>'),
   ('Go to _Line', '<<goto-line>>'),
  ]),
 ('format', [
   ('_Indent Region', '<<indent-region>>'),
   ('_Dedent Region', '<<dedent-region>>'),
   ('Comment _Out Region', '<<comment-region>>'),
   ('U_ncomment Region', '<<uncomment-region>>'),
   ('Tabify Region', '<<tabify-region>>'),
   ('Untabify Region', '<<untabify-region>>'),
   ('Toggle Tabs', '<<toggle-tabs>>'),
   ('New Indent Width', '<<change-indentwidth>>'),
  ]),
 ('run', [
   ('Python Shell', '<<open-python-shell>>'),
  ]),
 ('shell', [
   ('_View Last Restart', '<<view-restart>>'),
   ('_Restart Shell', '<<restart-shell>>'),
  ]),
 ('debug', [
   ('_Go to File/Line', '<<goto-file-line>>'),
   ('!_Debugger', '<<toggle-debugger>>'),
   ('_Stack Viewer', '<<open-stack-viewer>>'),
   ('!_Auto-open Stack Viewer', '<<toggle-jit-stack-viewer>>'),
  ]),
 ('options', [
   ('_Configure IDLE...', '<<open-config-dialog>>'),
   None,
  ]),
 ('help', [
   ('_About IDLE', '<<about-idle>>'),
   None,
   ('_IDLE Help', '<<help>>'),
   ('Python _Docs', '<<python-docs>>'),
  ]),
]
if macosxSupport.runningAsOSXApp():
    # Running as a proper MacOS application bundle. This block restructures
    # the menus a little to make them conform better to the HIG.

    # quitItem is intentionally only captured, not reinserted: Quit lives in
    # the application menu on macOS.
    quitItem = menudefs[0][1][-1]
    closeItem = menudefs[0][1][-2]

    # Remove the last 3 items of the file menu: a separator, close window and
    # quit. Close window will be reinserted just above the save item, where
    # it should be according to the HIG. Quit is in the application menu.
    del menudefs[0][1][-3:]
    menudefs[0][1].insert(6, closeItem)

    # Remove the 'About' entry from the help menu, it is in the application
    # menu
    del menudefs[-1][1][0:2]

# Keybindings for the currently configured key set.
default_keydefs = idleConf.GetCurrentKeySet()

# sys was only needed at import time; keep the module namespace clean.
del sys
| apache-2.0 |
rednach/krill | test/test_macroresolver.py | 17 | 8483 | #!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
from shinken.macroresolver import MacroResolver
from shinken.commandcall import CommandCall
from shinken.objects import Command
class TestMacroResolver(ShinkenTest):
    """Tests for shinken.macroresolver.MacroResolver: command-line macro
    expansion for hosts and services (standard, special, on-demand and
    environment macros).
    """

    # setUp is inherited from ShinkenTest
    def setUp(self):
        # Load a dedicated configuration with the hosts/services/commands
        # these tests expand macros against.
        self.setup_with_file('etc/shinken_macroresolver.cfg')

    def get_mr(self):
        # Build a MacroResolver initialized from the loaded configuration.
        mr = MacroResolver()
        mr.init(self.conf)
        return mr

    def get_hst_svc(self):
        # Convenience accessor for the standard test service and host pair.
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        hst = self.sched.hosts.find_by_name("test_host_0")
        return (svc, hst)

    def test_resolv_simple(self):
        """A plain check command resolves all its standard macros."""
        mr = self.get_mr()
        (svc, hst) = self.get_hst_svc()
        data = svc.get_data_for_checks()
        com = mr.resolve_command(svc.check_command, data)
        print com
        self.assertEqual("plugins/test_servicecheck.pl --type=ok --failchance=5% --previous-state=PENDING --state-duration=0 --total-critical-on-host=0 --total-warning-on-host=0 --hostname test_host_0 --servicedesc test_ok_0 --custom custvalue", com)

    # Here call with a special macro TOTALHOSTSUP
    # but call it as arg. So will need 2 pass in macro resolver
    # at last to resolv it.
    def test_special_macros(self):
        """$TOTALHOSTSUP$ passed as an argument needs a second resolver pass."""
        mr = self.get_mr()
        (svc, hst) = self.get_hst_svc()
        data = svc.get_data_for_checks()
        hst.state = 'UP'
        dummy_call = "special_macro!$TOTALHOSTSUP$"
        cc = CommandCall(self.conf.commands, dummy_call)
        com = mr.resolve_command(cc, data)
        print com
        self.assertEqual('plugins/nothing 1', com)

    # Here call with a special macro HOSTREALM
    def test_special_macros_realm(self):
        """$HOSTREALM$ expands to the host's realm name."""
        mr = self.get_mr()
        (svc, hst) = self.get_hst_svc()
        data = svc.get_data_for_checks()
        hst.state = 'UP'
        dummy_call = "special_macro!$HOSTREALM$"
        cc = CommandCall(self.conf.commands, dummy_call)
        com = mr.resolve_command(cc, data)
        print com
        self.assertEqual('plugins/nothing Default', com)

    # For output macro we want to delete all illegal macro caracter
    def test_illegal_macro_output_chars(self):
        """Illegal characters are stripped from output-type macros.

        Applies to: $HOSTOUTPUT$, $HOSTPERFDATA$, $HOSTACKAUTHOR$,
        $HOSTACKCOMMENT$, $SERVICEOUTPUT$, $SERVICEPERFDATA$,
        $SERVICEACKAUTHOR$, and $SERVICEACKCOMMENT$.
        """
        mr = self.get_mr()
        (svc, hst) = self.get_hst_svc()
        data = svc.get_data_for_checks()
        illegal_macro_output_chars = self.sched.conf.illegal_macro_output_chars
        print "Illegal macros caracters:", illegal_macro_output_chars
        hst.output = 'monculcestdupoulet'
        dummy_call = "special_macro!$HOSTOUTPUT$"

        # Each configured illegal character must disappear from the output.
        for c in illegal_macro_output_chars:
            hst.output = 'monculcestdupoulet' + c
            cc = CommandCall(self.conf.commands, dummy_call)
            com = mr.resolve_command(cc, data)
            print com
            self.assertEqual('plugins/nothing monculcestdupoulet', com)

    def test_env_macros(self):
        """Macros are exported as NAGIOS_* environment variables."""
        mr = self.get_mr()
        (svc, hst) = self.get_hst_svc()
        data = svc.get_data_for_checks()
        data.append(self.conf)
        env = mr.get_env_macros(data)
        print "Env:", env
        self.assertNotEqual(env, {})
        self.assertEqual('test_host_0', env['NAGIOS_HOSTNAME'])
        self.assertEqual('0.0', env['NAGIOS_SERVICEPERCENTCHANGE'])
        self.assertEqual('custvalue', env['NAGIOS__SERVICECUSTNAME'])
        self.assertEqual('gnulinux', env['NAGIOS__HOSTOSTYPE'])
        # Resource macros ($USERn$) must never leak into the environment.
        self.assertNotIn('NAGIOS_USER1', env)

    def test_resource_file(self):
        """$USERn$ and custom variables come from the resource file."""
        mr = self.get_mr()
        (svc, hst) = self.get_hst_svc()
        data = svc.get_data_for_checks()
        dummy_call = "special_macro!$USER1$"
        cc = CommandCall(self.conf.commands, dummy_call)
        com = mr.resolve_command(cc, data)
        self.assertEqual('plugins/nothing plugins', com)
        dummy_call = "special_macro!$INTERESTINGVARIABLE$"
        cc = CommandCall(self.conf.commands, dummy_call)
        com = mr.resolve_command(cc, data)
        print "CUCU", com
        self.assertEqual('plugins/nothing interestingvalue', com)

        # Look for multiple = in lines, should split the first
        # and keep others in the macro value
        dummy_call = "special_macro!$ANOTHERVALUE$"
        cc = CommandCall(self.conf.commands, dummy_call)
        com = mr.resolve_command(cc, data)
        print "CUCU", com
        self.assertEqual('plugins/nothing blabla=toto', com)

    # Look at on demand macros
    def test_ondemand_macros(self):
        """On-demand macros ($MACRO:host$ / $MACRO:host:service$) resolve."""
        mr = self.get_mr()
        (svc, hst) = self.get_hst_svc()
        data = hst.get_data_for_checks()
        hst.state = 'UP'
        svc.state = 'UNKNOWN'

        # Ok sample host call
        dummy_call = "special_macro!$HOSTSTATE:test_host_0$"
        cc = CommandCall(self.conf.commands, dummy_call)
        com = mr.resolve_command(cc, data)
        print com
        self.assertEqual('plugins/nothing UP', com)

        # Call with a void host name, means : myhost
        data = hst.get_data_for_checks()
        dummy_call = "special_macro!$HOSTSTATE:$"
        cc = CommandCall(self.conf.commands, dummy_call)
        com = mr.resolve_command(cc, data)
        print com
        self.assertEqual('plugins/nothing UP', com)

        # Now with a service, for our implicit host state
        data = svc.get_data_for_checks()
        dummy_call = "special_macro!$HOSTSTATE:test_host_0$"
        cc = CommandCall(self.conf.commands, dummy_call)
        com = mr.resolve_command(cc, data)
        print com
        self.assertEqual('plugins/nothing UP', com)

        # Now with a service, for our implicit host state
        data = svc.get_data_for_checks()
        dummy_call = "special_macro!$HOSTSTATE:$"
        cc = CommandCall(self.conf.commands, dummy_call)
        com = mr.resolve_command(cc, data)
        print com
        self.assertEqual('plugins/nothing UP', com)

        # Now prepare another service
        svc2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_another_service")
        svc2.output = 'you should not pass'

        # Now call this data from our previous service
        data = svc.get_data_for_checks()
        dummy_call = "special_macro!$SERVICEOUTPUT:test_host_0:test_another_service$"
        cc = CommandCall(self.conf.commands, dummy_call)
        com = mr.resolve_command(cc, data)
        print com
        self.assertEqual('plugins/nothing you should not pass', com)

        # Ok now with a host implicit way
        data = svc.get_data_for_checks()
        dummy_call = "special_macro!$SERVICEOUTPUT::test_another_service$"
        cc = CommandCall(self.conf.commands, dummy_call)
        com = mr.resolve_command(cc, data)
        print com
        self.assertEqual('plugins/nothing you should not pass', com)

    # Look at on demand macros
    def test_hostadressX_macros(self):
        """Numbered address macros like $HOSTADDRESS6$ resolve."""
        mr = self.get_mr()
        (svc, hst) = self.get_hst_svc()
        data = hst.get_data_for_checks()

        # Ok sample host call
        dummy_call = "special_macro!$HOSTADDRESS6$"
        cc = CommandCall(self.conf.commands, dummy_call)
        com = mr.resolve_command(cc, data)
        print com
        self.assertEqual('plugins/nothing ::1', com)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| agpl-3.0 |
nelsonag/openmc | tests/regression_tests/track_output/test.py | 10 | 1359 | import glob
import os
from subprocess import call
import shutil
import pytest
from tests.testing_harness import TestHarness
class TrackTestHarness(TestHarness):
    """Regression harness for OpenMC particle-track output: checks that track
    HDF5 files are written and that the track-to-vtk conversion script
    produces a readable .pvtp file.
    """

    def _test_output_created(self):
        """Make sure statepoint.* and track* have been created."""
        TestHarness._test_output_created(self)
        outputs = glob.glob('track_1_1_*.h5')
        assert len(outputs) == 2, 'Expected two track files.'

    def _get_results(self):
        """Digest info in the statepoint and return as a string."""
        # Run the track-to-vtk conversion script.
        call(['../../../scripts/openmc-track-to-vtk', '-o', 'poly'] +
             glob.glob('track_1_1_*.h5'))

        # Make sure the vtk file was created then return it's contents.
        assert os.path.isfile('poly.pvtp'), 'poly.pvtp file not found.'

        with open('poly.pvtp', 'r') as fin:
            outstr = fin.read()

        return outstr

    def _cleanup(self):
        # Remove the track and vtk artifacts on top of the base cleanup.
        TestHarness._cleanup(self)
        output = glob.glob('track*') + glob.glob('poly*')
        for f in output:
            if os.path.exists(f):
                os.remove(f)
def test_track_output():
    """Run the track-output regression harness end to end."""
    # If the vtk python module is not available, the track-to-vtk conversion
    # cannot run, so skip the whole test.
    pytest.importorskip('vtk')
    TrackTestHarness('statepoint.2.h5').main()
| mit |
Sutto/cloud-custodian | tests/zpill.py | 1 | 10961 | # Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import fnmatch
import json
import os
import shutil
import zipfile
from datetime import datetime, timedelta, tzinfo
from distutils.util import strtobool
import boto3
import placebo
from botocore.response import StreamingBody
from placebo import pill
from six import StringIO
from c7n.testing import CustodianTestCore
###########################################################################
# BEGIN PLACEBO MONKEY PATCH
#
# Placebo is effectively abandoned upstream, since mitch went back to work at AWS, irony...
# These monkeypatch patches represent fixes on trunk of that repo that have not been released
# into an extant version, we carry them here. We can drop this when this issue is resolved
#
# https://github.com/garnaat/placebo/issues/63
#
# License - Apache 2.0
# Copyright (c) 2015 Mitch Garnaat
class UTC(tzinfo):
    """Minimal concrete tzinfo for Coordinated Universal Time."""

    # UTC never deviates from itself and has no daylight saving time.
    _ZERO = timedelta(0)

    def tzname(self, dt):
        return "UTC"

    def utcoffset(self, dt):
        return self._ZERO

    def dst(self, dt):
        return self._ZERO


# Module-level singleton used when reconstructing recorded datetimes.
utc = UTC()
def deserialize(obj):
    """Convert JSON dicts back into objects.

    Used as a json ``object_hook``: dicts tagged with ``__class__`` are
    rebuilt into the matching Python object; anything else is returned
    unchanged.
    """
    # Be careful of shallow copy here
    target = dict(obj)
    class_name = None
    if "__class__" in target:
        class_name = target.pop("__class__")
    if "__module__" in obj:
        # NOTE(review): "__module__" is popped from the original dict
        # *obj*, while "__class__" above was popped only from the copy
        # *target* -- presumably mirrors upstream placebo behavior, but
        # worth confirming the asymmetry is intentional.
        obj.pop("__module__")
    # Use getattr(module, class_name) for custom types if needed
    if class_name == "datetime":
        # Remaining keys of *target* are the datetime components
        # (year, month, ...); recorded times are rebuilt as UTC.
        return datetime(tzinfo=utc, **target)
    if class_name == "StreamingBody":
        return StringIO(target["body"])
    # Return unrecognized structures as-is
    return obj
def serialize(obj):
    """Convert objects into JSON structures.

    Used as the json ``default`` hook: tags each value with its class
    (and module, when available) so :func:`deserialize` can rebuild it.
    Raises TypeError for unsupported types, per the json protocol.
    """
    # Record class and module information for deserialization
    result = {"__class__": obj.__class__.__name__}
    try:
        result["__module__"] = obj.__module__
    except AttributeError:
        pass
    # Convert objects to dictionary representation based on type
    if isinstance(obj, datetime):
        result["year"] = obj.year
        result["month"] = obj.month
        result["day"] = obj.day
        result["hour"] = obj.hour
        result["minute"] = obj.minute
        result["second"] = obj.second
        result["microsecond"] = obj.microsecond
        return result
    if isinstance(obj, StreamingBody):
        result["body"] = obj.read()
        # The body stream can only be read once; reattach a fresh stream
        # so the caller can still consume the response afterwards.
        # NOTE(review): StringIO assumes the body is text -- confirm
        # behavior for binary payloads.
        obj._raw_stream = StringIO(result["body"])
        obj._amount_read = 0
        return result
    if isinstance(obj, bytes):
        # Unlike the branches above, bytes are returned as a plain str,
        # which json then encodes directly.
        return obj.decode('utf8')
    # Raise a TypeError if the object isn't recognized
    raise TypeError("Type not serializable")
placebo.pill.serialize = serialize
placebo.pill.deserialize = deserialize
# END PLACEBO MONKEY
##########################################################################
class BluePill(pill.Pill):
    """Pill variant that tracks usage of recorded .json response files.

    On playback it snapshots every response file on disk; files are
    checked off as they are served.  ``stop()`` then reports recordings
    that were never used, and any file served twice is reported inline.
    """

    def playback(self):
        super(BluePill, self).playback()
        self._avail = self.get_available()

    def get_available(self):
        # All recorded response files currently present in the data dir.
        return {
            os.path.join(self.data_path, name)
            for name in fnmatch.filter(os.listdir(self.data_path), "*.json")
        }

    def get_next_file_path(self, service, operation):
        fn = super(BluePill, self).get_next_file_path(service, operation)
        # A file should leave the available set exactly once; seeing it
        # again means the same recording was replayed twice.
        if fn not in self._avail:
            print("\ndouble use %s\n" % fn)
        else:
            self._avail.remove(fn)
        return fn

    def stop(self):
        result = super(BluePill, self).stop()
        if self._avail:
            print("Unused json files \n %s" % ("\n".join(sorted(self._avail))))
        return result
class ZippedPill(pill.Pill):
    """A placebo pill whose recorded responses live in one zip archive.

    Many test cases can share a single archive file (*path*); each test
    case's recordings are namespaced by the pill *prefix*.
    """
    def __init__(self, path, prefix=None, debug=False):
        super(ZippedPill, self).__init__(prefix, debug)
        self.path = path
        # Response files actually served during playback.
        self._used = set()
        self.archive = None
    def playback(self):
        self.archive = zipfile.ZipFile(self.path, "r")
        self._files = set(self.archive.namelist())
        return super(ZippedPill, self).playback()
    def record(self):
        self.archive = zipfile.ZipFile(self.path, "a", zipfile.ZIP_DEFLATED)
        self._files = set()
        # Existing entries for this prefix must be replaced, not appended.
        files = set([n for n in self.archive.namelist() if n.startswith(self.prefix)])
        if not files:
            return super(ZippedPill, self).record()
        # We can't update files in a zip, so copy
        self.archive.close()
        os.rename(self.path, "%s.tmp" % self.path)
        src = zipfile.ZipFile("%s.tmp" % self.path, "r")
        self.archive = zipfile.ZipFile(self.path, "w", zipfile.ZIP_DEFLATED)
        # Rewrite the archive, dropping this prefix's stale entries.
        for n in src.namelist():
            if n in files:
                continue
            self.archive.writestr(n, src.read(n))
        os.remove("%s.tmp" % self.path)
        return super(ZippedPill, self).record()
    def stop(self):
        super(ZippedPill, self).stop()
        if self.archive:
            self.archive.close()
    def save_response(self, service, operation, response_data, http_response=200):
        """Write one recorded API response into the archive as JSON."""
        filepath = self.get_new_file_path(service, operation)
        pill.LOG.debug("save_response: path=%s", filepath)
        json_data = {"status_code": http_response, "data": response_data}
        self.archive.writestr(
            filepath,
            json.dumps(json_data, indent=4, default=pill.serialize),
            zipfile.ZIP_DEFLATED,
        )
        self._files.add(filepath)
    def load_response(self, service, operation):
        """Read the next recorded response; returns (fake_http, data)."""
        response_file = self.get_next_file_path(service, operation)
        self._used.add(response_file)
        pill.LOG.debug("load_responses: %s", response_file)
        response_data = json.loads(
            self.archive.read(response_file), object_hook=pill.deserialize
        )
        return (
            pill.FakeHttpResponse(response_data["status_code"]), response_data["data"]
        )
    def get_new_file_path(self, service, operation):
        # Next unused "<prefix>.<service>.<operation>_<index>.json" name.
        base_name = "{0}.{1}".format(service, operation)
        if self.prefix:
            base_name = "{0}.{1}".format(self.prefix, base_name)
        pill.LOG.debug("get_new_file_path: %s", base_name)
        index = 0
        glob_pattern = os.path.join(self._data_path, base_name + "*")
        # Scan existing entries to find the highest index used so far.
        for file_path in fnmatch.filter(self._files, glob_pattern):
            file_name = os.path.basename(file_path)
            m = self.filename_re.match(file_name)
            if m:
                i = int(m.group("index"))
                if i > index:
                    index = i
        index += 1
        return os.path.join(self._data_path, "{0}_{1}.json".format(base_name, index))
    def get_next_file_path(self, service, operation):
        # Serve recordings in index order, wrapping back to index 1 when
        # the sequence is exhausted (for repeated identical API calls).
        base_name = "{0}.{1}".format(service, operation)
        if self.prefix:
            base_name = "{0}.{1}".format(self.prefix, base_name)
        pill.LOG.debug("get_next_file_path: %s", base_name)
        next_file = None
        while next_file is None:
            index = self._index.setdefault(base_name, 1)
            fn = os.path.join(self._data_path, base_name + "_{0}.json".format(index))
            # Zip member names always use forward slashes.
            fn = fn.replace('\\', '/')
            if fn in self._files:
                next_file = fn
                self._index[base_name] += 1
                self._files.add(fn)
            elif index != 1:
                # Ran past the last recording; wrap around and retry.
                self._index[base_name] = 1
            else:
                # we are looking for the first index and it's not here
                raise IOError("response file ({0}) not found".format(fn))
        return fn
def attach(session, data_path, prefix=None, debug=False):
    """Attach a :class:`ZippedPill` to *session* for the given *prefix*.

    Returns the pill so the caller can put it into record or playback
    mode and stop it later.
    """
    # Renamed the local from 'pill' -- it shadowed the module-level
    # 'pill' import (from placebo import pill) used elsewhere in this file.
    zipped_pill = ZippedPill(data_path, prefix=prefix, debug=debug)
    zipped_pill.attach(session, prefix)
    return zipped_pill
class PillTest(CustodianTestCore):
    """Test-case base wiring placebo record/replay into boto3 sessions."""
    # Shared zip archive of recorded responses (zdata mode).
    archive_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "placebo_data.zip"
    )
    # Per-test-case directories of individual .json recordings.
    placebo_dir = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "data", "placebo"
    )
    output_dir = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "data", "output"
    )
    recording = False
    def cleanUp(self):
        # Registered via addCleanup; releases the pill reference after a test.
        self.pill = None
    def record_flight_data(self, test_case, zdata=False, augment=False):
        """Attach a recording pill and return a boto3 session factory.

        test_case : name of the recording (directory name or zip prefix)
        zdata     : record into the shared zip archive instead of a dir
        augment   : keep existing directory recordings instead of wiping
        """
        self.recording = True
        test_dir = os.path.join(self.placebo_dir, test_case)
        if not (zdata or augment):
            # Fresh recording: wipe any previous flight data first.
            if os.path.exists(test_dir):
                shutil.rmtree(test_dir)
            os.makedirs(test_dir)
        session = boto3.Session()
        default_region = session.region_name
        if not zdata:
            pill = placebo.attach(session, test_dir)
        else:
            pill = attach(session, self.archive_path, test_case, debug=True)
        pill.record()
        self.pill = pill
        self.addCleanup(pill.stop)
        self.addCleanup(self.cleanUp)
        def factory(region=None, assume=None):
            # Requests for a non-default region get their own session
            # and recording pill (directory mode only).
            if region and region != default_region:
                new_session = boto3.Session(region_name=region)
                assert not zdata
                new_pill = placebo.attach(new_session, test_dir, debug=True)
                new_pill.record()
                self.addCleanup(new_pill.stop)
                return new_session
            return session
        return factory
    def replay_flight_data(self, test_case, zdata=False, region=None):
        """Return a session factory that replays recorded flight data.

        The `region` argument is to allow functional tests to override the
        default region. It is unused when replaying stored data.
        """
        # C7N_FUNCTIONAL=yes runs against live AWS instead of recordings.
        if strtobool(os.environ.get('C7N_FUNCTIONAL', 'no')):
            self.recording = True
            return lambda region=region, assume=None: boto3.Session(region_name=region)
        if not zdata:
            test_dir = os.path.join(self.placebo_dir, test_case)
            if not os.path.exists(test_dir):
                raise RuntimeError("Invalid Test Dir for flight data %s" % test_dir)
        session = boto3.Session()
        if not zdata:
            pill = placebo.attach(session, test_dir)
            # pill = BluePill()
            # pill.attach(session, test_dir)
        else:
            pill = attach(session, self.archive_path, test_case, False)
        pill.playback()
        self.addCleanup(pill.stop)
        self.addCleanup(self.cleanUp)
        return lambda region=None, assume=None: session
| apache-2.0 |
Mhynlo/SickRage | lib/hachoir_parser/video/mpeg_ts.py | 72 | 3528 | """
MPEG-2 Transport Stream parser.
Documentation:
- MPEG-2 Transmission
http://erg.abdn.ac.uk/research/future-net/digital-video/mpeg2-trans.html
Author: Victor Stinner
Creation date: 13 january 2007
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError, MissingField,
UInt8, Enum, Bit, Bits, RawBytes)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal
class Packet(FieldSet):
    """One MPEG-2 transport stream packet.

    Packets are normally 188 bytes; streams carrying error-correction
    data append 16 extra bytes, giving 204-byte packets (signalled by
    the "has_error" flag and exposed below as "error_correction").
    """
    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        # Fixed total size in bits: 204 bytes with error correction,
        # otherwise the standard 188 bytes.
        if self["has_error"].value:
            self._size = 204*8
        else:
            self._size = 188*8
    # Well-known values of the 13-bit program identifier (PID) field.
    PID = {
        0x0000: "Program Association Table (PAT)",
        0x0001: "Conditional Access Table (CAT)",
        # 0x0002..0x000f: reserved
        # 0x0010..0x1FFE: network PID, program map PID, elementary PID, etc.
        # TODO: Check above values
        #0x0044: "video",
        #0x0045: "audio",
        0x1FFF: "Null packet",
    }
    def createFields(self):
        # Every packet must begin with the 0x47 synchronization byte.
        yield textHandler(UInt8(self, "sync", 8), hexadecimal)
        if self["sync"].value != 0x47:
            raise ParserError("MPEG-2 TS: Invalid synchronization byte")
        # Header flags and PID (field order matches the wire format).
        yield Bit(self, "has_error")
        yield Bit(self, "payload_unit_start")
        yield Bit(self, "priority")
        yield Enum(textHandler(Bits(self, "pid", 13, "Program identifier"), hexadecimal), self.PID)
        yield Bits(self, "scrambling_control", 2)
        yield Bit(self, "has_adaptation")
        yield Bit(self, "has_payload")
        yield Bits(self, "counter", 4)
        yield RawBytes(self, "payload", 184)
        # The trailing 16 bytes exist only in 204-byte packets.
        if self["has_error"].value:
            yield RawBytes(self, "error_correction", 16)
    def createDescription(self):
        text = "Packet: PID %s" % self["pid"].display
        if self["payload_unit_start"].value:
            text += ", start of payload"
        return text
    def isValid(self):
        """Return an error message if the packet is invalid, else "" (falsy)."""
        if not self["has_payload"].value and not self["has_adaptation"].value:
            return u"No payload and no adaptation"
        pid = self["pid"].value
        # Reserved (0x0002-0x000f) and out-of-range PIDs are invalid.
        if (0x0002 <= pid <= 0x000f) or (0x2000 <= pid):
            return u"Invalid program identifier (%s)" % self["pid"].display
        return ""
class MPEG_TS(Parser):
    """MPEG-2 Transport Stream file parser (Python 2 code: uses xrange)."""
    PARSER_TAGS = {
        "id": "mpeg_ts",
        "category": "video",
        "file_ext": ("ts",),
        "min_size": 188*8,
        "description": u"MPEG-2 Transport Stream"
    }
    endian = BIG_ENDIAN
    def validate(self):
        """Cheap validity check: locate a sync byte, then parse 5 packets."""
        # A 0x47 sync byte must appear within one maximum-size
        # (204 byte) packet of the start.
        sync = self.stream.searchBytes("\x47", 0, 204*8)
        if sync is None:
            return "Unable to find synchronization byte"
        for index in xrange(5):
            try:
                packet = self["packet[%u]" % index]
            except (ParserError, MissingField):
                # Reaching EOF after at least one good packet is fine.
                if index and self.eof:
                    return True
                else:
                    return "Unable to get packet #%u" % index
            err = packet.isValid()
            if err:
                return "Packet #%u is invalid: %s" % (index, err)
        return True
    def createFields(self):
        # Walk the stream packet by packet, resynchronizing on the 0x47
        # byte; bytes skipped during resync become "incomplete_packet".
        while not self.eof:
            sync = self.stream.searchBytes("\x47", self.current_size, self.current_size+204*8)
            if sync is None:
                raise ParserError("Unable to find synchronization byte")
            elif sync:
                yield RawBytes(self, "incomplete_packet[]", (sync-self.current_size)//8)
            yield Packet(self, "packet[]")
| gpl-3.0 |
songmonit/CTTMSONLINE_V8 | addons/resource/__openerp__.py | 261 | 1941 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Resource',
    'version': '1.1',
    # NOTE: duplicate 'author' and 'website' keys removed -- the manifest
    # previously defined each twice with identical values, so the later
    # entries silently overwrote the earlier ones.
    'author': 'OpenERP SA',
    'category': 'Hidden/Dependency',
    'website': 'http://www.openerp.com',
    'description': """
Module for resource management.
===============================
A resource represent something that can be scheduled (a developer on a task or a
work center on manufacturing orders). This module manages a resource calendar
associated to every resource. It also manages the leaves of every resource.
""",
    'depends': ['base'],
    'data': [
        'security/ir.model.access.csv',
        'security/resource_security.xml',
        'resource_view.xml',
    ],
    'demo': ['resource_demo.xml'],
    'test': [
        'test/resource.yml',
        'test/duplicate_resource.yml',
    ],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
40023154/40023154_test2 | static/Brython3.1.1-20150328-091302/Lib/_codecs.py | 526 | 4147 |
def ascii_decode(*args,**kw):
pass
def ascii_encode(*args,**kw):
pass
def charbuffer_encode(*args,**kw):
pass
def charmap_build(*args,**kw):
pass
def charmap_decode(*args,**kw):
pass
def charmap_encode(*args,**kw):
pass
def decode(*args,**kw):
"""decode(obj, [encoding[,errors]]) -> object
Decodes obj using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a ValueError. Other possible values are 'ignore' and 'replace'
as well as any other name registered with codecs.register_error that is
able to handle ValueErrors."""
pass
def encode(*args,**kw):
"""encode(obj, [encoding[,errors]]) -> object
Encodes obj using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a ValueError. Other possible values are 'ignore', 'replace' and
'xmlcharrefreplace' as well as any other name registered with
codecs.register_error that can handle ValueErrors."""
pass
def escape_decode(*args,**kw):
pass
def escape_encode(*args,**kw):
pass
def latin_1_decode(*args,**kw):
pass
def latin_1_encode(*args,**kw):
pass
def lookup(encoding):
    """lookup(encoding) -> CodecInfo

    Looks up a codec tuple in the Python codec registry and returns
    a CodecInfo object. Raises LookupError for unsupported encodings."""
    if encoding in ('utf-8', 'utf_8'):
        from javascript import console
        console.log('encoding', encoding)
        import encodings.utf_8
        return encodings.utf_8.getregentry()
    # Bug fix: the LookupError was previously constructed but never
    # raised, so unknown encodings silently returned None.
    raise LookupError(encoding)
def lookup_error(*args,**kw):
"""lookup_error(errors) -> handler
Return the error handler for the specified error handling name
or raise a LookupError, if no handler exists under this name."""
pass
def mbcs_decode(*args,**kw):
pass
def mbcs_encode(*args,**kw):
pass
def raw_unicode_escape_decode(*args,**kw):
pass
def raw_unicode_escape_encode(*args,**kw):
pass
def readbuffer_encode(*args,**kw):
pass
def register(*args,**kw):
"""register(search_function)
Register a codec search function. Search functions are expected to take
one argument, the encoding name in all lower case letters, and return
a tuple of functions (encoder, decoder, stream_reader, stream_writer)
(or a CodecInfo object)."""
pass
def register_error(*args,**kw):
"""register_error(errors, handler)
Register the specified error handler under the name
errors. handler must be a callable object, that
will be called with an exception instance containing
information about the location of the encoding/decoding
error and must return a (replacement, new position) tuple."""
pass
def unicode_escape_decode(*args,**kw):
pass
def unicode_escape_encode(*args,**kw):
pass
def unicode_internal_decode(*args,**kw):
pass
def unicode_internal_encode(*args,**kw):
pass
def utf_16_be_decode(*args,**kw):
pass
def utf_16_be_encode(*args,**kw):
pass
def utf_16_decode(*args,**kw):
pass
def utf_16_encode(*args,**kw):
pass
def utf_16_ex_decode(*args,**kw):
pass
def utf_16_le_decode(*args,**kw):
pass
def utf_16_le_encode(*args,**kw):
pass
def utf_32_be_decode(*args,**kw):
pass
def utf_32_be_encode(*args,**kw):
pass
def utf_32_decode(*args,**kw):
pass
def utf_32_encode(*args,**kw):
pass
def utf_32_ex_decode(*args,**kw):
pass
def utf_32_le_decode(*args,**kw):
pass
def utf_32_le_encode(*args,**kw):
pass
def utf_7_decode(*args,**kw):
pass
def utf_7_encode(*args,**kw):
pass
def utf_8_decode(*args,**kw):
pass
def utf_8_encode(*args,**kw):
    """utf_8_encode(input, [errors]) -> (bytes, length consumed)

    Encode the string *input* as UTF-8 and return the encoded bytes
    together with the number of characters consumed.
    """
    # First positional argument is the text (shadows the builtin 'input').
    input=args[0]
    if len(args) == 2:
        errors = args[1]
    else:
        errors=kw.get('errors', 'strict')
    #todo need to deal with errors, but for now assume all is well.
    # NOTE(review): bytes(list_of_chars, 'utf-8') relies on Brython's
    # bytes() accepting an encoding alongside a list of 1-char strings;
    # CPython's bytes() would reject this call -- confirm before reuse.
    return (bytes([_f for _f in input], 'utf-8'), len(input))
| gpl-3.0 |
emailhippo/email-verify-api-docs | docs/conf.py | 1 | 8412 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Email Address Verification API documentation build configuration file, created by
# sphinx-quickstart on Sat Aug 29 13:07:24 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Email Address Verification API'
copyright = '2017, Email Hippo Ltd.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.0'
# The full version, including alpha/beta/rc tags.
release = '3.0.12'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'EmailAddressVerificationAPIDocumentationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'EmailAddressVerificationAPIDocumentation.tex', 'Email Address Verification API',
'Email Hippo Ltd.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'emailaddressverificationapidocumentation', 'Email Address Verification API',
['Email Hippo Ltd.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'EmailAddressVerificationAPIDocumentation', 'Email Address Verification API',
'Email Hippo Ltd.', 'EmailAddressVerificationAPIDocumentation', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 |
samkuehn/box-python-sdk | test/functional/mock_box/util/json_utils.py | 8 | 2196 | # coding: utf-8
from __future__ import unicode_literals
import codecs
import json
from test.functional.mock_box.db_model.file_model import FileModel
from test.functional.mock_box.db_model import DbModel
def serializer_factory(cls):
    """Build a JSONEncoder subclass for serializing DbModel instances.

    *cls* acts as the "primary" model type: fields of instances of *cls*
    are emitted as-is, while fields of other model types are included
    only when they are themselves JSON-serializable (otherwise None).
    """
    class BoxObjectSerializer(json.JSONEncoder):
        # pylint:disable=method-hidden
        # Relationship/collection attributes never serialized directly.
        _blacklist = ['metadata', 'files', 'folders']
        _cls = cls
        def default(self, o):
            # Dates/datetimes: emit ISO-8601 strings.
            if hasattr(o, 'isoformat'):
                return o.isoformat()
            if isinstance(o, DbModel):
                # NOTE(review): this mutates the *class* attribute _cls,
                # so the primary type can change across calls when the
                # factory was built with the DbModel base -- confirm that
                # this drift is intentional.
                if DbModel not in self._cls.__bases__:
                    self._cls = o.__class__
                fields = {}
                for field in (x for x in dir(o) if not x.startswith('_') and x not in self._blacklist):
                    data = o.__getattribute__(field)
                    if isinstance(o, self._cls):
                        fields[field] = data
                    else:
                        # Secondary objects: keep only JSON-safe values.
                        try:
                            json.dumps(data)
                            fields[field] = data
                        except (TypeError, UnicodeDecodeError):
                            fields[field] = None
                # Derive the API "type" from the model class name by
                # stripping the 5-char "Model" suffix (FileModel -> "file").
                object_type = type(o).__name__.lower()[:-5]
                fields['type'] = object_type
                # Rename e.g. "file_id" to the generic "id" key.
                object_id = object_type + '_id'
                if hasattr(o, 'id') and hasattr(o, object_id):
                    fields['id'] = o.__getattribute__(object_id)
                    del fields[object_id]
                # File content is raw data; never inline it in JSON.
                if isinstance(o, FileModel):
                    del fields['content']
                return fields
            return super(BoxObjectSerializer, self).default(o)
    return BoxObjectSerializer
def loads(string, **kwargs):
    """Deserialize a JSON string; thin wrapper over json.loads."""
    return json.loads(string, **kwargs)
def load(file_pointer, **kwargs):
    """Parse JSON from a binary file-like object, decoding it as UTF-8."""
    utf8_stream = codecs.getreader('utf-8')(file_pointer)
    return json.load(utf8_stream, **kwargs)
def dump(obj, file_pointer, **kwargs):
    """Serialize *obj* to an open file using the model-aware encoder."""
    return json.dump(obj, file_pointer, cls=serializer_factory(obj.__class__), check_circular=False, **kwargs)
def dumps(obj, **kwargs):
    """Serialize *obj* to a JSON string using the model-aware encoder."""
    return json.dumps(obj, cls=serializer_factory(obj.__class__), check_circular=False, **kwargs)
| apache-2.0 |
ZsTizy/heekscnc | nc/tnc151.py | 34 | 2662 | ################################################################################
# tnc151.py
#
# Post Processor for the Heidenhain TNC151 machine
#
import nc
import iso_modal
import math
################################################################################
class Creator(iso_modal.Creator):
    """Post-processor creator emitting Heidenhain TNC151 flavored output.

    Differences from the generic iso_modal creator: numbers carry an
    explicit sign and trailing zeros, the program header is deferred
    until the first unit selection (G70/G71), and tool changes are
    deferred until the next spindle command.
    """
    def __init__(self):
        iso_modal.Creator.__init__(self)
        self.fmt.add_plus = True
        self.fmt.add_trailing_zeros = True
        self.f.fmt.add_plus = True
        self.s.fmt.add_plus = True
        # Next block number for the N-word sequence.
        self.n = 1
        # Tool id waiting to be written with the next spindle command.
        self.waiting_t = None
        self.waiting_for_program_begin = False
    ######## Codes
    def SPACE(self): return(' ')
    def TOOL(self): return('T%i')
    ######## Overridden functions
    def write_blocknum(self):
        self.write(self.BLOCK() % self.n)
        self.n += 1
    def program_begin(self, id, name=''):
        # Defer the header; it is emitted by write_waiting_program_begin
        # once the units (imperial/metric) are known.
        self.waiting_for_program_begin = True
    def write_waiting_program_begin(self):
        if self.waiting_for_program_begin == True:
            self.write('% 123')
            self.waiting_for_program_begin = False
    def imperial(self):
        self.write_waiting_program_begin()
        self.write(' G70\n')
        self.fmt.number_of_decimal_places = 4
    def metric(self):
        self.write_waiting_program_begin()
        self.write(' G71\n')
        self.fmt.number_of_decimal_places = 3
    # no tool definition lines wanted
    def tool_defn(self, id, name='', params=None):
        pass
    # no comments wanted
    def comment(self, text):
        pass
    def spindle(self, s, clockwise):
        iso_modal.Creator.spindle(self, s, clockwise)
        # Flush any pending tool change alongside the spindle command.
        self.write_waiting_tool_change()
    def tool_change(self, id):
        # Don't emit the T-word yet; it is written with the next spindle.
        self.waiting_t = id
    def write_waiting_tool_change(self):
        if self.waiting_t:
            # Flush pending prep (G) codes on their own block first.
            if len(self.g_list) > 0:
                self.write_blocknum()
                for g in self.g_list:
                    self.write(self.SPACE() + g)
                self.g_list = []
                self.write('\n')
            self.write_blocknum()
            self.write(self.SPACE() + (self.TOOL() % self.waiting_t))
            self.write_preps()
            self.write_spindle()
            self.write_misc()
            self.write('\n')
            self.t = self.waiting_t
            self.waiting_t = None
    def workplane(self, id):
        pass
################################################################################
nc.creator = Creator()
| bsd-3-clause |
JamesClough/dagology | dagology/algorithms/midpoint_scaling_dimension.py | 1 | 1758 | """
Midpoint Scaling dimension estimator
"""
# Copyright (C) 2016 by
# James Clough <james.clough91@gmail.com>
# All rights reserved.
# BSD license.
__author__ = "\n".join(["James Clough (james.clough91@gmail.com)"])
import networkx as nx
import numpy as np
import dagology as dag
__all__ = ['mpsd']
def sub_interval_sizes(G, a, b, i):
    """Sizes of the two sub-intervals [a, i] and [i, b] of a DAG interval.

    Parameters
    ----------
    G : Networkx DiGraph
    a : Node in G
    b : Node in G, such that a < b (a precedes b in the graph)
    i : Node in G, such that a < i < b

    Returns
    -------
    2-tuple of the number of nodes in the [a, i] interval and the
    [i, b] interval.
    """
    lower_half = dag.interval(G, a, i)
    upper_half = dag.interval(G, i, b)
    return (lower_half.number_of_nodes(), upper_half.number_of_nodes())
def mpsd(G):
    """Calculate the midpoint scaling dimension of a DAG.

    Takes the interval between the endpoints of the longest path and
    scans the path for the midpoint maximising the smaller of the two
    sub-interval sizes; the dimension estimate follows from how the
    interval size halves at that midpoint.

    Parameters
    ----------
    G : Networkx DiGraph

    Returns
    -------
    float -- estimated dimension; 0. when G is too small to estimate
    (no edges, or longest path shorter than 5 nodes).
    """
    if G.number_of_edges() == 0:
        return 0.
    LP = nx.dag_longest_path(G)
    if len(LP) < 5:
        return 0.
    u, v = LP[0], LP[-1]
    I = dag.interval(G, u, v)
    # easy method - just check every item on the midpoint
    # hard method - start at the middle and check to see where value first drops
    # easy is implemented here for now
    max_N_min = 0
    max_intervals = None
    # The index was unused, so the previous enumerate() was dropped.
    for w in LP:
        intervals = sub_interval_sizes(I, u, v, w)
        N_min = min(intervals)
        if N_min > max_N_min:
            max_N_min = N_min
            max_intervals = intervals
    I_total = I.number_of_nodes()
    # The chosen midpoint is counted in both halves, hence the -1.
    sub_I_total = sum(max_intervals) - 1.
    D = np.log2(I_total / sub_I_total)
    return (D + 1)
| mit |
ojdo/rivus | replot.py | 2 | 1589 | import glob
import os
from rivus.main import rivus
import sys
def replot(directory):
    """Recreate result figures for all pickled rivus results in directory.

    Args:
        directory: a directory with 1 or multiple pickled rivus instances

    Returns:
        Nothing
    """
    pickle_files = glob.glob(os.path.join(directory, '*.pgz'))

    # if directory = 'result/moosh' try to find suitable shapefiles
    # in 'data/moosh'
    scenario = os.path.basename(directory).split('-')[0]
    data_dir = os.path.join('data', scenario)

    # optional building shapefile (False: do not color buildings)
    building_base = os.path.join(data_dir, 'building')
    has_buildings = os.path.exists(building_base + '.shp')
    buildings = (building_base, False) if has_buildings else None

    # optional 'to_edge' layer, painted grey underneath everything else
    to_edge_base = os.path.join(data_dir, 'to_edge')
    shapefiles = None
    if os.path.exists(to_edge_base + '.shp'):
        shapefiles = [{'name': 'to_edge',
                       'color': rivus.to_rgb(192, 192, 192),
                       'shapefile': to_edge_base,
                       'zorder': 1,
                       'linewidth': 0.1}]

    for pickle_file in pickle_files:
        prob = rivus.load(pickle_file)
        base_name = os.path.splitext(pickle_file)[0]
        if buildings:
            base_name += '_bld'
        rivus.result_figures(prob, base_name,
                             buildings=buildings,
                             shapefiles=shapefiles)
if __name__ == '__main__':
    # Re-plot every result directory named on the command line.
    for result_dir in sys.argv[1:]:
        replot(result_dir)
| gpl-3.0 |
albertyw/albertyw.com | app/tests/test_data.py | 1 | 1618 | import unittest
from app import data, util
class TestProjects(unittest.TestCase):
    """Tests for loading and caching of the projects data."""
    def setUp(self) -> None:
        # Force caching on for the test; restore the flag in tearDown.
        self.original_cache = util.SHOULD_CACHE
        util.SHOULD_CACHE = True
    def tearDown(self) -> None:
        util.SHOULD_CACHE = self.original_cache
    def test_load_from_file(self) -> None:
        # The file must parse into at least one language holding at
        # least one project with a non-empty name and description.
        projects = data.Projects.load_from_file()
        self.assertNotEqual(projects.languages, [])
        self.assertTrue(len(projects.languages) > 0)
        self.assertTrue(len(projects.languages[0].projects) > 0)
        self.assertTrue(len(projects.languages[0].projects[0].name) > 0)
        self.assertTrue(len(projects.languages[0].projects[0].description) > 0)
    def test_get_projects(self) -> None:
        # With caching enabled, repeated calls return equal data.
        projects1 = data.get_projects()
        projects2 = data.get_projects()
        self.assertEqual(projects1, projects2)
class TestShelf(unittest.TestCase):
    """Tests for loading and caching of the shelf data."""
    def setUp(self) -> None:
        # Force caching on for the test; restore the flag in tearDown.
        self.original_cache = util.SHOULD_CACHE
        util.SHOULD_CACHE = True
    def tearDown(self) -> None:
        util.SHOULD_CACHE = self.original_cache
    def test_load_from_file(self) -> None:
        # The file must parse into at least one section holding at
        # least one item with a non-empty name and link.
        shelf = data.Shelf.load_from_file()
        self.assertNotEqual(shelf.sections, [])
        self.assertNotEqual(shelf.sections[0].name, '')
        self.assertNotEqual(shelf.sections[0].items, [])
        self.assertNotEqual(shelf.sections[0].items[0].name, '')
        self.assertNotEqual(shelf.sections[0].items[0].link, '')
    def test_get_shelf(self) -> None:
        # With caching enabled, repeated calls return equal data.
        shelf1 = data.get_shelf()
        shelf2 = data.get_shelf()
        self.assertEqual(shelf1, shelf2)
| mit |
bgris/ODL_bgris | lib/python3.5/site-packages/scipy/sparse/linalg/isolve/utils.py | 108 | 3840 | from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = []
from warnings import warn
from numpy import asanyarray, asarray, asmatrix, array, matrix, zeros
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator, \
IdentityOperator
# Promotion table for LAPACK-style dtype characters (single/double crossed
# with real/complex): complex wins over real, double wins over single.
_coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F',
                 ('f','D'):'D', ('d','f'):'d', ('d','d'):'d',
                 ('d','F'):'D', ('d','D'):'D', ('F','f'):'F',
                 ('F','d'):'D', ('F','F'):'F', ('F','D'):'D',
                 ('D','f'):'D', ('D','d'):'D', ('D','F'):'D',
                 ('D','D'):'D'}


def coerce(x, y):
    """Return the common dtype character for the pair ``(x, y)``.

    Any character outside ``'fdFD'`` is treated as double precision ('d').
    """
    x = x if x in 'fdFD' else 'd'
    y = y if y in 'fdFD' else 'd'
    return _coerce_rules[x, y]
def id(x):
    # Identity function used as the default (no-op) preconditioner solve.
    # NOTE: intentionally shadows the ``id`` builtin; ``make_system``
    # compares solver callables against this object by identity
    # (``psolve is id``), so the name must not change.
    return x
def make_system(A, M, x0, b, xtype=None):
    """Make a linear system Ax=b

    Parameters
    ----------
    A : LinearOperator
        sparse or dense matrix (or any valid input to aslinearoperator)
    M : {LinearOperator, Nones}
        preconditioner
        sparse or dense matrix (or any valid input to aslinearoperator)
    x0 : {array_like, None}
        initial guess to iterative method
    b : array_like
        right hand side
    xtype : {'f', 'd', 'F', 'D', None}, optional
        dtype of the x vector

    Returns
    -------
    (A, M, x, b, postprocess)
    A : LinearOperator
        matrix of the linear system
    M : LinearOperator
        preconditioner
    x : rank 1 ndarray
        initial guess
    b : rank 1 ndarray
        right hand side
    postprocess : function
        converts the solution vector to the appropriate
        type and dimensions (e.g. (N,1) matrix)
    """
    # Keep the original object: it may carry psolve/rpsolve hooks that the
    # LinearOperator wrapper below does not expose.
    A_ = A
    A = aslinearoperator(A)
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix, but got shape=%s' % (A.shape,))
    N = A.shape[0]
    b = asanyarray(b)
    if not (b.shape == (N,1) or b.shape == (N,)):
        raise ValueError('A and b have incompatible dimensions')
    if b.dtype.char not in 'fdFD':
        b = b.astype('d')  # upcast non-FP types to double
    def postprocess(x):
        # Restore the caller's container type/shape (e.g. (N,1) matrix)
        # for the solution vector.
        if isinstance(b,matrix):
            x = asmatrix(x)
        return x.reshape(b.shape)
    if xtype is None:
        # Infer x's dtype from A (or, lacking a dtype, from A @ b) and
        # promote it against b's dtype.
        if hasattr(A,'dtype'):
            xtype = A.dtype.char
        else:
            xtype = A.matvec(b).dtype.char
        xtype = coerce(xtype, b.dtype.char)
    else:
        warn('Use of xtype argument is deprecated. '
             'Use LinearOperator( ... , dtype=xtype) instead.',
             DeprecationWarning)
        # Legacy sentinel: xtype == 0 meant "use b's dtype".
        if xtype == 0:
            xtype = b.dtype.char
        else:
            if xtype not in 'fdFD':
                raise ValueError("xtype must be 'f', 'd', 'F', or 'D'")
    b = asarray(b,dtype=xtype)  # make b the same type as x
    b = b.ravel()
    if x0 is None:
        x = zeros(N, dtype=xtype)
    else:
        x = array(x0, dtype=xtype)
        if not (x.shape == (N,1) or x.shape == (N,)):
            raise ValueError('A and x have incompatible dimensions')
        x = x.ravel()
    # process preconditioner
    if M is None:
        # No explicit preconditioner: fall back to psolve/rpsolve hooks on
        # the original A object, defaulting to the identity.
        if hasattr(A_,'psolve'):
            psolve = A_.psolve
        else:
            psolve = id
        if hasattr(A_,'rpsolve'):
            rpsolve = A_.rpsolve
        else:
            rpsolve = id
        if psolve is id and rpsolve is id:
            M = IdentityOperator(shape=A.shape, dtype=A.dtype)
        else:
            M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve,
                               dtype=A.dtype)
    else:
        M = aslinearoperator(M)
        if A.shape != M.shape:
            raise ValueError('matrix and preconditioner have different shapes')
    return A, M, x, b, postprocess
| gpl-3.0 |
ebu/ebu-tt-live-toolkit | ebu_tt_live/bindings/pyxb_utils.py | 2 | 6824 | """
This file contains those bits and pieces that are necessary to give PyXB extra functionality.
"""
import threading
import logging
from ebu_tt_live.errors import StopBranchIteration
from pyxb.binding.basis import NonElementContent, ElementContent, complexTypeDefinition
from pyxb.exceptions_ import NotComplexContentError
log = logging.getLogger(__name__)
__xml_parsing_context = threading.local()
__xml_parsing_context.parsing = False
def get_xml_parsing_context():
    """
    The parsing context is a simple python dictionary that helps tie together semantic rules at parsing time.
    For example: making sure that limitedClockTimingType and fullClockTimingType are instantiated appropriately taking
    into account the timeBase attribute on the tt element. In that case when the timeBase element is encountered by the
    parser it is added to the parsing context object to help PyXB make the right type in the timingType union.
    :return: dict that is the parsing context for the currently running parser
    :return: None if not in parsing mode
    """
    log.debug('Accessing xml_parsing_context: {}'.format(__xml_parsing_context))
    # Attributes assigned to a ``threading.local`` at import time exist only
    # in the thread that imported this module; in any other thread reading
    # ``.parsing`` would raise AttributeError.  Treat a missing attribute as
    # "not parsing" instead.
    if not getattr(__xml_parsing_context, 'parsing', False):
        # We are not in parsing mode
        return None
    return __xml_parsing_context.context
def reset_xml_parsing_context(parsing=False):
    # Re-initialise the current thread's parsing state: a fresh, empty
    # context dict and the given parsing flag (True while a document is
    # being parsed).
    log.debug('Resetting xml_parsing_context: {}'.format(__xml_parsing_context))
    __xml_parsing_context.context = {}
    __xml_parsing_context.parsing = parsing
class xml_parsing_context(object):
    """
    Context manager that installs a fresh, thread-local parsing context for
    the duration of a ``with`` block and tears it down afterwards.  Renewing
    the context per document prevents state from one parsed document leaking
    into the next.
    """

    def __enter__(self):
        # Entering parsing mode: start with an empty context dict.
        reset_xml_parsing_context(parsing=True)

    def __exit__(self, exc_type, exc_value, traceback):
        # Leave parsing mode regardless of whether an exception occurred.
        reset_xml_parsing_context(parsing=False)
class RecursiveOperation(object):
    """
    A recursive operation can be a validation of the content model, a full- or partial copy of the document tree, the
    splicing of two documents together or conversion of one document format to another. This class contains the
    generic content iteration logic with hook functions that are meant to be customized by descendant classes.
    """

    # Predicate deciding which nodes the element hooks get to see.
    _filter_criteria = None
    # The PyXB binding instance the traversal starts from.
    _root_element = None
    # True -> children are processed before the element itself (post-order).
    _post_order = None
    # Name of the method on complexTypeDefinition used to enumerate children.
    _children_iterator = None

    def __init__(self, root_element, filter=None, post_order=False, children_iterator=None):
        """
        This class requires a root element to operate on and an optional filter function to help limit the elements
        selected for the operations defined in the hook functions thereby reducing their complexity and improving
        general processing speeds.
        :param root_element: Practically the document root but could be any PyXB type instance that has children
        :param filter: A function that filters the elements selected for processing.
        :param post_order(boolean): Post order processing during the traversal. Defaults to False(pre-order).
        :param children_iterator: PyXB has multiple ways it likes to traverse the structure. It can be based on the
        order described in the XSD or it can be the order described by the document that is using that schema. The
        value of this parameter will be resolved on matching complexTypeDefinition objects and called to give children
        in the specified order.
        """
        if filter is None:
            # No filter supplied: accept every element.
            self._filter_criteria = lambda value, element: True
        else:
            self._filter_criteria = filter
        self._root_element = root_element
        self._post_order = post_order
        if children_iterator is None:
            # Default to the schema-defined ordering of child content.
            self._children_iterator = 'orderedContent'
        else:
            self._children_iterator = children_iterator

    def _process_children(self, value, element=None, proc_value=None, **kwargs):
        """
        Recursive step
        :param element:
        :param dataset:
        :return:
        """
        output = []
        if isinstance(value, complexTypeDefinition):
            try:
                # Resolve the configured iterator name on the binding and
                # call it to enumerate the children.
                children = getattr(value, self._children_iterator)()
            except NotComplexContentError:
                # Simple content: nothing to recurse into.
                return output
            for item in children:
                try:
                    proc_elem = self._recursive_step(value=item.value, element=item, parent_binding=value, **kwargs)
                    if proc_elem is not None:
                        output.append(proc_elem)
                except StopBranchIteration:
                    # A hook asked to abandon this branch; skip to the
                    # next sibling.
                    # Moving on...
                    continue
        return output

    def _recursive_step(self, value, element, parent_binding=None, **kwargs):
        children = []
        proc_value = None
        element_value = value
        # Element nodes (or the root, which has no wrapping content object)
        # that pass the filter get the full before/process/after treatment;
        # everything else goes through the non-element hook.
        if (element is not None and isinstance(element, ElementContent) or element is None) \
                and self._filter_criteria(value, element) is True:
            self._before_element(value=element_value, element=element, parent_binding=parent_binding, **kwargs)
            if self._post_order:
                # Post-order: children first, then the element itself.
                children = self._process_children(value=element_value, element=element, **kwargs)
                proc_value = self._process_element(
                    value=element_value, element=element, parent_binding=parent_binding,proc_value=proc_value,
                    children=children, **kwargs)
            if not self._post_order:
                # Pre-order: the element first, then its children.
                proc_value = self._process_element(
                    value=element_value, element=element, parent_binding=parent_binding, **kwargs)
                children = self._process_children(value=element_value, element=element, proc_value=proc_value, **kwargs)
            self._after_element(value=element_value, element=element, parent_binding=parent_binding, proc_value=proc_value, children=children, **kwargs)
        else:
            proc_value = self._process_non_element(
                value=element_value, non_element=element, parent_binding=parent_binding, **kwargs)
        return proc_value

    def proceed(self, **kwargs):
        # Public entry point: start the traversal at the configured root.
        return self._recursive_step(value=self._root_element, element=None, **kwargs)

    def _before_element(self, value, element=None, parent_binding=None, **kwargs):
        # Hook: called before an element is processed.  No-op by default.
        return None

    def _process_element(self, value, element=None, parent_binding=None, **kwargs):
        # Hook: must be implemented by subclasses to process an element.
        raise NotImplementedError()

    def _process_non_element(self, value, non_element, parent_binding=None, **kwargs):
        # Hook: must be implemented by subclasses to process non-element
        # content (e.g. text nodes) or filtered-out elements.
        raise NotImplementedError()

    def _after_element(self, value, element=None, parent_binding=None, **kwargs):
        # Hook: called after an element (and, in pre-order mode, its
        # children) has been processed.  No-op by default.
        return None
| bsd-3-clause |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.1/Lib/test/test_pty.py | 3 | 3106 | import pty, os, sys
from test_support import verbose, TestFailed, TestSkipped
TEST_STRING_1 = "I wish to buy a fish license.\n"
TEST_STRING_2 = "For my pet fish, Eric.\n"
if verbose:
    def debug(msg):
        # Verbose mode: echo progress messages to stdout (Python 2 print).
        print msg
else:
    def debug(msg):
        # Quiet mode: swallow progress messages.
        pass
# Marginal testing of pty suite. Cannot do extensive 'do or fail' testing
# because pty code is not too portable.
try:
    debug("Calling master_open()")
    master_fd, slave_name = pty.master_open()
    debug("Got master_fd '%d', slave_name '%s'"%(master_fd, slave_name))
    debug("Calling slave_open(%s)"%`slave_name`)
    slave_fd = pty.slave_open(slave_name)
    debug("Got slave_fd '%d'"%slave_fd)
except OSError:
    # " An optional feature could not be imported " ... ?
    # Platform has no usable pty support; skip rather than fail.
    raise TestSkipped, "Pseudo-terminals (seemingly) not functional."
if not os.isatty(slave_fd):
    raise TestFailed, "slave_fd is not a tty"
# IRIX apparently turns \n into \r\n. Allow that, but avoid allowing other
# differences (like extra whitespace, trailing garbage, etc.)
debug("Writing to slave_fd")
os.write(slave_fd, TEST_STRING_1)
s1 = os.read(master_fd, 1024)
# Normalise the possible \r\n line ending before comparing via stdout.
if s1[-2:] == "\r\n":
    s1 = s1[:-2] + "\n"
sys.stdout.write(s1)
debug("Writing chunked output")
# Write the second test string in two pieces to exercise buffering.
os.write(slave_fd, TEST_STRING_2[:5])
os.write(slave_fd, TEST_STRING_2[5:])
s2 = os.read(master_fd, 1024)
if s2[-2:] == "\r\n":
    s2 = s2[:-2] + "\n"
sys.stdout.write(s2)
os.close(slave_fd)
os.close(master_fd)
# basic pty passed.
debug("calling pty.fork()")
pid, master_fd = pty.fork()
if pid == pty.CHILD:
    # --- child process ---
    # stdout should be connected to a tty.
    if not os.isatty(1):
        debug("Child's fd 1 is not a tty?!")
        os._exit(3)
    # After pty.fork(), the child should already be a session leader.
    # (on those systems that have that concept.)
    debug("In child, calling os.setsid()")
    try:
        os.setsid()
    except OSError:
        # Good, we already were session leader
        debug("Good: OSError was raised.")
        pass
    except AttributeError:
        # Have pty, but not setsid() ?
        debug("No setsid() available ?")
        pass
    except:
        # We don't want this error to propagate, escaping the call to
        # os._exit() and causing very peculiar behavior in the calling
        # regrtest.py !
        # Note: could add traceback printing here.
        debug("An unexpected error was raised.")
        os._exit(1)
    else:
        # setsid() succeeding means the child was NOT already a session
        # leader, i.e. pty.fork() failed to set one up.
        debug("os.setsid() succeeded! (bad!)")
        os._exit(2)
    # Exit codes 1-4 encode the outcome for the parent (see below).
    os._exit(4)
else:
    # --- parent process ---
    debug("Waiting for child (%d) to finish."%pid)
    (pid, status) = os.waitpid(pid, 0)
    # Extract the child's exit code from the wait status.
    res = status / 256
    debug("Child (%d) exited with status %d (%d)."%(pid, res, status))
    if res == 1:
        raise TestFailed, "Child raised an unexpected exception in os.setsid()"
    elif res == 2:
        raise TestFailed, "pty.fork() failed to make child a session leader."
    elif res == 3:
        raise TestFailed, "Child spawned by pty.fork() did not have a tty as stdout"
    elif res != 4:
        raise TestFailed, "pty.fork() failed for unknown reasons."
    os.close(master_fd)
# pty.fork() passed.
| mit |
kurikaesu/arsenalsuite | cpp/lib/PyQt4/examples/layouts/borderlayout.py | 20 | 8069 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt4 import QtCore, QtGui
class ItemWrapper(object):
    """Pair a layout item with the border position it was added at."""

    def __init__(self, i, p):
        # Keep the wrapped layout item and its BorderLayout position code.
        self.item = i
        self.position = p
class BorderLayout(QtGui.QLayout):
    """A QLayout that arranges items in five regions: North, South, East,
    West and Center (a Python port of Qt's border-layout example)."""

    # Position codes for add()/addWidget().
    West, North, South, East, Center = range(5)
    # Which size to compute in calculateSize().
    MinimumSize, SizeHint = range(2)

    def __init__(self, parent=None, margin=0, spacing=-1):
        super(BorderLayout, self).__init__(parent)
        self.setMargin(margin)
        self.setSpacing(spacing)
        # Internal list of ItemWrapper(item, position) in insertion order.
        self.list = []

    def __del__(self):
        # Drain the layout so every item is released.
        l = self.takeAt(0)
        while l:
            l = self.takeAt(0)

    def addItem(self, item):
        # QLayout API entry point; default bare items to the West region.
        self.add(item, BorderLayout.West)

    def addWidget(self, widget, position):
        self.add(QtGui.QWidgetItem(widget), position)

    def expandingDirections(self):
        # This layout can grow in both directions.
        return QtCore.Qt.Horizontal | QtCore.Qt.Vertical

    def hasHeightForWidth(self):
        return False

    def count(self):
        return len(self.list)

    def itemAt(self, index):
        if index < len(self.list):
            return self.list[index].item
        return None

    def minimumSize(self):
        return self.calculateSize(BorderLayout.MinimumSize)

    def setGeometry(self, rect):
        # Lay out in two passes: first North/South (full width, stacked
        # from the top/bottom edges) while remembering the Center item,
        # then West/East within the remaining vertical band, and finally
        # the Center item in whatever space is left.
        center = None
        eastWidth = 0
        westWidth = 0
        northHeight = 0
        southHeight = 0
        centerHeight = 0
        super(BorderLayout, self).setGeometry(rect)
        for wrapper in self.list:
            item = wrapper.item
            position = wrapper.position
            if position == BorderLayout.North:
                item.setGeometry(QtCore.QRect(rect.x(), northHeight,
                        rect.width(), item.sizeHint().height()))
                northHeight += item.geometry().height() + self.spacing()
            elif position == BorderLayout.South:
                # Size first, then move up from the bottom edge once the
                # accumulated south height is known.
                item.setGeometry(QtCore.QRect(item.geometry().x(),
                        item.geometry().y(), rect.width(),
                        item.sizeHint().height()))
                southHeight += item.geometry().height() + self.spacing()
                item.setGeometry(QtCore.QRect(rect.x(),
                        rect.y() + rect.height() - southHeight + self.spacing(),
                        item.geometry().width(), item.geometry().height()))
            elif position == BorderLayout.Center:
                center = wrapper
        centerHeight = rect.height() - northHeight - southHeight
        for wrapper in self.list:
            item = wrapper.item
            position = wrapper.position
            if position == BorderLayout.West:
                item.setGeometry(QtCore.QRect(rect.x() + westWidth,
                        northHeight, item.sizeHint().width(), centerHeight))
                westWidth += item.geometry().width() + self.spacing()
            elif position == BorderLayout.East:
                # As with South: size first, then shift in from the right
                # edge by the accumulated east width.
                item.setGeometry(QtCore.QRect(item.geometry().x(),
                        item.geometry().y(), item.sizeHint().width(),
                        centerHeight))
                eastWidth += item.geometry().width() + self.spacing()
                item.setGeometry(QtCore.QRect(rect.x() + rect.width() - eastWidth + self.spacing(),
                        northHeight, item.geometry().width(),
                        item.geometry().height()))
        if center:
            center.item.setGeometry(QtCore.QRect(westWidth, northHeight,
                    rect.width() - eastWidth - westWidth, centerHeight))

    def sizeHint(self):
        return self.calculateSize(BorderLayout.SizeHint)

    def takeAt(self, index):
        if index >= 0 and index < len(self.list):
            layoutStruct = self.list.pop(index)
            return layoutStruct.item
        return None

    def add(self, item, position):
        self.list.append(ItemWrapper(item, position))

    def calculateSize(self, sizeType):
        # Accumulate heights of the vertically stacked regions and widths
        # of the horizontally stacked ones; Center contributes to both.
        totalSize = QtCore.QSize()
        for wrapper in self.list:
            position = wrapper.position
            itemSize = QtCore.QSize()
            if sizeType == BorderLayout.MinimumSize:
                itemSize = wrapper.item.minimumSize()
            else: # sizeType == BorderLayout.SizeHint
                itemSize = wrapper.item.sizeHint()
            if position in (BorderLayout.North, BorderLayout.South, BorderLayout.Center):
                totalSize.setHeight(totalSize.height() + itemSize.height())
            if position in (BorderLayout.West, BorderLayout.East, BorderLayout.Center):
                totalSize.setWidth(totalSize.width() + itemSize.width())
        return totalSize
class Window(QtGui.QWidget):
    """Demo widget that exercises BorderLayout with labels in every region
    and a text browser in the center."""

    def __init__(self):
        super(Window, self).__init__()
        centralWidget = QtGui.QTextBrowser()
        centralWidget.setPlainText("Central widget")
        layout = BorderLayout()
        layout.addWidget(centralWidget, BorderLayout.Center)
        # Because BorderLayout doesn't call its super-class addWidget() it
        # doesn't take ownership of the widgets until setLayout() is called.
        # Therefore we keep a local reference to each label to prevent it being
        # garbage collected too soon.
        label_n = self.createLabel("North")
        layout.addWidget(label_n, BorderLayout.North)
        label_w = self.createLabel("West")
        layout.addWidget(label_w, BorderLayout.West)
        label_e1 = self.createLabel("East 1")
        layout.addWidget(label_e1, BorderLayout.East)
        label_e2 = self.createLabel("East 2")
        layout.addWidget(label_e2, BorderLayout.East)
        label_s = self.createLabel("South")
        layout.addWidget(label_s, BorderLayout.South)
        self.setLayout(layout)
        self.setWindowTitle("Border Layout")

    def createLabel(self, text):
        # Helper: a framed label so the layout regions are visible.
        label = QtGui.QLabel(text)
        label.setFrameStyle(QtGui.QFrame.Box | QtGui.QFrame.Raised)
        return label
if __name__ == '__main__':
    # Standard Qt application boilerplate: create the app, show the demo
    # window and enter the event loop.
    import sys
    app = QtGui.QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(app.exec_())
| gpl-2.0 |
empeeu/numpy | numpy/lib/tests/test_regression.py | 120 | 8927 | from __future__ import division, absolute_import, print_function
import os
import sys
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_raises
)
from numpy.testing.utils import _assert_valid_refcount
from numpy.compat import unicode
# Default regression-test level used by the per-test ``level`` keyword.
rlevel = 1


class TestRegression(TestCase):
    """Regression tests for numpy.lib, each pinned to a ticket/issue.

    Note: the deprecated NumPy scalar aliases (``np.complex``, ``np.int``,
    ``np.float``, ``np.bool``) have been replaced by the builtins they
    aliased — identical behavior, but the code keeps working after the
    aliases' removal in NumPy 1.20/1.24.  Bare ``except:`` clauses were
    narrowed to ``except Exception:`` so KeyboardInterrupt/SystemExit are
    not swallowed.
    """

    def test_poly1d(self, level=rlevel):
        # Ticket #28
        assert_equal(np.poly1d([1]) - np.poly1d([1, 0]),
                     np.poly1d([-1, 1]))

    def test_cov_parameters(self, level=rlevel):
        # Ticket #91: cov must not mutate its input.
        x = np.random.random((3, 3))
        y = x.copy()
        np.cov(x, rowvar=1)
        np.cov(y, rowvar=0)
        assert_array_equal(x, y)

    def test_mem_digitize(self, level=rlevel):
        # Ticket #95
        for i in range(100):
            np.digitize([1, 2, 3, 4], [1, 3])
            np.digitize([0, 1, 2, 3, 4], [1, 3])

    def test_unique_zero_sized(self, level=rlevel):
        # Ticket #205
        assert_array_equal([], np.unique(np.array([])))

    def test_mem_vectorise(self, level=rlevel):
        # Ticket #325
        vt = np.vectorize(lambda *args: args)
        vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2)))
        vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1,
           1, 2)), np.zeros((2, 2)))

    def test_mgrid_single_element(self, level=rlevel):
        # Ticket #339
        assert_array_equal(np.mgrid[0:0:1j], [0])
        assert_array_equal(np.mgrid[0:0], [])

    def test_refcount_vectorize(self, level=rlevel):
        # Ticket #378
        def p(x, y):
            return 123
        v = np.vectorize(p)
        _assert_valid_refcount(v)

    def test_poly1d_nan_roots(self, level=rlevel):
        # Ticket #396
        p = np.poly1d([np.nan, np.nan, 1], r=0)
        self.assertRaises(np.linalg.LinAlgError, getattr, p, "r")

    def test_mem_polymul(self, level=rlevel):
        # Ticket #448
        np.polymul([], [1.])

    def test_mem_string_concat(self, level=rlevel):
        # Ticket #469
        x = np.array([])
        np.append(x, 'asdasd\tasdasd')

    def test_poly_div(self, level=rlevel):
        # Ticket #553
        u = np.poly1d([1, 2, 3])
        v = np.poly1d([1, 2, 3, 4, 5])
        q, r = np.polydiv(u, v)
        assert_equal(q*v + r, u)

    def test_poly_eq(self, level=rlevel):
        # Ticket #554
        x = np.poly1d([1, 2, 3])
        y = np.poly1d([3, 4])
        assert_(x != y)
        assert_(x == x)

    def test_mem_insert(self, level=rlevel):
        # Ticket #572
        np.lib.place(1, 1, 1)

    def test_polyfit_build(self):
        # Ticket #628
        ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01,
               9.95368241e+00, -3.14526520e+02]
        x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
             104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
             116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129,
             130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
             146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
             158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
             170, 171, 172, 173, 174, 175, 176]
        y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0,
             6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0,
             13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0,
             7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0,
             6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0,
             6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0,
             8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 7.0]
        tested = np.polyfit(x, y, 4)
        assert_array_almost_equal(ref, tested)

    def test_polydiv_type(self):
        # Make polydiv work for complex types
        msg = "Wrong type, should be complex"
        x = np.ones(3, dtype=complex)
        q, r = np.polydiv(x, x)
        assert_(q.dtype == complex, msg)
        msg = "Wrong type, should be float"
        x = np.ones(3, dtype=int)
        q, r = np.polydiv(x, x)
        assert_(q.dtype == float, msg)

    def test_histogramdd_too_many_bins(self):
        # Ticket 928.
        assert_raises(ValueError, np.histogramdd, np.ones((1, 10)), bins=2**10)

    def test_polyint_type(self):
        # Ticket #944
        msg = "Wrong type, should be complex"
        x = np.ones(3, dtype=complex)
        assert_(np.polyint(x).dtype == complex, msg)
        msg = "Wrong type, should be float"
        x = np.ones(3, dtype=int)
        assert_(np.polyint(x).dtype == float, msg)

    def test_ndenumerate_crash(self):
        # Ticket 1140
        # Shouldn't crash:
        list(np.ndenumerate(np.array([[]])))

    def test_asfarray_none(self, level=rlevel):
        # Test for changeset r5065
        assert_array_equal(np.array([np.nan]), np.asfarray([None]))

    def test_large_fancy_indexing(self, level=rlevel):
        # Large enough to fail on 64-bit.
        nbits = np.dtype(np.intp).itemsize * 8
        thesize = int((2**nbits)**(1.0/5.0)+1)

        def dp():
            n = 3
            a = np.ones((n,)*5)
            i = np.random.randint(0, n, size=thesize)
            a[np.ix_(i, i, i, i, i)] = 0

        def dp2():
            n = 3
            a = np.ones((n,)*5)
            i = np.random.randint(0, n, size=thesize)
            a[np.ix_(i, i, i, i, i)]

        self.assertRaises(ValueError, dp)
        self.assertRaises(ValueError, dp2)

    def test_void_coercion(self, level=rlevel):
        dt = np.dtype([('a', 'f4'), ('b', 'i4')])
        x = np.zeros((1,), dt)
        assert_(np.r_[x, x].dtype == dt)

    def test_who_with_0dim_array(self, level=rlevel):
        # ticket #1243
        import os
        import sys

        oldstdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')
        try:
            try:
                np.who({'foo': np.array(1)})
            except Exception:
                raise AssertionError("ticket #1243")
        finally:
            sys.stdout.close()
            sys.stdout = oldstdout

    def test_include_dirs(self):
        # As a sanity check, just test that get_include
        # includes something reasonable. Somewhat
        # related to ticket #1405.
        include_dirs = [np.get_include()]
        for path in include_dirs:
            assert_(isinstance(path, (str, unicode)))
            assert_(path != '')

    def test_polyder_return_type(self):
        # Ticket #1249
        assert_(isinstance(np.polyder(np.poly1d([1]), 0), np.poly1d))
        assert_(isinstance(np.polyder([1], 0), np.ndarray))
        assert_(isinstance(np.polyder(np.poly1d([1]), 1), np.poly1d))
        assert_(isinstance(np.polyder([1], 1), np.ndarray))

    def test_append_fields_dtype_list(self):
        # Ticket #1676
        from numpy.lib.recfunctions import append_fields

        base = np.array([1, 2, 3], dtype=np.int32)
        names = ['a', 'b', 'c']
        data = np.eye(3).astype(np.int32)
        dlist = [np.float64, np.int32, np.int32]
        try:
            append_fields(base, names, data, dlist)
        except Exception:
            raise AssertionError()

    def test_loadtxt_fields_subarrays(self):
        # For ticket #1936
        if sys.version_info[0] >= 3:
            from io import StringIO
        else:
            from StringIO import StringIO

        dt = [("a", 'u1', 2), ("b", 'u1', 2)]
        x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
        assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))

        dt = [("a", [("a", 'u1', (1, 3)), ("b", 'u1')])]
        x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
        assert_equal(x, np.array([(((0, 1, 2), 3),)], dtype=dt))

        dt = [("a", 'u1', (2, 2))]
        x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
        assert_equal(x, np.array([(((0, 1), (2, 3)),)], dtype=dt))

        dt = [("a", 'u1', (2, 3, 2))]
        x = np.loadtxt(StringIO("0 1 2 3 4 5 6 7 8 9 10 11"), dtype=dt)
        data = [((((0, 1), (2, 3), (4, 5)), ((6, 7), (8, 9), (10, 11))),)]
        assert_equal(x, np.array(data, dtype=dt))

    def test_nansum_with_boolean(self):
        # gh-2978
        a = np.zeros(2, dtype=bool)
        try:
            np.nansum(a)
        except Exception:
            raise AssertionError()

    def test_py3_compat(self):
        # gh-2561
        # Test if the oldstyle class test is bypassed in python3
        class C():
            """Old-style class in python2, normal class in python3"""
            pass

        out = open(os.devnull, 'w')
        try:
            np.info(C(), output=out)
        except AttributeError:
            raise AssertionError()
        finally:
            out.close()


if __name__ == "__main__":
    run_module_suite()
| bsd-3-clause |
ytoyama/yans_chainer_hackathon | tests/cupy_tests/manipulation_tests/test_transpose.py | 13 | 1256 | import unittest
import cupy
from cupy import testing
@testing.gpu
class TestTranspose(unittest.TestCase):
    """Tests for cupy axis-manipulation routines against numpy.

    The ``numpy_cupy_array_equal`` decorator calls each test twice —
    once with ``xp = numpy`` and once with ``xp = cupy`` — and asserts
    the returned arrays are equal.
    """

    # Allow the test runner to split this class across processes.
    _multiprocess_can_split_ = True

    @testing.numpy_cupy_array_equal()
    def test_rollaxis(self, xp):
        a = testing.shaped_arange((2, 3, 4), xp)
        return xp.rollaxis(a, 2)

    def test_rollaxis_failure(self):
        # axis=3 is out of range for a 3-d array.
        a = testing.shaped_arange((2, 3, 4))
        with self.assertRaises(ValueError):
            cupy.rollaxis(a, 3)

    @testing.numpy_cupy_array_equal()
    def test_swapaxes(self, xp):
        a = testing.shaped_arange((2, 3, 4), xp)
        return xp.swapaxes(a, 2, 0)

    def test_swapaxes_failure(self):
        # axis1=3 is out of range for a 3-d array.
        a = testing.shaped_arange((2, 3, 4))
        with self.assertRaises(ValueError):
            cupy.swapaxes(a, 3, 0)

    @testing.numpy_cupy_array_equal()
    def test_transpose(self, xp):
        # Explicit (and negative) axis permutation via the method form.
        a = testing.shaped_arange((2, 3, 4), xp)
        return a.transpose(-1, 0, 1)

    @testing.numpy_cupy_array_equal()
    def test_transpose_empty(self, xp):
        # No arguments: full axis reversal.
        a = testing.shaped_arange((2, 3, 4), xp)
        return a.transpose()

    @testing.numpy_cupy_array_equal()
    def test_external_transpose(self, xp):
        # Module-level function form of transpose.
        a = testing.shaped_arange((2, 3, 4), xp)
        return xp.transpose(a)
| mit |
tongwang01/tensorflow | tensorflow/python/client/notebook.py | 33 | 4608 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import socket
import sys
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
# These must be set before any protobuf-using module is imported.
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
    "password", None,
    "Password to require. If set, the server will allow public access."
    " Only used if notebook config file does not exist.")
flags.DEFINE_string("notebook_dir", "experimental/brain/notebooks",
                    "root location where to store notebooks")
# Preserve the original argv; main() restores it after flag parsing has
# consumed/modified sys.argv.
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
  """Run either the notebook server (parent) or a kernel subprocess.

  Which role we play is decided by the module-level IS_KERNEL flag,
  i.e. whether this binary was invoked with argv[1] == "kernel".
  """
  sys.argv = ORIG_ARGV

  if not IS_KERNEL:
    # --- notebook-server branch ---
    # Drop all flags.
    sys.argv = [sys.argv[0]]
    # NOTE(sadovsky): For some reason, putting this import at the top level
    # breaks inline plotting.  It's probably a bug in the stone-age version of
    # matplotlib.
    from IPython.html.notebookapp import NotebookApp  # pylint: disable=g-import-not-at-top
    notebookapp = NotebookApp.instance()
    notebookapp.open_browser = True

    # password functionality adopted from quality/ranklab/main/tools/notebook.py
    # add options to run with "password"
    if FLAGS.password:
      from IPython.lib import passwd  # pylint: disable=g-import-not-at-top
      # Binding to 0.0.0.0 makes the server publicly reachable, hence the
      # password requirement.
      notebookapp.ip = "0.0.0.0"
      notebookapp.password = passwd(FLAGS.password)
    else:
      print ("\nNo password specified; Notebook server will only be available"
             " on the local machine.\n")
    notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])

    if notebookapp.ip == "0.0.0.0":
      proto = "https" if notebookapp.certfile else "http"
      url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
                              notebookapp.base_project_url)
      print("\nNotebook server will be publicly available at: %s\n" % url)

    notebookapp.start()
    return

  # --- kernel-subprocess branch ---
  # Drop the --flagfile flag so that notebook doesn't complain about an
  # "unrecognized alias" when parsing sys.argv.
  sys.argv = ([sys.argv[0]] +
              [z for z in sys.argv[1:] if not z.startswith("--flagfile")])
  from IPython.kernel.zmq.kernelapp import IPKernelApp  # pylint: disable=g-import-not-at-top
  kernelapp = IPKernelApp.instance()
  kernelapp.initialize()

  # Enable inline plotting. Equivalent to running "%matplotlib inline".
  ipshell = kernelapp.shell
  ipshell.enable_matplotlib("inline")

  kernelapp.start()
if __name__ == "__main__":
  # When the user starts the main notebook process, we don't touch sys.argv.
  # When the main process launches kernel subprocesses, it writes all flags
  # to a tmpfile and sets --flagfile to that tmpfile, so for kernel
  # subprocesses here we drop all flags *except* --flagfile, then call
  # app.run(), and then (in main) restore all flags before starting the
  # kernel app.
  if IS_KERNEL:
    # Drop everything except --flagfile.
    sys.argv = ([sys.argv[0]] +
                [x for x in sys.argv[1:] if x.startswith("--flagfile")])
  # app.run() parses the remaining flags and dispatches to main().
  app.run()
| apache-2.0 |
torchbox/django-mailer-2 | django_mailer/models.py | 1 | 2844 | from django.db import models
from django_mailer import constants, managers
import datetime
PRIORITIES = (
(constants.PRIORITY_HIGH, 'high'),
(constants.PRIORITY_NORMAL, 'normal'),
(constants.PRIORITY_LOW, 'low'),
)
RESULT_CODES = (
(constants.RESULT_SENT, 'success'),
(constants.RESULT_SKIPPED, 'not sent (blacklisted)'),
(constants.RESULT_FAILED, 'failure'),
)
class Message(models.Model):
    """
    An email message.

    The ``to_address``, ``from_address`` and ``subject`` fields are merely
    for ease of access to these common values. The ``encoded_message``
    field contains the entire encoded email message, ready to be sent to an
    SMTP connection.
    """
    to_address = models.CharField(max_length=200)
    from_address = models.CharField(max_length=200)
    subject = models.CharField(max_length=255)
    # The complete, already-encoded message (headers + body) sent verbatim.
    encoded_message = models.TextField()
    # NOTE(review): naive datetime default; if this project enables USE_TZ,
    # ``django.utils.timezone.now`` would be the correct default — confirm.
    date_created = models.DateTimeField(default=datetime.datetime.now)

    class Meta:
        ordering = ('date_created',)

    def __str__(self):
        """Return a short human-readable label: "<to_address>: <subject>"."""
        # This module already requires Django 2.x (``on_delete`` is passed
        # as a mandatory argument elsewhere in this file), which runs on
        # Python 3 only, so Django calls ``__str__``; the previous
        # ``__unicode__`` was Python 2-only dead code that was never
        # invoked.
        return '%s: %s' % (self.to_address, self.subject)
class QueuedMessage(models.Model):
    """
    A queued message.
    Messages in the queue can be prioritised so that the higher priority
    messages are sent first (secondarily sorted by the oldest message).
    """
    # One queue entry per Message; deleting the Message removes the entry.
    message = models.OneToOneField(Message, editable=False, on_delete=models.CASCADE)
    # Sorted ascending (see Meta.ordering), so smaller values are sent first.
    priority = models.PositiveSmallIntegerField(choices=PRIORITIES,
                                                default=constants.PRIORITY_NORMAL)
    # When set, the message has been postponed until retried (see defer()).
    deferred = models.DateTimeField(null=True, blank=True)
    # Number of delivery attempts made so far.
    retries = models.PositiveIntegerField(default=0)
    date_queued = models.DateTimeField(default=datetime.datetime.now)
    objects = managers.QueueManager()
    class Meta:
        # Highest priority (lowest value) first, oldest first within equal
        # priority.
        ordering = ('priority', 'date_queued')
    def defer(self):
        # Timestamp the message as deferred and persist the change.
        self.deferred = datetime.datetime.now()
        self.save()
class Blacklist(models.Model):
    """
    A blacklisted email address.
    Messages attempted to be sent to e-mail addresses which appear on this
    blacklist will be skipped entirely.
    """
    email = models.EmailField(max_length=200)
    date_added = models.DateTimeField(default=datetime.datetime.now)
    class Meta:
        # Most recently blacklisted addresses first.
        ordering = ('-date_added',)
        verbose_name = 'blacklisted e-mail address'
        verbose_name_plural = 'blacklisted e-mail addresses'
class Log(models.Model):
    """
    A log used to record the activity of a queued message.
    """
    # The message this log entry relates to; logs die with the message.
    message = models.ForeignKey(Message, editable=False, on_delete=models.CASCADE)
    # Outcome of a send attempt; one of RESULT_CODES (sent/skipped/failed).
    result = models.PositiveSmallIntegerField(choices=RESULT_CODES)
    date = models.DateTimeField(default=datetime.datetime.now)
    # Free-form detail recorded for this attempt.
    log_message = models.TextField()
    class Meta:
        # Newest entries first.
        ordering = ('-date',)
| mit |
tkas/osmose-backend | mapcss/generated/MapCSSLexer.py | 4 | 18673 | # Generated from MapCSS.g4 by ANTLR 4.8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\67")
buf.write("\u01a2\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\3\2\3")
buf.write("\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b\3\b")
buf.write("\3\t\3\t\3\n\3\n\3\13\3\13\3\f\3\f\3\r\3\r\3\16\3\16\3")
buf.write("\17\3\17\3\17\5\17\u00a4\n\17\3\20\3\20\3\20\3\21\3\21")
buf.write("\3\21\3\22\3\22\3\22\3\23\3\23\3\24\3\24\3\25\3\25\3\25")
buf.write("\3\26\3\26\3\26\3\27\3\27\3\27\3\30\3\30\3\30\3\31\3\31")
buf.write("\3\31\3\32\3\32\3\32\3\33\3\33\3\33\3\34\3\34\3\34\3\35")
buf.write("\3\35\3\36\3\36\3\37\3\37\3 \3 \3!\3!\3\"\3\"\3#\3#\3")
buf.write("#\3#\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\3%\3&\3&\3&\3&\3&\3")
buf.write("&\3&\3&\3\'\3\'\3(\3(\3)\3)\3*\3*\5*\u00f6\n*\3+\3+\5")
buf.write("+\u00fa\n+\3,\3,\5,\u00fe\n,\3,\3,\3,\7,\u0103\n,\f,\16")
buf.write(",\u0106\13,\3-\3-\3.\3.\3/\3/\3\60\3\60\3\61\3\61\3\62")
buf.write("\3\62\3\63\3\63\3\63\3\64\3\64\3\64\3\65\3\65\3\65\3\65")
buf.write("\3\65\7\65\u011f\n\65\f\65\16\65\u0122\13\65\3\65\3\65")
buf.write("\3\66\3\66\3\66\3\66\3\66\7\66\u012b\n\66\f\66\16\66\u012e")
buf.write("\13\66\3\66\3\66\3\67\6\67\u0133\n\67\r\67\16\67\u0134")
buf.write("\38\38\38\39\69\u013b\n9\r9\169\u013c\39\79\u0140\n9\f")
buf.write("9\169\u0143\139\39\39\69\u0147\n9\r9\169\u0148\59\u014b")
buf.write("\n9\3:\3:\3:\3;\3;\3;\3;\3;\6;\u0155\n;\r;\16;\u0156\3")
buf.write(";\6;\u015a\n;\r;\16;\u015b\3;\3;\6;\u0160\n;\r;\16;\u0161")
buf.write("\5;\u0164\n;\5;\u0166\n;\5;\u0168\n;\3<\3<\3<\3=\3=\3")
buf.write("=\5=\u0170\n=\3>\3>\3>\5>\u0175\n>\3?\3?\3?\7?\u017a\n")
buf.write("?\f?\16?\u017d\13?\3?\3?\3@\3@\3@\3@\3A\3A\3A\3A\7A\u0189")
buf.write("\nA\fA\16A\u018c\13A\3A\5A\u018f\nA\3A\3A\3A\3A\3B\3B")
buf.write("\3B\3B\7B\u0199\nB\fB\16B\u019c\13B\3B\3B\3B\3B\3B\4\u018a")
buf.write("\u019a\2C\3\2\5\2\7\3\t\4\13\5\r\6\17\7\21\b\23\t\25\n")
buf.write("\27\13\31\f\33\r\35\16\37\17!\20#\21%\22\'\23)\24+\25")
buf.write("-\26/\27\61\30\63\31\65\32\67\339\34;\35=\36?\37A C!E")
buf.write("\"G#I$K%M\2O\2Q\2S\2U\2W&Y\'[(])_*a+c,e\2g\2i-k.m/o\60")
buf.write("q\61s\62u\63w\2y\2{\2}\64\177\65\u0081\66\u0083\67\3\2")
buf.write("\31\4\2UUuu\4\2GGgg\4\2VVvv\4\2TTtt\4\2QQqq\4\2NNnn\4")
buf.write("\2KKkk\4\2PPpp\4\2FFff\4\2ZZzz\4\2OOoo\4\2RRrr\4\2C\\")
buf.write("c|\3\2\2\u00a1\5\2C\\aac|\6\2//C\\aac|\4\2//aa\6\2\"#")
buf.write("%]_\u0080\u00b2\u00b2\6\2\"(*]_\u0080\u00b2\u00b2\3\2")
buf.write("\62;\6\2\"+-\60\62\u0080\u00b2\u00b2\5\2\"\60\62\u0080")
buf.write("\u00b2\u00b2\5\2\13\f\16\17\"\"\2\u01b7\2\7\3\2\2\2\2")
buf.write("\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21")
buf.write("\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3")
buf.write("\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2")
buf.write("\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2")
buf.write("\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2")
buf.write("\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2")
buf.write("\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3")
buf.write("\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[")
buf.write("\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2")
buf.write("i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2")
buf.write("\2s\3\2\2\2\2u\3\2\2\2\2}\3\2\2\2\2\177\3\2\2\2\2\u0081")
buf.write("\3\2\2\2\2\u0083\3\2\2\2\3\u0085\3\2\2\2\5\u0088\3\2\2")
buf.write("\2\7\u008a\3\2\2\2\t\u008c\3\2\2\2\13\u008e\3\2\2\2\r")
buf.write("\u0090\3\2\2\2\17\u0092\3\2\2\2\21\u0094\3\2\2\2\23\u0096")
buf.write("\3\2\2\2\25\u0098\3\2\2\2\27\u009a\3\2\2\2\31\u009c\3")
buf.write("\2\2\2\33\u009e\3\2\2\2\35\u00a3\3\2\2\2\37\u00a5\3\2")
buf.write("\2\2!\u00a8\3\2\2\2#\u00ab\3\2\2\2%\u00ae\3\2\2\2\'\u00b0")
buf.write("\3\2\2\2)\u00b2\3\2\2\2+\u00b5\3\2\2\2-\u00b8\3\2\2\2")
buf.write("/\u00bb\3\2\2\2\61\u00be\3\2\2\2\63\u00c1\3\2\2\2\65\u00c4")
buf.write("\3\2\2\2\67\u00c7\3\2\2\29\u00ca\3\2\2\2;\u00cc\3\2\2")
buf.write("\2=\u00ce\3\2\2\2?\u00d0\3\2\2\2A\u00d2\3\2\2\2C\u00d4")
buf.write("\3\2\2\2E\u00d6\3\2\2\2G\u00da\3\2\2\2I\u00df\3\2\2\2")
buf.write("K\u00e5\3\2\2\2M\u00ed\3\2\2\2O\u00ef\3\2\2\2Q\u00f1\3")
buf.write("\2\2\2S\u00f5\3\2\2\2U\u00f9\3\2\2\2W\u00fd\3\2\2\2Y\u0107")
buf.write("\3\2\2\2[\u0109\3\2\2\2]\u010b\3\2\2\2_\u010d\3\2\2\2")
buf.write("a\u010f\3\2\2\2c\u0111\3\2\2\2e\u0113\3\2\2\2g\u0116\3")
buf.write("\2\2\2i\u0119\3\2\2\2k\u0125\3\2\2\2m\u0132\3\2\2\2o\u0136")
buf.write("\3\2\2\2q\u014a\3\2\2\2s\u014c\3\2\2\2u\u014f\3\2\2\2")
buf.write("w\u0169\3\2\2\2y\u016f\3\2\2\2{\u0174\3\2\2\2}\u0176\3")
buf.write("\2\2\2\177\u0180\3\2\2\2\u0081\u0184\3\2\2\2\u0083\u0194")
buf.write("\3\2\2\2\u0085\u0086\7^\2\2\u0086\u0087\7^\2\2\u0087\4")
buf.write("\3\2\2\2\u0088\u0089\4\u0082\uffff\2\u0089\6\3\2\2\2\u008a")
buf.write("\u008b\7.\2\2\u008b\b\3\2\2\2\u008c\u008d\7A\2\2\u008d")
buf.write("\n\3\2\2\2\u008e\u008f\7\u220a\2\2\u008f\f\3\2\2\2\u0090")
buf.write("\u0091\7\u29cb\2\2\u0091\16\3\2\2\2\u0092\u0093\7\u2288")
buf.write("\2\2\u0093\20\3\2\2\2\u0094\u0095\7\u2289\2\2\u0095\22")
buf.write("\3\2\2\2\u0096\u0097\7\u228a\2\2\u0097\24\3\2\2\2\u0098")
buf.write("\u0099\7\u228b\2\2\u0099\26\3\2\2\2\u009a\u009b\7*\2\2")
buf.write("\u009b\30\3\2\2\2\u009c\u009d\7+\2\2\u009d\32\3\2\2\2")
buf.write("\u009e\u009f\7\60\2\2\u009f\34\3\2\2\2\u00a0\u00a4\7?")
buf.write("\2\2\u00a1\u00a2\7?\2\2\u00a2\u00a4\7?\2\2\u00a3\u00a0")
buf.write("\3\2\2\2\u00a3\u00a1\3\2\2\2\u00a4\36\3\2\2\2\u00a5\u00a6")
buf.write("\7#\2\2\u00a6\u00a7\7?\2\2\u00a7 \3\2\2\2\u00a8\u00a9")
buf.write("\7>\2\2\u00a9\u00aa\7?\2\2\u00aa\"\3\2\2\2\u00ab\u00ac")
buf.write("\7@\2\2\u00ac\u00ad\7?\2\2\u00ad$\3\2\2\2\u00ae\u00af")
buf.write("\7>\2\2\u00af&\3\2\2\2\u00b0\u00b1\7@\2\2\u00b1(\3\2\2")
buf.write("\2\u00b2\u00b3\7?\2\2\u00b3\u00b4\7\u0080\2\2\u00b4*\3")
buf.write("\2\2\2\u00b5\u00b6\7#\2\2\u00b6\u00b7\7\u0080\2\2\u00b7")
buf.write(",\3\2\2\2\u00b8\u00b9\7`\2\2\u00b9\u00ba\7?\2\2\u00ba")
buf.write(".\3\2\2\2\u00bb\u00bc\7&\2\2\u00bc\u00bd\7?\2\2\u00bd")
buf.write("\60\3\2\2\2\u00be\u00bf\7,\2\2\u00bf\u00c0\7?\2\2\u00c0")
buf.write("\62\3\2\2\2\u00c1\u00c2\7\u0080\2\2\u00c2\u00c3\7?\2\2")
buf.write("\u00c3\64\3\2\2\2\u00c4\u00c5\7~\2\2\u00c5\u00c6\7~\2")
buf.write("\2\u00c6\66\3\2\2\2\u00c7\u00c8\7(\2\2\u00c8\u00c9\7(")
buf.write("\2\2\u00c98\3\2\2\2\u00ca\u00cb\7,\2\2\u00cb:\3\2\2\2")
buf.write("\u00cc\u00cd\7\61\2\2\u00cd<\3\2\2\2\u00ce\u00cf\7\'\2")
buf.write("\2\u00cf>\3\2\2\2\u00d0\u00d1\7-\2\2\u00d1@\3\2\2\2\u00d2")
buf.write("\u00d3\7/\2\2\u00d3B\3\2\2\2\u00d4\u00d5\7#\2\2\u00d5")
buf.write("D\3\2\2\2\u00d6\u00d7\t\2\2\2\u00d7\u00d8\t\3\2\2\u00d8")
buf.write("\u00d9\t\4\2\2\u00d9F\3\2\2\2\u00da\u00db\t\5\2\2\u00db")
buf.write("\u00dc\t\6\2\2\u00dc\u00dd\t\7\2\2\u00dd\u00de\t\3\2\2")
buf.write("\u00deH\3\2\2\2\u00df\u00e0\t\b\2\2\u00e0\u00e1\t\t\2")
buf.write("\2\u00e1\u00e2\t\n\2\2\u00e2\u00e3\t\3\2\2\u00e3\u00e4")
buf.write("\t\13\2\2\u00e4J\3\2\2\2\u00e5\u00e6\7B\2\2\u00e6\u00e7")
buf.write("\t\b\2\2\u00e7\u00e8\t\f\2\2\u00e8\u00e9\t\r\2\2\u00e9")
buf.write("\u00ea\t\6\2\2\u00ea\u00eb\t\5\2\2\u00eb\u00ec\t\4\2\2")
buf.write("\u00ecL\3\2\2\2\u00ed\u00ee\4\62;\2\u00eeN\3\2\2\2\u00ef")
buf.write("\u00f0\t\16\2\2\u00f0P\3\2\2\2\u00f1\u00f2\n\17\2\2\u00f2")
buf.write("R\3\2\2\2\u00f3\u00f6\t\20\2\2\u00f4\u00f6\5Q)\2\u00f5")
buf.write("\u00f3\3\2\2\2\u00f5\u00f4\3\2\2\2\u00f6T\3\2\2\2\u00f7")
buf.write("\u00fa\t\21\2\2\u00f8\u00fa\5Q)\2\u00f9\u00f7\3\2\2\2")
buf.write("\u00f9\u00f8\3\2\2\2\u00faV\3\2\2\2\u00fb\u00fe\5O(\2")
buf.write("\u00fc\u00fe\7a\2\2\u00fd\u00fb\3\2\2\2\u00fd\u00fc\3")
buf.write("\2\2\2\u00fe\u0104\3\2\2\2\u00ff\u0103\5O(\2\u0100\u0103")
buf.write("\5M\'\2\u0101\u0103\t\22\2\2\u0102\u00ff\3\2\2\2\u0102")
buf.write("\u0100\3\2\2\2\u0102\u0101\3\2\2\2\u0103\u0106\3\2\2\2")
buf.write("\u0104\u0102\3\2\2\2\u0104\u0105\3\2\2\2\u0105X\3\2\2")
buf.write("\2\u0106\u0104\3\2\2\2\u0107\u0108\7]\2\2\u0108Z\3\2\2")
buf.write("\2\u0109\u010a\7_\2\2\u010a\\\3\2\2\2\u010b\u010c\7}\2")
buf.write("\2\u010c^\3\2\2\2\u010d\u010e\7\177\2\2\u010e`\3\2\2\2")
buf.write("\u010f\u0110\7<\2\2\u0110b\3\2\2\2\u0111\u0112\7=\2\2")
buf.write("\u0112d\3\2\2\2\u0113\u0114\7^\2\2\u0114\u0115\7$\2\2")
buf.write("\u0115f\3\2\2\2\u0116\u0117\7^\2\2\u0117\u0118\7)\2\2")
buf.write("\u0118h\3\2\2\2\u0119\u0120\7$\2\2\u011a\u011f\t\23\2")
buf.write("\2\u011b\u011f\5\5\3\2\u011c\u011f\5e\63\2\u011d\u011f")
buf.write("\5\3\2\2\u011e\u011a\3\2\2\2\u011e\u011b\3\2\2\2\u011e")
buf.write("\u011c\3\2\2\2\u011e\u011d\3\2\2\2\u011f\u0122\3\2\2\2")
buf.write("\u0120\u011e\3\2\2\2\u0120\u0121\3\2\2\2\u0121\u0123\3")
buf.write("\2\2\2\u0122\u0120\3\2\2\2\u0123\u0124\7$\2\2\u0124j\3")
buf.write("\2\2\2\u0125\u012c\7)\2\2\u0126\u012b\t\24\2\2\u0127\u012b")
buf.write("\5\5\3\2\u0128\u012b\5g\64\2\u0129\u012b\5\3\2\2\u012a")
buf.write("\u0126\3\2\2\2\u012a\u0127\3\2\2\2\u012a\u0128\3\2\2\2")
buf.write("\u012a\u0129\3\2\2\2\u012b\u012e\3\2\2\2\u012c\u012a\3")
buf.write("\2\2\2\u012c\u012d\3\2\2\2\u012d\u012f\3\2\2\2\u012e\u012c")
buf.write("\3\2\2\2\u012f\u0130\7)\2\2\u0130l\3\2\2\2\u0131\u0133")
buf.write("\t\25\2\2\u0132\u0131\3\2\2\2\u0133\u0134\3\2\2\2\u0134")
buf.write("\u0132\3\2\2\2\u0134\u0135\3\2\2\2\u0135n\3\2\2\2\u0136")
buf.write("\u0137\7/\2\2\u0137\u0138\5m\67\2\u0138p\3\2\2\2\u0139")
buf.write("\u013b\t\25\2\2\u013a\u0139\3\2\2\2\u013b\u013c\3\2\2")
buf.write("\2\u013c\u013a\3\2\2\2\u013c\u013d\3\2\2\2\u013d\u014b")
buf.write("\3\2\2\2\u013e\u0140\t\25\2\2\u013f\u013e\3\2\2\2\u0140")
buf.write("\u0143\3\2\2\2\u0141\u013f\3\2\2\2\u0141\u0142\3\2\2\2")
buf.write("\u0142\u0144\3\2\2\2\u0143\u0141\3\2\2\2\u0144\u0146\7")
buf.write("\60\2\2\u0145\u0147\t\25\2\2\u0146\u0145\3\2\2\2\u0147")
buf.write("\u0148\3\2\2\2\u0148\u0146\3\2\2\2\u0148\u0149\3\2\2\2")
buf.write("\u0149\u014b\3\2\2\2\u014a\u013a\3\2\2\2\u014a\u0141\3")
buf.write("\2\2\2\u014br\3\2\2\2\u014c\u014d\7/\2\2\u014d\u014e\5")
buf.write("q9\2\u014et\3\2\2\2\u014f\u0150\7~\2\2\u0150\u0151\7|")
buf.write("\2\2\u0151\u0167\3\2\2\2\u0152\u0154\7/\2\2\u0153\u0155")
buf.write("\5M\'\2\u0154\u0153\3\2\2\2\u0155\u0156\3\2\2\2\u0156")
buf.write("\u0154\3\2\2\2\u0156\u0157\3\2\2\2\u0157\u0168\3\2\2\2")
buf.write("\u0158\u015a\5M\'\2\u0159\u0158\3\2\2\2\u015a\u015b\3")
buf.write("\2\2\2\u015b\u0159\3\2\2\2\u015b\u015c\3\2\2\2\u015c\u0165")
buf.write("\3\2\2\2\u015d\u0163\7/\2\2\u015e\u0160\5M\'\2\u015f\u015e")
buf.write("\3\2\2\2\u0160\u0161\3\2\2\2\u0161\u015f\3\2\2\2\u0161")
buf.write("\u0162\3\2\2\2\u0162\u0164\3\2\2\2\u0163\u015f\3\2\2\2")
buf.write("\u0163\u0164\3\2\2\2\u0164\u0166\3\2\2\2\u0165\u015d\3")
buf.write("\2\2\2\u0165\u0166\3\2\2\2\u0166\u0168\3\2\2\2\u0167\u0152")
buf.write("\3\2\2\2\u0167\u0159\3\2\2\2\u0168v\3\2\2\2\u0169\u016a")
buf.write("\7^\2\2\u016a\u016b\7\61\2\2\u016bx\3\2\2\2\u016c\u0170")
buf.write("\5w<\2\u016d\u0170\t\26\2\2\u016e\u0170\5\5\3\2\u016f")
buf.write("\u016c\3\2\2\2\u016f\u016d\3\2\2\2\u016f\u016e\3\2\2\2")
buf.write("\u0170z\3\2\2\2\u0171\u0175\5w<\2\u0172\u0175\t\27\2\2")
buf.write("\u0173\u0175\5\5\3\2\u0174\u0171\3\2\2\2\u0174\u0172\3")
buf.write("\2\2\2\u0174\u0173\3\2\2\2\u0175|\3\2\2\2\u0176\u0177")
buf.write("\7\61\2\2\u0177\u017b\5y=\2\u0178\u017a\5{>\2\u0179\u0178")
buf.write("\3\2\2\2\u017a\u017d\3\2\2\2\u017b\u0179\3\2\2\2\u017b")
buf.write("\u017c\3\2\2\2\u017c\u017e\3\2\2\2\u017d\u017b\3\2\2\2")
buf.write("\u017e\u017f\7\61\2\2\u017f~\3\2\2\2\u0180\u0181\t\30")
buf.write("\2\2\u0181\u0182\3\2\2\2\u0182\u0183\b@\2\2\u0183\u0080")
buf.write("\3\2\2\2\u0184\u0185\7\61\2\2\u0185\u0186\7\61\2\2\u0186")
buf.write("\u018a\3\2\2\2\u0187\u0189\13\2\2\2\u0188\u0187\3\2\2")
buf.write("\2\u0189\u018c\3\2\2\2\u018a\u018b\3\2\2\2\u018a\u0188")
buf.write("\3\2\2\2\u018b\u018e\3\2\2\2\u018c\u018a\3\2\2\2\u018d")
buf.write("\u018f\7\17\2\2\u018e\u018d\3\2\2\2\u018e\u018f\3\2\2")
buf.write("\2\u018f\u0190\3\2\2\2\u0190\u0191\7\f\2\2\u0191\u0192")
buf.write("\3\2\2\2\u0192\u0193\bA\2\2\u0193\u0082\3\2\2\2\u0194")
buf.write("\u0195\7\61\2\2\u0195\u0196\7,\2\2\u0196\u019a\3\2\2\2")
buf.write("\u0197\u0199\13\2\2\2\u0198\u0197\3\2\2\2\u0199\u019c")
buf.write("\3\2\2\2\u019a\u019b\3\2\2\2\u019a\u0198\3\2\2\2\u019b")
buf.write("\u019d\3\2\2\2\u019c\u019a\3\2\2\2\u019d\u019e\7,\2\2")
buf.write("\u019e\u019f\7\61\2\2\u019f\u01a0\3\2\2\2\u01a0\u01a1")
buf.write("\bB\2\2\u01a1\u0084\3\2\2\2\36\2\u00a3\u00f5\u00f9\u00fd")
buf.write("\u0102\u0104\u011e\u0120\u012a\u012c\u0134\u013c\u0141")
buf.write("\u0148\u014a\u0156\u015b\u0161\u0163\u0165\u0167\u016f")
buf.write("\u0174\u017b\u018a\u018e\u019a\3\2\3\2")
return buf.getvalue()
class MapCSSLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
COMMA = 1
QUESTION_MARK = 2
OP_INCLUDED_IN = 3
OP_INTERSECTS = 4
OP_SUBSET = 5
OP_SUPERSET = 6
OP_NOSUBSET = 7
OP_NOSUPERSET = 8
PAR_OPEN = 9
PAR_CLOSE = 10
DOT = 11
OP_EQ = 12
OP_NEQ = 13
OP_LE = 14
OP_GE = 15
OP_LT = 16
OP_GT = 17
OP_MATCH = 18
OP_NOT_MATCH = 19
OP_STARTS_WITH = 20
OP_ENDS_WITH = 21
OP_SUBSTRING = 22
OP_CONTAINS = 23
OP_OR = 24
OP_AND = 25
OP_MUL = 26
OP_DIV = 27
OP_MOD = 28
OP_PLUS = 29
OP_MINUS = 30
OP_NOT = 31
SET = 32
ROLE = 33
INDEX = 34
IMPORT = 35
NCOMPONENT = 36
LBRACKET = 37
RBRACKET = 38
LBRACE = 39
RBRACE = 40
COLON = 41
SEMICOLON = 42
DQUOTED_STRING = 43
SQUOTED_STRING = 44
POSITIVE_INT = 45
NEGATIVE_INT = 46
POSITIVE_FLOAT = 47
NEGATIVE_FLOAT = 48
RANGE = 49
REGEXP = 50
WS = 51
SL_COMMENT = 52
ML_COMMENT = 53
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"','", "'?'", "'\u2208'", "'\u29C9'", "'\u2286'", "'\u2287'",
"'\u2288'", "'\u2289'", "'('", "')'", "'.'", "'!='", "'<='",
"'>='", "'<'", "'>'", "'=~'", "'!~'", "'^='", "'$='", "'*='",
"'~='", "'||'", "'&&'", "'*'", "'/'", "'%'", "'+'", "'-'", "'!'",
"'['", "']'", "'{'", "'}'", "':'", "';'" ]
symbolicNames = [ "<INVALID>",
"COMMA", "QUESTION_MARK", "OP_INCLUDED_IN", "OP_INTERSECTS",
"OP_SUBSET", "OP_SUPERSET", "OP_NOSUBSET", "OP_NOSUPERSET",
"PAR_OPEN", "PAR_CLOSE", "DOT", "OP_EQ", "OP_NEQ", "OP_LE",
"OP_GE", "OP_LT", "OP_GT", "OP_MATCH", "OP_NOT_MATCH", "OP_STARTS_WITH",
"OP_ENDS_WITH", "OP_SUBSTRING", "OP_CONTAINS", "OP_OR", "OP_AND",
"OP_MUL", "OP_DIV", "OP_MOD", "OP_PLUS", "OP_MINUS", "OP_NOT",
"SET", "ROLE", "INDEX", "IMPORT", "NCOMPONENT", "LBRACKET",
"RBRACKET", "LBRACE", "RBRACE", "COLON", "SEMICOLON", "DQUOTED_STRING",
"SQUOTED_STRING", "POSITIVE_INT", "NEGATIVE_INT", "POSITIVE_FLOAT",
"NEGATIVE_FLOAT", "RANGE", "REGEXP", "WS", "SL_COMMENT", "ML_COMMENT" ]
ruleNames = [ "EBACKSLASH", "UNICODE", "COMMA", "QUESTION_MARK", "OP_INCLUDED_IN",
"OP_INTERSECTS", "OP_SUBSET", "OP_SUPERSET", "OP_NOSUBSET",
"OP_NOSUPERSET", "PAR_OPEN", "PAR_CLOSE", "DOT", "OP_EQ",
"OP_NEQ", "OP_LE", "OP_GE", "OP_LT", "OP_GT", "OP_MATCH",
"OP_NOT_MATCH", "OP_STARTS_WITH", "OP_ENDS_WITH", "OP_SUBSTRING",
"OP_CONTAINS", "OP_OR", "OP_AND", "OP_MUL", "OP_DIV",
"OP_MOD", "OP_PLUS", "OP_MINUS", "OP_NOT", "SET", "ROLE",
"INDEX", "IMPORT", "DIGIT", "CHAR", "NONASCII", "NMSTART",
"NMCHAR", "NCOMPONENT", "LBRACKET", "RBRACKET", "LBRACE",
"RBRACE", "COLON", "SEMICOLON", "EDQUOTE", "ESQUOTE",
"DQUOTED_STRING", "SQUOTED_STRING", "POSITIVE_INT", "NEGATIVE_INT",
"POSITIVE_FLOAT", "NEGATIVE_FLOAT", "RANGE", "REGEX_ESCAPE",
"REGEX_START", "REGEX_CHAR", "REGEXP", "WS", "SL_COMMENT",
"ML_COMMENT" ]
grammarFileName = "MapCSS.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.8")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
| gpl-3.0 |
ddico/odoo | addons/product/models/res_company.py | 2 | 2888 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, _
class ResCompany(models.Model):
    # Extend res.company so that every company always has a product
    # pricelist matching its currency.
    _inherit = "res.company"
    @api.model
    def create(self, vals):
        """Create the company, then ensure a pricelist in its currency.

        Reuses a shared (company-less) pricelist in the new company's
        currency if one exists, otherwise creates one, and registers it as
        the company-scoped default for
        ``res.partner.property_product_pricelist``.
        """
        new_company = super(ResCompany, self).create(vals)
        ProductPricelist = self.env['product.pricelist']
        # Prefer an existing shared pricelist in the right currency over
        # creating a duplicate.
        pricelist = ProductPricelist.search([('currency_id', '=', new_company.currency_id.id), ('company_id', '=', False)], limit=1)
        if not pricelist:
            params = {'currency': new_company.currency_id.name}
            pricelist = ProductPricelist.create({
                'name': _("Default %(currency)s pricelist") % params,
                'currency_id': new_company.currency_id.id,
            })
        # Company-scoped default: partners of this company resolve their
        # property_product_pricelist to this pricelist.
        self.env['ir.property']._set_default(
            'property_product_pricelist',
            'res.partner',
            pricelist,
            new_company,
        )
        return new_company
    def write(self, values):
        # When we modify the currency of the company, we reflect the change on the list0 pricelist, if
        # that pricelist is not used by another company. Otherwise, we create a new pricelist for the
        # given currency.
        ProductPricelist = self.env['product.pricelist']
        currency_id = values.get('currency_id')
        # Second arg False: don't raise if the 'product.list0' xml-id was
        # removed; main_pricelist is then falsy and the block is skipped.
        main_pricelist = self.env.ref('product.list0', False)
        if currency_id and main_pricelist:
            nb_companies = self.search_count([])
            for company in self:
                # A pricelist this company can already use, in either the
                # new or the current currency: nothing to do.
                existing_pricelist = ProductPricelist.search(
                    [('company_id', 'in', (False, company.id)),
                     ('currency_id', 'in', (currency_id, company.currency_id.id))])
                if existing_pricelist:
                    continue
                # No-op currency change for this company.
                if currency_id == company.currency_id.id:
                    continue
                currency_match = main_pricelist.currency_id == company.currency_id
                # list0 may be rewritten in place only when it belongs to
                # this company, or is shared and there is a single company.
                company_match = (main_pricelist.company_id == company or
                                 (main_pricelist.company_id.id is False and nb_companies == 1))
                if currency_match and company_match:
                    main_pricelist.write({'currency_id': currency_id})
                else:
                    params = {'currency': self.env['res.currency'].browse(currency_id).name}
                    pricelist = ProductPricelist.create({
                        'name': _("Default %(currency)s pricelist") % params,
                        'currency_id': currency_id,
                    })
                    self.env['ir.property']._set_default(
                        'property_product_pricelist',
                        'res.partner',
                        pricelist,
                        company,
                    )
        return super(ResCompany, self).write(values)
| agpl-3.0 |
mhogg/scipy | scipy/signal/waveforms.py | 64 | 14818 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly']
def sawtooth(t, width=1):
    """
    Return a periodic sawtooth or triangle waveform.

    The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
    interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
    ``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].

    Note that this is not band-limited.  It produces an infinite number
    of harmonics, which are aliased back and forth across the frequency
    spectrum.

    Parameters
    ----------
    t : array_like
        Time.
    width : array_like, optional
        Width of the rising ramp as a proportion of the total cycle.
        Default is 1, producing a rising ramp, while 0 produces a falling
        ramp.  `width` = 0.5 produces a triangle wave.
        If an array, causes wave shape to change over time, and must be the
        same length as t.

    Returns
    -------
    y : ndarray
        Output array containing the sawtooth waveform.

    Examples
    --------
    A 5 Hz waveform sampled at 500 Hz for 1 second:

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(0, 1, 500)
    >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
    """
    # Broadcast t and width against each other so scalar/array mixes work;
    # the (t - t) / (w - w) additions only enforce a common shape.
    t, w = asarray(t), asarray(width)
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
    if t.dtype.char in ['fFdD']:
        ytype = t.dtype.char
    else:
        ytype = 'd'
    y = zeros(t.shape, ytype)

    # width must be between 0 and 1 inclusive; out-of-range widths give nan.
    mask1 = (w > 1) | (w < 0)
    place(y, mask1, nan)

    # take t modulo 2*pi
    tmod = mod(t, 2 * pi)

    # On the interval 0 to width*2*pi the function is tmod / (pi*w) - 1.
    # Use boolean inversion (~) for the mask complement instead of the old
    # ``1 - mask`` integer arithmetic on boolean arrays.
    mask2 = (~mask1) & (tmod < w * 2 * pi)
    tsub = extract(mask2, tmod)
    wsub = extract(mask2, w)
    place(y, mask2, tsub / (pi * wsub) - 1)

    # On the interval width*2*pi to 2*pi the function is
    # (pi*(w+1)-tmod) / (pi*(1-w)).
    mask3 = (~mask1) & (~mask2)
    tsub = extract(mask3, tmod)
    wsub = extract(mask3, w)
    place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
    return y
def square(t, duty=0.5):
    """
    Return a periodic square-wave waveform.

    The square wave has a period ``2*pi``, has value +1 from 0 to
    ``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
    the interval [0,1].

    Note that this is not band-limited.  It produces an infinite number
    of harmonics, which are aliased back and forth across the frequency
    spectrum.

    Parameters
    ----------
    t : array_like
        The input time array.
    duty : array_like, optional
        Duty cycle.  Default is 0.5 (50% duty cycle).
        If an array, causes wave shape to change over time, and must be the
        same length as t.

    Returns
    -------
    y : ndarray
        Output array containing the square waveform.

    Examples
    --------
    A 5 Hz waveform sampled at 500 Hz for 1 second:

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(0, 1, 500, endpoint=False)
    >>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
    >>> plt.ylim(-2, 2)

    A pulse-width modulated sine wave:

    >>> plt.figure()
    >>> sig = np.sin(2 * np.pi * t)
    >>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
    >>> plt.subplot(2, 1, 1)
    >>> plt.plot(t, sig)
    >>> plt.subplot(2, 1, 2)
    >>> plt.plot(t, pwm)
    >>> plt.ylim(-1.5, 1.5)
    """
    # Broadcast t and duty against each other so scalar/array mixes work.
    t, w = asarray(t), asarray(duty)
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
    if t.dtype.char in ['fFdD']:
        ytype = t.dtype.char
    else:
        ytype = 'd'
    y = zeros(t.shape, ytype)

    # duty must be between 0 and 1 inclusive; out-of-range values give nan.
    mask1 = (w > 1) | (w < 0)
    place(y, mask1, nan)

    # On the interval 0 to duty*2*pi the function is 1.  Use boolean
    # inversion (~) for the mask complement instead of the old ``1 - mask``
    # integer arithmetic on boolean arrays.
    tmod = mod(t, 2 * pi)
    mask2 = (~mask1) & (tmod < w * 2 * pi)
    place(y, mask2, 1)

    # On the interval duty*2*pi to 2*pi the function is -1.
    mask3 = (~mask1) & (~mask2)
    place(y, mask3, -1)
    return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
               retenv=False):
    """
    Return a Gaussian modulated sinusoid:

        ``exp(-a t^2) exp(1j*2*pi*fc*t).``

    If `retquad` is True, then return the real and imaginary parts
    (in-phase and quadrature).
    If `retenv` is True, then return the envelope (unmodulated signal).
    Otherwise, return the real part of the modulated sinusoid.

    Parameters
    ----------
    t : ndarray or the string 'cutoff'
        Input array.
    fc : int, optional
        Center frequency (e.g. Hz).  Default is 1000.
    bw : float, optional
        Fractional bandwidth in frequency domain of pulse (e.g. Hz).
        Default is 0.5.
    bwr : float, optional
        Reference level at which fractional bandwidth is calculated (dB).
        Default is -6.
    tpr : float, optional
        If `t` is 'cutoff', then the function returns the cutoff
        time for when the pulse amplitude falls below `tpr` (in dB).
        Default is -60.
    retquad : bool, optional
        If True, return the quadrature (imaginary) as well as the real part
        of the signal.  Default is False.
    retenv : bool, optional
        If True, return the envelope of the signal.  Default is False.

    Returns
    -------
    yI : ndarray
        Real part of signal.  Always returned.
    yQ : ndarray
        Imaginary part of signal.  Only returned if `retquad` is True.
    yenv : ndarray
        Envelope of signal.  Only returned if `retenv` is True.

    See Also
    --------
    scipy.signal.morlet

    Examples
    --------
    Plot real component, imaginary component, and envelope for a 5 Hz pulse,
    sampled at 100 Hz for 2 seconds:

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
    >>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
    >>> plt.plot(t, i, t, q, t, e, '--')
    """
    if fc < 0:
        raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
    if bw <= 0:
        raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
    if bwr >= 0:
        raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
                         "be < 0 dB" % bwr)

    # exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
    ref = pow(10.0, bwr / 20.0)
    # fdel = fc*bw/2:  g(fdel) = ref --- solve this for a
    #
    # pi^2/a * fc^2 * bw^2 / 4 = -log(ref)
    a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))

    # Check for the 'cutoff' request with an isinstance guard: the previous
    # bare ``t == 'cutoff'`` compared an ndarray to a str elementwise, which
    # does not reliably yield a single boolean for array input.
    if isinstance(t, str):
        if t == 'cutoff':  # compute cut_off point
            # Solve exp(-a tc**2) = tref for tc
            #   tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
            if tpr >= 0:
                raise ValueError("Reference level for time cutoff must "
                                 "be < 0 dB")
            tref = pow(10.0, tpr / 20.0)
            return sqrt(-log(tref) / a)
        else:
            raise ValueError("If `t` is a string, it must be 'cutoff'")

    yenv = exp(-a * t * t)
    yI = yenv * cos(2 * pi * fc * t)
    yQ = yenv * sin(2 * pi * fc * t)
    if not retquad and not retenv:
        return yI
    if not retquad and retenv:
        return yI, yenv
    if retquad and not retenv:
        return yI, yQ
    if retquad and retenv:
        return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
    """Frequency-swept cosine generator.

    'Hz' below should be read as 'cycles per unit': the unit need not be
    one second, and `t` could measure space instead of time.  The units of
    rotation are cycles, not radians.

    Parameters
    ----------
    t : array_like
        Times at which to evaluate the waveform.
    f0 : float
        Frequency (e.g. Hz) at time t=0.
    t1 : float
        Time at which `f1` is specified.
    f1 : float
        Frequency (e.g. Hz) of the waveform at time `t1`.
    method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
        Kind of frequency sweep; default is 'linear'.  The instantaneous
        frequency f(t) for each method (short aliases in parentheses):

        * linear (lin, li):       ``f0 + (f1 - f0) * t / t1``
        * quadratic (quad, q):    the parabola through (0, f0) and
          (t1, f1), with its vertex at t=0 if `vertex_zero` is True,
          otherwise at t=t1.  For more general polynomial sweeps see
          `scipy.signal.waveforms.sweep_poly`.
        * logarithmic (log, lo):  ``f0 * (f1/f0)**(t/t1)``; f0 and f1 must
          be nonzero with the same sign (geometric/exponential chirp).
        * hyperbolic (hyp):       ``f0*f1*t1 / ((f0 - f1)*t + f1*t1)``;
          f0 and f1 must be nonzero.
    phi : float, optional
        Phase offset, in degrees.  Default is 0.
    vertex_zero : bool, optional
        Only used when `method` is 'quadratic'; see above.

    Returns
    -------
    y : ndarray
        ``cos(phase + (pi/180)*phi)`` where `phase` is the integral from 0
        to `t` of ``2*pi*f(t)``, with f(t) as defined above.

    See Also
    --------
    sweep_poly
    """
    # Phase accumulation is delegated to _chirp_phase so it can be tested
    # independently; the degree offset is folded in here as radians.
    phase_rad = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
    return cos(phase_rad + phi * (pi / 180))
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
def sweep_poly(t, poly, phi=0):
    """
    Frequency-swept cosine generator, with a time-dependent frequency.

    The instantaneous frequency at time `t` is given by the polynomial
    `poly`.

    Parameters
    ----------
    t : ndarray
        Times at which to evaluate the waveform.
    poly : 1-D array_like or instance of numpy.poly1d
        The desired frequency expressed as a polynomial. If `poly` is
        a list or ndarray of length n, the instantaneous frequency is
        ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``.
        If `poly` is a numpy.poly1d instance, it is ``f(t) = poly(t)``.
    phi : float, optional
        Phase offset, in degrees, Default: 0.

    Returns
    -------
    sweep_poly : ndarray
        The signal evaluated at `t`; i.e. ``cos(phase + (pi/180)*phi)``,
        where `phase` is the integral (from 0 to t) of ``2*pi*f(t)``.

    See Also
    --------
    chirp

    Notes
    -----
    .. versionadded:: 0.8.0
    """
    # The phase is computed in _sweep_poly_phase, to make testing easier.
    phase = _sweep_poly_phase(t, poly)
    # Convert the phase offset from degrees to radians.
    offset = phi * (pi / 180)
    return cos(phase + offset)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
| bsd-3-clause |
Antiun/server-tools | base_optional_quick_create/__openerp__.py | 23 | 1737 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Odoo addon manifest for `base_optional_quick_create`.
    # NOTE: the string values below are user-facing data read by Odoo at
    # install time; do not reword them casually.
    'name': "Optional quick create",
    'version': '8.0.0.1.0',
    'category': 'Tools',
    'summary': "Avoid 'quick create' on m2o fields, on a 'by model' basis",
    'description': """
This module allows to avoid to *quick create* new records, through many2one
fields, for a specific model.
You can configure which models should allow *quick create*.
When specified, the *quick create* option will always open the standard create
form.
Got the idea from https://twitter.com/nbessi/status/337869826028605441
""",
    'author': "Agile Business Group,Odoo Community Association (OCA)",
    'website': 'http://www.agilebg.com',
    'license': 'AGPL-3',
    # Only the Odoo core is required.
    "depends": ['base'],
    # Data files loaded on module install/update.
    "data": [
        'model_view.xml',
    ],
    "demo": [],
    'test': [],
    "installable": True
}
| agpl-3.0 |
xsynergy510x/android_external_chromium_org | tools/cr/cr/base/client.py | 64 | 6651 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Client configuration management.
This module holds the code for detecting and configuring the current client and
it's output directories.
It is responsible for writing out the client specific plugins that tell the
rest of the cr tool what the client is capable of.
"""
import os
import pprint
import sys
import cr
import cr.auto.build
import cr.auto.client
# The config version currently supported.
VERSION = 0.5
# The default directory name to store config inside
CLIENT_CONFIG_PATH = '.cr'
# The partial filename to add to a directory to get it's config file.
CLIENT_CONFIG_FILE = os.path.join(CLIENT_CONFIG_PATH, 'config.py')
# The format string for the header of a config file.
CONFIG_FILE_PREFIX = """
# This is an autogenerated file
# it *will* be overwritten, and changes may lost
# The system will autoload any other python file in the same folder.
import cr
OVERRIDES = cr.Config.From("""
# The format string for each value in a config file.
CONFIG_VAR_LINE = '\n {0} = {1!r},'
# The format string for the tail of a config file.
CONFIG_FILE_SUFFIX = '\n)\n'
# The name of the gclient config file
GCLIENT_FILENAME = '.gclient'
# The default config values installed by this module.
DEFAULT = cr.Config.From(
CR_ROOT_PATH=os.path.join('{GOOGLE_CODE}'),
CR_CLIENT_NAME='chromium',
CR_CLIENT_PATH=os.path.join('{CR_ROOT_PATH}', '{CR_CLIENT_NAME}'),
CR_SRC=os.path.join('{CR_CLIENT_PATH}', 'src'),
CR_BUILD_DIR=os.path.join('{CR_SRC}', '{CR_OUT_FULL}'),
)
def DetectClient():
  """Detects the enclosing client checkout from the current directory.

  Walks up from the cwd looking for a .gclient file; if found, publishes
  CR_CLIENT_PATH and CR_CLIENT_NAME into the derived configuration.
  """
  path = os.getcwd()
  # Walk towards the filesystem root until a .gclient file appears.
  while path and not os.path.exists(os.path.join(path, GCLIENT_FILENAME)):
    parent = os.path.dirname(path)
    if parent == path:
      # Reached the root without finding one.
      path = None
      break
    path = parent
  if path is not None:
    parent, leaf = os.path.split(path)
    if leaf == 'src':
      # We were given the src path; the client root is one level up.
      path = parent
    cr.context.derived['CR_CLIENT_PATH'] = path
    # Re-read the value: the context may resolve it differently.
    path = cr.context.Get('CR_CLIENT_PATH')
  if path is not None:
    cr.context.derived['CR_CLIENT_NAME'] = os.path.basename(path)
def _GetConfigFilename(path):
  """Returns the canonical path of the cr config plugin below |path|."""
  candidate = os.path.join(path, CLIENT_CONFIG_FILE)
  return os.path.realpath(candidate)
def _IsOutputDir(path):
  """Returns True if |path| already holds a cr config file."""
  config = _GetConfigFilename(path)
  return os.path.isfile(config)
def _WriteConfig(writer, data):
  """Serializes |data| as a cr config plugin onto the |writer| stream."""
  writer.write(CONFIG_FILE_PREFIX)
  # One CONFIG_VAR_LINE per key/value pair, in dict order.
  for name in data:
    writer.write(CONFIG_VAR_LINE.format(name, data[name]))
  writer.write(CONFIG_FILE_SUFFIX)
def AddArguments(parser):
  """Adds the command line arguments this module consumes.

  Args:
    parser: The argparse parser to add the arguments to.
  """
  parser.add_argument(
      '-o', '--out', dest='_out', metavar='name',
      default=None,
      help='The name of the out directory to use. Overrides CR_OUT.'
  )
def GetOutArgument():
  """Returns the value given to --out, or None if it was not supplied."""
  args = cr.context.args
  return getattr(args, '_out', None)
def ApplyOutArgument():
  """Pushes the --out override (if any) into the derived configuration."""
  # TODO(iancottrell): be flexible, allow out to do approximate match...
  selected = GetOutArgument()
  if selected:
    cr.context.derived.Set(CR_OUT_FULL=selected)
def ReadGClient():
  """Loads the .gclient configuration for the current client.

  This will load from CR_CLIENT_PATH.

  Returns:
    The dict of values set in the .gclient file, or an empty dict if the
    file is missing.
  """
  # Now attempt to load and parse the .gclient file
  result = {}
  try:
    gclient_file = cr.context.Substitute(
        os.path.join('{CR_CLIENT_PATH}', GCLIENT_FILENAME))
    with open(gclient_file, 'r') as spec_file:
      # The spec is a Python fragment; executing it with |result| as the
      # locals dict captures every variable it assigns.
      # matching the behaviour of gclient, so pylint: disable=exec-used
      exec(spec_file.read(), {}, result)
  except IOError:
    # no .gclient file, skip it
    pass
  return result
def WriteGClient():
  """Writes the .gclient configuration for the current client.

  This will write to CR_CLIENT_PATH. In dry-run mode the spec is printed
  instead of written.
  """
  gclient_file = cr.context.Substitute(
      os.path.join('{CR_CLIENT_PATH}', GCLIENT_FILENAME))
  # Re-serialize each gclient variable as a Python assignment statement.
  spec = '\n'.join('%s = %s' % (key, pprint.pformat(value))
                   for key,value in cr.context.gclient.items())
  if cr.context.dry_run:
    print 'Write the following spec to', gclient_file
    print spec
  else:
    with open(gclient_file, 'w') as spec_file:
      spec_file.write(spec)
def LoadConfig():
  """Loads the client configuration for the given context.

  This will load configuration if present from CR_CLIENT_PATH and then
  CR_BUILD_DIR, by appending each directory's plugin folder to the
  cr.auto package search paths and rescanning.

  Returns:
    True if configuration was fully loaded.
  """
  # Load the root config, will help set default build dir
  client_path = cr.context.Find('CR_CLIENT_PATH')
  if not client_path:
    return False
  cr.auto.client.__path__.append(os.path.join(client_path, CLIENT_CONFIG_PATH))
  cr.loader.Scan()
  # Now load build dir config
  build_dir = cr.context.Find('CR_BUILD_DIR')
  if not build_dir:
    return False
  cr.auto.build.__path__.append(os.path.join(build_dir, CLIENT_CONFIG_PATH))
  cr.loader.Scan()
  # Fully loaded means the build dir plugin defined a 'config' attribute.
  return hasattr(cr.auto.build, 'config')
def WriteConfig(path, data):
  """Writes a configuration out to a file.

  This writes all the key value pairs in data out to a config file below path.
  In dry-run mode the config is echoed to stdout instead.

  Args:
    path: The base path to write the config plugin into.
    data: The key value pairs to write.
  """
  filename = _GetConfigFilename(path)
  config_dir = os.path.dirname(filename)
  if cr.context.dry_run:
    print 'makedirs', config_dir
    print 'Write config to', filename
    _WriteConfig(sys.stdout, data)
  else:
    try:
      os.makedirs(config_dir)
    except OSError:
      # The directory may already exist (or have been created by a racing
      # process); only re-raise if it is still not a directory.
      if not os.path.isdir(config_dir):
        raise
    with open(filename, 'w') as writer:
      _WriteConfig(writer, data)
def PrintInfo():
  """Prints the selected output directory and its exported overrides."""
  print 'Selected output directory is', cr.context.Find('CR_BUILD_DIR')
  try:
    for name in cr.auto.build.config.OVERRIDES.exported.keys():
      print ' ', name, '=', cr.context.Get(name)
  except AttributeError:
    # No config plugin was loaded for this build dir; nothing to print.
    pass
class InitHook(cr.Plugin, cr.Plugin.Type):
  """Base class for output directory initialization hooks.

  Implementations used to fix from old version to new ones live in the
  cr.fixups package.
  """

  def Run(self, old_version, config):
    """Run the initialization hook.

    This is invoked once per init invocation.

    Args:
      old_version: The old version;
          0.0 if the old version was bad or missing,
          None if building a new output directory.
      config: The mutable config that will be written.
    """
    raise NotImplementedError('Must be overridden.')
| bsd-3-clause |
ian-garrett/meetMe | env/lib/python3.4/site-packages/pip/_vendor/distlib/markers.py | 1261 | 6282 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Parser for the environment markers micro-language defined in PEP 345."""
import ast
import os
import sys
import platform
from .compat import python_implementation, string_types
from .util import in_venv
__all__ = ['interpret']
class Evaluator(object):
    """
    A limited evaluator for Python expressions.

    Only the AST node types needed for PEP 345 environment markers are
    supported; anything else raises SyntaxError.
    """
    # Comparison/boolean operators supported in marker expressions, keyed
    # by the lower-cased AST operator class name (e.g. ast.NotEq -> 'noteq').
    operators = {
        'eq': lambda x, y: x == y,
        'gt': lambda x, y: x > y,
        'gte': lambda x, y: x >= y,
        'in': lambda x, y: x in y,
        'lt': lambda x, y: x < y,
        'lte': lambda x, y: x <= y,
        'not': lambda x: not x,
        'noteq': lambda x, y: x != y,
        'notin': lambda x, y: x not in y,
    }
    # Marker names resolved from the running interpreter; these act as the
    # default lookup namespace for names in marker expressions.
    allowed_values = {
        'sys_platform': sys.platform,
        'python_version': '%s.%s' % sys.version_info[:2],
        # parsing sys.platform is not reliable, but there is no other
        # way to get e.g. 2.7.2+, and the PEP is defined with sys.version
        'python_full_version': sys.version.split(' ', 1)[0],
        'os_name': os.name,
        'platform_in_venv': str(in_venv()),
        'platform_release': platform.release(),
        'platform_version': platform.version(),
        'platform_machine': platform.machine(),
        'platform_python_implementation': python_implementation(),
    }

    def __init__(self, context=None):
        """
        Initialise an instance.

        :param context: If specified, names are looked up in this mapping.
        """
        self.context = context or {}
        # Source text of the expression being evaluated; kept only for
        # error reporting in get_fragment().
        self.source = None

    def get_fragment(self, offset):
        """
        Get the part of the source which is causing a problem.
        """
        fragment_len = 10
        s = '%r' % (self.source[offset:offset + fragment_len])
        if offset + fragment_len < len(self.source):
            s += '...'
        return s

    def get_handler(self, node_type):
        """
        Get a handler for the specified AST node type.
        """
        return getattr(self, 'do_%s' % node_type, None)

    def evaluate(self, node, filename=None):
        """
        Evaluate a source string or node, using ``filename`` when
        displaying errors.
        """
        if isinstance(node, string_types):
            self.source = node
            kwargs = {'mode': 'eval'}
            if filename:
                kwargs['filename'] = filename
            try:
                node = ast.parse(node, **kwargs)
            except SyntaxError as e:
                s = self.get_fragment(e.offset)
                raise SyntaxError('syntax error %s' % s)
        node_type = node.__class__.__name__.lower()
        # Dispatch to do_<node_type>; unsupported node types are rejected.
        handler = self.get_handler(node_type)
        if handler is None:
            if self.source is None:
                s = '(source not available)'
            else:
                s = self.get_fragment(node.col_offset)
            raise SyntaxError("don't know how to evaluate %r %s" % (
                node_type, s))
        return handler(node)

    def get_attr_key(self, node):
        assert isinstance(node, ast.Attribute), 'attribute node expected'
        return '%s.%s' % (node.value.id, node.attr)

    def do_attribute(self, node):
        # Dotted names (e.g. os.name) are valid only when they resolve in
        # the context or in the allowed marker values.
        if not isinstance(node.value, ast.Name):
            # Fix: previously `key` was left unbound on this path, so the
            # SyntaxError below crashed with a NameError instead.
            key = None
            valid = False
        else:
            key = self.get_attr_key(node)
            valid = key in self.context or key in self.allowed_values
        if not valid:
            raise SyntaxError('invalid expression: %s' % key)
        if key in self.context:
            result = self.context[key]
        else:
            result = self.allowed_values[key]
        return result

    def do_boolop(self, node):
        # Short-circuiting 'and'/'or' over two or more operands.
        result = self.evaluate(node.values[0])
        is_or = node.op.__class__ is ast.Or
        is_and = node.op.__class__ is ast.And
        assert is_or or is_and
        if (is_and and result) or (is_or and not result):
            for n in node.values[1:]:
                result = self.evaluate(n)
                if (is_or and result) or (is_and and not result):
                    break
        return result

    def do_compare(self, node):
        def sanity_check(lhsnode, rhsnode):
            # Comparing two string literals is meaningless in a marker,
            # so reject it outright.
            valid = True
            if isinstance(lhsnode, ast.Str) and isinstance(rhsnode, ast.Str):
                valid = False
            #elif (isinstance(lhsnode, ast.Attribute)
            #      and isinstance(rhsnode, ast.Attribute)):
            #    klhs = self.get_attr_key(lhsnode)
            #    krhs = self.get_attr_key(rhsnode)
            #    valid = klhs != krhs
            if not valid:
                s = self.get_fragment(node.col_offset)
                raise SyntaxError('Invalid comparison: %s' % s)
        lhsnode = node.left
        lhs = self.evaluate(lhsnode)
        result = True
        # Evaluate chained comparisons (a < b < c) pairwise, left to right,
        # short-circuiting on the first False.
        for op, rhsnode in zip(node.ops, node.comparators):
            sanity_check(lhsnode, rhsnode)
            op = op.__class__.__name__.lower()
            if op not in self.operators:
                raise SyntaxError('unsupported operation: %r' % op)
            rhs = self.evaluate(rhsnode)
            result = self.operators[op](lhs, rhs)
            if not result:
                break
            lhs = rhs
            lhsnode = rhsnode
        return result

    def do_expression(self, node):
        return self.evaluate(node.body)

    def do_name(self, node):
        # Bare names resolve from the user context first, then from the
        # allowed marker values.
        valid = False
        if node.id in self.context:
            valid = True
            result = self.context[node.id]
        elif node.id in self.allowed_values:
            valid = True
            result = self.allowed_values[node.id]
        if not valid:
            raise SyntaxError('invalid expression: %s' % node.id)
        return result

    def do_str(self, node):
        return node.s
def interpret(marker, execution_context=None):
    """
    Interpret a marker and return a result depending on environment.

    :param marker: The marker to interpret.
    :type marker: str
    :param execution_context: The context used for name lookup.
    :type execution_context: mapping
    """
    evaluator = Evaluator(execution_context)
    return evaluator.evaluate(marker.strip())
| artistic-2.0 |
fighterCui/L4ReFiascoOC | l4/pkg/python/contrib/Lib/distutils/command/bdist_msi.py | 2 | 31388 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2005, 2006 Martin v. Löwis
# Licensed to PSF under a Contributor Agreement.
# The bdist_wininst command proper
# based on bdist_wininst
"""
Implements the bdist_msi command.
"""
import sys, os
from distutils.core import Command
from distutils.dir_util import remove_tree
from distutils.sysconfig import get_python_version
from distutils.version import StrictVersion
from distutils.errors import DistutilsOptionError
from distutils.util import get_platform
from distutils import log
import msilib
from msilib import schema, sequence, text
from msilib import Directory, Feature, Dialog, add_data
class PyDialog(Dialog):
    """Dialog class with a fixed layout: controls at the top, then a ruler,
    then a list of buttons: back, next, cancel. Optionally a bitmap at the
    left."""
    def __init__(self, *args, **kw):
        """Dialog(database, name, x, y, w, h, attributes, title, first,
        default, cancel, bitmap=true)"""
        Dialog.__init__(self, *args)
        ruler_y = self.h - 36
        bmwidth = 152*ruler_y/328
        # Bitmap support is currently disabled:
        #if kw.get("bitmap", True):
        #    self.bitmap("Bitmap", 0, 0, bmwidth, ruler_y, "PythonWin")
        self.line("BottomLine", 0, ruler_y, self.w, 0)
    def title(self, title):
        "Set the title text of the dialog at the top."
        # Control style 0x30003 = Visible|Enabled|Transparent|NoPrefix,
        # rendered in the VerdanaBold10 text style.
        self.text("Title", 15, 10, 320, 60, 0x30003,
                  r"{\VerdanaBold10}%s" % title)
    def back(self, title, next, name = "Back", active = 1):
        """Add a back button with a given title, the tab-next button,
        its name in the Control table, possibly initially disabled.
        Return the button, so that events can be associated"""
        button_flags = active and 3 or 1  # Visible|Enabled, else Visible only
        return self.pushbutton(name, 180, self.h - 27, 56, 17, button_flags,
                               title, next)
    def cancel(self, title, next, name = "Cancel", active = 1):
        """Add a cancel button with a given title, the tab-next button,
        its name in the Control table, possibly initially disabled.
        Return the button, so that events can be associated"""
        button_flags = active and 3 or 1  # Visible|Enabled, else Visible only
        return self.pushbutton(name, 304, self.h - 27, 56, 17, button_flags,
                               title, next)
    def next(self, title, next, name = "Next", active = 1):
        """Add a Next button with a given title, the tab-next button,
        its name in the Control table, possibly initially disabled.
        Return the button, so that events can be associated"""
        button_flags = active and 3 or 1  # Visible|Enabled, else Visible only
        return self.pushbutton(name, 236, self.h - 27, 56, 17, button_flags,
                               title, next)
    def xbutton(self, name, title, next, xpos):
        """Add a button with a given title, the tab-next button,
        its name in the Control table, giving its x position; the
        y-position is aligned with the other buttons.
        Return the button, so that events can be associated"""
        x = int(self.w*xpos - 28)
        return self.pushbutton(name, x, self.h - 27, 56, 17, 3, title, next)
class bdist_msi (Command):
description = "create a Microsoft Installer (.msi) binary distribution"
user_options = [('bdist-dir=', None,
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('target-version=', None,
"require a specific python version" +
" on the target system"),
('no-target-compile', 'c',
"do not compile .py to .pyc on the target system"),
('no-target-optimize', 'o',
"do not compile .py to .pyo (optimized)"
"on the target system"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('install-script=', None,
"basename of installation script to be run after"
"installation or before deinstallation"),
('pre-install-script=', None,
"Fully qualified filename of a script to be run before "
"any files are installed. This script need not be in the "
"distribution"),
]
boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
'skip-build']
    def initialize_options (self):
        """Set the default (unset) values of all supported options."""
        self.bdist_dir = None
        self.plat_name = None
        self.keep_temp = 0
        self.no_target_compile = 0
        self.no_target_optimize = 0
        self.target_version = None
        self.dist_dir = None
        self.skip_build = 0
        self.install_script = None
        self.pre_install_script = None
    def finalize_options (self):
        """Validate option combinations and fill in computed defaults.

        Raises DistutilsOptionError when target-version conflicts with a
        non-skipped build of extension modules, when the (unimplemented)
        pre-install-script option is given, or when install-script does
        not name one of the distribution's scripts.
        """
        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'msi')
        short_version = get_python_version()
        if self.target_version:
            # Building extensions for a different Python than the running
            # one only works when the build step is skipped.
            if not self.skip_build and self.distribution.has_ext_modules()\
               and self.target_version != short_version:
                raise DistutilsOptionError, \
                      "target version can only be %s, or the '--skip_build'" \
                      " option must be specified" % (short_version,)
        else:
            self.target_version = short_version
        self.set_undefined_options('bdist',
                                   ('dist_dir', 'dist_dir'),
                                   ('plat_name', 'plat_name'),
                                   )
        if self.pre_install_script:
            raise DistutilsOptionError, "the pre-install-script feature is not yet implemented"
        if self.install_script:
            # for/else: error out only if no script matched.
            for script in self.distribution.scripts:
                if self.install_script == os.path.basename(script):
                    break
            else:
                raise DistutilsOptionError, \
                      "install_script '%s' not found in scripts" % \
                      self.install_script
        # Filled in later by add_files() once the script's File key is known.
        self.install_script_key = None
    # finalize_options()
    def run (self):
        """Build the distribution (unless skipped), install it into a
        staging tree under bdist_dir, and package that tree into a .msi
        installer in dist_dir.
        """
        if not self.skip_build:
            self.run_command('build')
        install = self.reinitialize_command('install', reinit_subcommands=1)
        install.prefix = self.bdist_dir
        install.skip_build = self.skip_build
        install.warn_dir = 0
        install_lib = self.reinitialize_command('install_lib')
        # we do not want to include pyc or pyo files
        install_lib.compile = 0
        install_lib.optimize = 0
        if self.distribution.has_ext_modules():
            # If we are building an installer for a Python version other
            # than the one we are currently running, then we need to ensure
            # our build_lib reflects the other Python version rather than ours.
            # Note that for target_version!=sys.version, we must have skipped the
            # build step, so there is no issue with enforcing the build of this
            # version.
            target_version = self.target_version
            if not target_version:
                assert self.skip_build, "Should have already checked this"
                target_version = sys.version[0:3]
            plat_specifier = ".%s-%s" % (self.plat_name, target_version)
            build = self.get_finalized_command('build')
            build.build_lib = os.path.join(build.build_base,
                                           'lib' + plat_specifier)
        log.info("installing to %s", self.bdist_dir)
        install.ensure_finalized()
        # avoid warning of 'install_lib' about installing
        # into a directory not in sys.path
        sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
        install.run()
        del sys.path[0]
        self.mkpath(self.dist_dir)
        fullname = self.distribution.get_fullname()
        installer_name = self.get_installer_filename(fullname)
        installer_name = os.path.abspath(installer_name)
        # Start from a clean slate: MSI creation appends to existing files.
        if os.path.exists(installer_name): os.unlink(installer_name)
        metadata = self.distribution.metadata
        author = metadata.author
        if not author:
            author = metadata.maintainer
        if not author:
            author = "UNKNOWN"
        version = metadata.get_version()
        # ProductVersion must be strictly numeric
        # XXX need to deal with prerelease versions
        sversion = "%d.%d.%d" % StrictVersion(version).version
        # Prefix ProductName with Python x.y, so that
        # it sorts together with the other Python packages
        # in Add-Remove-Programs (APR)
        product_name = "Python %s %s" % (self.target_version,
                                         self.distribution.get_fullname())
        self.db = msilib.init_database(installer_name, schema,
                                       product_name, msilib.gen_uuid(),
                                       sversion, author)
        msilib.add_tables(self.db, sequence)
        props = [('DistVersion', version)]
        email = metadata.author_email or metadata.maintainer_email
        if email:
            props.append(("ARPCONTACT", email))
        if metadata.url:
            props.append(("ARPURLINFOABOUT", metadata.url))
        if props:
            add_data(self.db, 'Property', props)
        # Populate the MSI tables: Python lookup, payload files, custom
        # action scripts and the installer UI, then flush to disk.
        self.add_find_python()
        self.add_files()
        self.add_scripts()
        self.add_ui()
        self.db.Commit()
        if hasattr(self.distribution, 'dist_files'):
            self.distribution.dist_files.append(('bdist_msi', self.target_version, fullname))
        if not self.keep_temp:
            remove_tree(self.bdist_dir, dry_run=self.dry_run)
    def add_files(self):
        """Walk the staging tree and register every file in the MSI CAB
        and File tables under a single 'default' feature.

        Also records the File key of the install script (if any) into
        self.install_script_key for use by add_scripts().
        """
        db = self.db
        cab = msilib.CAB("distfiles")
        f = Feature(db, "default", "Default Feature", "Everything", 1, directory="TARGETDIR")
        f.set_current()
        rootdir = os.path.abspath(self.bdist_dir)
        root = Directory(db, cab, None, rootdir, "TARGETDIR", "SourceDir")
        db.Commit()
        # Iterative depth-first traversal of the staging tree.
        todo = [root]
        while todo:
            dir = todo.pop()
            for file in os.listdir(dir.absolute):
                afile = os.path.join(dir.absolute, file)
                if os.path.isdir(afile):
                    newdir = Directory(db, cab, dir, file, file, "%s|%s" % (dir.make_short(file), file))
                    todo.append(newdir)
                else:
                    key = dir.add_file(file)
                    if file==self.install_script:
                        # The same script name appearing twice is ambiguous.
                        if self.install_script_key:
                            raise DistutilsOptionError, "Multiple files with name %s" % file
                        self.install_script_key = '[#%s]' % key
        cab.commit(db)
    def add_find_python(self):
        """Adds code to the installer to compute the location of Python.

        Properties PYTHON.MACHINE, PYTHON.USER, PYTHONDIR and PYTHON will be set
        in both the execute and UI sequences; PYTHONDIR will be set from
        PYTHON.USER if defined, else from PYTHON.MACHINE.
        PYTHON is PYTHONDIR\python.exe"""
        install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % self.target_version
        if msilib.Win64:
            # type: msidbLocatorTypeRawValue + msidbLocatorType64bit
            Type = 2+16
        else:
            Type = 2
        # Registry lookups: root 2 = HKLM (per-machine), 1 = HKCU (per-user).
        add_data(self.db, "RegLocator",
                [("python.machine", 2, install_path, None, Type),
                 ("python.user", 1, install_path, None, Type)])
        add_data(self.db, "AppSearch",
                [("PYTHON.MACHINE", "python.machine"),
                 ("PYTHON.USER", "python.user")])
        # Custom action type 51 sets a property from a formatted value;
        # NOTE(review): the +256 modifier flag should be confirmed against
        # the MSI CustomAction documentation.
        add_data(self.db, "CustomAction",
                [("PythonFromMachine", 51+256, "PYTHONDIR", "[PYTHON.MACHINE]"),
                 ("PythonFromUser", 51+256, "PYTHONDIR", "[PYTHON.USER]"),
                 ("PythonExe", 51+256, "PYTHON", "[PYTHONDIR]\\python.exe"),
                 ("InitialTargetDir", 51+256, "TARGETDIR", "[PYTHONDIR]")])
        # Schedule the property assignments early in both sequences; the
        # user value (402) intentionally overrides the machine value (401).
        add_data(self.db, "InstallExecuteSequence",
                [("PythonFromMachine", "PYTHON.MACHINE", 401),
                 ("PythonFromUser", "PYTHON.USER", 402),
                 ("PythonExe", None, 403),
                 ("InitialTargetDir", 'TARGETDIR=""', 404),
                ])
        add_data(self.db, "InstallUISequence",
                [("PythonFromMachine", "PYTHON.MACHINE", 401),
                 ("PythonFromUser", "PYTHON.USER", 402),
                 ("PythonExe", None, 403),
                 ("InitialTargetDir", 'TARGETDIR=""', 404),
                ])
    def add_scripts(self):
        """Register the optional install script and pre-install script as
        MSI custom actions in the execute sequence."""
        if self.install_script:
            # Run [PYTHON] on the installed script after the files are in
            # place (sequence 6800, install only).
            add_data(self.db, "CustomAction",
                    [("install_script", 50, "PYTHON", self.install_script_key)])
            add_data(self.db, "InstallExecuteSequence",
                    [("install_script", "NOT Installed", 6800)])
        if self.pre_install_script:
            scriptfn = os.path.join(self.bdist_dir, "preinstall.bat")
            f = open(scriptfn, "w")
            # The batch file will be executed with [PYTHON], so that %1
            # is the path to the Python interpreter; %0 will be the path
            # of the batch file.
            # rem ="""
            # %1 %0
            # exit
            # """
            # <actual script>
            f.write('rem ="""\n%1 %0\nexit\n"""\n')
            f.write(open(self.pre_install_script).read())
            f.close()
            # Embed the batch file in the Binary table and run it before
            # files are installed (sequence 450, install only).
            add_data(self.db, "Binary",
                    [("PreInstall", msilib.Binary(scriptfn))
                    ])
            add_data(self.db, "CustomAction",
                    [("PreInstall", 2, "PreInstall", None)
                    ])
            add_data(self.db, "InstallExecuteSequence",
                    [("PreInstall", "NOT Installed", 450)])
def add_ui(self):
db = self.db
x = y = 50
w = 370
h = 300
title = "[ProductName] Setup"
# see "Dialog Style Bits"
modal = 3 # visible | modal
modeless = 1 # visible
track_disk_space = 32
# UI customization properties
add_data(db, "Property",
# See "DefaultUIFont Property"
[("DefaultUIFont", "DlgFont8"),
# See "ErrorDialog Style Bit"
("ErrorDialog", "ErrorDlg"),
("Progress1", "Install"), # modified in maintenance type dlg
("Progress2", "installs"),
("MaintenanceForm_Action", "Repair"),
# possible values: ALL, JUSTME
("WhichUsers", "ALL")
])
# Fonts, see "TextStyle Table"
add_data(db, "TextStyle",
[("DlgFont8", "Tahoma", 9, None, 0),
("DlgFontBold8", "Tahoma", 8, None, 1), #bold
("VerdanaBold10", "Verdana", 10, None, 1),
("VerdanaRed9", "Verdana", 9, 255, 0),
])
# UI Sequences, see "InstallUISequence Table", "Using a Sequence Table"
# Numbers indicate sequence; see sequence.py for how these action integrate
add_data(db, "InstallUISequence",
[("PrepareDlg", "Not Privileged or Windows9x or Installed", 140),
("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141),
# In the user interface, assume all-users installation if privileged.
("SelectDirectoryDlg", "Not Installed", 1230),
# XXX no support for resume installations yet
#("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240),
("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250),
("ProgressDlg", None, 1280)])
add_data(db, 'ActionText', text.ActionText)
add_data(db, 'UIText', text.UIText)
#####################################################################
# Standard dialogs: FatalError, UserExit, ExitDialog
fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
fatal.title("[ProductName] Installer ended prematurely")
fatal.back("< Back", "Finish", active = 0)
fatal.cancel("Cancel", "Back", active = 0)
fatal.text("Description1", 15, 70, 320, 80, 0x30003,
"[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.")
fatal.text("Description2", 15, 155, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c=fatal.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Exit")
user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
user_exit.title("[ProductName] Installer was interrupted")
user_exit.back("< Back", "Finish", active = 0)
user_exit.cancel("Cancel", "Back", active = 0)
user_exit.text("Description1", 15, 70, 320, 80, 0x30003,
"[ProductName] setup was interrupted. Your system has not been modified. "
"To install this program at a later time, please run the installation again.")
user_exit.text("Description2", 15, 155, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = user_exit.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Exit")
exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
exit_dialog.title("Completing the [ProductName] Installer")
exit_dialog.back("< Back", "Finish", active = 0)
exit_dialog.cancel("Cancel", "Back", active = 0)
exit_dialog.text("Description", 15, 235, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = exit_dialog.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Return")
#####################################################################
# Required dialog: FilesInUse, ErrorDlg
inuse = PyDialog(db, "FilesInUse",
x, y, w, h,
19, # KeepModeless|Modal|Visible
title,
"Retry", "Retry", "Retry", bitmap=False)
inuse.text("Title", 15, 6, 200, 15, 0x30003,
r"{\DlgFontBold8}Files in Use")
inuse.text("Description", 20, 23, 280, 20, 0x30003,
"Some files that need to be updated are currently in use.")
inuse.text("Text", 20, 55, 330, 50, 3,
"The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.")
inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess",
None, None, None)
c=inuse.back("Exit", "Ignore", name="Exit")
c.event("EndDialog", "Exit")
c=inuse.next("Ignore", "Retry", name="Ignore")
c.event("EndDialog", "Ignore")
c=inuse.cancel("Retry", "Exit", name="Retry")
c.event("EndDialog","Retry")
# See "Error Dialog". See "ICE20" for the required names of the controls.
error = Dialog(db, "ErrorDlg",
50, 10, 330, 101,
65543, # Error|Minimize|Modal|Visible
title,
"ErrorText", None, None)
error.text("ErrorText", 50,9,280,48,3, "")
#error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None)
error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo")
error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes")
error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort")
error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel")
error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore")
error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk")
error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry")
#####################################################################
# Global "Query Cancel" dialog
cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title,
"No", "No", "No")
cancel.text("Text", 48, 15, 194, 30, 3,
"Are you sure you want to cancel [ProductName] installation?")
#cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
# "py.ico", None, None)
c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No")
c.event("EndDialog", "Exit")
c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes")
c.event("EndDialog", "Return")
#####################################################################
# Global "Wait for costing" dialog
costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title,
"Return", "Return", "Return")
costing.text("Text", 48, 15, 194, 30, 3,
"Please wait while the installer finishes determining your disk space requirements.")
c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None)
c.event("EndDialog", "Exit")
#####################################################################
# Preparation dialog: no user input except cancellation
prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel")
prep.text("Description", 15, 70, 320, 40, 0x30003,
"Please wait while the Installer prepares to guide you through the installation.")
prep.title("Welcome to the [ProductName] Installer")
c=prep.text("ActionText", 15, 110, 320, 20, 0x30003, "Pondering...")
c.mapping("ActionText", "Text")
c=prep.text("ActionData", 15, 135, 320, 30, 0x30003, None)
c.mapping("ActionData", "Text")
prep.back("Back", None, active=0)
prep.next("Next", None, active=0)
c=prep.cancel("Cancel", None)
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Target directory selection
seldlg = PyDialog(db, "SelectDirectoryDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
seldlg.title("Select Destination Directory")
version = sys.version[:3]+" "
seldlg.text("Hint", 15, 30, 300, 40, 3,
"The destination directory should contain a Python %sinstallation" % version)
seldlg.back("< Back", None, active=0)
c = seldlg.next("Next >", "Cancel")
c.event("SetTargetPath", "TARGETDIR", ordering=1)
c.event("SpawnWaitDialog", "WaitForCostingDlg", ordering=2)
c.event("EndDialog", "Return", ordering=3)
c = seldlg.cancel("Cancel", "DirectoryCombo")
c.event("SpawnDialog", "CancelDlg")
seldlg.control("DirectoryCombo", "DirectoryCombo", 15, 70, 272, 80, 393219,
"TARGETDIR", None, "DirectoryList", None)
seldlg.control("DirectoryList", "DirectoryList", 15, 90, 308, 136, 3, "TARGETDIR",
None, "PathEdit", None)
seldlg.control("PathEdit", "PathEdit", 15, 230, 306, 16, 3, "TARGETDIR", None, "Next", None)
c = seldlg.pushbutton("Up", 306, 70, 18, 18, 3, "Up", None)
c.event("DirectoryListUp", "0")
c = seldlg.pushbutton("NewDir", 324, 70, 30, 18, 3, "New", None)
c.event("DirectoryListNew", "0")
#####################################################################
# Disk cost
cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title,
"OK", "OK", "OK", bitmap=False)
cost.text("Title", 15, 6, 200, 15, 0x30003,
"{\DlgFontBold8}Disk Space Requirements")
cost.text("Description", 20, 20, 280, 20, 0x30003,
"The disk space required for the installation of the selected features.")
cost.text("Text", 20, 53, 330, 60, 3,
"The highlighted volumes (if any) do not have enough disk space "
"available for the currently selected features. You can either "
"remove some files from the highlighted volumes, or choose to "
"install less features onto local drive(s), or select different "
"destination drive(s).")
cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223,
None, "{120}{70}{70}{70}{70}", None, None)
cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return")
#####################################################################
# WhichUsers Dialog. Only available on NT, and for privileged users.
# This must be run before FindRelatedProducts, because that will
# take into account whether the previous installation was per-user
# or per-machine. We currently don't support going back to this
# dialog after "Next" was selected; to support this, we would need to
# find how to reset the ALLUSERS property, and how to re-run
# FindRelatedProducts.
# On Windows9x, the ALLUSERS property is ignored on the command line
# and in the Property table, but installer fails according to the documentation
# if a dialog attempts to set ALLUSERS.
whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title,
"AdminInstall", "Next", "Cancel")
whichusers.title("Select whether to install [ProductName] for all users of this computer.")
# A radio group with two options: allusers, justme
g = whichusers.radiogroup("AdminInstall", 15, 60, 260, 50, 3,
"WhichUsers", "", "Next")
g.add("ALL", 0, 5, 150, 20, "Install for all users")
g.add("JUSTME", 0, 25, 150, 20, "Install just for me")
whichusers.back("Back", None, active=0)
c = whichusers.next("Next >", "Cancel")
c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1)
c.event("EndDialog", "Return", ordering = 2)
c = whichusers.cancel("Cancel", "AdminInstall")
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Installation Progress dialog (modeless)
progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel", bitmap=False)
progress.text("Title", 20, 15, 200, 15, 0x30003,
"{\DlgFontBold8}[Progress1] [ProductName]")
progress.text("Text", 35, 65, 300, 30, 3,
"Please wait while the Installer [Progress2] [ProductName]. "
"This may take several minutes.")
progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:")
c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...")
c.mapping("ActionText", "Text")
#c=progress.text("ActionData", 35, 140, 300, 20, 3, None)
#c.mapping("ActionData", "Text")
c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537,
None, "Progress done", None, None)
c.mapping("SetProgress", "Progress")
progress.back("< Back", "Next", active=False)
progress.next("Next >", "Cancel", active=False)
progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg")
###################################################################
# Maintenance type: repair/uninstall
maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
maint.title("Welcome to the [ProductName] Setup Wizard")
maint.text("BodyText", 15, 63, 330, 42, 3,
"Select whether you want to repair or remove [ProductName].")
g=maint.radiogroup("RepairRadioGroup", 15, 108, 330, 60, 3,
"MaintenanceForm_Action", "", "Next")
#g.add("Change", 0, 0, 200, 17, "&Change [ProductName]")
g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]")
g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]")
maint.back("< Back", None, active=False)
c=maint.next("Finish", "Cancel")
# Change installation: Change progress dialog to "Change", then ask
# for feature selection
#c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1)
#c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2)
# Reinstall: Change progress dialog to "Repair", then invoke reinstall
# Also set list of reinstalled features to "ALL"
c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5)
c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6)
c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7)
c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8)
# Uninstall: Change progress to "Remove", then invoke uninstall
# Also set list of removed features to "ALL"
c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11)
c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12)
c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13)
c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14)
# Close dialog when maintenance action scheduled
c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20)
#c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21)
maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg")
def get_installer_filename(self, fullname):
    """Return the full path of the .msi file to build for *fullname*.

    The name combines the distribution's full name, the platform tag and
    the targeted Python version, and is placed inside ``self.dist_dir``.
    Factored out so subclasses can override the naming scheme.
    """
    base_name = "{0}.{1}-py{2}.msi".format(
        fullname, self.plat_name, self.target_version)
    return os.path.join(self.dist_dir, base_name)
| gpl-2.0 |
Azure/azure-sdk-for-python | sdk/communication/azure-communication-sms/samples/send_sms_to_multiple_recipients_sample_async.py | 1 | 2517 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: send_sms_to_multiple_recipients_sample_async.py
DESCRIPTION:
This sample demonstrates sending an SMS message to multiple recipients. The SMS client is
authenticated using a connection string.
USAGE:
python send_sms_to_multiple_recipients_sample_async.py
Set the environment variable with your own value before running the sample:
1) COMMUNICATION_SAMPLES_CONNECTION_STRING - the connection string in your ACS resource
2) AZURE_PHONE_NUMBER - a phone number with SMS capabilities in your ACS resource
"""
import os
import sys
import asyncio
from azure.communication.sms.aio import SmsClient
sys.path.append("..")
class SmsMultipleRecipientsSampleAsync(object):
    """Sample that sends one SMS message to multiple recipients.

    Credentials are taken from the environment (see the module docstring
    for the required variable names).
    """

    # Connection string of the Azure Communication Services resource.
    connection_string = os.getenv("COMMUNICATION_SAMPLES_CONNECTION_STRING")
    # SMS-capable phone number in the ACS resource; used here both as the
    # sender and (twice) as the recipient.
    phone_number = os.getenv("AZURE_PHONE_NUMBER")

    async def send_sms_to_multiple_recipients_async(self):
        """Send a single message to several recipients and print per-recipient results."""
        sms_client = SmsClient.from_connection_string(self.connection_string)
        async with sms_client:
            try:
                # calling send() with sms values
                sms_responses = await sms_client.send(
                    from_=self.phone_number,
                    to=[self.phone_number, self.phone_number],
                    message="Hello World via SMS",
                    enable_delivery_report=True,  # optional property
                    tag="custom-tag")  # optional property

                for sms_response in sms_responses:
                    if sms_response.successful:
                        print("Message with message id {} was successfully sent to {}"
                              .format(sms_response.message_id, sms_response.to))
                    else:
                        print("Message failed to send to {} with the status code {} and error: {}"
                              .format(sms_response.to, sms_response.http_status_code, sms_response.error_message))
            except Exception as exc:
                # Bug fix: the original did ``print(Exception)``, which prints
                # the Exception *class*, not the error that occurred.
                print(exc)
if __name__ == '__main__':
    sample = SmsMultipleRecipientsSampleAsync()
    # asyncio.run() (Python 3.7+) creates, runs and closes the event loop;
    # asyncio.get_event_loop() outside a running loop is deprecated since 3.10.
    asyncio.run(sample.send_sms_to_multiple_recipients_async())
| mit |
chyeh727/django | django/core/serializers/json.py | 320 | 3782 | """
Serialize data to/from JSON
"""
# Avoid shadowing the standard library json module
from __future__ import absolute_import, unicode_literals
import datetime
import decimal
import json
import sys
import uuid
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import (
Deserializer as PythonDeserializer, Serializer as PythonSerializer,
)
from django.utils import six
from django.utils.timezone import is_aware
class Serializer(PythonSerializer):
    """
    Convert a queryset to JSON.

    Writes a single JSON array to ``self.stream``; each object is dumped
    individually with :class:`DjangoJSONEncoder` so date/time, Decimal and
    UUID values serialize correctly.
    """
    internal_use_only = False

    def _init_options(self):
        # Build the kwargs forwarded verbatim to json.dump(); serializer-only
        # options ('stream', 'fields') must not leak through to json.
        # NOTE(review): this compares lists of *strings*, which misorders
        # multi-digit components (e.g. '10' < '2'); presumably adequate for
        # the historical simplejson versions it targets — confirm.
        if json.__version__.split('.') >= ['2', '1', '3']:
            # Use JS strings to represent Python Decimal instances (ticket #16850)
            self.options.update({'use_decimal': False})
        self._current = None
        self.json_kwargs = self.options.copy()
        self.json_kwargs.pop('stream', None)
        self.json_kwargs.pop('fields', None)
        if self.options.get('indent'):
            # Prevent trailing spaces
            self.json_kwargs['separators'] = (',', ': ')

    def start_serialization(self):
        # Open the top-level JSON array.
        self._init_options()
        self.stream.write("[")

    def end_serialization(self):
        # Close the top-level JSON array; when indenting, the closing
        # bracket gets its own line and a trailing newline.
        if self.options.get("indent"):
            self.stream.write("\n")
        self.stream.write("]")
        if self.options.get("indent"):
            self.stream.write("\n")

    def end_object(self, obj):
        # self._current has the field data
        indent = self.options.get("indent")
        # Separate consecutive objects: ", " in compact mode, ",\n" indented.
        if not self.first:
            self.stream.write(",")
            if not indent:
                self.stream.write(" ")
        if indent:
            self.stream.write("\n")
        json.dump(self.get_dump_object(obj), self.stream,
                  cls=DjangoJSONEncoder, **self.json_kwargs)
        self._current = None

    def getvalue(self):
        # Grand-parent super: skip PythonSerializer.getvalue(), which returns
        # the accumulated Python objects rather than the stream contents.
        return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of JSON data.

    Accepts a file-like object, ``bytes`` or text; yields deserialized
    objects one at a time. Any failure (other than generator shutdown) is
    re-raised as :class:`DeserializationError` with the original traceback.
    """
    data = stream_or_string
    if not isinstance(data, (bytes, six.string_types)):
        # A stream was passed in; slurp its contents.
        data = data.read()
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    try:
        for deserialized in PythonDeserializer(json.loads(data), **options):
            yield deserialized
    except GeneratorExit:
        # Normal generator shutdown must not be swallowed or re-mapped.
        raise
    except Exception as exc:
        # Map any parsing/deserialization failure onto the serializer
        # framework's error type, preserving the original traceback.
        six.reraise(DeserializationError, DeserializationError(exc), sys.exc_info()[2])
class DjangoJSONEncoder(json.JSONEncoder):
    """
    JSONEncoder subclass that knows how to encode date/time, decimal types and UUIDs.
    """

    def default(self, o):
        # Date/time values follow the "Date Time String Format" of the
        # ECMA-262 specification (millisecond precision, 'Z' for UTC).
        if isinstance(o, datetime.datetime):
            iso = o.isoformat()
            if o.microsecond:
                # Truncate microseconds to milliseconds.
                iso = iso[:23] + iso[26:]
            if iso.endswith('+00:00'):
                iso = iso[:-6] + 'Z'
            return iso
        if isinstance(o, datetime.date):
            return o.isoformat()
        if isinstance(o, datetime.time):
            if is_aware(o):
                raise ValueError("JSON can't represent timezone-aware times.")
            iso = o.isoformat()
            if o.microsecond:
                # Keep only millisecond precision (HH:MM:SS.mmm).
                iso = iso[:12]
            return iso
        if isinstance(o, (decimal.Decimal, uuid.UUID)):
            # Both serialize as their string form.
            return str(o)
        return super(DjangoJSONEncoder, self).default(o)
# Older, deprecated class name, kept as an alias so code that imported
# DateTimeAwareJSONEncoder before the rename keeps working.
DateTimeAwareJSONEncoder = DjangoJSONEncoder
| bsd-3-clause |
daniel-de-vries/OpenLEGO | openlego/docs/_exts/tags.py | 1 | 2483 | # tag.py, this custom Sphinx extension is activated in conf.py
# and allows the use of the custom directive for tags in our rst (e.g.):
# .. tags:: tag1, tag2, tag3
from sphinx.util.compat import Directive
from docutils.parsers.rst.directives.admonitions import Admonition
from docutils import nodes
from sphinx.locale import _
# The setup function for the Sphinx extension
def setup(app):
    """Sphinx extension entry point: register the tag node, directive and hooks."""
    # This adds a new node class to build sys, with custom functs, (same name as file)
    app.add_node(tag, html=(visit_tag_node, depart_tag_node))
    # This creates a new ".. tags:: " directive in Sphinx
    app.add_directive('tags', TagDirective)
    # These are event handlers, functions connected to events.
    app.connect('doctree-resolved', process_tag_nodes)
    app.connect('env-purge-doc', purge_tags)
    # Identifies the version of our extension
    return {'version': '0.1'}
def visit_tag_node(self, node):
    """HTML visitor: open a tag node exactly like a standard admonition."""
    self.visit_admonition(node)
def depart_tag_node(self, node):
    """HTML visitor: close a tag node exactly like a standard admonition."""
    self.depart_admonition(node)
def purge_tags(app, env, docname):
    """'env-purge-doc' handler; nothing is cached per document yet, so a no-op."""
    return
def process_tag_nodes(app, doctree, fromdocname):
    """'doctree-resolved' handler; currently a placeholder that does nothing.

    The original fetched ``app.builder.env`` into an unused local; that dead
    code (and its attribute dereference) is removed. The signature is kept
    so the event connection in setup() continues to work.
    """
class tag (nodes.Admonition, nodes.Element):
    """Docutils node type backing the custom ``.. tags::`` directive."""
    pass
class TagDirective(Directive):
    """Implements the custom ``.. tags:: tag1, tag2`` directive.

    Renders the comma-separated tag list as an admonition whose entries are
    hyperlinks to the per-tag pages under ``_build/html/tags/``.
    """

    # This allows content in the directive, e.g. to list tags here
    has_content = True

    def run(self):
        # Fix: the original re-imported os and recomputed os.getcwd() inside
        # the per-tag loop; both are loop-invariant and are hoisted here.
        import os

        env = self.state.document.settings.env
        targetid = "tag-%d" % env.new_serialno('tag')
        targetnode = nodes.target('', '', ids=[targetid])

        # The tags fetched from the custom directive are one piece of text
        # sitting in self.content[0]
        taggs = self.content[0].split(", ")

        # Directory of the generated HTML, computed once for all tags.
        html_dir = os.path.join(os.getcwd(), "_build", "html")

        # Create rst hyperlinks of format `Python <http://www.python.org/>`_.
        links = ["`%s <%s>`_ " % (tagg, os.path.join(html_dir, "tags", tagg + ".html"))
                 for tagg in taggs]

        # Replace content[0] with hyperlinks to display in admonition
        self.content[0] = ", ".join(links)

        ad = Admonition(self.name, [_('Tags')], self.options,
                        self.content, self.lineno, self.content_offset,
                        self.block_text, self.state, self.state_machine)
        return [targetnode] + ad.run()
| apache-2.0 |
tigeorgia/GeorgiaCorporationScraper | registry/terms.py | 1 | 18013 | # -*- coding: utf-8 -*-
positions = set([u"პარტნიორი",
u"შეზღუდული პარტნიორები",
u"სრული პარტნიორები",
u"ხელმძღვანელი",
u"კომანდიტი",
u"კომპლემენტარი",
u"დამფუძნებელი/დამფუძნებლები",
u"დირექტორი",
u"წარმომადგენელი",
u"პარტნიორთა კრება",
u"დირექტორი",
u"დამფუძნებელი",
u"პარტნიორები",
u"ინდივიდუალური მეწარმე",
u"დამფუძნებელი",
u"გამგეობის წევრი",
u"ხელმძღვანელობაზე/წარმომადგენლობაზე უფლებამოსილი პირები",
u"თავმჯდომარე",
u"პრეზიდენტი",
u"გამგეობის წევრი",
u"მოადგილე",
u"გამგეობის წევრი",
u"მენეჯერი",
u"ხელმძღვანელი",
u"დირექტორი",
u"გენერალური დირექტორი",
u"საოპერაციო დირექტორი",
u"დირექტორი",
u"წარმომადგენელი",
u"ხელმძღვანელი",
u"თავმჯდომარე",
u"დამფუძნებელი",
u"დამფუძნებელი",
u"აქციონერი/აქციონერები",
u"აღმასრულებელი დირექტორი",
u"განმცხადებელი",
u"სამეთვალყურეო საბჭოს წევრი",
u"სავაჭრო წარმომადგენელი",
u"წარმომადგენელთა კრების წევრი",])
nationalities = set([
u"ავსტრალია",
u"ავსტრია",
u"ავღანეთი",
u"აზერბაიჯანი",
u"ალბანეთი",
u"ალჟირი",
u"ამერიკის სამოა",
u"ამერიკის ვირჯინიის კუნძულები",
u"ამერიკის შეერთებული შტატები",
u"ანგილია",
u"ანგოლა",
u"ანდორა",
u"ანტიგუა და ბარბუდა",
u"არაბთა გაერთიანებული საამიროები",
u"არგენტინა",
u"არუბა",
u"აღმოსავლეთი ტიმორი",
u"ახალი ზელანდია",
u"ახალი კალედონია",
u"ბანგლადეში",
u"ბარბადოსი",
u"ბასას-და-ინდია",
u"ბაჰამის კუნძულები",
u"ბაჰრეინი",
u"ბელარუსი",
u"ბელგია",
u"ბელიზი",
u"ბენინი",
u"ბერმუდა",
u"ბოლივია",
u"ბოსნია და ჰერცეგოვინა",
u"ბოტსვანა",
u"ბრაზილია",
u"ბრიტანეთის ვირჯინიის კუნძულები",
u"ბრიტანეთის ინდოეთის ოკეანის ტერიტორია",
u"ბრუნეი",
u"ბულგარეთი",
u"ბურკინა ფასო",
u"ბურუნდი",
u"ბუვე",
u"ბჰუტანი",
u"გაბონი",
u"გაიანა",
u"გამბია",
u"განა",
u"გერმანია",
u"გვადელუპა",
u"გვატემალა",
u"გვინეა",
u"გვინეა-ბისაუ",
u"გიბრალტარი",
u"გლორიოზოს კუნძულები",
u"გრენადა",
u"გრენლანდია",
u"გუამი",
u"დანია",
u"დიდი ბრიტანეთი",
u"დომინიკელთა რესპუბლიკა",
u"დომინიკა",
u"ეგვიპტე",
u"ევროპა (კუნძული)",
u"ეთიოპია",
u"ეკვადორი",
u"ეკვატორული გვინეა",
u"ერაყი",
u"ერიტრეა",
u"ესპანეთი",
u"ესტონეთი",
u"ეშმორის და კარტიეს კუნძულები",
u"ვალისი და ფუტუნა",
u"ვანუატუ",
u"ვატიკანი",
u"ვენესუელა",
u"ვიეტნამი",
u"ზამბია",
u"ზიმბაბვე",
u"თურქეთი",
u"თურქმენეთი",
u"იამაიკა",
u"იან მაიენი",
u"იაპონია",
u"იემენი",
u"ინდოეთი",
u"ინდონეზია",
u"იორდანია",
u"ირანი",
u"ირლანდია",
u"ისლანდია",
u"ისრაელი",
u"იტალია",
u"კაბო-ვერდე",
u"კაიმანის კუნძულები",
u"კამბოჯა",
u"კამერუნი",
u"კანადა",
u"კატარი",
u"კენია",
u"კვიპროსი",
u"კინგმენის რიფი",
u"კირიბატი",
u"კლიპერტონი (კუნძული)",
u"ქოქოსის კუნძულები",
u"კოლუმბია",
u"კომორის კუნძულები",
u"კონგოს დემოკრატიული რესპუბლიკა",
u"კონგოს რესპუბლიკა",
u"კორეის რესპუბლიკა",
u"ჩრდილოეთი კორეა",
u"კოსტა-რიკა",
u"კოტ-დ’ივუარი",
u"კუბა",
u"კუკის კუნძულები",
u"ლაოსი",
u"ლატვია",
u"ლესოთო",
u"ლიბანი",
u"ლიბერია",
u"ლიბია",
u"ლიტვა",
u"ლიხტენშტაინი",
u"ლუქსემბურგი",
u"მადაგასკარი",
u"მავრიკი",
u"მავრიტანია",
u"მაიოტა",
u"მაკაო",
u"მაკედონია",
u"მალავი",
u"მალაიზია",
u"მალდივი",
u"მალი",
u"მალტა",
u"მაროკო",
u"მარშალის კუნძულები",
u"მარჯნის ზღვის კუნძულები",
u"მექსიკა",
u"მიანმარი",
u"მიკრონეზია",
u"მოზამბიკი",
u"მოლდოვა",
u"მონაკო",
u"მონსერატი",
u"მონტენეგრო",
u"მონღოლეთი",
u"ნამიბია",
u"ნაურუ",
u"ნეპალი",
u"ნიგერი",
u"ნიგერია",
u"ნიდერლანდი",
u"ნიდერლანდის ანტილები",
u"ნიკარაგუა",
u"ნიუე",
u"ნორვეგია",
u"ნორფოლკის კუნძული",
u"ომანი",
u"პაკისტანი",
u"პალაუ",
u"პალმირა (ატოლი)",
u"პანამა",
u"პაპუა-ახალი გვინეა",
u"პარაგვაი",
u"პერუ",
u"პიტკერნის კუნძულები",
u"პოლონეთი",
u"პორტუგალია",
u"პრინც-ედუარდის კუნძული",
u"პუერტო-რიკო",
u"ჟუან-დი-ნოვა",
u"რეუნიონი",
u"რუანდა",
u"რუმინეთი",
u"რუსეთი",
u"საბერძნეთი",
u"სალვადორი",
u"სამოა",
u"სამხრეთ აფრიკის რესპუბლიკა",
u"სამხრეთი გეორგია და სამხრეთ სენდვიჩის კუნძულები",
u"სამხრეთი სუდანი",
u"სან-მარინო",
u"სან-ტომე და პრინსიპი",
u"საუდის არაბეთი",
u"საფრანგეთი",
u"საფრანგეთის გვიანა",
u"საფრანგეთის პოლინეზია",
u"საფრანგეთის სამხრეთი პოლარული მიწები",
u"საქართველო",
u"სეიშელის კუნძულები",
u"სენეგალი",
u"სენ-პიერი და მიკელონი",
u"სენტ-ვინსენტი და გრენადინები",
u"სენტ-კიტსი და ნევისი",
u"სენტ-ლუსია",
u"სერბია და მონტენეგრო",
u"სეუტა",
u"სვაზილენდი",
u"სვალბარდი",
u"სიერა-ლეონე",
u"სინგაპური",
u"სირია",
u"სლოვაკეთი",
u"სლოვენია",
u"სოლომონის კუნძულები",
u"სომალი",
u"სომხეთი",
u"სუდანი",
u"სურინამი",
u"ტაივანი",
u"ტაილანდი",
u"ტანზანია",
u"ტაჯიკეთი",
u"ტორკსის და კაიკოსის კუნძულები",
u"ტოგო",
u"ტოკელაუ",
u"ტონგა",
u"ტრინიდადი და ტობაგო",
u"ტრომელინი (კუნძული)",
u"ტუვალუ",
u"ტუნისი",
u"უგანდა",
u"უზბეკეთი",
u"უკრაინა",
u"უნგრეთი",
u"ურუგვაი",
u"ფარერის კუნძულები",
u"ფილიპინები",
u"ფინეთი",
u"ფიჯი",
u"ფოლკლენდის კუნძულები",
u"ქუვეითი",
u"ღაზის სექტორი",
u"ყაზახეთი",
u"ყირგიზეთი",
u"შვეიცარია",
u"შვედეთი",
u"შობის კუნძული",
u"შრი-ლანკა",
u"ჩადი",
u"ჩეხეთი",
u"ჩილე",
u"ჩინეთი",
u"ჩრდილოეთი მარიანას კუნძულები",
u"ცენტრალური აფრიკის რესპუბლიკა",
u"წმინდა ელენეს კუნძული",
u"წყნარი ოკეანის კუნძულები",
u"ხორვატია",
u"ჯონსტრონი (ატოლი)",
u"ჯერვისი (კუნძული)",
u"ჯერსი",
u"ჯიბუტი",
u"ჰაიტი",
u"ჰონდურასი",
u"ჰონკონგი",
u"ჰოულენდი (კუნძული)",
u"ჰერდი და მაკდონალდის კუნძულები",
])
demonyms = set([
u"ავსტრალიელი",
u"ავსტრიელი",
u"ავღანელი",
u"აზერბაიჯანელი",
u"ალბანელი",
u"ალჟირელი",
u"სამოელი",
u"ვირჯინიელი",
u"ამერიკელი",
u"ანგლიელი",
u"ანგოლელი",
u"ანდორელი",
u"ანტიგუელი",
u"არაბი",
u"არგენტინელი",
u"არუბელი",
u"ტიმორელი",
u"ახალი ზელანდიელი",
u"კალედონიელი",
u"ბანგლადეშელი",
u"ბარბადოსელი",
u"ბაჰამელი",
u"ბაჰრეინელი",
u"ბელარუსი",
u"ბელგიელი",
u"ბელიზელი",
u"ბენინელი",
u"ბერმუდელი",
u"ბოლივიელი",
u"ბოსნიელი",
u"ბოსტვანელი",
u"ბრაზილიელი",
u"ბრუნეელი",
u"ბულგარელი",
u"ბურუნდიელი",
u"ბჰუტანელი",
u"გაბონელი",
u"გაიანაელი",
u"გამბიელი",
u"განელი",
u"გერმანელი",
u"გვადელუპელი",
u"გვატემალელი",
u"გვინეელი",
u"გიბრალტარელი",
u"გრენადელი",
u"გრენლანდიელი",
u"გუემელი",
u"დანიელი",
u"ინგლისელი",
u"დომინიკელი",
u"ეგვიპტელი",
u"ევროპელი",
u"ეთიოპიელი",
u"ეკვადორელი",
u"ერაყელი",
u"ერიტრიელი",
u"ესპანელი",
u"ესტონელი",
u"ვანუატუელი",
u"ვატიკანელი",
u"ვენესუელელი",
u"ვიეტნამელი",
u"ზამბიელი",
u"თურქი",
u"თურქმენი",
u"იამაიკელი",
u"იაპონელი",
u"იემენელი",
u"ინდოელი",
u"ინდონეზიელი",
u"იორდანიელი",
u"ირანელი",
u"ირლანდიელი",
u"ისლანდიელი",
u"ებრაელი",
u"იტალიელი",
u"კამბოჯელი",
u"კამერუნელი",
u"კანადელი",
u"კატარელი",
u"კენიელი",
u"კვიპროსელი",
u"კირიბატელი",
u"კოლუმბიელი",
u"კონგოელი",
u"კონგოელი",
u"კორეელი",
u"ჩრდილო კორეელი",
u"კოსტა–რიკელი",
u"კორტ–დივუარელი",
u"კუბელი",
u"ლაოსელი",
u"ლატვიელი",
u"ლიბანელი",
u"ლიბერიელი",
u"ლიბიელი",
u"ლიტველი",
u"ლუქსემბურგელი",
u"მავრიტანიელი",
u"მაკაოელი",
u"მაკედონიელი",
u"მალაველი",
u"მალაიზიელი",
u"მალდიველი",
u"მალტელი",
u"მაროკოელი",
u"მექსიკელი",
u"მიკრონეზიელი",
u"მოზამბიკელი",
u"მოლდოველი",
u"მონაკოელი",
u"მონტენეგროელი",
u"მონღოლი",
u"ნამიბიელი",
u"ნაურუელი",
u"ნეპალელი",
u"ნიგერიელი",
u"ჰოლანდიელი",
u"ნიკარაგუელი",
u"ნორვეგიელი",
u"ომანელი",
u"პაკისტანელი",
u"პალაუელი",
u"პალმიელი",
u"პანამელი",
u"პარაგვაელი",
u"პეუელი",
u"პოლონელი",
u"პორტუგალიელი",
u"პუერტო–რიკოელი",
u"რუანდელი",
u"რუმინელი",
u"რუსი",
u"ბერძენი",
u"სალვადორელი",
u"სამოელი",
u"აფრიკელი",
u"სამხრეთ სუდანელი",
u"სან–მარინოელი",
u"არაბი",
u"ფრანგი",
u"ქართველი",
u"სენეგალელი",
u"სენტ–ლუსიელი",
u"სერბი",
u"სვალბარდიელი",
u"სიერა–ლეონელი",
u"სინგაპურელი",
u"სირიელი",
u"სლოვაკი",
u"სლოვენიელი",
u"სომალელი",
u"სომეხი",
u"სუნდანელი",
u"სურინამელი",
u"ტაივანელი",
u"ტაილანდელი",
u"ტანზანიელი",
u"ტაჯიკი",
u"ტოგოელი",
u"ტოკელაუელი",
u"ტონგელი",
u"ტრინიდადელი",
u"ტრომელინელი",
u"ტუვალუელი",
u"ტუნისელი",
u"უგანდელი",
u"უზბეკი",
u"უკრაინელი",
u"უნგრელი",
u"ურუგვაელი",
u"ფილიპინელი",
u"ფინელი",
u"ქუვეითელი",
u"ღაზელი",
u"ყაზახი",
u"ყირგიზი",
u"შვიცარიელი",
u"შვედი",
u"შრი–ლანკელი",
u"ჩადელი",
u"ჩეხი",
u"ჩილელი",
u"ჩინელი",
u"ხორვატი",
u"ჯორსტრონელი",
u"ჯერვისელი",
u"ჯიბუტელი",
u"ჰაიტელი",
u"ჰონდურასელი",
u"ჰონკონგელი",
u"ჰოულენდელი",
])
| mit |
Jusedawg/SickRage | lib/unidecode/x057.py | 252 | 4631 | data = (
'Guo ', # 0x00
'Yin ', # 0x01
'Hun ', # 0x02
'Pu ', # 0x03
'Yu ', # 0x04
'Han ', # 0x05
'Yuan ', # 0x06
'Lun ', # 0x07
'Quan ', # 0x08
'Yu ', # 0x09
'Qing ', # 0x0a
'Guo ', # 0x0b
'Chuan ', # 0x0c
'Wei ', # 0x0d
'Yuan ', # 0x0e
'Quan ', # 0x0f
'Ku ', # 0x10
'Fu ', # 0x11
'Yuan ', # 0x12
'Yuan ', # 0x13
'E ', # 0x14
'Tu ', # 0x15
'Tu ', # 0x16
'Tu ', # 0x17
'Tuan ', # 0x18
'Lue ', # 0x19
'Hui ', # 0x1a
'Yi ', # 0x1b
'Yuan ', # 0x1c
'Luan ', # 0x1d
'Luan ', # 0x1e
'Tu ', # 0x1f
'Ya ', # 0x20
'Tu ', # 0x21
'Ting ', # 0x22
'Sheng ', # 0x23
'Pu ', # 0x24
'Lu ', # 0x25
'Iri ', # 0x26
'Ya ', # 0x27
'Zai ', # 0x28
'Wei ', # 0x29
'Ge ', # 0x2a
'Yu ', # 0x2b
'Wu ', # 0x2c
'Gui ', # 0x2d
'Pi ', # 0x2e
'Yi ', # 0x2f
'Di ', # 0x30
'Qian ', # 0x31
'Qian ', # 0x32
'Zhen ', # 0x33
'Zhuo ', # 0x34
'Dang ', # 0x35
'Qia ', # 0x36
'Akutsu ', # 0x37
'Yama ', # 0x38
'Kuang ', # 0x39
'Chang ', # 0x3a
'Qi ', # 0x3b
'Nie ', # 0x3c
'Mo ', # 0x3d
'Ji ', # 0x3e
'Jia ', # 0x3f
'Zhi ', # 0x40
'Zhi ', # 0x41
'Ban ', # 0x42
'Xun ', # 0x43
'Tou ', # 0x44
'Qin ', # 0x45
'Fen ', # 0x46
'Jun ', # 0x47
'Keng ', # 0x48
'Tun ', # 0x49
'Fang ', # 0x4a
'Fen ', # 0x4b
'Ben ', # 0x4c
'Tan ', # 0x4d
'Kan ', # 0x4e
'Pi ', # 0x4f
'Zuo ', # 0x50
'Keng ', # 0x51
'Bi ', # 0x52
'Xing ', # 0x53
'Di ', # 0x54
'Jing ', # 0x55
'Ji ', # 0x56
'Kuai ', # 0x57
'Di ', # 0x58
'Jing ', # 0x59
'Jian ', # 0x5a
'Tan ', # 0x5b
'Li ', # 0x5c
'Ba ', # 0x5d
'Wu ', # 0x5e
'Fen ', # 0x5f
'Zhui ', # 0x60
'Po ', # 0x61
'Pan ', # 0x62
'Tang ', # 0x63
'Kun ', # 0x64
'Qu ', # 0x65
'Tan ', # 0x66
'Zhi ', # 0x67
'Tuo ', # 0x68
'Gan ', # 0x69
'Ping ', # 0x6a
'Dian ', # 0x6b
'Gua ', # 0x6c
'Ni ', # 0x6d
'Tai ', # 0x6e
'Pi ', # 0x6f
'Jiong ', # 0x70
'Yang ', # 0x71
'Fo ', # 0x72
'Ao ', # 0x73
'Liu ', # 0x74
'Qiu ', # 0x75
'Mu ', # 0x76
'Ke ', # 0x77
'Gou ', # 0x78
'Xue ', # 0x79
'Ba ', # 0x7a
'Chi ', # 0x7b
'Che ', # 0x7c
'Ling ', # 0x7d
'Zhu ', # 0x7e
'Fu ', # 0x7f
'Hu ', # 0x80
'Zhi ', # 0x81
'Chui ', # 0x82
'La ', # 0x83
'Long ', # 0x84
'Long ', # 0x85
'Lu ', # 0x86
'Ao ', # 0x87
'Tay ', # 0x88
'Pao ', # 0x89
'[?] ', # 0x8a
'Xing ', # 0x8b
'Dong ', # 0x8c
'Ji ', # 0x8d
'Ke ', # 0x8e
'Lu ', # 0x8f
'Ci ', # 0x90
'Chi ', # 0x91
'Lei ', # 0x92
'Gai ', # 0x93
'Yin ', # 0x94
'Hou ', # 0x95
'Dui ', # 0x96
'Zhao ', # 0x97
'Fu ', # 0x98
'Guang ', # 0x99
'Yao ', # 0x9a
'Duo ', # 0x9b
'Duo ', # 0x9c
'Gui ', # 0x9d
'Cha ', # 0x9e
'Yang ', # 0x9f
'Yin ', # 0xa0
'Fa ', # 0xa1
'Gou ', # 0xa2
'Yuan ', # 0xa3
'Die ', # 0xa4
'Xie ', # 0xa5
'Ken ', # 0xa6
'Jiong ', # 0xa7
'Shou ', # 0xa8
'E ', # 0xa9
'Ha ', # 0xaa
'Dian ', # 0xab
'Hong ', # 0xac
'Wu ', # 0xad
'Kua ', # 0xae
'[?] ', # 0xaf
'Tao ', # 0xb0
'Dang ', # 0xb1
'Kai ', # 0xb2
'Gake ', # 0xb3
'Nao ', # 0xb4
'An ', # 0xb5
'Xing ', # 0xb6
'Xian ', # 0xb7
'Huan ', # 0xb8
'Bang ', # 0xb9
'Pei ', # 0xba
'Ba ', # 0xbb
'Yi ', # 0xbc
'Yin ', # 0xbd
'Han ', # 0xbe
'Xu ', # 0xbf
'Chui ', # 0xc0
'Cen ', # 0xc1
'Geng ', # 0xc2
'Ai ', # 0xc3
'Peng ', # 0xc4
'Fang ', # 0xc5
'Que ', # 0xc6
'Yong ', # 0xc7
'Xun ', # 0xc8
'Jia ', # 0xc9
'Di ', # 0xca
'Mai ', # 0xcb
'Lang ', # 0xcc
'Xuan ', # 0xcd
'Cheng ', # 0xce
'Yan ', # 0xcf
'Jin ', # 0xd0
'Zhe ', # 0xd1
'Lei ', # 0xd2
'Lie ', # 0xd3
'Bu ', # 0xd4
'Cheng ', # 0xd5
'Gomi ', # 0xd6
'Bu ', # 0xd7
'Shi ', # 0xd8
'Xun ', # 0xd9
'Guo ', # 0xda
'Jiong ', # 0xdb
'Ye ', # 0xdc
'Nian ', # 0xdd
'Di ', # 0xde
'Yu ', # 0xdf
'Bu ', # 0xe0
'Ya ', # 0xe1
'Juan ', # 0xe2
'Sui ', # 0xe3
'Pi ', # 0xe4
'Cheng ', # 0xe5
'Wan ', # 0xe6
'Ju ', # 0xe7
'Lun ', # 0xe8
'Zheng ', # 0xe9
'Kong ', # 0xea
'Chong ', # 0xeb
'Dong ', # 0xec
'Dai ', # 0xed
'Tan ', # 0xee
'An ', # 0xef
'Cai ', # 0xf0
'Shu ', # 0xf1
'Beng ', # 0xf2
'Kan ', # 0xf3
'Zhi ', # 0xf4
'Duo ', # 0xf5
'Yi ', # 0xf6
'Zhi ', # 0xf7
'Yi ', # 0xf8
'Pei ', # 0xf9
'Ji ', # 0xfa
'Zhun ', # 0xfb
'Qi ', # 0xfc
'Sao ', # 0xfd
'Ju ', # 0xfe
'Ni ', # 0xff
)
| gpl-3.0 |
woutersmet/Molmodsummer | lib/molmod/similarity.py | 1 | 5212 | # MolMod is a collection of molecular modelling tools for python.
# Copyright (C) 2007 - 2008 Toon Verstraelen <Toon.Verstraelen@UGent.be>
#
# This file is part of MolMod.
#
# MolMod is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# MolMod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from molmod.ext import similarity_table_labels, similarity_table_distances, similarity_measure
from molmod.molecular_graphs import MolecularGraph
from molmod.molecules import Molecule
import numpy
__all__ = ["DistanceDescriptor", "distances_cor", "distances_dm", "compute_similarity"]
class DistanceDescriptor(object):
    """Sorted (label pair, distance) table used as a molecular similarity descriptor.

    The table holds one row per atom pair, sorted by (label1, label2), so two
    descriptors can be compared with a single merge-style pass inside the C
    extension function ``similarity_measure``.
    """

    def __init__(self, mol_or_graph, labels=None):
        # Labels default to the atomic numbers; the C extension expects int32.
        if labels is None:
            self.labels = mol_or_graph.numbers.astype(numpy.int32)
        else:
            self.labels = labels.astype(numpy.int32)
        self.table_labels = similarity_table_labels(self.labels)
        # Distances come from the Cartesian distance matrix for a Molecule and
        # from the topological distance table for a MolecularGraph.
        if isinstance(mol_or_graph, Molecule):
            self.table_distances = similarity_table_distances(mol_or_graph.distance_matrix)
        elif isinstance(mol_or_graph, MolecularGraph):
            self.table_distances = similarity_table_distances(mol_or_graph.distances)
        #order = self.table_labels.argsort(axis=0,kind='heapsort')
        # Sort rows by (label1, label2) -- lexsort's last key is primary.
        order = numpy.lexsort([self.table_labels[:,1], self.table_labels[:,0]])
        self.table_labels = self.table_labels[order]
        self.table_distances = self.table_distances[order]

    def similarity(self, other, margin=1.0, cutoff=10.0):
        """Return the (unnormalized) similarity between this descriptor and *other*."""
        return similarity_measure(
            self.table_labels, self.table_distances,
            other.table_labels, other.table_distances,
            margin, cutoff
        )

    def norm(self, margin=1.0, cutoff=10.0):
        """Return the descriptor's norm, i.e. the square root of its self-similarity."""
        return numpy.sqrt(self.similarity(self, margin, cutoff))
def distances_cor(coordinates, labels):
    """Computes all interatomic distances, puts them into a table and sorts the table.

    Arguments:
      coordinates -- (N, 3) numpy array of atomic positions
      labels -- sequence of N integer labels (e.g. atomic numbers)

    Returns a record array with fields ("label1", "label2", "distance"),
    one row per atom pair, with label1 <= label2, sorted by label pair.
    """
    N = len(coordinates)
    if len(labels) != N:
        # Fix: the original raised SimilarityError, a name that is not
        # defined anywhere in this module (it would fail with NameError).
        raise ValueError("The number of labels must match the size of the molecule.")
    # Fix: use integer division; under true division N*(N-1)/2 is a float,
    # which numpy.zeros rejects as a shape.
    all_distances = numpy.zeros(N*(N-1)//2, [("label1",int),("label2",int),("distance",float)])
    counter = 0
    for i1, l1 in enumerate(labels):
        for i2, l2 in enumerate(labels[:i1]):
            d = numpy.linalg.norm(coordinates[i1] - coordinates[i2])
            # Store the smaller label first so sorting groups equivalent pairs.
            if l1 < l2:
                all_distances[counter] = (l1,l2,d)
            else:
                all_distances[counter] = (l2,l1,d)
            counter += 1
    all_distances.sort()
    return all_distances
def distances_dm(distance_matrix, labels):
    """Loads all interatomic distances, puts them into a table and sorts the table.

    Arguments:
      distance_matrix -- (N, N) symmetric matrix of precomputed distances
      labels -- sequence of N integer labels (e.g. atomic numbers)

    Returns a record array with fields ("label1", "label2", "distance"),
    one row per atom pair, with label1 <= label2, sorted by label pair.
    """
    N = len(distance_matrix)
    if len(labels) != N:
        # Fix: the original raised SimilarityError, a name that is not
        # defined anywhere in this module (it would fail with NameError).
        raise ValueError("The number of labels must match the size of the molecule.")
    # Fix: use integer division; under true division N*(N-1)/2 is a float,
    # which numpy.zeros rejects as a shape.
    all_distances = numpy.zeros(N*(N-1)//2, [("label1",int),("label2",int),("distance",float)])
    counter = 0
    for i1, l1 in enumerate(labels):
        for i2, l2 in enumerate(labels[:i1]):
            d = distance_matrix[i1,i2]
            # Store the smaller label first so sorting groups equivalent pairs.
            if l1 < l2:
                all_distances[counter] = (l1,l2,d)
            else:
                all_distances[counter] = (l2,l1,d)
            counter += 1
    all_distances.sort()
    return all_distances
def compute_similarity(table_dist1, table_dist2, margin=1.0, cutoff=10.0):
    """Accumulate a similarity score from two sorted (label1, label2, distance) tables.

    Both tables must be sorted by (label1, label2), as produced by
    distances_cor/distances_dm. The function walks the two tables in a single
    merge-style pass: for every pair of rows sharing the same label pair, a
    contribution is added when the two distances agree within *margin* and
    their average lies below *cutoff*. Closer (smaller) distances are
    weighted more heavily via the ``1 - dav/cutoff`` scale factor.
    """
    similarity = 0.0
    # start2 is the first row of table_dist2 whose label pair is >= the label
    # pair currently processed from table_dist1 (None once table 2 is spent).
    start2 = 0
    la2,lb2,d2 = table_dist2[start2]
    for la1,lb1,d1 in table_dist1:
        # Skip rows of table 1 whose label pair does not occur in table 2.
        if (la1,lb1) < (la2,lb2):
            continue
        # Advance table 2 until its label pair catches up with table 1's.
        while (la1,lb1) > (la2,lb2):
            start2 += 1
            if start2 == len(table_dist2):
                start2 = None
                break
            la2,lb2,d2 = table_dist2[start2]
        if (la1,lb1) < (la2,lb2):
            continue
        if start2 is None:
            # Table 2 exhausted: no further matches are possible.
            break
        # Score the current table-1 row against every table-2 row that
        # carries the same label pair.
        current2 = start2
        while (la1,lb1) == (la2,lb2):
            dav = 0.5*(d1+d2)
            if dav < cutoff:
                delta = abs(d1-d2)
                # NOTE(review): delta is already non-negative, so the extra
                # abs() here is redundant but harmless.
                if abs(delta) < margin:
                    scale = 1-dav/cutoff
                    similarity += scale*(numpy.cos(delta/margin/numpy.pi)+1)/2
            current2 += 1
            if current2 == len(table_dist2):
                break
            la2,lb2,d2 = table_dist2[current2]
        # Rewind table 2 to start2 so the next table-1 row with the same
        # label pair is compared against the same group of rows.
        la2,lb2,d2 = table_dist2[start2]
    return similarity
| gpl-3.0 |
fabioz/Pydev | plugins/org.python.pydev.jython/Lib/json/tests/test_scanstring.py | 97 | 3784 | import sys
from json.tests import PyTest, CTest
class TestScanstring(object):
    """Shared scanstring tests, mixed into concrete Python/C test cases.

    ``self.json`` is supplied by the PyTest/CTest mixins and points at the
    json implementation under test.
    """

    def test_scanstring(self):
        # scanstring(source, start_index, encoding, strict)
        # -> (decoded_string, index_past_closing_quote)
        scanstring = self.json.decoder.scanstring
        self.assertEqual(
            scanstring('"z\\ud834\\udd20x"', 1, None, True),
            (u'z\U0001d120x', 16))

        # Narrow (UCS-2) builds count the astral char as a surrogate pair.
        if sys.maxunicode == 65535:
            self.assertEqual(
                scanstring(u'"z\U0001d120x"', 1, None, True),
                (u'z\U0001d120x', 6))
        else:
            self.assertEqual(
                scanstring(u'"z\U0001d120x"', 1, None, True),
                (u'z\U0001d120x', 5))

        self.assertEqual(
            scanstring('"\\u007b"', 1, None, True),
            (u'{', 8))

        self.assertEqual(
            scanstring('"A JSON payload should be an object or array, not a string."', 1, None, True),
            (u'A JSON payload should be an object or array, not a string.', 60))

        self.assertEqual(
            scanstring('["Unclosed array"', 2, None, True),
            (u'Unclosed array', 17))

        self.assertEqual(
            scanstring('["extra comma",]', 2, None, True),
            (u'extra comma', 14))

        self.assertEqual(
            scanstring('["double extra comma",,]', 2, None, True),
            (u'double extra comma', 21))

        self.assertEqual(
            scanstring('["Comma after the close"],', 2, None, True),
            (u'Comma after the close', 24))

        self.assertEqual(
            scanstring('["Extra close"]]', 2, None, True),
            (u'Extra close', 14))

        self.assertEqual(
            scanstring('{"Extra comma": true,}', 2, None, True),
            (u'Extra comma', 14))

        self.assertEqual(
            scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, None, True),
            (u'Extra value after close', 26))

        self.assertEqual(
            scanstring('{"Illegal expression": 1 + 2}', 2, None, True),
            (u'Illegal expression', 21))

        self.assertEqual(
            scanstring('{"Illegal invocation": alert()}', 2, None, True),
            (u'Illegal invocation', 21))

        self.assertEqual(
            scanstring('{"Numbers cannot have leading zeroes": 013}', 2, None, True),
            (u'Numbers cannot have leading zeroes', 37))

        self.assertEqual(
            scanstring('{"Numbers cannot be hex": 0x14}', 2, None, True),
            (u'Numbers cannot be hex', 24))

        self.assertEqual(
            scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, None, True),
            (u'Too deep', 30))

        self.assertEqual(
            scanstring('{"Missing colon" null}', 2, None, True),
            (u'Missing colon', 16))

        self.assertEqual(
            scanstring('{"Double colon":: null}', 2, None, True),
            (u'Double colon', 15))

        self.assertEqual(
            scanstring('{"Comma instead of colon", null}', 2, None, True),
            (u'Comma instead of colon', 25))

        self.assertEqual(
            scanstring('["Colon instead of comma": false]', 2, None, True),
            (u'Colon instead of comma', 25))

        self.assertEqual(
            scanstring('["Bad value", truth]', 2, None, True),
            (u'Bad value', 12))

    def test_issue3623(self):
        # Byte-string input must be rejected / decoded strictly.
        self.assertRaises(ValueError, self.json.decoder.scanstring, b"xxx", 1,
                          "xxx")
        self.assertRaises(UnicodeDecodeError,
                          self.json.encoder.encode_basestring_ascii, b"xx\xff")

    def test_overflow(self):
        # An end index beyond sys.maxsize must raise OverflowError.
        with self.assertRaises(OverflowError):
            self.json.decoder.scanstring(b"xxx", sys.maxsize+1)
# Concrete cases running the shared tests against the pure-Python and the
# C-accelerated scanner implementations respectively.
class TestPyScanstring(TestScanstring, PyTest): pass
class TestCScanstring(TestScanstring, CTest): pass
| epl-1.0 |
surgebiswas/poker | PokerBots_2017/Johnny/numpy/linalg/setup.py | 78 | 1735 | from __future__ import division, print_function
import os
import sys
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils Configuration for numpy.linalg.

    Registers the bundled lapack_lite C sources and the two extension
    modules (``lapack_lite`` and ``_umath_linalg``), preferring an
    optimized system LAPACK when one is detected.
    """
    from numpy.distutils.misc_util import Configuration
    from numpy.distutils.system_info import get_info
    config = Configuration('linalg', parent_package, top_path)

    config.add_data_dir('tests')

    # Configure lapack_lite

    src_dir = 'lapack_lite'
    lapack_lite_src = [
        os.path.join(src_dir, 'python_xerbla.c'),
        os.path.join(src_dir, 'zlapack_lite.c'),
        os.path.join(src_dir, 'dlapack_lite.c'),
        os.path.join(src_dir, 'blas_lite.c'),
        os.path.join(src_dir, 'dlamch.c'),
        os.path.join(src_dir, 'f2c_lite.c'),
    ]
    all_sources = config.paths(lapack_lite_src)

    # Empty dict when no optimized LAPACK is found.
    lapack_info = get_info('lapack_opt', 0)  # and {}

    def get_lapack_lite_sources(ext, build_dir):
        # With no system LAPACK, fall back to the bundled (slow) sources;
        # otherwise only the xerbla override is compiled (and even that is
        # disabled on win32).
        if not lapack_info:
            print("### Warning: Using unoptimized lapack ###")
            return all_sources
        else:
            if sys.platform == 'win32':
                print("### Warning: python_xerbla.c is disabled ###")
                return []
            return [all_sources[0]]

    config.add_extension(
        'lapack_lite',
        sources=['lapack_litemodule.c', get_lapack_lite_sources],
        depends=['lapack_lite/f2c.h'],
        extra_info=lapack_info,
    )

    # umath_linalg module
    config.add_extension(
        '_umath_linalg',
        sources=['umath_linalg.c.src', get_lapack_lite_sources],
        depends=['lapack_lite/f2c.h'],
        extra_info=lapack_info,
        libraries=['npymath'],
    )
    return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| mit |
H1ghT0p/kitsune | kitsune/forums/tests/test_posts.py | 16 | 6715 | from datetime import datetime, timedelta
from django.conf import settings
from nose.tools import eq_, raises
from kitsune.forums.models import Thread, Forum, ThreadLockedError
from kitsune.forums.tests import ForumTestCase, thread, post
from kitsune.forums.views import sort_threads
from kitsune.sumo.tests import get
from kitsune.users.tests import user
class PostTestCase(ForumTestCase):
    """Model-level behavior of posts, threads and forums: last_post
    bookkeeping, reply counts, sorting, paging and thread locking."""

    def test_new_post_updates_thread(self):
        # Saving a new post in a thread should update the last_post
        # key in that thread to point to the new post.
        t = thread(save=True)
        post(thread=t, save=True)
        p = t.new_post(author=t.creator, content='an update')
        p.save()
        t = Thread.objects.get(id=t.id)
        eq_(p.id, t.last_post_id)

    def test_new_post_updates_forum(self):
        # Saving a new post should update the last_post key in the
        # forum to point to the new post.
        t = thread(save=True)
        post(thread=t, save=True)
        p = t.new_post(author=t.creator, content='another update')
        p.save()
        f = Forum.objects.get(id=t.forum_id)
        eq_(p.id, f.last_post_id)

    def test_update_post_does_not_update_thread(self):
        # Updating/saving an old post in a thread should _not_ update
        # the last_post key in that thread.
        t = thread(save=True)
        old = post(thread=t, save=True)
        last = post(thread=t, save=True)
        old.content = 'updated content'
        old.save()
        eq_(last.id, old.thread.last_post_id)

    def test_update_forum_does_not_update_thread(self):
        # Updating/saving an old post in a forum should _not_ update
        # the last_post key in that forum.
        t = thread(save=True)
        old = post(thread=t, save=True)
        last = post(thread=t, save=True)
        old.content = 'updated content'
        old.save()
        eq_(last.id, t.forum.last_post_id)

    def test_replies_count(self):
        # The Thread.replies value should remain one less than the
        # number of posts in the thread.
        t = thread(save=True)
        post(thread=t, save=True)
        post(thread=t, save=True)
        post(thread=t, save=True)
        old = t.replies
        eq_(2, old)
        t.new_post(author=t.creator, content='test').save()
        eq_(old + 1, t.replies)

    def test_sticky_threads_first(self):
        # Sticky threads should come before non-sticky threads.
        t = post(save=True).thread
        sticky = thread(forum=t.forum, is_sticky=True, save=True)
        yesterday = datetime.now() - timedelta(days=1)
        post(thread=sticky, created=yesterday, save=True)
        # The older sticky thread shows up first.
        eq_(sticky.id, Thread.objects.all()[0].id)

    def test_thread_sorting(self):
        # After the sticky threads, threads should be sorted by the
        # created date of the last post.
        # Make sure the datetimes are different.
        post(created=datetime.now() - timedelta(days=1), save=True)
        post(save=True)
        t = thread(is_sticky=True, save=True)
        post(thread=t, save=True)
        threads = Thread.objects.filter(is_sticky=False)
        self.assert_(threads[0].last_post.created >
                     threads[1].last_post.created)

    def test_post_sorting(self):
        """Posts should be sorted chronologically."""
        t = thread(save=True)
        post(thread=t, created=datetime.now() - timedelta(days=1), save=True)
        post(thread=t, created=datetime.now() - timedelta(days=4), save=True)
        post(thread=t, created=datetime.now() - timedelta(days=7), save=True)
        post(thread=t, created=datetime.now() - timedelta(days=11), save=True)
        post(thread=t, save=True)
        posts = t.post_set.all()
        for i in range(len(posts) - 1):
            self.assert_(posts[i].created <= posts[i + 1].created)

    def test_sorting_creator(self):
        """Sorting threads by creator."""
        thread(creator=user(username='aaa', save=True), save=True)
        thread(creator=user(username='bbb', save=True), save=True)
        threads = sort_threads(Thread.objects, 3, 1)
        self.assert_(threads[0].creator.username >=
                     threads[1].creator.username)

    def test_sorting_replies(self):
        """Sorting threads by replies."""
        t = thread(save=True)
        post(thread=t, save=True)
        post(thread=t, save=True)
        post(thread=t, save=True)
        post(save=True)
        threads = sort_threads(Thread.objects, 4)
        self.assert_(threads[0].replies <= threads[1].replies)

    def test_sorting_last_post_desc(self):
        """Sorting threads by last_post descendingly."""
        t = thread(save=True)
        post(thread=t, save=True)
        post(thread=t, save=True)
        post(thread=t, save=True)
        post(created=datetime.now() - timedelta(days=1), save=True)
        threads = sort_threads(Thread.objects, 5, 1)
        self.assert_(threads[0].last_post.created >=
                     threads[1].last_post.created)

    def test_thread_last_page(self):
        """Thread's last_page property is accurate."""
        t = post(save=True).thread
        # Format: (# replies, # of pages to expect)
        test_data = ((t.replies, 1),  # Test default
                     (50, 3),  # Test a large number
                     (19, 1),  # Test off-by-one error, low
                     (20, 2))  # Test off-by-one error, high
        for replies, pages in test_data:
            t.replies = replies
            eq_(t.last_page, pages)

    @raises(ThreadLockedError)
    def test_locked_thread(self):
        """Trying to reply to a locked thread should raise an exception."""
        locked = thread(is_locked=True, save=True)
        user1 = user(save=True)
        # This should raise an exception
        locked.new_post(author=user1, content='empty')

    def test_unlocked_thread(self):
        # Posting to an unlocked thread must not raise.
        unlocked = thread(save=True)
        user1 = user(save=True)
        # This should not raise an exception
        unlocked.new_post(author=user1, content='empty')

    def test_post_no_session(self):
        # Anonymous users get redirected to the login page.
        r = get(self.client, 'forums.new_thread',
                kwargs={'forum_slug': 'test-forum'})
        assert(settings.LOGIN_URL in r.redirect_chain[0][0])
        eq_(302, r.redirect_chain[0][1])
class ThreadTestCase(ForumTestCase):
    """Thread views exercised without an authenticated session."""

    def test_delete_no_session(self):
        """Delete a thread while logged out redirects."""
        r = get(self.client, 'forums.delete_thread',
                kwargs={'forum_slug': 'test-forum', 'thread_id': 1})
        assert(settings.LOGIN_URL in r.redirect_chain[0][0])
        eq_(302, r.redirect_chain[0][1])
| bsd-3-clause |
konsP/synnefo | snf-cyclades-app/synnefo/logic/management/commands/pool-show.py | 8 | 2499 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import CommandError
from optparse import make_option
from synnefo.db.pools import bitarray_to_map
from synnefo.management import pprint, common
from snf_django.management.commands import SynnefoCommand
POOL_CHOICES = ['bridge', 'mac-prefix']
class Command(SynnefoCommand):
    """Display a bridge or MAC-prefix pool: metadata plus usage maps."""

    args = "<pool_id>"
    help = "Show a pool"
    output_transaction = True

    option_list = SynnefoCommand.option_list + (
        make_option('--type', dest='type',
                    choices=POOL_CHOICES,
                    help="Type of pool"
                    ),
    )

    def handle(self, *args, **options):
        """Look up the pool row by ID and pretty-print it.

        Raises CommandError when --type is missing or the pool ID is
        absent or invalid.
        """
        type_ = options['type']

        if not type_:
            raise CommandError("Type of pool is mandatory")

        pool_table = common.pool_table_from_type(type_)

        try:
            # args[0] raises IndexError when no positional arg was given.
            pool_id = int(args[0])
            pool_row = pool_table.objects.get(id=pool_id)
        except IndexError:
            raise CommandError("Please provide a pool ID")
        except (ValueError, pool_table.DoesNotExist):
            raise CommandError("Invalid pool ID")

        pool = pool_row.pool

        kv = {
            'id': pool_row.id,
            'offset': pool_row.offset,
            'base': pool_row.base,
            'size': pool_row.size,
            'available': pool.count_available(),
            'reserved': pool.count_reserved(),
        }

        for key, val in sorted(kv.items()):
            line = '%s: %s\n' % (key.rjust(16), val)
            self.stdout.write(line.encode('utf8'))

        # Bridge maps are rendered 64 cells per row, MAC-prefix maps 80.
        # (Replaces the fragile "cond and a or b" idiom with a conditional
        # expression.)
        step = 64 if type_ == 'bridge' else 80
        pprint.pprint_pool('Available', pool.to_map(), step, self.stdout)
        pprint.pprint_pool('Reserved',
                           bitarray_to_map(pool.reserved[:pool_row.size]),
                           step, self.stdout)
| gpl-3.0 |
ramjothikumar/Diamond | src/collectors/files/files.py | 52 | 1995 | # coding=utf-8
"""
This class collects data from plain text files
#### Dependencies
"""
import diamond.collector
import os
import re
_RE = re.compile(r'([A-Za-z0-9._-]+)[\s=:]+(-?[0-9]+)(\.?\d*)')
class FilesCollector(diamond.collector.Collector):
    """Publishes metrics parsed from plain-text files in a spool directory."""

    def get_default_config_help(self):
        """Describe the configuration keys this collector understands."""
        config_help = super(FilesCollector, self).get_default_config_help()
        config_help.update({
            'path': 'Prefix added to all stats collected by this module, a '
                    'single dot means don''t add prefix',
            'dir': 'The directory that the performance files are in',
            'delete': 'Delete files after they are picked up',
        })
        return config_help

    def get_default_config(self):
        """
        Returns default collector settings.
        """
        config = super(FilesCollector, self).get_default_config()
        config.update({
            'path': '.',
            'dir': '/tmp/diamond',
            'delete': False,
        })
        return config

    def collect(self):
        """Scan the spool directory and publish every `name value` line.

        Each regular file is read line by line; lines matching _RE publish
        a metric with precision derived from the fractional digits.  Files
        that produced at least one metric are optionally deleted.
        """
        base_dir = self.config['dir']
        if not os.path.exists(base_dir):
            return
        for fn in os.listdir(base_dir):
            path = os.path.join(base_dir, fn)
            if not os.path.isfile(path):
                continue
            try:
                found = False
                # 'with' guarantees the handle is closed even if publish()
                # raises (the original leaked the handle in that case).
                with open(path) as fh:
                    for line in fh:
                        m = _RE.match(line)
                        if m:
                            self.publish(
                                m.groups()[0],
                                m.groups()[1] + m.groups()[2],
                                precision=max(0, len(m.groups()[2]) - 1))
                            found = True
                if found and self.config['delete']:
                    os.unlink(path)
            except Exception:
                # Best effort: one unreadable file must not abort the whole
                # run.  Narrowed from a bare 'except' so SystemExit and
                # KeyboardInterrupt still propagate.
                continue
| mit |
davidjb/sqlalchemy | lib/sqlalchemy/util/langhelpers.py | 11 | 41177 | # util/langhelpers.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to help with the creation, loading and introspection of
modules, classes, hierarchies, attributes, functions, and methods.
"""
import itertools
import inspect
import operator
import re
import sys
import types
import warnings
from functools import update_wrapper
from .. import exc
import hashlib
from . import compat
from . import _collections
def md5_hex(x):
    """Return the hexadecimal MD5 digest of *x*, encoding text to UTF-8
    first on Python 3."""
    if compat.py3k:
        x = x.encode('utf-8')
    return hashlib.md5(x).hexdigest()
class safe_reraise(object):
    """Reraise an exception after invoking some
    handler code.

    Stores the existing exception info before
    invoking so that it is maintained across a potential
    coroutine context switch.

    e.g.::

        try:
            sess.commit()
        except:
            with safe_reraise():
                sess.rollback()

    """

    def __enter__(self):
        # Capture (type, value, traceback) now; the handler code run inside
        # the block may itself raise or clear the interpreter's exception
        # state before __exit__ runs.
        self._exc_info = sys.exc_info()

    def __exit__(self, type_, value, traceback):
        # see #2703 for notes
        if type_ is None:
            # Handler succeeded: re-raise the originally captured exception.
            exc_type, exc_value, exc_tb = self._exc_info
            self._exc_info = None   # remove potential circular references
            compat.reraise(exc_type, exc_value, exc_tb)
        else:
            # Handler itself raised: propagate the newer exception instead.
            self._exc_info = None   # remove potential circular references
            compat.reraise(type_, value, traceback)
def decode_slice(slc):
    """decode a slice object as sent to __getitem__.

    takes into account the 2.5 __index__() method, basically.
    """
    def _coerce(value):
        # honor the __index__ protocol the same way slicing itself would
        if hasattr(value, '__index__'):
            return value.__index__()
        return value

    return tuple(_coerce(part) for part in (slc.start, slc.stop, slc.step))
def _unique_symbols(used, *bases):
    """Yield one identifier per *base* that does not collide with *used*.

    Tries ``base`` itself, then ``base0`` .. ``base999``; each produced
    name is added to the used set so later bases cannot collide with
    earlier results.  Raises NameError if all 1001 candidates collide.
    """
    used = set(used)
    for base in bases:
        pool = itertools.chain((base,),
                               compat.itertools_imap(lambda i: base + str(i),
                                                     range(1000)))
        for sym in pool:
            if sym not in used:
                used.add(sym)
                yield sym
                break
        else:
            raise NameError("exhausted namespace for symbol base %s" % base)
def map_bits(fn, n):
    """Call the given function given each nonzero bit from n.

    Bits are visited from least to most significant; yields fn(bit) for
    each isolated single-bit value.
    """
    while n:
        # two's-complement trick: isolate the lowest set bit of n
        lowest_bit = n & (~n + 1)
        yield fn(lowest_bit)
        # clear that bit and continue with the remainder
        n ^= lowest_bit
def decorator(target):
    """A signature-matching decorator factory.

    The returned decorator, applied to ``fn``, generates (via exec) a
    wrapper whose signature exactly matches ``fn``'s and which forwards to
    ``target(fn, <original args>)``.  This keeps introspection, defaults
    and argument errors identical to the wrapped function.
    """
    def decorate(fn):
        if not inspect.isfunction(fn):
            raise Exception("not a decoratable function")
        spec = compat.inspect_getfullargspec(fn)
        names = tuple(spec[0]) + spec[1:3] + (fn.__name__,)
        # choose symbol names guaranteed not to shadow any argument name
        targ_name, fn_name = _unique_symbols(names, 'target', 'fn')

        metadata = dict(target=targ_name, fn=fn_name)
        metadata.update(format_argspec_plus(spec, grouped=False))
        metadata['name'] = fn.__name__
        code = """\
def %(name)s(%(args)s):
    return %(target)s(%(fn)s, %(apply_kw)s)
""" % metadata
        decorated = _exec_code_in_env(code,
                                      {targ_name: target, fn_name: fn},
                                      fn.__name__)
        # copy defaults and mark the wrapper for inspect.unwrap()
        decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__
        decorated.__wrapped__ = fn
        return update_wrapper(decorated, fn)
    return update_wrapper(decorate, target)
def _exec_code_in_env(code, env, fn_name):
exec(code, env)
return env[fn_name]
def public_factory(target, location):
    """Produce a wrapping function for the given cls or classmethod.

    Rationale here is so that the __init__ method of the
    class can serve as documentation for the function.

    ``location`` is the dotted public path under which the factory is
    published; its last segment becomes the generated function's name.
    """
    if isinstance(target, type):
        fn = target.__init__
        callable_ = target
        doc = "Construct a new :class:`.%s` object. \n\n"\
            "This constructor is mirrored as a public API function; "\
            "see :func:`~%s` "\
            "for a full usage and argument description." % (
                target.__name__, location, )
    else:
        fn = callable_ = target
        doc = "This function is mirrored; see :func:`~%s` "\
            "for a description of arguments." % location

    location_name = location.split(".")[-1]
    spec = compat.inspect_getfullargspec(fn)
    del spec[0][0]  # drop 'self' / 'cls' from the generated signature
    metadata = format_argspec_plus(spec, grouped=False)
    metadata['name'] = location_name
    code = """\
def %(name)s(%(args)s):
    return cls(%(apply_kw)s)
""" % metadata
    env = {'cls': callable_, 'symbol': symbol}
    exec(code, env)
    decorated = env[location_name]
    decorated.__doc__ = fn.__doc__
    decorated.__module__ = "sqlalchemy" + location.rsplit(".", 1)[0]
    if compat.py2k or hasattr(fn, '__func__'):
        fn.__func__.__doc__ = doc
    else:
        fn.__doc__ = doc
    return decorated
class PluginLoader(object):
    """Lazy registry of named plugins, optionally backed by setuptools
    entry points in the group given at construction time."""

    def __init__(self, group, auto_fn=None):
        self.group = group
        self.impls = {}  # name -> zero-argument loader callable
        self.auto_fn = auto_fn  # optional fallback resolver for unknown names

    def load(self, name):
        # 1. explicit/previously resolved registrations
        if name in self.impls:
            return self.impls[name]()

        # 2. the auto-resolution hook, if provided
        if self.auto_fn:
            loader = self.auto_fn(name)
            if loader:
                self.impls[name] = loader
                return loader()

        # 3. setuptools entry points, when pkg_resources is available
        try:
            import pkg_resources
        except ImportError:
            pass
        else:
            for impl in pkg_resources.iter_entry_points(
                    self.group, name):
                self.impls[name] = impl.load
                return impl.load()

        raise exc.NoSuchModuleError(
            "Can't load plugin: %s:%s" %
            (self.group, name))

    def register(self, name, modulepath, objname):
        # Register lazily: the module import is deferred until first load().
        def load():
            mod = compat.import_(modulepath)
            for token in modulepath.split(".")[1:]:
                mod = getattr(mod, token)
            return getattr(mod, objname)
        self.impls[name] = load
def get_cls_kwargs(cls, _set=None):
    """Return the full set of inherited kwargs for the given `cls`.

    Probes a class's __init__ method, collecting all named arguments. If the
    __init__ defines a \**kwargs catch-all, then the constructor is presumed
    to pass along unrecognized keywords to its base classes, and the
    collection process is repeated recursively on each of the bases.

    Uses a subset of inspect.getargspec() to cut down on method overhead.
    No anonymous tuple arguments please !

    ``_set`` is the internal accumulator used by the recursive calls.
    """
    toplevel = _set is None
    if toplevel:
        _set = set()

    ctr = cls.__dict__.get('__init__', False)

    # only introspect plain Python functions (C-level ctors are opaque)
    has_init = ctr and isinstance(ctr, types.FunctionType) and \
        isinstance(ctr.__code__, types.CodeType)

    if has_init:
        names, has_kw = inspect_func_args(ctr)
        _set.update(names)

        # a nested __init__ without **kw terminates the upward search
        if not has_kw and not toplevel:
            return None

    if not has_init or has_kw:
        for c in cls.__bases__:
            if get_cls_kwargs(c, _set) is None:
                break

    _set.discard('self')
    return _set
try:
    # TODO: who doesn't have this constant?
    from inspect import CO_VARKEYWORDS

    def inspect_func_args(fn):
        """Fast path: read argument names and the **kw flag directly off
        the function's code object."""
        co = fn.__code__
        nargs = co.co_argcount
        names = co.co_varnames
        args = list(names[:nargs])
        has_kw = bool(co.co_flags & CO_VARKEYWORDS)
        return args, has_kw
except ImportError:
    def inspect_func_args(fn):
        """Fallback: derive the same (args, has_kw) pair via
        inspect.getargspec when CO_VARKEYWORDS is unavailable."""
        names, _, has_kw, _ = inspect.getargspec(fn)
        return names, bool(has_kw)
def get_func_kwargs(func):
    """Return the set of legal kwargs for the given `func`.

    Uses getargspec so is safe to call for methods, functions,
    etc.

    """
    # the declared argument-name list doubles as the set of legal keyword
    # arguments for plain Python callables
    return compat.inspect_getargspec(func)[0]
def get_callable_argspec(fn, no_self=False, _is_init=False):
    """Return the argument signature for any callable.

    All pure-Python callables are accepted, including
    functions, methods, classes, objects with __call__;
    builtins and other edge cases like functools.partial() objects
    raise a TypeError.

    ``no_self`` strips the leading self/cls argument where applicable;
    ``_is_init`` is an internal flag set while recursing into a class's
    __init__.
    """
    if inspect.isbuiltin(fn):
        raise TypeError("Can't inspect builtin: %s" % fn)
    elif inspect.isfunction(fn):
        if _is_init and no_self:
            # plain function reached via a class's __init__: drop 'self'
            spec = compat.inspect_getargspec(fn)
            return compat.ArgSpec(spec.args[1:], spec.varargs,
                                  spec.keywords, spec.defaults)
        else:
            return compat.inspect_getargspec(fn)
    elif inspect.ismethod(fn):
        if no_self and (_is_init or fn.__self__):
            # bound method (or __init__): drop the implicit first argument
            spec = compat.inspect_getargspec(fn.__func__)
            return compat.ArgSpec(spec.args[1:], spec.varargs,
                                  spec.keywords, spec.defaults)
        else:
            return compat.inspect_getargspec(fn.__func__)
    elif inspect.isclass(fn):
        # a class's "signature" is that of its constructor
        return get_callable_argspec(
            fn.__init__, no_self=no_self, _is_init=True)
    elif hasattr(fn, '__func__'):
        return compat.inspect_getargspec(fn.__func__)
    elif hasattr(fn, '__call__'):
        # arbitrary callable object: recurse into its __call__ method
        if inspect.ismethod(fn.__call__):
            return get_callable_argspec(fn.__call__, no_self=no_self)
        else:
            raise TypeError("Can't inspect callable: %s" % fn)
    else:
        raise TypeError("Can't inspect callable: %s" % fn)
def format_argspec_plus(fn, grouped=True):
    """Returns a dictionary of formatted, introspected function arguments.

    A enhanced variant of inspect.formatargspec to support code generation.

    fn
       An inspectable callable or tuple of inspect getargspec() results.
    grouped
      Defaults to True; include (parens, around, argument) lists

    Returns:

    args
      Full inspect.formatargspec for fn
    self_arg
      The name of the first positional argument, varargs[0], or None
      if the function defines no positional arguments.
    apply_pos
      args, re-written in calling rather than receiving syntax.  Arguments are
      passed positionally.
    apply_kw
      Like apply_pos, except keyword-ish args are passed as keywords.

    Example::

      >>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
      {'args': '(self, a, b, c=3, **d)',
       'self_arg': 'self',
       'apply_kw': '(self, a, b, c=c, **d)',
       'apply_pos': '(self, a, b, c, **d)'}

    """
    if compat.callable(fn):
        spec = compat.inspect_getfullargspec(fn)
    else:
        # we accept an existing argspec...
        spec = fn
    args = inspect.formatargspec(*spec)
    if spec[0]:
        self_arg = spec[0][0]
    elif spec[1]:
        # no positional args: *varargs[0] stands in as "self"
        self_arg = '%s[0]' % spec[1]
    else:
        self_arg = None

    if compat.py3k:
        # py3: keyword-only args (spec[4]) also participate in apply_kw
        apply_pos = inspect.formatargspec(spec[0], spec[1],
                                          spec[2], None, spec[4])
        num_defaults = 0
        if spec[3]:
            num_defaults += len(spec[3])
        if spec[4]:
            num_defaults += len(spec[4])
        name_args = spec[0] + spec[4]
    else:
        apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2])
        num_defaults = 0
        if spec[3]:
            num_defaults += len(spec[3])
        name_args = spec[0]

    if num_defaults:
        defaulted_vals = name_args[0 - num_defaults:]
    else:
        defaulted_vals = ()

    # formatvalue renders "name=name" so defaults pass through by keyword
    apply_kw = inspect.formatargspec(name_args, spec[1], spec[2],
                                     defaulted_vals,
                                     formatvalue=lambda x: '=' + x)
    if grouped:
        return dict(args=args, self_arg=self_arg,
                    apply_pos=apply_pos, apply_kw=apply_kw)
    else:
        # strip the surrounding parentheses for inline substitution
        return dict(args=args[1:-1], self_arg=self_arg,
                    apply_pos=apply_pos[1:-1], apply_kw=apply_kw[1:-1])
def format_argspec_init(method, grouped=True):
    """format_argspec_plus with considerations for typical __init__ methods

    Wraps format_argspec_plus with error handling strategies for typical
    __init__ cases::

      object.__init__ -> (self)
      other unreflectable (usually C) -> (self, *args, **kwargs)

    """
    if method is object.__init__:
        args = grouped and '(self)' or 'self'
    else:
        try:
            return format_argspec_plus(method, grouped=grouped)
        except TypeError:
            # C-implemented __init__ cannot be introspected; assume the
            # most permissive signature.
            args = (grouped and '(self, *args, **kwargs)'
                    or 'self, *args, **kwargs')
    return dict(self_arg='self', args=args, apply_pos=args, apply_kw=args)
def getargspec_init(method):
    """inspect.getargspec with considerations for typical __init__ methods

    Wraps inspect.getargspec with error handling for typical __init__ cases::

      object.__init__ -> (self)
      other unreflectable (usually C) -> (self, *args, **kwargs)

    """
    try:
        return inspect.getargspec(method)
    except TypeError:
        # TypeError means the method is not an introspectable Python
        # function; fall back to the two well-known shapes.
        if method is object.__init__:
            return (['self'], None, None, None)
        else:
            return (['self'], 'args', 'kwargs', None)
def unbound_method_to_callable(func_or_cls):
    """Adjust the incoming callable such that a 'self' argument is not
    required.

    A Python 2 unbound method (a MethodType whose __self__ is unset) is
    unwrapped to its underlying function; any other callable passes
    through untouched.
    """
    is_unbound = (
        isinstance(func_or_cls, types.MethodType)
        and not func_or_cls.__self__
    )
    return func_or_cls.__func__ if is_unbound else func_or_cls
def generic_repr(obj, additional_kw=(), to_inspect=None, omit_kwarg=()):
    """Produce a __repr__() based on direct association of the __init__()
    specification vs. same-named attributes present.

    ``to_inspect`` may supply additional classes whose __init__ arguments
    are folded in as keyword entries; ``additional_kw`` lists extra
    (name, default) pairs; ``omit_kwarg`` names attributes to skip.
    """
    if to_inspect is None:
        to_inspect = [obj]
    else:
        to_inspect = _collections.to_list(to_inspect)

    missing = object()

    pos_args = []
    kw_args = _collections.OrderedDict()
    vargs = None
    for i, insp in enumerate(to_inspect):
        try:
            (_args, _vargs, vkw, defaults) = \
                inspect.getargspec(insp.__init__)
        except TypeError:
            # non-introspectable __init__ (usually C); skip this class
            continue
        else:
            default_len = defaults and len(defaults) or 0
            if i == 0:
                # primary class: defaultless args are rendered positionally
                if _vargs:
                    vargs = _vargs
                if default_len:
                    pos_args.extend(_args[1:-default_len])
                else:
                    pos_args.extend(_args[1:])
            else:
                # secondary classes: everything becomes a keyword entry
                kw_args.update([
                    (arg, missing) for arg in _args[1:-default_len]
                ])
                if default_len:
                    kw_args.update([
                        (arg, default)
                        for arg, default
                        in zip(_args[-default_len:], defaults)
                    ])
    output = []

    output.extend(repr(getattr(obj, arg, None)) for arg in pos_args)

    if vargs is not None and hasattr(obj, vargs):
        output.extend([repr(val) for val in getattr(obj, vargs)])

    # kwargs are rendered only when they differ from their default
    for arg, defval in kw_args.items():
        if arg in omit_kwarg:
            continue
        try:
            val = getattr(obj, arg, missing)
            if val is not missing and val != defval:
                output.append('%s=%r' % (arg, val))
        except Exception:
            pass

    if additional_kw:
        for arg, defval in additional_kw:
            try:
                val = getattr(obj, arg, missing)
                if val is not missing and val != defval:
                    output.append('%s=%r' % (arg, val))
            except Exception:
                pass

    return "%s(%s)" % (obj.__class__.__name__, ", ".join(output))
class portable_instancemethod(object):
    """Turn an instancemethod into a (parent, name) pair
    to produce a serializable callable.

    Bound methods themselves don't pickle; storing the bound object and
    the method name and re-resolving with getattr() at call time does.
    """

    __slots__ = 'target', 'name', '__weakref__'

    def __init__(self, meth):
        self.target = meth.__self__
        self.name = meth.__name__

    def __call__(self, *arg, **kw):
        # re-resolve the method on every call
        return getattr(self.target, self.name)(*arg, **kw)

    def __getstate__(self):
        # needed explicitly because __slots__ removes __dict__
        return {'target': self.target, 'name': self.name}

    def __setstate__(self, state):
        self.target = state['target']
        self.name = state['name']
def class_hierarchy(cls):
    """Return an unordered sequence of all classes related to cls.

    Traverses diamond hierarchies.

    Fibs slightly: subclasses of builtin types are not returned.  Thus
    class_hierarchy(class A(object)) returns (A, object), not A plus every
    class systemwide that derives from object.

    Old-style classes are discarded and hierarchies rooted on them
    will not be descended.

    """
    if compat.py2k:
        if isinstance(cls, types.ClassType):
            return list()

    hier = set([cls])
    process = list(cls.__mro__)
    while process:
        c = process.pop()
        if compat.py2k:
            if isinstance(c, types.ClassType):
                continue
            bases = (_ for _ in c.__bases__
                     if _ not in hier and not isinstance(_, types.ClassType))
        else:
            bases = (_ for _ in c.__bases__ if _ not in hier)

        # walk upward through unseen base classes
        for b in bases:
            process.append(b)
            hier.add(b)

        # do not descend into subclasses of builtins (see docstring)
        if compat.py3k:
            if c.__module__ == 'builtins' or not hasattr(c, '__subclasses__'):
                continue
        else:
            if c.__module__ == '__builtin__' or not hasattr(
                    c, '__subclasses__'):
                continue

        # walk downward through unseen subclasses
        for s in [_ for _ in c.__subclasses__() if _ not in hier]:
            process.append(s)
            hier.add(s)
    return list(hier)
def iterate_attributes(cls):
    """iterate all the keys and attributes associated
    with a class, without using getattr().

    Does not use getattr() so that class-sensitive
    descriptors (i.e. property.__get__()) are not called.
    """
    for key in dir(cls):
        # resolve each name the way attribute lookup would, but via the
        # raw class __dict__ entries so descriptors stay un-invoked
        for klass in cls.__mro__:
            if key in klass.__dict__:
                yield (key, klass.__dict__[key])
                break
def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None,
                                 name='self.proxy', from_instance=None):
    """Automates delegation of __specials__ for a proxying type.

    For each dunder method of ``from_cls`` (or the explicit ``only`` list),
    generates a same-named method on ``into_cls`` that forwards to the
    expression given by ``name`` (e.g. ``self.proxy.__len__(...)``).
    Methods named in ``skip`` or already present on ``into_cls`` are left
    alone.
    """
    if only:
        dunders = only
    else:
        if skip is None:
            skip = ('__slots__', '__del__', '__getattribute__',
                    '__metaclass__', '__getstate__', '__setstate__')

        dunders = [m for m in dir(from_cls)
                   if (m.startswith('__') and m.endswith('__') and
                       not hasattr(into_cls, m) and m not in skip)]

    for method in dunders:
        try:
            fn = getattr(from_cls, method)
            if not hasattr(fn, '__call__'):
                continue
            fn = getattr(fn, 'im_func', fn)
        except AttributeError:
            continue
        try:
            # mirror the original signature when introspectable
            spec = inspect.getargspec(fn)
            fn_args = inspect.formatargspec(spec[0])
            d_args = inspect.formatargspec(spec[0][1:])
        except TypeError:
            fn_args = '(self, *args, **kw)'
            d_args = '(*args, **kw)'

        py = ("def %(method)s%(fn_args)s: "
              "return %(name)s.%(method)s%(d_args)s" % locals())

        env = from_instance is not None and {name: from_instance} or {}
        compat.exec_(py, env)
        try:
            env[method].__defaults__ = fn.__defaults__
        except AttributeError:
            pass
        setattr(into_cls, method, env[method])
def methods_equivalent(meth1, meth2):
    """Return True if the two methods are the same implementation."""
    def _unwrap(meth):
        # a bound method exposes its underlying function as __func__;
        # plain functions pass through unchanged
        return getattr(meth, '__func__', meth)

    return _unwrap(meth1) is _unwrap(meth2)
def as_interface(obj, cls=None, methods=None, required=None):
    """Ensure basic interface compliance for an instance or dict of callables.

    Checks that ``obj`` implements public methods of ``cls`` or has members
    listed in ``methods``. If ``required`` is not supplied, implementing at
    least one interface method is sufficient. Methods present on ``obj`` that
    are not in the interface are ignored.

    If ``obj`` is a dict and ``dict`` does not meet the interface
    requirements, the keys of the dictionary are inspected. Keys present in
    ``obj`` that are not in the interface will raise TypeErrors.

    Raises TypeError if ``obj`` does not meet the interface criteria.

    In all passing cases, an object with callable members is returned.  In the
    simple case, ``obj`` is returned as-is; if dict processing kicks in then
    an anonymous class is returned.

    obj
      A type, instance, or dictionary of callables.
    cls
      Optional, a type.  All public methods of cls are considered the
      interface.  An ``obj`` instance of cls will always pass, ignoring
      ``required``..
    methods
      Optional, a sequence of method names to consider as the interface.
    required
      Optional, a sequence of mandatory implementations. If omitted, an
      ``obj`` that provides at least one interface method is considered
      sufficient.  As a convenience, required may be a type, in which case
      all public methods of the type are required.

    """
    if not cls and not methods:
        raise TypeError('a class or collection of method names are required')

    if isinstance(cls, type) and isinstance(obj, cls):
        return obj

    interface = set(methods or [m for m in dir(cls) if not m.startswith('_')])
    implemented = set(dir(obj))

    # operator.ge -> "all of required"; operator.gt -> "at least one"
    complies = operator.ge
    if isinstance(required, type):
        required = interface
    elif not required:
        required = set()
        complies = operator.gt
    else:
        required = set(required)

    if complies(implemented.intersection(interface), required):
        return obj

    # No dict duck typing here.
    if not isinstance(obj, dict):
        qualifier = complies is operator.gt and 'any of' or 'all of'
        raise TypeError("%r does not implement %s: %s" % (
            obj, qualifier, ', '.join(interface)))

    class AnonymousInterface(object):
        """A callable-holding shell."""

    if cls:
        AnonymousInterface.__name__ = 'Anonymous' + cls.__name__
    found = set()

    # promote each dict entry to a staticmethod on the shell class
    for method, impl in dictlike_iteritems(obj):
        if method not in interface:
            raise TypeError("%r: unknown in this interface" % method)
        if not compat.callable(impl):
            raise TypeError("%r=%r is not callable" % (method, impl))
        setattr(AnonymousInterface, method, staticmethod(impl))
        found.add(method)

    if complies(found, required):
        return AnonymousInterface

    raise TypeError("dictionary does not contain required keys %s" %
                    ', '.join(required - found))
class memoized_property(object):
    """A read-only @property that is only evaluated once.

    Being a non-data descriptor, the value cached into the instance's
    __dict__ shadows this descriptor on every subsequent access.
    """

    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__

    def __get__(self, obj, cls):
        if obj is None:
            # class-level access returns the descriptor itself
            return self
        result = self.fget(obj)
        obj.__dict__[self.__name__] = result
        return result

    def _reset(self, obj):
        memoized_property.reset(obj, self.__name__)

    @classmethod
    def reset(cls, obj, name):
        # dropping the cached entry forces re-evaluation on next access
        obj.__dict__.pop(name, None)
def memoized_instancemethod(fn):
    """Decorate a method to memoize its return value.

    Best applied to no-arg methods: memoization is not sensitive to
    argument values, and will always return the same value even when
    called with different arguments.
    """
    def oneshot(self, *args, **kw):
        result = fn(self, *args, **kw)

        def memo(*a, **kw):
            # Always replay the first result, ignoring arguments.
            return result

        memo.__name__ = fn.__name__
        memo.__doc__ = fn.__doc__
        # Shadow the class-level method with the replaying stub.
        self.__dict__[fn.__name__] = memo
        return result
    return update_wrapper(oneshot, fn)
class group_expirable_memoized_property(object):
    """A family of @memoized_properties that can be expired in tandem."""

    def __init__(self, attributes=()):
        # Names of every memoized attribute/method registered so far.
        self.attributes = []
        if attributes:
            self.attributes.extend(attributes)

    def expire_instance(self, instance):
        """Expire all memoized properties for *instance*."""
        stash = instance.__dict__
        for attribute in self.attributes:
            # Entries never accessed on this instance are silently skipped.
            stash.pop(attribute, None)

    def __call__(self, fn):
        # Register *fn* and decorate it as a memoized property.
        self.attributes.append(fn.__name__)
        return memoized_property(fn)

    def method(self, fn):
        # Register *fn* and decorate it as a memoized instance method.
        self.attributes.append(fn.__name__)
        return memoized_instancemethod(fn)
class MemoizedSlots(object):
    """Apply memoized items to an object using a ``__getattr__`` scheme.

    This makes the functionality of memoized_property and
    memoized_instancemethod available to classes using ``__slots__``:
    subclasses define ``_memoized_attr_<name>`` / ``_memoized_method_<name>``
    and the first access to ``<name>`` computes and stores the result.
    """

    __slots__ = ()

    def _fallback_getattr(self, key):
        raise AttributeError(key)

    def __getattr__(self, key):
        # Guard against infinite recursion while probing for the
        # _memoized_* implementations below.
        if key.startswith('_memoized'):
            raise AttributeError(key)
        attr_impl = '_memoized_attr_%s' % key
        meth_impl = '_memoized_method_%s' % key
        if hasattr(self, attr_impl):
            result = getattr(self, attr_impl)()
            setattr(self, key, result)
            return result
        if hasattr(self, meth_impl):
            fn = getattr(self, meth_impl)

            def oneshot(*args, **kw):
                result = fn(*args, **kw)

                def memo(*a, **kw):
                    return result

                memo.__name__ = fn.__name__
                memo.__doc__ = fn.__doc__
                setattr(self, key, memo)
                return result

            oneshot.__doc__ = fn.__doc__
            return oneshot
        return self._fallback_getattr(key)
def dependency_for(modulename):
    """Class decorator that installs the decorated object onto the module
    named by *modulename*, in addition to where it is defined."""
    def decorate(obj):
        # TODO: would be nice to improve on this import silliness,
        # unfortunately importlib doesn't work that great either
        parts = modulename.split(".")
        parent, leaf = ".".join(parts[:-1]), parts[-1]
        module = compat.import_(parent, globals(), locals(), leaf)
        module = getattr(module, leaf)
        setattr(module, obj.__name__, obj)
        return obj
    return decorate
class dependencies(object):
    """Apply imported dependencies as arguments to a function.

    E.g.::

        @util.dependencies(
            "sqlalchemy.sql.widget",
            "sqlalchemy.engine.default"
        )
        def some_func(self, widget, default, arg1, arg2, **kw):
            # ...

    Rationale is so that the impact of a dependency cycle can be
    associated directly with the few functions that cause the cycle,
    and not pollute the module-level namespace.
    """

    def __init__(self, *deps):
        # Each dotted path becomes an interned _importlater placeholder.
        self.import_deps = []
        for dep in deps:
            tokens = dep.split(".")
            self.import_deps.append(
                dependencies._importlater(
                    ".".join(tokens[0:-1]),
                    tokens[-1]
                )
            )

    def __call__(self, fn):
        # Build a wrapper whose outer signature omits the dependency
        # parameters, and whose inner call fills them in from
        # import_deps.  The wrapper is generated as source and eval()'d
        # so the original argument names/defaults are preserved.
        import_deps = self.import_deps
        spec = compat.inspect_getfullargspec(fn)
        spec_zero = list(spec[0])
        hasself = spec_zero[0] in ('self', 'cls')
        # Replace the dependency arg slots with references into import_deps.
        for i in range(len(import_deps)):
            spec[0][i + (1 if hasself else 0)] = "import_deps[%r]" % i
        inner_spec = format_argspec_plus(spec, grouped=False)
        # Strip the dependency args to form the caller-visible signature.
        for impname in import_deps:
            del spec_zero[1 if hasself else 0]
        spec[0][:] = spec_zero
        outer_spec = format_argspec_plus(spec, grouped=False)
        code = 'lambda %(args)s: fn(%(apply_kw)s)' % {
            "args": outer_spec['args'],
            "apply_kw": inner_spec['apply_kw']
        }
        decorated = eval(code, locals())
        decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__
        return update_wrapper(decorated, fn)

    @classmethod
    def resolve_all(cls, path):
        """Resolve every pending placeholder whose path starts with *path*."""
        for m in list(dependencies._unresolved):
            if m._full_path.startswith(path):
                m._resolve()

    _unresolved = set()
    _by_key = {}

    class _importlater(object):
        """Interned placeholder for a module attribute imported lazily."""

        _unresolved = set()
        _by_key = {}

        def __new__(cls, path, addtl):
            # Intern on the full dotted path so equal paths share one
            # placeholder instance.
            key = path + "." + addtl
            if key in dependencies._by_key:
                return dependencies._by_key[key]
            else:
                dependencies._by_key[key] = imp = object.__new__(cls)
                return imp

        def __init__(self, path, addtl):
            self._il_path = path
            self._il_addtl = addtl
            dependencies._unresolved.add(self)

        @property
        def _full_path(self):
            return self._il_path + "." + self._il_addtl

        @memoized_property
        def module(self):
            # Accessing the module before resolve_all() is a bug in the
            # caller; fail loudly rather than importing here.
            if self in dependencies._unresolved:
                raise ImportError(
                    "importlater.resolve_all() hasn't "
                    "been called (this is %s %s)"
                    % (self._il_path, self._il_addtl))
            return getattr(self._initial_import, self._il_addtl)

        def _resolve(self):
            dependencies._unresolved.discard(self)
            self._initial_import = compat.import_(
                self._il_path, globals(), locals(),
                [self._il_addtl])

        def __getattr__(self, key):
            if key == 'module':
                raise ImportError("Could not resolve module %s"
                                  % self._full_path)
            try:
                attr = getattr(self.module, key)
            except AttributeError:
                raise AttributeError(
                    "Module %s has no attribute '%s'" %
                    (self._full_path, key)
                )
            # Cache the attribute so __getattr__ only fires once per name.
            self.__dict__[key] = attr
            return attr
# from paste.deploy.converters
def asbool(obj):
    """Coerce *obj* to a boolean, accepting common true/false strings."""
    if isinstance(obj, compat.string_types):
        obj = obj.strip().lower()
        if obj in ('true', 'yes', 'on', 'y', 't', '1'):
            return True
        if obj in ('false', 'no', 'off', 'n', 'f', '0'):
            return False
        raise ValueError("String is not true/false: %r" % obj)
    return bool(obj)
def bool_or_str(*text):
    """Return a callable that will evaluate a string as
    boolean, or one of a set of "alternate" string values.
    """
    def bool_or_value(obj):
        # Pass through any of the allowed literal strings unchanged.
        return obj if obj in text else asbool(obj)
    return bool_or_value
def asint(value):
    """Coerce to integer, passing ``None`` through unchanged."""
    return value if value is None else int(value)
def coerce_kw_type(kw, key, type_, flexi_bool=True):
    """If 'key' is present in dict 'kw', coerce its value to type 'type\_' if
    necessary.  If 'flexi_bool' is True, the string '0' is considered false
    when coercing to boolean.
    """
    if key not in kw:
        return
    value = kw[key]
    # None and already-correct types are left untouched.
    if value is None or isinstance(value, type_):
        return
    if type_ is bool and flexi_bool:
        kw[key] = asbool(value)
    else:
        kw[key] = type_(value)
def constructor_copy(obj, cls, *args, **kw):
    """Instantiate cls using the __dict__ of obj as constructor arguments.

    Uses inspect to match the named arguments of ``cls``.
    """
    names = get_cls_kwargs(cls)
    # Explicit kw entries win; fill the rest from obj's instance dict.
    for k in names.difference(kw):
        if k in obj.__dict__:
            kw[k] = obj.__dict__[k]
    return cls(*args, **kw)
def counter():
    """Return a threadsafe counter function."""
    lock = compat.threading.Lock()
    seq = itertools.count(1)

    # named _next (not "next") to avoid the 2to3 "next" transformation
    def _next():
        with lock:
            return next(seq)

    return _next
def duck_type_collection(specimen, default=None):
    """Given an instance or class, guess if it is or is acting as one of
    the basic collection types: list, set and dict.  If the __emulates__
    property is present, return that preferentially.
    """
    if hasattr(specimen, '__emulates__'):
        # canonicalize set vs sets.Set to a standard: the builtin set
        if (specimen.__emulates__ is not None and
                issubclass(specimen.__emulates__, set)):
            return set
        return specimen.__emulates__

    # Use a subclass test for classes, an instance test otherwise.
    isa = issubclass if isinstance(specimen, type) else isinstance
    for collection_type in (list, set, dict):
        if isa(specimen, collection_type):
            return collection_type

    # Fall back to duck typing on the mutation method.
    if hasattr(specimen, 'append'):
        return list
    if hasattr(specimen, 'add'):
        return set
    if hasattr(specimen, 'set'):
        return dict
    return default
def assert_arg_type(arg, argtype, name):
    """Return *arg* if it is an instance of *argtype*; otherwise raise
    :class:`.exc.ArgumentError` naming the offending parameter *name*."""
    if isinstance(arg, argtype):
        return arg
    if isinstance(argtype, tuple):
        raise exc.ArgumentError(
            "Argument '%s' is expected to be one of type %s, got '%s'" %
            (name, ' or '.join("'%s'" % a for a in argtype), type(arg)))
    raise exc.ArgumentError(
        "Argument '%s' is expected to be of type '%s', got '%s'" %
        (name, argtype, type(arg)))
def dictlike_iteritems(dictlike):
    """Return a (key, value) iterator for almost any dict-like object."""
    # Real mapping protocols first.
    if compat.py3k:
        if hasattr(dictlike, 'items'):
            return list(dictlike.items())
    else:
        if hasattr(dictlike, 'iteritems'):
            return dictlike.iteritems()
        elif hasattr(dictlike, 'items'):
            return iter(dictlike.items())

    # Fall back to key iteration plus item lookup.
    getter = getattr(dictlike, '__getitem__', None)
    if getter is None:
        getter = getattr(dictlike, 'get', None)
    if getter is None:
        raise TypeError(
            "Object '%r' is not dict-like" % dictlike)

    if hasattr(dictlike, 'iterkeys'):
        def iterator():
            for key in dictlike.iterkeys():
                yield key, getter(key)
        return iterator()
    if hasattr(dictlike, 'keys'):
        return ((key, getter(key)) for key in dictlike.keys())
    raise TypeError(
        "Object '%r' is not dict-like" % dictlike)
class classproperty(property):
    """A decorator that behaves like @property except that operates
    on classes rather than instances.

    The decorator is currently special when using the declarative
    module, but note that the
    :class:`~.sqlalchemy.ext.declarative.declared_attr`
    decorator should be used for this purpose with declarative.
    """

    def __init__(self, fget, *arg, **kw):
        super(classproperty, self).__init__(fget, *arg, **kw)
        self.__doc__ = fget.__doc__

    def __get__(self, obj, owner):
        # Always hand the class (never the instance) to the getter.
        return self.fget(owner)
class hybridproperty(object):
    """Descriptor invoking its function with either the class (on
    class-level access) or the instance (on instance-level access)."""

    def __init__(self, func):
        self.func = func

    def __get__(self, instance, owner):
        if instance is not None:
            return self.func(instance)
        result = self.func(owner)
        # Copy the docstring onto the class-level result for Sphinx.
        result.__doc__ = self.func.__doc__
        return result
class hybridmethod(object):
    """Decorate a function as cls- or instance- level."""

    def __init__(self, func):
        self.func = func

    def __get__(self, instance, owner):
        if instance is None:
            # Bind the class itself as the first argument.
            return self.func.__get__(owner, owner.__class__)
        return self.func.__get__(instance, owner)
class _symbol(int):
    """Int subclass carrying a symbolic name; instances are created and
    interned via the module-level :class:`.symbol` factory."""

    def __new__(cls, name, doc=None, canonical=None):
        """Construct a new named symbol."""
        assert isinstance(name, compat.string_types)
        if canonical is None:
            canonical = hash(name)
        sym = int.__new__(_symbol, canonical)
        sym.name = name
        if doc:
            sym.__doc__ = doc
        return sym

    def __reduce__(self):
        # Pickle via the symbol() factory so unpickling returns the
        # interned instance for this name.
        return symbol, (self.name, "x", int(self))

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return "symbol(%r)" % self.name

_symbol.__name__ = 'symbol'
class symbol(object):
    """A constant symbol.

    >>> symbol('foo') is symbol('foo')
    True
    >>> symbol('foo')
    symbol('foo')

    A slight refinement of the MAGICCOOKIE=object() pattern.  The primary
    advantage of symbol() is its repr().  They are also singletons.

    Repeated calls of symbol('name') will all return the same instance.

    The optional ``doc`` argument assigns to ``__doc__``.  This
    is strictly so that Sphinx autoattr picks up the docstring we want
    (it doesn't appear to pick up the in-module docstring if the datamember
    is in a different module - autoattribute also blows up completely).
    If Sphinx fixes/improves this then we would no longer need
    ``doc`` here.
    """
    symbols = {}
    _lock = compat.threading.Lock()

    def __new__(cls, name, doc=None, canonical=None):
        # Hold the lock around the whole check-and-create so concurrent
        # callers cannot intern two instances for the same name.
        # (Previously the lock was acquired via ``cls._lock`` but released
        # via ``symbol._lock`` -- inconsistent, and the wrong lock would be
        # released if a subclass defined its own ``_lock``.)
        with cls._lock:
            sym = cls.symbols.get(name)
            if sym is None:
                cls.symbols[name] = sym = _symbol(name, doc, canonical)
            return sym
_creation_order = 1


def set_creation_order(instance):
    """Assign a '_creation_order' sequence to the given instance.

    This allows multiple instances to be sorted in order of creation
    (typically within a single thread; the counter is not particularly
    threadsafe).
    """
    global _creation_order
    order = _creation_order
    _creation_order = order + 1
    instance._creation_order = order
def warn_exception(func, *args, **kwargs):
    """Execute ``func``, converting any raised exception into a warning
    (and returning ``None`` in that case)."""
    try:
        return func(*args, **kwargs)
    except Exception:
        exc_type, exc_value = sys.exc_info()[0:2]
        warn("%s('%s') ignored" % (exc_type, exc_value))
def ellipses_string(value, len_=25):
    """Truncate *value* to *len_* characters, appending ``...``; values
    without a length (e.g. ints) are returned unchanged."""
    try:
        if len(value) <= len_:
            return value
        return "%s..." % value[0:len_]
    except TypeError:
        return value
class _hash_limit_string(compat.text_type):
    """A string subclass whose hash collapses onto at most *num* distinct
    values.

    Used for warnings so that parameterized warning messages can be sent
    without the ``__warningregistry__`` of the module, or the
    non-overridable "once" registry within warnings.py, growing without
    bound.
    """

    def __new__(cls, value, num, args):
        suffix = " (this warning may be suppressed after %d occurrences)" % num
        interpolated = (value % args) + suffix
        self = super(_hash_limit_string, cls).__new__(cls, interpolated)
        # Bucket the hash by the un-interpolated template plus a modulus
        # of the interpolated text, bounding distinct hashes to *num*.
        self._hash = hash("%s_%d" % (value, hash(interpolated) % num))
        return self

    def __hash__(self):
        return self._hash

    def __eq__(self, other):
        return hash(self) == hash(other)
def warn(msg):
    """Issue a warning.

    If msg is a string, :class:`.exc.SAWarning` is used as
    the category.
    """
    # stacklevel=2 attributes the warning to our caller, not this helper.
    warnings.warn(msg, exc.SAWarning, stacklevel=2)
def warn_limited(msg, args):
    """Issue a warning with a parameterized string, limiting the number
    of registrations.
    """
    if args:
        # _hash_limit_string caps distinct hash values at 10, so the
        # warnings-module "once" registry cannot grow without bound.
        msg = _hash_limit_string(msg, 10, args)
    warnings.warn(msg, exc.SAWarning, stacklevel=2)
def only_once(fn):
    """Decorate the given function to be a no-op after it is called exactly
    once."""
    remaining = [fn]

    def go(*arg, **kw):
        # Pop destroys the reference, making later calls no-ops.
        if remaining:
            return remaining.pop()(*arg, **kw)

    return go
_SQLA_RE = re.compile(r'sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py')
_UNITTEST_RE = re.compile(r'unit(?:2|test2?/)')


def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE):
    """Chop extraneous lines off beginning and end of a traceback.

    :param tb:
      a list of traceback lines as returned by ``traceback.format_stack()``
    :param exclude_prefix:
      a regular expression object matching lines to skip at beginning of
      ``tb``
    :param exclude_suffix:
      a regular expression object matching lines to skip at end of ``tb``
    """
    first = 0
    last = len(tb) - 1
    # Advance past matching leading lines, then trim matching trailing ones.
    while first <= last and exclude_prefix.search(tb[first]):
        first += 1
    while first <= last and exclude_suffix.search(tb[last]):
        last -= 1
    return tb[first:last + 1]
NoneType = type(None)


def attrsetter(attrname):
    """Return a function equivalent to
    ``lambda obj, value: setattr(obj, attrname, value)``, generated so the
    assignment is a direct attribute-set statement."""
    source = "def set(obj, value): obj.%s = value" % attrname
    namespace = locals().copy()
    exec(source, namespace)
    return namespace['set']
class EnsureKWArgType(type):
    """Metaclass applying translation of functions to accept ``**kw``
    arguments if they don't already.

    Classes using this metaclass declare ``ensure_kwarg``, a regular
    expression matched against method names; any matching method lacking a
    ``**kw`` parameter is wrapped so extra keyword arguments are accepted
    (and silently dropped).
    """

    def __init__(cls, clsname, bases, clsdict):
        fn_reg = cls.ensure_kwarg
        if fn_reg:
            for key in clsdict:
                m = re.match(fn_reg, key)
                if m:
                    fn = clsdict[key]
                    if not cls._accepts_kwargs(fn):
                        clsdict[key] = wrapped = cls._wrap_w_kw(fn)
                        setattr(cls, key, wrapped)
        super(EnsureKWArgType, cls).__init__(clsname, bases, clsdict)

    @staticmethod
    def _accepts_kwargs(fn):
        # inspect.getargspec() was removed in Python 3.11; prefer
        # getfullargspec (its ``varkw`` member replaces ``keywords``) and
        # fall back only on very old interpreters.
        try:
            return inspect.getfullargspec(fn).varkw is not None
        except AttributeError:
            return inspect.getargspec(fn).keywords is not None

    def _wrap_w_kw(self, fn):
        # Accept and discard any keyword arguments.
        def wrap(*arg, **kw):
            return fn(*arg)
        return update_wrapper(wrap, fn)
| mit |
Jgarcia-IAS/localizacion | openerp/addons/website_mail/tests/test_controllers.py | 390 | 1644 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.tests.common import TestMail
from openerp.addons.website_mail.controllers.main import WebsiteMail
from openerp.tools import mute_logger, email_split
class TestControllers(TestMail):
    def test_00_subscribe(self):
        """Skeleton test for website mail partner subscription; the
        actual partner-creation assertions are commented out below,
        presumably pending a usable request context -- TODO confirm."""
        # from openerp.addons.web.http import request
        # print request
        # cr/uid kept so the commented block can be re-enabled quickly.
        cr, uid = self.cr, self.uid
        # context = { }
        # email = 'Marcel Dupuis <marcel.dupuis@example.com>'
        # website_mail = WebsiteMail()
        # pid = website_mail._find_or_create_partner(email, context)
        # partner = self.res_partner.browse(cr, uid, pid)
        # print partner.name, partner.email
| agpl-3.0 |
candrews/portage | pym/portage/tests/lafilefixer/test_lafilefixer.py | 17 | 6498 | # test_lafilefixer.py -- Portage Unit Testing Functionality
# Copyright 2010 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
from portage.exception import InvalidData
class test_lafilefixer(TestCase):
	"""Tests for portage.util.lafilefixer.rewrite_lafile().

	Fixtures are raw ``.la`` (libtool archive) file contents as bytes.
	"""

	def get_test_cases_clean(self):
		"""Yield .la contents that rewrite_lafile() must leave unchanged."""
		yield b"dlname='libfoo.so.1'\n" + \
			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
			b"old_library='libpdf.a'\n" + \
			b"dependency_libs=' -lm'\n" + \
			b"current=6\n" + \
			b"age=0\n" + \
			b"revision=2\n" + \
			b"installed=yes\n" + \
			b"dlopen=''\n" + \
			b"dlpreopen=''\n" + \
			b"libdir='/usr/lib64'\n"
		yield b"dlname='libfoo.so.1'\n" + \
			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
			b"old_library='libpdf.a'\n" + \
			b"dependency_libs=' -lm'\n" + \
			b"current=6\n" + \
			b"age=0\n" + \
			b"revision=2\n" + \
			b"installed=yes\n" + \
			b"dlopen=''\n" + \
			b"dlpreopen=''\n" + \
			b"libdir='/usr/lib64'\n"
		yield b"dependency_libs=' liba.la /usr/lib64/bar.la -lc'\n"

	def get_test_cases_update(self):
		"""Yield (original, expected) pairs that rewrite_lafile() must fix."""
		#.la -> -l*
		yield b"dlname='libfoo.so.1'\n" + \
			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
			b"old_library='libpdf.a'\n" + \
			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc'\n", \
			b"dlname='libfoo.so.1'\n" + \
			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
			b"old_library='libpdf.a'\n" + \
			b"dependency_libs=' -L/usr/lib64 -la -lb -lc'\n"
		#move stuff into inherited_linker_flags
		yield b"dlname='libfoo.so.1'\n" + \
			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
			b"old_library='libpdf.a'\n" + \
			b"dependency_libs=' /usr/lib64/liba.la -pthread /usr/lib64/libb.la -lc'\n" + \
			b"inherited_linker_flags=''\n", \
			b"dlname='libfoo.so.1'\n" + \
			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
			b"old_library='libpdf.a'\n" + \
			b"dependency_libs=' -L/usr/lib64 -la -lb -lc'\n" + \
			b"inherited_linker_flags=' -pthread'\n"
		#reorder
		yield b"dlname='libfoo.so.1'\n" + \
			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
			b"old_library='libpdf.a'\n" + \
			b"dependency_libs=' /usr/lib64/liba.la -R/usr/lib64 /usr/lib64/libb.la -lc'\n", \
			b"dlname='libfoo.so.1'\n" + \
			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
			b"old_library='libpdf.a'\n" + \
			b"dependency_libs=' -R/usr/lib64 -L/usr/lib64 -la -lb -lc'\n"
		#remove duplicates from dependency_libs (the original version didn't do it for inherited_linker_flags)
		yield b"dlname='libfoo.so.1'\n" + \
			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
			b"old_library='libpdf.a'\n" + \
			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libc.la -pthread -mt" + \
			b" -L/usr/lib -R/usr/lib64 -lc /usr/lib64/libb.la -lc'\n" +\
			b"inherited_linker_flags=' -pthread -pthread'\n", \
			b"dlname='libfoo.so.1'\n" + \
			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
			b"old_library='libpdf.a'\n" + \
			b"dependency_libs=' -R/usr/lib64 -L/usr/lib64 -L/usr/lib -la -lc -lb'\n" +\
			b"inherited_linker_flags=' -pthread -pthread -mt'\n"
		#-L rewriting
		yield b"dependency_libs=' -L/usr/X11R6/lib'\n", \
			b"dependency_libs=' -L/usr/lib'\n"
		yield b"dependency_libs=' -L/usr/local/lib'\n", \
			b"dependency_libs=' -L/usr/lib'\n"
		yield b"dependency_libs=' -L/usr/lib64/pkgconfig/../..'\n", \
			b"dependency_libs=' -L/usr'\n"
		yield b"dependency_libs=' -L/usr/lib/pkgconfig/..'\n", \
			b"dependency_libs=' -L/usr/lib'\n"
		yield b"dependency_libs=' -L/usr/lib/pkgconfig/../.. -L/usr/lib/pkgconfig/..'\n", \
			b"dependency_libs=' -L/usr -L/usr/lib'\n"
		#we once got a backtrace on this one
		yield b"dependency_libs=' /usr/lib64/libMagickCore.la -L/usr/lib64 -llcms2 /usr/lib64/libtiff.la " + \
			b"-ljbig -lc /usr/lib64/libfreetype.la /usr/lib64/libjpeg.la /usr/lib64/libXext.la " + \
			b"/usr/lib64/libXt.la /usr/lib64/libSM.la -lICE -luuid /usr/lib64/libICE.la /usr/lib64/libX11.la " + \
			b"/usr/lib64/libxcb.la /usr/lib64/libXau.la /usr/lib64/libXdmcp.la -lbz2 -lz -lm " + \
			b"/usr/lib/gcc/x86_64-pc-linux-gnu/4.4.4/libgomp.la -lrt -lpthread /usr/lib64/libltdl.la -ldl " + \
			b"/usr/lib64/libfpx.la -lstdc++'", \
			b"dependency_libs=' -L/usr/lib64 -L/usr/lib/gcc/x86_64-pc-linux-gnu/4.4.4 -lMagickCore -llcms2 " + \
			b"-ltiff -ljbig -lc -lfreetype -ljpeg -lXext -lXt -lSM -lICE -luuid -lX11 -lxcb -lXau -lXdmcp " + \
			b"-lbz2 -lz -lm -lgomp -lrt -lpthread -lltdl -ldl -lfpx -lstdc++'"

	def get_test_cases_broken(self):
		"""Yield malformed .la contents that must raise InvalidData."""
		yield b""
		#no dependency_libs
		yield b"dlname='libfoo.so.1'\n" + \
			b"current=6\n" + \
			b"age=0\n" + \
			b"revision=2\n"
		#broken dependency_libs
		yield b"dlname='libfoo.so.1'\n" + \
			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
			b"old_library='libpdf.a'\n" + \
			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc' \n"
		#broken dependency_libs
		yield b"dlname='libfoo.so.1'\n" + \
			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
			b"old_library='libpdf.a'\n" + \
			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc\n"
		#crap in dependency_libs
		yield b"dlname='libfoo.so.1'\n" + \
			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
			b"old_library='libpdf.a'\n" + \
			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n"
		#dependency_libs twice
		yield b"dlname='libfoo.so.1'\n" + \
			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
			b"old_library='libpdf.a'\n" + \
			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n" +\
			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n"
		#inherited_linker_flags twice
		yield b"dlname='libfoo.so.1'\n" + \
			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
			b"old_library='libpdf.a'\n" + \
			b"inherited_linker_flags=''\n" +\
			b"inherited_linker_flags=''\n"

	def testlafilefixer(self):
		"""Run all three fixture groups through rewrite_lafile()."""
		from portage.util.lafilefixer import _parse_lafile_contents, rewrite_lafile

		# Clean inputs: (False, None) means "no rewrite needed".
		for clean_contents in self.get_test_cases_clean():
			self.assertEqual(rewrite_lafile(clean_contents), (False, None))

		for original_contents, fixed_contents in self.get_test_cases_update():
			self.assertEqual(rewrite_lafile(original_contents), (True, fixed_contents))

		for broken_contents in self.get_test_cases_broken():
			self.assertRaises(InvalidData, rewrite_lafile, broken_contents)
| gpl-2.0 |
mustajarvi/slick | slick/src/sphinx/exts/includecode.py | 121 | 5444 | import os
import codecs
from os import path
from docutils import nodes
from docutils.parsers.rst import Directive, directives
class IncludeCode(Directive):
    """
    Include a code example from a file with sections delimited with special comments.

    Sections are marked in the source file with lines like ``//#section-name``;
    the directive can include/exclude named sections, strip common leading
    indentation, and prepend/append extra lines.
    """

    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'section': directives.unchanged_required,
        'comment': directives.unchanged_required,
        'marker': directives.unchanged_required,
        'include': directives.unchanged_required,
        'exclude': directives.unchanged_required,
        'hideexcludes': directives.flag,
        'linenos': directives.flag,
        'language': directives.unchanged_required,
        'encoding': directives.encoding,
        'prepend': directives.unchanged_required,
        'append': directives.unchanged_required,
    }

    def run(self):
        """Read the target file, filter it by section markers and return a
        literal-block node."""
        document = self.state.document
        arg0 = self.arguments[0]
        # The argument may carry a section suffix: path/to/file#section
        (filename, sep, section) = arg0.partition('#')
        if not document.settings.file_insertion_enabled:
            return [document.reporter.warning('File insertion disabled',
                                              line=self.lineno)]
        env = document.settings.env
        if filename.startswith('/') or filename.startswith(os.sep):
            rel_fn = filename[1:]
        else:
            docdir = path.dirname(env.doc2path(env.docname, base=None))
            rel_fn = path.join(docdir, filename)
        try:
            fn = path.join(env.srcdir, rel_fn)
        except UnicodeDecodeError:
            # the source directory is a bytestring with non-ASCII characters;
            # let's try to encode the rel_fn in the file system encoding
            # (requires the module-level ``import sys``).
            rel_fn = rel_fn.encode(sys.getfilesystemencoding())
            fn = path.join(env.srcdir, rel_fn)

        encoding = self.options.get('encoding', env.config.source_encoding)
        codec_info = codecs.lookup(encoding)
        try:
            # NOTE(review): 'U' (universal-newline) mode was removed in
            # Python 3.11; switch to plain 'r' once Python 2 support is
            # dropped -- confirm target interpreter range.
            f = codecs.StreamReaderWriter(open(fn, 'U'),
                    codec_info[2], codec_info[3], 'strict')
            lines = f.readlines()
            f.close()
        except (IOError, OSError):
            return [document.reporter.warning(
                'Include file %r not found or reading it failed' % filename,
                line=self.lineno)]
        except UnicodeError:
            return [document.reporter.warning(
                'Encoding %r used for reading included file %r seems to '
                'be wrong, try giving an :encoding: option' %
                (encoding, filename))]

        comment = self.options.get('comment', '//')
        marker = self.options.get('marker', comment + '#')
        lenm = len(marker)
        if not section:
            section = self.options.get('section')
        include_sections = self.options.get('include', '')
        exclude_sections = self.options.get('exclude', '')
        include = set(include_sections.split(',')) if include_sections else set()
        exclude = set(exclude_sections.split(',')) if exclude_sections else set()
        hideexcludes = 'hideexcludes' in self.options
        if section:
            include |= set([section])

        within = set()          # section names currently open
        res = []
        excluding = False
        for line in lines:
            index = line.find(marker)
            if index >= 0:
                section_name = line[index + lenm:].strip()
                if section_name in within:
                    # Second occurrence of a marker closes its section.
                    within ^= set([section_name])
                    if excluding and not (exclude & within):
                        excluding = False
                else:
                    within |= set([section_name])
                    if not excluding and (exclude & within):
                        excluding = True
                        if not hideexcludes:
                            res.append(' ' * index + comment + ' ' + section_name.replace('-', ' ') + ' ...\n')
            elif not (exclude & within) and (not include or (include & within)):
                res.append(line)
        lines = res

        # Strip the largest common leading-space prefix.  List
        # comprehensions (rather than filter/map) keep ``lines`` a real
        # list on Python 3, where filter/map return iterators and would
        # break the ``min(...) if tabcounts`` test and lines.insert below.
        def countwhile(predicate, iterable):
            count = 0
            for x in iterable:
                if predicate(x):
                    count += 1
                else:
                    return count
        nonempty = [l for l in lines if l.strip()]
        tabcounts = [countwhile(lambda c: c == ' ', l) for l in nonempty]
        tabshift = min(tabcounts) if tabcounts else 0
        if tabshift > 0:
            lines = [l[tabshift:] if len(l) > tabshift else l for l in lines]

        prepend = self.options.get('prepend')
        append = self.options.get('append')
        if prepend:
            lines.insert(0, prepend + '\n')
        if append:
            lines.append(append + '\n')

        text = ''.join(lines)
        retnode = nodes.literal_block(text, text, source=fn)
        retnode.line = 1
        retnode.attributes['line_number'] = self.lineno
        language = self.options.get('language')
        if language:
            retnode['language'] = language
        if 'linenos' in self.options:
            retnode['linenos'] = True
        document.settings.env.note_dependency(rel_fn)
        return [retnode]
def setup(app):
    """Sphinx extension entry point: register the ``includecode`` directive."""
    app.require_sphinx('1.0')
    app.add_directive('includecode', IncludeCode)
| bsd-2-clause |
KurtDeGreeff/infernal-twin | build/reportlab/src/reportlab/graphics/charts/barcharts.py | 29 | 66757 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/barcharts.py
__version__=''' $Id$ '''
__doc__="""This module defines a variety of Bar Chart components.
The basic flavors are stacked and side-by-side, available in horizontal and
vertical versions.
"""
import copy, functools
from reportlab.lib import colors
from reportlab.lib.validators import isNumber, isNumberOrNone, isColor, isColorOrNone, isString,\
isListOfStrings, SequenceOf, isBoolean, isNoneOrShape, isStringOrNone,\
NoneOr, isListOfNumbersOrNone, EitherOr, OneOf
from reportlab.graphics.widgets.markers import uSymbol2Symbol, isSymbol
from reportlab.lib.formatters import Formatter
from reportlab.lib.attrmap import AttrMap, AttrMapValue
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection, PropHolder
from reportlab.graphics.shapes import Line, Rect, Group, Drawing, NotImplementedError
from reportlab.graphics.charts.axes import XCategoryAxis, YValueAxis, YCategoryAxis, XValueAxis
from reportlab.graphics.charts.textlabels import BarChartLabel, NA_Label, NoneOrInstanceOfNA_Label
from reportlab.graphics.charts.areas import PlotArea
from reportlab.graphics.charts.legends import _objStr
class BarChartProperties(PropHolder):
    """Per-bar style holder; instances live in a BarChart's ``bars``
    TypedPropertyCollection."""
    _attrMap = AttrMap(
        strokeColor = AttrMapValue(isColorOrNone, desc='Color of the bar border.'),
        fillColor = AttrMapValue(isColorOrNone, desc='Color of the bar interior area.'),
        strokeWidth = AttrMapValue(isNumber, desc='Width of the bar border.'),
        strokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array of a line.'),
        symbol = AttrMapValue(None, desc='A widget to be used instead of a normal bar.',advancedUsage=1),
        name = AttrMapValue(isString, desc='Text to be associated with a bar (eg seriesname)'),
        swatchMarker = AttrMapValue(NoneOr(isSymbol), desc="None or makeMarker('Diamond') ...",advancedUsage=1),
        minDimen = AttrMapValue(isNumberOrNone, desc='minimum width/height that will be drawn.'),
        )

    def __init__(self):
        # Defaults: solid blue bars with a thin, colorless border.
        self.strokeColor = None
        self.fillColor = colors.blue
        self.strokeWidth = 0.5
        self.symbol = None
        self.strokeDashArray = None
# Bar chart classes.
class BarChart(PlotArea):
"Abstract base class, unusable by itself."
_attrMap = AttrMap(BASE=PlotArea,
useAbsolute = AttrMapValue(EitherOr((isBoolean,EitherOr((isString,isNumber)))), desc='Flag to use absolute spacing values; use string of gsb for finer control\n(g=groupSpacing,s=barSpacing,b=barWidth).',advancedUsage=1),
barWidth = AttrMapValue(isNumber, desc='The width of an individual bar.'),
groupSpacing = AttrMapValue(isNumber, desc='Width between groups of bars.'),
barSpacing = AttrMapValue(isNumber, desc='Width between individual bars.'),
bars = AttrMapValue(None, desc='Handle of the individual bars.'),
valueAxis = AttrMapValue(None, desc='Handle of the value axis.'),
categoryAxis = AttrMapValue(None, desc='Handle of the category axis.'),
data = AttrMapValue(None, desc='Data to be plotted, list of (lists of) numbers.'),
barLabels = AttrMapValue(None, desc='Handle to the list of bar labels.'),
barLabelFormat = AttrMapValue(None, desc='Formatting string or function used for bar labels.'),
barLabelCallOut = AttrMapValue(None, desc='Callout function(label)\nlabel._callOutInfo = (self,g,rowNo,colNo,x,y,width,height,x00,y00,x0,y0)',advancedUsage=1),
barLabelArray = AttrMapValue(None, desc='explicit array of bar label values, must match size of data if present.'),
reversePlotOrder = AttrMapValue(isBoolean, desc='If true, reverse common category plot order.',advancedUsage=1),
naLabel = AttrMapValue(NoneOrInstanceOfNA_Label, desc='Label to use for N/A values.',advancedUsage=1),
annotations = AttrMapValue(None, desc='list of callables, will be called with self, xscale, yscale.'),
categoryLabelBarSize = AttrMapValue(isNumber, desc='width to leave for a category label to go between categories.'),
categoryLabelBarOrder = AttrMapValue(OneOf('first','last','auto'), desc='where any label bar should appear first/last'),
barRecord = AttrMapValue(None, desc='callable(bar,label=labelText,value=value,**kwds) to record bar information', advancedUsage=1),
zIndexOverrides = AttrMapValue(isStringOrNone, desc='''None (the default ie use old z ordering scheme) or a ',' separated list of key=value (int/float) for new zIndex ordering. If used defaults are
background=0,
categoryAxis=1,
valueAxis=2,
bars=3,
barLabels=4,
categoryAxisGrid=5,
valueAxisGrid=6,
annotations=7'''),
categoryNALabel = AttrMapValue(NoneOrInstanceOfNA_Label, desc='Label to use for a group of N/A values.',advancedUsage=1),
)
def makeSwatchSample(self, rowNo, x, y, width, height):
baseStyle = self.bars
styleIdx = rowNo % len(baseStyle)
style = baseStyle[styleIdx]
strokeColor = getattr(style, 'strokeColor', getattr(baseStyle,'strokeColor',None))
fillColor = getattr(style, 'fillColor', getattr(baseStyle,'fillColor',None))
strokeDashArray = getattr(style, 'strokeDashArray', getattr(baseStyle,'strokeDashArray',None))
strokeWidth = getattr(style, 'strokeWidth', getattr(style, 'strokeWidth',None))
swatchMarker = getattr(style, 'swatchMarker', getattr(baseStyle, 'swatchMarker',None))
if swatchMarker:
return uSymbol2Symbol(swatchMarker,x+width/2.,y+height/2.,fillColor)
return Rect(x,y,width,height,strokeWidth=strokeWidth,strokeColor=strokeColor,
strokeDashArray=strokeDashArray,fillColor=fillColor)
def getSeriesName(self,i,default=None):
    """Return the name of series *i*, or *default* when none is set."""
    seriesStyle = self.bars[i]
    return _objStr(getattr(seriesStyle, 'name', default))
def __init__(self):
    """Set up axes, example data, spacing and per-series bar styles.

    BarChart is abstract: ``_flipXY`` comes from the Vertical/Horizontal
    subclasses and decides which axis class plays the category role.
    """
    assert self.__class__.__name__ not in ('BarChart','BarChart3D'), 'Abstract Class %s Instantiated' % self.__class__.__name__
    # category axis runs along y for horizontal charts, x for vertical
    if self._flipXY:
        self.categoryAxis = YCategoryAxis()
        self.valueAxis = XValueAxis()
    else:
        self.categoryAxis = XCategoryAxis()
        self.valueAxis = YValueAxis()
    PlotArea.__init__(self)
    self.barSpacing = 0
    self.reversePlotOrder = 0
    # this defines two series of 3 points. Just an example.
    self.data = [(100,110,120,130),
                (70, 80, 85, 90)]
    # control bar spacing. is useAbsolute = 1 then
    # the next parameters are in points; otherwise
    # they are 'proportions' and are normalized to
    # fit the available space.  Half a barSpacing
    # is allocated at the beginning and end of the
    # chart.
    self.useAbsolute = 0   #- not done yet
    self.barWidth = 10
    self.groupSpacing = 5
    self.barSpacing = 0
    self.barLabels = TypedPropertyCollection(BarChartLabel)
    self.barLabels.boxAnchor = 'c'
    self.barLabels.textAnchor = 'middle'
    self.barLabelFormat = None
    self.barLabelArray = None
    # this says whether the origin is inside or outside
    # the bar - +10 means put the origin ten points
    # above the tip of the bar if value > 0, or ten
    # points inside if bar value < 0.  This is different
    # to label dx/dy which are not dependent on the
    # sign of the data.
    self.barLabels.nudge = 0
    # if you have multiple series, by default they butt
    # together.
    # we really need some well-designed default lists of
    # colors e.g. from Tufte.  These will be used in a
    # cycle to set the fill color of each series.
    self.bars = TypedPropertyCollection(BarChartProperties)
    self.bars.strokeWidth = 1
    self.bars.strokeColor = colors.black
    self.bars.strokeDashArray = None
    self.bars[0].fillColor = colors.red
    self.bars[1].fillColor = colors.green
    self.bars[2].fillColor = colors.blue
    self.naLabel = self.categoryNALabel = None
    self.zIndexOverrides = None
def demo(self):
    """Shows basic use of a bar chart"""
    if self.__class__.__name__ == 'BarChart':
        raise NotImplementedError('Abstract Class BarChart has no demo')
    # instantiate the concrete subclass with its defaults
    d = Drawing(200, 100)
    d.add(self.__class__())
    return d
def _getConfigureData(self):
    """Build the data handed to axis ``configure()``.

    For stacked styles the value axis must see the running totals, so
    alongside the raw series two extra rows are appended: one of
    positive cumulative sums and one of negative cumulative sums per
    category.  The result is stashed on ``self._configureData``.
    """
    cA = self.categoryAxis
    data = self.data
    if cA.style not in ('parallel','parallel_3d'):
        _data = data
        data = max(list(map(len,_data)))*[0]    # positive totals per category
        ndata = data[:]                         # negative totals per category
        for d in _data:
            for i in xrange(len(d)):
                v = d[i] or 0                   # treat None/0 as zero contribution
                if v<=-1e-6:
                    ndata[i] += v
                else:
                    data[i] += v
        data = list(_data) + [data] + [ndata]
    self._configureData = data
def _getMinMax(self):
    """Attempt to return the data range."""
    self._getConfigureData()
    axis = self.valueAxis
    axis._setRange(self._configureData)
    return axis._valueMin, axis._valueMax
def _drawBegin(self, org, length):
    """Position and configure the value axis; return the crossing value."""
    axis = self.valueAxis
    axis.setPosition(self.x, self.y, length)
    self._getConfigureData()
    axis.configure(self._configureData)
    # put the category axis at zero when zero lies inside the plotted
    # range, otherwise at the low end
    cross = axis.scale(0)
    if not (org <= cross <= org + length):
        cross = org
    return cross
def _drawFinish(self):
    """Finalize the drawing of a barchart.

    With no ``zIndexOverrides`` the components are drawn in the
    classic fixed order; otherwise the user-supplied comma-separated
    ``key=value`` string re-ranks them and they are emitted in
    ascending z order.

    Returns the assembled Group; ``self._configureData`` is consumed.
    """
    cA = self.categoryAxis
    vA = self.valueAxis
    cA.configure(self._configureData)
    self.calcBarPositions()
    g = Group()
    zIndex = getattr(self,'zIndexOverrides',None)
    if not zIndex:
        # classic ordering: background, grids, bars, axes, late grids, annotations
        g.add(self.makeBackground())
        cAdgl = getattr(cA,'drawGridLast',False)
        vAdgl = getattr(vA,'drawGridLast',False)
        if not cAdgl: cA.makeGrid(g,parent=self, dim=vA.getGridDims)
        if not vAdgl: vA.makeGrid(g,parent=self, dim=cA.getGridDims)
        g.add(self.makeBars())
        g.add(cA)
        g.add(vA)
        if cAdgl: cA.makeGrid(g,parent=self, dim=vA.getGridDims)
        if vAdgl: vA.makeGrid(g,parent=self, dim=cA.getGridDims)
        for a in getattr(self,'annotations',()): g.add(a(self,cA.scale,vA.scale))
    else:
        # default z ranks; user string overrides individual entries
        Z=dict(
            background=0,
            categoryAxis=1,
            valueAxis=2,
            bars=3,
            barLabels=4,
            categoryAxisGrid=5,
            valueAxisGrid=6,
            annotations=7,
            )
        for z in zIndex.strip().split(','):
            z = z.strip()
            if not z: continue
            try:
                k,v=z.split('=')
            except:
                raise ValueError('Badly formatted zIndex clause %r in %r\nallowed variables are\n%s' % (z,zIndex,'\n'.join(['%s=%r'% (k,Z[k]) for k in sorted(Z.keys())])))
            # bug fix: message used to format the Z dict rather than the
            # user's zIndex string
            if k not in Z:
                raise ValueError('Unknown zIndex variable %r in %r\nallowed variables are\n%s' % (k,zIndex,'\n'.join(['%s=%r'% (k,Z[k]) for k in sorted(Z.keys())])))
            try:
                v = eval(v,{})  #only constants allowed
                assert isinstance(v,(float,int))
            except:
                # bug fix: format string had 3 specifiers for 4 arguments,
                # so raising this used to raise TypeError instead
                raise ValueError('Bad zIndex value %r in clause %r of zIndex %r\nallowed variables are\n%s' % (v,z,zIndex,'\n'.join(['%s=%r'% (k,Z[k]) for k in sorted(Z.keys())])))
            Z[k] = v
        Z = [(v,k) for k,v in Z.items()]
        Z.sort()
        b = self.makeBars()
        bl = b.contents.pop(-1)     # bar labels are the last sub-group
        for v,k in Z:
            if k=='background':
                g.add(self.makeBackground())
            elif k=='categoryAxis':
                g.add(cA)
            elif k=='categoryAxisGrid':
                cA.makeGrid(g,parent=self, dim=vA.getGridDims)
            elif k=='valueAxis':
                g.add(vA)
            elif k=='valueAxisGrid':
                vA.makeGrid(g,parent=self, dim=cA.getGridDims)
            elif k=='bars':
                g.add(b)
            elif k=='barLabels':
                g.add(bl)
            elif k=='annotations':
                for a in getattr(self,'annotations',()): g.add(a(self,cA.scale,vA.scale))
    del self._configureData
    return g
def calcBarPositions(self):
    """Works out where they go. default vertical.

    Sets an attribute _barPositions which is a list of
    lists of (x, y, width, height) matching the data.

    Bar/group/spacing widths may each be 'absolute' (points) or
    proportional, per ``useAbsolute``; proportional dimensions are
    normalised so one group fits the category slot.
    """
    flipXY = self._flipXY
    if flipXY:
        org = self.y
    else:
        org = self.x
    cA = self.categoryAxis
    cScale = cA.scale
    data = self.data
    seriesCount = self._seriesCount = len(data)
    self._rowLength = rowLength = max(list(map(len,data)))
    wG = self.groupSpacing
    barSpacing = self.barSpacing
    barWidth = self.barWidth
    clbs = getattr(self,'categoryLabelBarSize',0)
    clbo = getattr(self,'categoryLabelBarOrder','auto')
    if clbo=='auto': clbo = flipXY and 'last' or 'first'
    clbo = clbo=='first'
    style = cA.style
    if style=='parallel':
        # side-by-side: all series plus inter-bar gaps share the slot
        wB = seriesCount*barWidth
        wS = (seriesCount-1)*barSpacing
        bGapB = barWidth
        bGapS = barSpacing
    else:
        # stacked: one bar per category; track running +/- totals
        accumNeg = rowLength*[0]
        accumPos = rowLength*[0]
        wB = barWidth
        wS = bGapB = bGapS = 0
    self._groupWidth = groupWidth = wG+wB+wS
    useAbsolute = self.useAbsolute
    if useAbsolute:
        # string form names which of bars/group/spacing are absolute;
        # encode as a bitmask b=1, g=2, s=4
        if not isinstance(useAbsolute,str):
            useAbsolute = 7 #all three are fixed
        else:
            useAbsolute = 0 + 1*('b' in useAbsolute)+2*('g' in useAbsolute)+4*('s' in useAbsolute)
    else:
        useAbsolute = 0
    aW0 = float(cScale(0)[1])   # width of one category slot
    aW = aW0 - clbs             # less any category label bar
    # scale factors for barWidth (fB), groupSpacing (fG), barSpacing (fS)
    if useAbsolute==0: #case 0 all are free
        self._normFactor = fB = fG = fS = aW/groupWidth
    elif useAbsolute==7: #all fixed
        fB = fG = fS = 1.0
        _cscale = cA._scale
    elif useAbsolute==1: #case 1 barWidth is fixed
        fB = 1.0
        fG = fS = (aW-wB)/(wG+wS)
    elif useAbsolute==2: #groupspacing is fixed
        fG=1.0
        fB = fS = (aW-wG)/(wB+wS)
    elif useAbsolute==3: #groupspacing & barwidth are fixed
        fB = fG = 1.0
        fS = (aW-wG-wB)/wS if wS else 0
    elif useAbsolute==4: #barspacing is fixed
        fS=1.0
        fG = fB = (aW-wS)/(wG+wB)
    elif useAbsolute==5: #barspacing & barWidth are fixed
        fS = fB = 1.0
        fG = (aW-wB-wS)/wG
    elif useAbsolute==6: #barspacing & groupspacing are fixed
        fS = fG = 1
        fB = (aW-wS-wG)/wB
    self._normFactorB = fB
    self._normFactorG = fG
    self._normFactorS = fS
    # 'Baseline' correction: bars grow from zero when zero is inside
    # the value range, otherwise from the nearer range limit
    vA = self.valueAxis
    vScale = vA.scale
    vm, vM = vA._valueMin, vA._valueMax
    if vm <= 0 <= vM:
        baseLine = vScale(0)
    elif 0 < vm:
        baseLine = vScale(vm)
    elif vM < 0:
        baseLine = vScale(vM)
    self._baseLine = baseLine
    nC = max(list(map(len,data)))
    width = barWidth*fB
    offs = 0.5*wG*fG
    bGap = bGapB*fB+bGapS*fS
    if clbs:
        if clbo: #the label bar comes first
            lbpf = (offs+clbs/6.0)/aW0
            offs += clbs
        else:
            lbpf = (offs+wB*fB+wS*fS+clbs/6.0)/aW0
        cA.labels.labelPosFrac = lbpf
    self._barPositions = []
    aBP = self._barPositions.append
    reversePlotOrder = self.reversePlotOrder
    for rowNo in xrange(seriesCount):
        barRow = []
        if reversePlotOrder:
            xVal = seriesCount-1 - rowNo
        else:
            xVal = rowNo
        xVal = offs + xVal*bGap     # offset of this series within its group
        row = data[rowNo]
        for colNo in xrange(nC):
            datum = row[colNo]
            # Ufff...
            if useAbsolute==7:
                x = groupWidth*_cscale(colNo) + xVal + org
            else:
                (g, _) = cScale(colNo)
                x = g + xVal
            if datum is None:
                # missing value: record a None-height placeholder
                height = None
                y = baseLine
            else:
                if style not in ('parallel','parallel_3d'):
                    # stacked: start from the running total of this sign
                    if datum<=-1e-6:
                        y = vScale(accumNeg[colNo])
                        if y>baseLine: y = baseLine
                        accumNeg[colNo] = accumNeg[colNo] + datum
                        datum = accumNeg[colNo]
                    else:
                        y = vScale(accumPos[colNo])
                        if y<baseLine: y = baseLine
                        accumPos[colNo] = accumPos[colNo] + datum
                        datum = accumPos[colNo]
                else:
                    y = baseLine
                height = vScale(datum) - y
                # keep a sliver of height so zero values still render
                if -1e-8<height<=1e-8:
                    height = 1e-8
                    if datum<-1e-8: height = -1e-8
            barRow.append(flipXY and (y,x,height,width) or (x,y,width,height))
        aBP(barRow)
def _getLabelText(self, rowNo, colNo):
    """Return the formatted label text for one bar (or None)."""
    fmt = self.barLabelFormat
    if fmt is None:
        return None
    if fmt == 'values':
        # explicit per-bar strings supplied by the user
        return self.barLabelArray[rowNo][colNo]
    if type(fmt) is str:
        return fmt % self.data[rowNo][colNo]
    if hasattr(fmt, '__call__'):
        return fmt(self.data[rowNo][colNo])
    msg = "Unknown formatter type %s, expected string or function" % fmt
    raise Exception(msg)
def _labelXY(self,label,x,y,width,height):
'Compute x, y for a label'
nudge = label.nudge
bt = getattr(label,'boxTarget','normal')
anti = bt=='anti'
if anti: nudge = -nudge
pm = value = height
if anti: value = 0
a = x + 0.5*width
nudge = (height>=0 and 1 or -1)*nudge
if bt=='mid':
b = y+height*0.5
elif bt=='hi':
if value>=0:
b = y + value + nudge
else:
b = y - nudge
pm = -pm
elif bt=='lo':
if value<=0:
b = y + value + nudge
else:
b = y - nudge
pm = -pm
else:
b = y + value + nudge
label._pmv = pm #the plus minus val
return a,b,pm
def _addBarLabel(self, g, rowNo, colNo, x, y, width, height):
    """Attach the formatted label for one bar, if any, to group *g*."""
    labelText = self._getLabelText(rowNo, colNo)
    if labelText:
        self._addLabel(labelText, self.barLabels[(rowNo, colNo)], g,
                       rowNo, colNo, x, y, width, height)
def _addNABarLabel(self, g, rowNo, colNo, x, y, width, height, calcOnly=False, na=None):
    """Place the N/A label for a missing value; with calcOnly return its position."""
    if na is None:
        na = self.naLabel
    if not (na and na.text):
        return
    na = copy.copy(na)
    # missing dimensions become a signed sliver so the label still anchors
    sliver = -1e-8 if self.valueAxis._valueMax <= 0 else 1e-8
    if width is None:
        width = sliver
    if height is None:
        height = sliver
    return self._addLabel(na.text, na, g, rowNo, colNo, x, y, width, height, calcOnly=calcOnly)
def _addLabel(self, text, label, g, rowNo, colNo, x, y, width, height, calcOnly=False):
    """Position *label* relative to one bar and add it to group *g*.

    With calcOnly=True only the computed (x, y) origin is returned.
    fixedStart/fixedEnd on the label optionally pin the tie-line ends;
    when pm<0 the label is mirrored through its dx/dy offset so it sits
    on the correct side of a negative bar.
    """
    if label.visible:
        labelWidth = stringWidth(text, label.fontName, label.fontSize)
        flipXY = self._flipXY
        # x/y and width/height swap roles on horizontal charts
        if flipXY:
            y0, x0, pm = self._labelXY(label,y,x,height,width)
        else:
            x0, y0, pm = self._labelXY(label,x,y,width,height)
        fixedEnd = getattr(label,'fixedEnd', None)
        if fixedEnd is not None:
            v = fixedEnd._getValue(self,pm)
            x00, y00 = x0, y0
            if flipXY:
                x0 = v
            else:
                y0 = v
        else:
            # default tie-line start: centre of the bar's cross axis
            if flipXY:
                x00 = x0
                y00 = y+height/2.0
            else:
                x00 = x+width/2.0
                y00 = y0
        fixedStart = getattr(label,'fixedStart', None)
        if fixedStart is not None:
            v = fixedStart._getValue(self,pm)
            if flipXY:
                x00 = v
            else:
                y00 = v
        if pm<0:
            # mirror the configured offset for labels below/left of the bar
            if flipXY:
                dx = -2*label.dx
                dy = 0
            else:
                dy = -2*label.dy
                dx = 0
        else:
            dy = dx = 0
        if calcOnly: return x0+dx, y0+dy
        label.setOrigin(x0+dx, y0+dy)
        label.setText(text)
        sC, sW = label.lineStrokeColor, label.lineStrokeWidth
        # tie line goes underneath everything else in this group
        if sC and sW: g.insert(0,Line(x00,y00,x0,y0, strokeColor=sC, strokeWidth=sW))
        g.add(label)
        alx = getattr(self,'barLabelCallOut',None)
        if alx:
            # give the callback full placement context, then clean up
            label._callOutInfo = (self,g,rowNo,colNo,x,y,width,height,x00,y00,x0,y0)
            alx(label)
            del label._callOutInfo
def _makeBar(self, g, x, y, width, height, rowNo, style):
    """Append one plain rectangle for a bar to group *g*."""
    bar = Rect(x, y, width, height)
    bar.strokeWidth = style.strokeWidth
    bar.fillColor = style.fillColor
    bar.strokeColor = style.strokeColor
    if style.strokeDashArray:
        bar.strokeDashArray = style.strokeDashArray
    g.add(bar)
def _makeBars(self,g,lg):
    """Emit all bars into group *g* and all labels into group *lg*.

    Missing values get an N/A label; when categoryNALabel is set, a
    category whose every series value is missing gets one combined
    label instead of one per series.
    """
    lenData = len(self.data)
    bars = self.bars
    br = getattr(self,'barRecord',None)
    BP = self._barPositions
    flipXY = self._flipXY
    catNAL = self.categoryNALabel
    catNNA = {}
    if catNAL:
        CBL = []                    # categories already given a combined N/A label
        rowNoL = lenData - 1
        #find all the categories that have at least one value
        for rowNo in xrange(lenData):
            row = BP[rowNo]
            for colNo in xrange(len(row)):
                x, y, width, height = row[colNo]
                if None not in (width,height):
                    catNNA[colNo] = 1
    for rowNo in xrange(lenData):
        row = BP[rowNo]
        styleCount = len(bars)
        styleIdx = rowNo % styleCount
        rowStyle = bars[styleIdx]
        for colNo in xrange(len(row)):
            # per-bar style override falls back to the per-series style
            style = (styleIdx,colNo) in bars and bars[(styleIdx,colNo)] or rowStyle
            x, y, width, height = row[colNo]
            if None in (width,height):
                if not catNAL or colNo in catNNA:
                    self._addNABarLabel(lg,rowNo,colNo,x,y,width,height)
                elif catNAL and colNo not in CBL:
                    # whole category is N/A: centre one label between the
                    # first and last series positions
                    r0 = self._addNABarLabel(lg,rowNo,colNo,x,y,width,height,True,catNAL)
                    if r0:
                        x, y, width, height = BP[rowNoL][colNo]
                        r1 = self._addNABarLabel(lg,rowNoL,colNo,x,y,width,height,True,catNAL)
                        x = (r0[0]+r1[0])/2.0
                        y = (r0[1]+r1[1])/2.0
                        self._addNABarLabel(lg,rowNoL,colNo,x,y,0.0001,0.0001,na=catNAL)
                        CBL.append(colNo)
                continue
            # Draw a rectangular symbol for each data item,
            # or a normal colored rectangle.
            symbol = None
            if hasattr(style, 'symbol'):
                symbol = copy.deepcopy(style.symbol)
            elif hasattr(self.bars, 'symbol'):
                symbol = self.bars.symbol
            minDimen=getattr(style,'minDimen',None)
            if minDimen:
                # enforce a minimum visible thickness, preserving sign
                if flipXY:
                    if width<0:
                        width = min(-style.minDimen,width)
                    else:
                        width = max(style.minDimen,width)
                else:
                    if height<0:
                        height = min(-style.minDimen,height)
                    else:
                        height = max(style.minDimen,height)
            if symbol:
                symbol.x = x
                symbol.y = y
                symbol.width = width
                symbol.height = height
                g.add(symbol)
            elif abs(width)>1e-7 and abs(height)>=1e-7 and (style.fillColor is not None or style.strokeColor is not None):
                self._makeBar(g,x,y,width,height,rowNo,style)
                if br: br(g.contents[-1],label=self._getLabelText(rowNo,colNo),value=self.data[rowNo][colNo],rowNo=rowNo,colNo=colNo)
            self._addBarLabel(lg,rowNo,colNo,x,y,width,height)
def _computeLabelPosition(self, text, label, rowNo, colNo, x, y, width, height):
    """Position *label* for one bar without drawing; return (pm, bounds).

    NOTE(review): this duplicates the placement logic of _addLabel minus
    the actual drawing — keep the two in sync if either changes.
    """
    if label.visible:
        labelWidth = stringWidth(text, label.fontName, label.fontSize)
        flipXY = self._flipXY
        # x/y and width/height swap roles on horizontal charts
        if flipXY:
            y0, x0, pm = self._labelXY(label,y,x,height,width)
        else:
            x0, y0, pm = self._labelXY(label,x,y,width,height)
        fixedEnd = getattr(label,'fixedEnd', None)
        if fixedEnd is not None:
            v = fixedEnd._getValue(self,pm)
            x00, y00 = x0, y0
            if flipXY:
                x0 = v
            else:
                y0 = v
        else:
            if flipXY:
                x00 = x0
                y00 = y+height/2.0
            else:
                x00 = x+width/2.0
                y00 = y0
        fixedStart = getattr(label,'fixedStart', None)
        if fixedStart is not None:
            v = fixedStart._getValue(self,pm)
            if flipXY:
                x00 = v
            else:
                y00 = v
        if pm<0:
            # mirror the offset for labels on the negative side
            if flipXY:
                dx = -2*label.dx
                dy = 0
            else:
                dy = -2*label.dy
                dx = 0
        else:
            dy = dx = 0
        label.setOrigin(x0+dx, y0+dy)
        label.setText(text)
        return pm,label.getBounds()
def _computeBarPositions(self):
    """Lay out both axes and compute the bar rectangles (for charts
    that want to reason about the space around bars)."""
    catAxis = self.categoryAxis
    valAxis = self.valueAxis
    if valAxis:
        valAxis.joinAxis = catAxis
    if catAxis:
        catAxis.joinAxis = valAxis
    if self._flipXY:
        catAxis.setPosition(self._drawBegin(self.x, self.width), self.y, self.height)
    else:
        catAxis.setPosition(self.x, self._drawBegin(self.y, self.height), self.width)
    catAxis.configure(self._configureData)
    self.calcBarPositions()
def _computeMaxSpace(self,size,required):
    '''helper for madmen who want to put stuff inside their barcharts
    basically after _computebarPositions we slide a line of length size
    down the bar profile on either side of the bars to find the
    maximum space. If the space at any point is >= required then we're
    done. Otherwise we return the largest space location and amount.

    Returns (maxS, maxP): the best free depth found and the rectangle
    (x0, y0, x1, y1, side) describing it, or (very-negative, None).
    '''
    flipXY = self._flipXY
    self._computeBarPositions()
    lenData = len(self.data)
    BP = self._barPositions
    # C collects, per bar, (span-low, span-high, gap-below, gap-above)
    # along the category direction
    C = []
    aC = C.append
    if flipXY:
        lo = self.x
        hi = lo + self.width
        end = self.y+self.height
        for i in xrange(lenData):
            for x, y, w, h in BP[i]:
                v = x+w
                z = y+h
                aC((min(y,z),max(y,z), min(x,v) - lo, hi - max(x,v)))
    else:
        lo = self.y
        hi = lo + self.height
        end = self.x+self.width
        for i in xrange(lenData):
            for x, y, w, h in BP[i]:
                v = y+h
                z = x+w
                aC((min(x,z), max(x,z), min(y,v) - lo, hi - max(y,v)))
    C.sort()
    # merge overlapping spans, keeping the tightest gaps
    R = [C[0]]
    for c in C:
        r = R[-1]
        if r[0]<c[1] and c[0]<r[1]: #merge overlapping space
            R[-1] = (min(r[0],c[0]),max(r[1],c[1]),min(r[2],c[2]),min(r[3],c[3]))
        else:
            R.append(c)
    C = R
    maxS = -0x7fffffff
    maxP = None
    nC = len(C)
    # slide a window of length `size` along the merged spans
    for i,ci in enumerate(C):
        v0 = ci[0]
        v1 = v0+size
        if v1>end: break
        j = i
        alo = ahi = 0x7fffffff
        while j<nC and C[j][1]<=v1:
            alo = min(C[j][2],alo)
            ahi = min(C[j][3],ahi)
            j += 1
        if alo>ahi:
            # the low/left side has more room here
            if alo>maxS:
                maxS = alo
                maxP = flipXY and (lo,v0,lo+alo,v0+size,0) or (v0,lo,v0+size,lo+alo,0)
                if maxS >= required: break
        elif ahi>maxS:
            maxS = ahi
            maxP = flipXY and (hi-ahi,v0,hi,v0+size,1) or (v0,hi-ahi,v0+size,hi,1)
            if maxS >= required: break
    return maxS, maxP
def _computeSimpleBarLabelPositions(self):
    """Information function, can be called by charts which want to mess with labels

    Lays out the chart, then returns a list (one entry per series) of
    lists (one per category) holding either None or the
    (pm, bounds) pair from _computeLabelPosition.
    """
    cA, vA = self.categoryAxis, self.valueAxis
    if vA: vA.joinAxis = cA
    if cA: cA.joinAxis = vA
    if self._flipXY:
        cA.setPosition(self._drawBegin(self.x,self.width), self.y, self.height)
    else:
        cA.setPosition(self.x, self._drawBegin(self.y,self.height), self.width)
    cA.configure(self._configureData)
    self.calcBarPositions()
    lenData = len(self.data)
    bars = self.bars
    R = [].append       # bound-method trick: R(...) appends, R.__self__ is the list
    BP = self._barPositions
    for rowNo in xrange(lenData):
        row = BP[rowNo]
        C = [].append
        for colNo in xrange(len(row)):
            x, y, width, height = row[colNo]
            if None in (width,height):
                # missing value: position the N/A label if one is configured
                na = self.naLabel
                if na and na.text:
                    na = copy.copy(na)
                    v = self.valueAxis._valueMax<=0 and -1e-8 or 1e-8
                    if width is None: width = v
                    if height is None: height = v
                    C(self._computeLabelPosition(na.text, na, rowNo, colNo, x, y, width, height))
                else:
                    C(None)
            else:
                text = self._getLabelText(rowNo,colNo)
                if text:
                    C(self._computeLabelPosition(text, self.barLabels[(rowNo, colNo)], rowNo, colNo, x, y, width, height))
                else:
                    C(None)
        R(C.__self__)
    return R.__self__
def makeBars(self):
    """Return a Group of all bars, with their labels in a sub-group on top."""
    barGroup = Group()
    labelGroup = Group()
    self._makeBars(barGroup, labelGroup)
    # labels go last so they draw above the bars
    barGroup.add(labelGroup)
    return barGroup
def _desiredCategoryAxisLength(self):
    """For dynamically computing the desired category axis length."""
    data = self.data
    nSeries = len(data)
    nCategories = max(list(map(len, data)))
    if self.categoryAxis.style == 'parallel':
        # side-by-side bars: every series plus the gaps between them
        groupWidth = nSeries*self.barWidth + (nSeries-1)*self.barSpacing
    else:
        groupWidth = self.barWidth
    return nCategories * (self.groupSpacing + groupWidth)
def draw(self):
    """Join and position both axes, then render the chart."""
    catAxis = self.categoryAxis
    valAxis = self.valueAxis
    if valAxis:
        valAxis.joinAxis = catAxis
    if catAxis:
        catAxis.joinAxis = valAxis
    if self._flipXY:
        catAxis.setPosition(self._drawBegin(self.x, self.width), self.y, self.height)
    else:
        catAxis.setPosition(self.x, self._drawBegin(self.y, self.height), self.width)
    return self._drawFinish()
class VerticalBarChart(BarChart):
    "Vertical bar chart with multiple side-by-side bars."
    # categories run along x, values along y
    _flipXY = 0
class HorizontalBarChart(BarChart):
    "Horizontal bar chart with multiple side-by-side bars."
    # categories run along y, values along x
    _flipXY = 1
class _FakeGroup:
def __init__(self, cmp=None):
self._data = []
self._key = functools.cmp_to_key(cmp)
def add(self,what):
self._data.append(what)
def value(self):
return self._data
def sort(self):
self._data.sort(key=self._key)
class BarChart3D(BarChart):
    """Abstract 3D bar chart: adds a z dimension via the theta_x/theta_y
    shears.  _makeBar/_addBarLabel defer drawing by emitting tagged
    tuples into a _FakeGroup, which makeBars depth-sorts before
    rendering with _draw_3d_bar.
    """
    _attrMap = AttrMap(BASE=BarChart,
        theta_x = AttrMapValue(isNumber, desc='dx/dz'),
        theta_y = AttrMapValue(isNumber, desc='dy/dz'),
        zDepth = AttrMapValue(isNumber, desc='depth of an individual series'),
        zSpace = AttrMapValue(isNumber, desc='z gap around series'),
        )
    theta_x = .5
    theta_y = .5
    zDepth = None   # defaults to barWidth when unset
    zSpace = None   # defaults to barSpacing when unset

    def calcBarPositions(self):
        """Extend the 2D layout with the total 3D depth offsets."""
        BarChart.calcBarPositions(self)
        seriesCount = self._seriesCount
        zDepth = self.zDepth
        if zDepth is None: zDepth = self.barWidth
        zSpace = self.zSpace
        if zSpace is None: zSpace = self.barSpacing
        if self.categoryAxis.style=='parallel_3d':
            # each series occupies its own z slab
            _3d_depth = seriesCount*zDepth+(seriesCount+1)*zSpace
        else:
            _3d_depth = zDepth + 2*zSpace
        _3d_depth *= self._normFactor
        self._3d_dx = self.theta_x*_3d_depth
        self._3d_dy = self.theta_y*_3d_depth

    def _calc_z0(self,rowNo):
        """Return the front z coordinate of series *rowNo*."""
        zDepth = self.zDepth
        if zDepth is None: zDepth = self.barWidth
        zSpace = self.zSpace
        if zSpace is None: zSpace = self.barSpacing
        if self.categoryAxis.style=='parallel_3d':
            z0 = self._normFactor*(rowNo*(zDepth+zSpace)+zSpace)
        else:
            z0 = self._normFactor*zSpace
        return z0

    def _makeBar(self,g,x,y,width,height,rowNo,style):
        """Queue one bar as a tagged tuple (tag 0) for deferred 3D drawing."""
        zDepth = self.zDepth
        if zDepth is None: zDepth = self.barWidth
        zSpace = self.zSpace
        if zSpace is None: zSpace = self.barSpacing
        z0 = self._calc_z0(rowNo)
        z1 = z0 + zDepth*self._normFactor
        if width<0:
            # normalise to a positive width
            x += width
            width = -width
        # shear the origin by the depth offset
        x += z0*self.theta_x
        y += z0*self.theta_y
        if self._flipXY:
            y += zSpace
        else:
            x += zSpace
        g.add((0,z0,z1,x,y,width,height,rowNo,style))

    def _addBarLabel(self, g, rowNo, colNo, x, y, width, height):
        """Queue one bar label as a tagged tuple (tag 1) for deferred drawing."""
        z0 = self._calc_z0(rowNo)
        zSpace = self.zSpace
        if zSpace is None: zSpace = self.barSpacing
        z1 = z0
        x += z0*self.theta_x
        y += z0*self.theta_y
        if self._flipXY:
            y += zSpace
        else:
            x += zSpace
        g.add((1,z0,z1,x,y,width,height,rowNo,colNo))

    def makeBars(self):
        """Depth-sort the queued tuples and render bars then labels."""
        from reportlab.graphics.charts.utils3d import _draw_3d_bar
        fg = _FakeGroup(cmp=self._cmpZ)
        self._makeBars(fg,fg)
        fg.sort()
        g = Group()
        theta_x = self.theta_x
        theta_y = self.theta_y
        if self.categoryAxis.style == 'stacked':
            # NOTE(review): list.reverse() returns None; the assignment is
            # dead but the in-place reversal of fg's list is the real effect
            fg_value=fg.value().reverse()
        for t in fg.value():
            if t[0]==0:
                z0,z1,x,y,width,height,rowNo,style = t[1:]
                dz = z1 - z0
                _draw_3d_bar(g, x, x+width, y, y+height, dz*theta_x, dz*theta_y,
                            fillColor=style.fillColor, fillColorShaded=None,
                            strokeColor=style.strokeColor, strokeWidth=style.strokeWidth,
                            shading=0.45)
        for t in fg.value():
            if t[0]==1:
                z0,z1,x,y,width,height,rowNo,colNo = t[1:]
                BarChart._addBarLabel(self,g,rowNo,colNo,x,y,width,height)
        return g
class VerticalBarChart3D(BarChart3D,VerticalBarChart):
    # depth-sort comparator for the deferred-drawing tuples: back-to-front
    # on z0, then by x, tag and -y (relies on the compat cmp builtin)
    _cmpZ=lambda self,a,b:cmp((-a[1],a[3],a[0],-a[4]),(-b[1],b[3],b[0],-b[4]))
class HorizontalBarChart3D(BarChart3D,HorizontalBarChart):
    # same depth sort as the vertical variant with x/y roles swapped
    _cmpZ = lambda self,a,b: cmp((-a[1],a[4],a[0],-a[3]),(-b[1],b[4],b[0],-b[3])) #t, z0, z1, x, y = a[:5]
# Vertical samples.
def sampleV0a():
    """A slightly pathologic bar chart with only TWO data items."""
    drawing = Drawing(400, 200)
    chart = VerticalBarChart()
    chart.x, chart.y = 50, 50
    chart.width, chart.height = 300, 125
    chart.data = [(13, 20)]
    chart.strokeColor = colors.black
    va = chart.valueAxis
    va.valueMin, va.valueMax, va.valueStep = 0, 60, 15
    ca = chart.categoryAxis
    ca.labels.boxAnchor = 'ne'
    ca.labels.dx = 8
    ca.labels.dy = -2
    ca.labels.angle = 30
    ca.categoryNames = ['Ying', 'Yang']
    drawing.add(chart)
    return drawing
def sampleV0b():
    """A pathologic bar chart with only ONE data item."""
    drawing = Drawing(400, 200)
    chart = VerticalBarChart()
    chart.x, chart.y = 50, 50
    chart.width, chart.height = 300, 125
    chart.data = [(42,)]
    chart.strokeColor = colors.black
    va = chart.valueAxis
    va.valueMin, va.valueMax, va.valueStep = 0, 50, 15
    ca = chart.categoryAxis
    ca.labels.boxAnchor = 'ne'
    ca.labels.dx = 8
    ca.labels.dy = -2
    ca.labels.angle = 30
    ca.categoryNames = ['Jan-99']
    drawing.add(chart)
    return drawing
def sampleV0c():
    """A really pathologic bar chart with NO data items at all!"""
    drawing = Drawing(400, 200)
    chart = VerticalBarChart()
    chart.x, chart.y = 50, 50
    chart.width, chart.height = 300, 125
    chart.data = [()]
    chart.strokeColor = colors.black
    va = chart.valueAxis
    va.valueMin, va.valueMax, va.valueStep = 0, 60, 15
    ca = chart.categoryAxis
    ca.labels.boxAnchor = 'ne'
    ca.labels.dx = 8
    ca.labels.dy = -2
    ca.categoryNames = []
    drawing.add(chart)
    return drawing
def sampleV1():
    """Sample of multi-series bar chart."""
    drawing = Drawing(400, 200)
    chart = VerticalBarChart()
    chart.x, chart.y = 50, 50
    chart.width, chart.height = 300, 125
    chart.data = [
        (13, 5, 20, 22, 37, 45, 19, 4),
        (14, 6, 21, 23, 38, 46, 20, 5)
        ]
    chart.strokeColor = colors.black
    va = chart.valueAxis
    va.valueMin, va.valueMax, va.valueStep = 0, 60, 15
    ca = chart.categoryAxis
    ca.labels.boxAnchor = 'ne'
    ca.labels.dx = 8
    ca.labels.dy = -2
    ca.labels.angle = 30
    # month names with a fixed year suffix
    ca.categoryNames = [m + '-99' for m in 'Jan Feb Mar Apr May Jun Jul Aug'.split(' ')]
    drawing.add(chart)
    return drawing
def sampleV2a():
    """Sample of multi-series bar chart."""
    drawing = Drawing(400, 200)
    chart = VerticalBarChart()
    chart.x, chart.y = 50, 50
    chart.width, chart.height = 300, 120
    chart.data = [(2.4, -5.7, 2, 5, 9.2),
                  (0.6, -4.9, -3, 4, 6.8)
                  ]
    chart.barSpacing = 0
    chart.groupSpacing = 10
    chart.barWidth = 10
    va = chart.valueAxis
    va.valueMin, va.valueMax, va.valueStep = -15, +15, 5
    va.labels.fontName = 'Helvetica'
    va.labels.fontSize = 8
    va.labels.boxAnchor = 'n'   # irrelevant (becomes 'c')
    va.labels.textAnchor = 'middle'
    ca = chart.categoryAxis
    ca.categoryNames = ("Q3 2000", "Year to Date", "12 months",
                        "Annualised\n3 years", "Since 07.10.99")
    ca.labels.fontName = 'Helvetica'
    ca.labels.fontSize = 8
    ca.labels.dy = -60
    drawing.add(chart)
    return drawing
def sampleV2b():
    """Sample of multi-series bar chart."""
    drawing = Drawing(400, 200)
    chart = VerticalBarChart()
    chart.x, chart.y = 50, 50
    chart.width, chart.height = 300, 120
    chart.data = [(2.4, -5.7, 2, 5, 9.2),
                  (0.6, -4.9, -3, 4, 6.8)
                  ]
    chart.barSpacing = 5
    chart.groupSpacing = 10
    chart.barWidth = 10
    va = chart.valueAxis
    va.valueMin, va.valueMax, va.valueStep = -15, +15, 5
    va.labels.fontName = 'Helvetica'
    va.labels.fontSize = 8
    va.labels.boxAnchor = 'n'   # irrelevant (becomes 'c')
    va.labels.textAnchor = 'middle'
    ca = chart.categoryAxis
    ca.categoryNames = ("Q3 2000", "Year to Date", "12 months",
                        "Annualised\n3 years", "Since 07.10.99")
    ca.labels.fontName = 'Helvetica'
    ca.labels.fontSize = 8
    ca.labels.dy = -60
    drawing.add(chart)
    return drawing
def sampleV2c():
    """Sample of multi-series bar chart with bar labels."""
    drawing = Drawing(400, 200)
    chart = VerticalBarChart()
    chart.x, chart.y = 50, 50
    chart.width, chart.height = 300, 120
    chart.data = [(2.4, -5.7, 2, 5, 9.99),
                  (0.6, -4.9, -3, 4, 9.99)
                  ]
    chart.barSpacing = 2
    chart.groupSpacing = 10
    chart.barWidth = 10
    va = chart.valueAxis
    va.valueMin, va.valueMax, va.valueStep = -15, +15, 5
    va.labels.fontName = 'Helvetica'
    va.labels.fontSize = 8
    va.labels.boxAnchor = 'n'
    va.labels.textAnchor = 'middle'
    ca = chart.categoryAxis
    ca.categoryNames = ("Q3 2000", "Year to Date", "12 months",
                        "Annualised\n3 years", "Since 07.10.99")
    ca.labels.fontName = 'Helvetica'
    ca.labels.fontSize = 8
    ca.labels.dy = -60
    chart.barLabelFormat = '%0.2f'
    bl = chart.barLabels
    bl.nudge = 10
    bl.dx = 0
    bl.dy = 0
    bl.boxAnchor = 'n'   # irrelevant (becomes 'c')
    bl.fontName = 'Helvetica'
    bl.fontSize = 6
    drawing.add(chart)
    return drawing
def sampleV3():
    """Faked horizontal bar chart using a vertical real one (deprecated)."""
    names = ("UK Equities", "US Equities", "European Equities", "Japanese Equities",
             "Pacific (ex Japan) Equities", "Emerging Markets Equities",
             "UK Bonds", "Overseas Bonds", "UK Index-Linked", "Cash")
    series1 = (-1.5, 0.3, 0.5, 1.0, 0.8, 0.7, 0.4, 0.1, 1.0, 0.3)
    series2 = (0.0, 0.33, 0.55, 1.1, 0.88, 0.77, 0.44, 0.11, 1.10, 0.33)
    assert len(names) == len(series1), "bad data"
    assert len(names) == len(series2), "bad data"
    drawing = Drawing(400, 200)
    chart = VerticalBarChart()
    chart.x, chart.y = 0, 0
    chart.width, chart.height = 150, 100
    chart.data = (series1,)
    chart.bars.fillColor = colors.green
    chart.barLabelFormat = '%0.2f'
    bl = chart.barLabels
    bl.dx = 0
    bl.dy = 0
    bl.boxAnchor = 'w'   # irrelevant (becomes 'c')
    bl.angle = 90
    bl.fontName = 'Helvetica'
    bl.fontSize = 6
    bl.nudge = 10
    va = chart.valueAxis
    va.visible = 0
    va.valueMin, va.valueMax, va.valueStep = -2, +2, 1
    ca = chart.categoryAxis
    ca.tickUp = 0
    ca.tickDown = 0
    ca.categoryNames = names
    ca.labels.angle = 90
    ca.labels.boxAnchor = 'w'
    ca.labels.dx = 0
    ca.labels.dy = -125
    ca.labels.fontName = 'Helvetica'
    ca.labels.fontSize = 6
    # rotate the whole chart to fake a horizontal orientation
    g = Group(chart)
    g.translate(100, 175)
    g.rotate(-90)
    drawing.add(g)
    return drawing
def sampleV4a():
    """A bar chart showing value axis region starting at *exactly* zero."""
    drawing = Drawing(400, 200)
    chart = VerticalBarChart()
    chart.x, chart.y = 50, 50
    chart.width, chart.height = 300, 125
    chart.data = [(13, 20)]
    chart.strokeColor = colors.black
    va = chart.valueAxis
    va.valueMin, va.valueMax, va.valueStep = 0, 60, 15
    ca = chart.categoryAxis
    ca.labels.boxAnchor = 'n'
    ca.labels.dy = -5
    ca.categoryNames = ['Ying', 'Yang']
    drawing.add(chart)
    return drawing
def sampleV4b():
    """A bar chart showing value axis region starting *below* zero."""
    drawing = Drawing(400, 200)
    chart = VerticalBarChart()
    chart.x, chart.y = 50, 50
    chart.width, chart.height = 300, 125
    chart.data = [(13, 20)]
    chart.strokeColor = colors.black
    va = chart.valueAxis
    va.valueMin, va.valueMax, va.valueStep = -10, 60, 15
    ca = chart.categoryAxis
    ca.labels.boxAnchor = 'n'
    ca.labels.dy = -5
    ca.categoryNames = ['Ying', 'Yang']
    drawing.add(chart)
    return drawing
def sampleV4c():
    """A bar chart showing value axis region starting *above* zero."""
    drawing = Drawing(400, 200)
    chart = VerticalBarChart()
    chart.x, chart.y = 50, 50
    chart.width, chart.height = 300, 125
    chart.data = [(13, 20)]
    chart.strokeColor = colors.black
    va = chart.valueAxis
    va.valueMin, va.valueMax, va.valueStep = 10, 60, 15
    ca = chart.categoryAxis
    ca.labels.boxAnchor = 'n'
    ca.labels.dy = -5
    ca.categoryNames = ['Ying', 'Yang']
    drawing.add(chart)
    return drawing
def sampleV4d():
    """A bar chart showing value axis region entirely *below* zero."""
    drawing = Drawing(400, 200)
    chart = VerticalBarChart()
    chart.x, chart.y = 50, 50
    chart.width, chart.height = 300, 125
    chart.data = [(-13, -20)]
    chart.strokeColor = colors.black
    va = chart.valueAxis
    va.valueMin, va.valueMax, va.valueStep = -30, -10, 15
    ca = chart.categoryAxis
    ca.labels.boxAnchor = 'n'
    ca.labels.dy = -5
    ca.categoryNames = ['Ying', 'Yang']
    drawing.add(chart)
    return drawing
###
##dataSample5 = [(10, 20), (20, 30), (30, 40), (40, 50), (50, 60)]
##dataSample5 = [(10, 60), (20, 50), (30, 40), (40, 30), (50, 20)]
# shared demo data for the sampleV5* functions: four two-value series
dataSample5 = [(10, 60), (20, 50), (30, 40), (40, 30)]
def sampleV5a():
    """A simple bar chart with no expressed spacing attributes."""
    drawing = Drawing(400, 200)
    chart = VerticalBarChart()
    chart.x, chart.y = 50, 50
    chart.width, chart.height = 300, 125
    chart.data = dataSample5
    chart.strokeColor = colors.black
    va = chart.valueAxis
    va.valueMin, va.valueMax, va.valueStep = 0, 60, 15
    ca = chart.categoryAxis
    ca.labels.boxAnchor = 'n'
    ca.labels.dy = -5
    ca.categoryNames = ['Ying', 'Yang']
    drawing.add(chart)
    return drawing
def sampleV5b():
    """A simple bar chart with proportional spacing."""
    drawing = Drawing(400, 200)
    chart = VerticalBarChart()
    chart.x, chart.y = 50, 50
    chart.width, chart.height = 300, 125
    chart.data = dataSample5
    chart.strokeColor = colors.black
    chart.useAbsolute = 0
    chart.barWidth = 40
    chart.groupSpacing = 20
    chart.barSpacing = 10
    va = chart.valueAxis
    va.valueMin, va.valueMax, va.valueStep = 0, 60, 15
    ca = chart.categoryAxis
    ca.labels.boxAnchor = 'n'
    ca.labels.dy = -5
    ca.categoryNames = ['Ying', 'Yang']
    drawing.add(chart)
    return drawing
def sampleV5c1():
    """The same simple bar chart, but with absolute spacing."""
    drawing = Drawing(400, 200)
    chart = VerticalBarChart()
    chart.x, chart.y = 50, 50
    chart.width, chart.height = 300, 125
    chart.data = dataSample5
    chart.strokeColor = colors.black
    chart.useAbsolute = 1
    chart.barWidth = 40
    chart.groupSpacing = 0
    chart.barSpacing = 0
    va = chart.valueAxis
    va.valueMin, va.valueMax, va.valueStep = 0, 60, 15
    ca = chart.categoryAxis
    ca.labels.boxAnchor = 'n'
    ca.labels.dy = -5
    ca.categoryNames = ['Ying', 'Yang']
    drawing.add(chart)
    return drawing
def sampleV5c2():
    """The same simple bar chart, but with absolute group spacing."""
    drawing = Drawing(400, 200)
    chart = VerticalBarChart()
    chart.x, chart.y = 50, 50
    chart.width, chart.height = 300, 125
    chart.data = dataSample5
    chart.strokeColor = colors.black
    chart.useAbsolute = 1
    chart.barWidth = 40
    chart.groupSpacing = 20
    chart.barSpacing = 0
    va = chart.valueAxis
    va.valueMin, va.valueMax, va.valueStep = 0, 60, 15
    ca = chart.categoryAxis
    ca.labels.boxAnchor = 'n'
    ca.labels.dy = -5
    ca.categoryNames = ['Ying', 'Yang']
    drawing.add(chart)
    return drawing
def sampleV5c3():
    "Make same simple bar chart but with absolute spacing."

    drawing = Drawing(400, 200)

    data = dataSample5

    bc = VerticalBarChart()
    bc.x = 50
    bc.y = 50
    bc.height = 125
    bc.width = 300
    bc.data = data
    bc.strokeColor = colors.black

    # Absolute spacing: gaps between bars only, none between groups.
    bc.useAbsolute = 1
    bc.barWidth = 40
    bc.groupSpacing = 0
    bc.barSpacing = 10

    bc.valueAxis.valueMin = 0
    bc.valueAxis.valueMax = 60
    bc.valueAxis.valueStep = 15

    bc.categoryAxis.labels.boxAnchor = 'n'
    bc.categoryAxis.labels.dy = -5
    bc.categoryAxis.categoryNames = ['Ying', 'Yang']

    drawing.add(bc)

    return drawing
def sampleV5c4():
    "Make same simple bar chart but with absolute spacing."

    drawing = Drawing(400, 200)

    data = dataSample5

    bc = VerticalBarChart()
    bc.x = 50
    bc.y = 50
    bc.height = 125
    bc.width = 300
    bc.data = data
    bc.strokeColor = colors.black

    # Absolute spacing: gaps between both groups and individual bars.
    bc.useAbsolute = 1
    bc.barWidth = 40
    bc.groupSpacing = 20
    bc.barSpacing = 10

    bc.valueAxis.valueMin = 0
    bc.valueAxis.valueMax = 60
    bc.valueAxis.valueStep = 15

    bc.categoryAxis.labels.boxAnchor = 'n'
    bc.categoryAxis.labels.dy = -5
    bc.categoryAxis.categoryNames = ['Ying', 'Yang']

    drawing.add(bc)

    return drawing
# Horizontal samples
def sampleH0a():
"Make a slightly pathologic bar chart with only TWO data items."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'se'
bc.categoryAxis.labels.angle = 30
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH0b():
"Make a pathologic bar chart with only ONE data item."
drawing = Drawing(400, 200)
data = [(42,)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 50
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'se'
bc.categoryAxis.labels.angle = 30
bc.categoryAxis.categoryNames = ['Jan-99']
drawing.add(bc)
return drawing
def sampleH0c():
"Make a really pathologic bar chart with NO data items at all!"
drawing = Drawing(400, 200)
data = [()]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'se'
bc.categoryAxis.labels.angle = 30
bc.categoryAxis.categoryNames = []
drawing.add(bc)
return drawing
def sampleH1():
"Sample of multi-series bar chart."
drawing = Drawing(400, 200)
data = [
(13, 5, 20, 22, 37, 45, 19, 4),
(14, 6, 21, 23, 38, 46, 20, 5)
]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
catNames = 'Jan Feb Mar Apr May Jun Jul Aug'.split(' ')
catNames = [n+'-99' for n in catNames]
bc.categoryAxis.categoryNames = catNames
drawing.add(bc, 'barchart')
return drawing
def sampleH2a():
"Sample of multi-series bar chart."
data = [(2.4, -5.7, 2, 5, 9.2),
(0.6, -4.9, -3, 4, 6.8)
]
labels = ("Q3 2000", "Year to Date", "12 months",
"Annualised\n3 years", "Since 07.10.99")
drawing = Drawing(400, 200)
bc = HorizontalBarChart()
bc.x = 80
bc.y = 50
bc.height = 120
bc.width = 300
bc.data = data
bc.barSpacing = 0
bc.groupSpacing = 10
bc.barWidth = 10
bc.valueAxis.valueMin = -15
bc.valueAxis.valueMax = +15
bc.valueAxis.valueStep = 5
bc.valueAxis.labels.fontName = 'Helvetica'
bc.valueAxis.labels.fontSize = 8
bc.valueAxis.labels.boxAnchor = 'n' # irrelevant (becomes 'c')
bc.valueAxis.labels.textAnchor = 'middle'
bc.valueAxis.configure(bc.data)
bc.categoryAxis.categoryNames = labels
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 8
bc.categoryAxis.labels.dx = -150
drawing.add(bc)
return drawing
def sampleH2b():
"Sample of multi-series bar chart."
data = [(2.4, -5.7, 2, 5, 9.2),
(0.6, -4.9, -3, 4, 6.8)
]
labels = ("Q3 2000", "Year to Date", "12 months",
"Annualised\n3 years", "Since 07.10.99")
drawing = Drawing(400, 200)
bc = HorizontalBarChart()
bc.x = 80
bc.y = 50
bc.height = 120
bc.width = 300
bc.data = data
bc.barSpacing = 5
bc.groupSpacing = 10
bc.barWidth = 10
bc.valueAxis.valueMin = -15
bc.valueAxis.valueMax = +15
bc.valueAxis.valueStep = 5
bc.valueAxis.labels.fontName = 'Helvetica'
bc.valueAxis.labels.fontSize = 8
bc.valueAxis.labels.boxAnchor = 'n' # irrelevant (becomes 'c')
bc.valueAxis.labels.textAnchor = 'middle'
bc.categoryAxis.categoryNames = labels
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 8
bc.categoryAxis.labels.dx = -150
drawing.add(bc)
return drawing
def sampleH2c():
"Sample of multi-series bar chart."
data = [(2.4, -5.7, 2, 5, 9.99),
(0.6, -4.9, -3, 4, 9.99)
]
labels = ("Q3 2000", "Year to Date", "12 months",
"Annualised\n3 years", "Since 07.10.99")
drawing = Drawing(400, 200)
bc = HorizontalBarChart()
bc.x = 80
bc.y = 50
bc.height = 120
bc.width = 300
bc.data = data
bc.barSpacing = 2
bc.groupSpacing = 10
bc.barWidth = 10
bc.valueAxis.valueMin = -15
bc.valueAxis.valueMax = +15
bc.valueAxis.valueStep = 5
bc.valueAxis.labels.fontName = 'Helvetica'
bc.valueAxis.labels.fontSize = 8
bc.valueAxis.labels.boxAnchor = 'n'
bc.valueAxis.labels.textAnchor = 'middle'
bc.categoryAxis.categoryNames = labels
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 8
bc.categoryAxis.labels.dx = -150
bc.barLabels.nudge = 10
bc.barLabelFormat = '%0.2f'
bc.barLabels.dx = 0
bc.barLabels.dy = 0
bc.barLabels.boxAnchor = 'n' # irrelevant (becomes 'c')
bc.barLabels.fontName = 'Helvetica'
bc.barLabels.fontSize = 6
drawing.add(bc)
return drawing
def sampleH3():
"A really horizontal bar chart (compared to the equivalent faked one)."
names = ("UK Equities", "US Equities", "European Equities", "Japanese Equities",
"Pacific (ex Japan) Equities", "Emerging Markets Equities",
"UK Bonds", "Overseas Bonds", "UK Index-Linked", "Cash")
series1 = (-1.5, 0.3, 0.5, 1.0, 0.8, 0.7, 0.4, 0.1, 1.0, 0.3)
series2 = (0.0, 0.33, 0.55, 1.1, 0.88, 0.77, 0.44, 0.11, 1.10, 0.33)
assert len(names) == len(series1), "bad data"
assert len(names) == len(series2), "bad data"
drawing = Drawing(400, 200)
bc = HorizontalBarChart()
bc.x = 100
bc.y = 20
bc.height = 150
bc.width = 250
bc.data = (series1,)
bc.bars.fillColor = colors.green
bc.barLabelFormat = '%0.2f'
bc.barLabels.dx = 0
bc.barLabels.dy = 0
bc.barLabels.boxAnchor = 'w' # irrelevant (becomes 'c')
bc.barLabels.fontName = 'Helvetica'
bc.barLabels.fontSize = 6
bc.barLabels.nudge = 10
bc.valueAxis.visible = 0
bc.valueAxis.valueMin = -2
bc.valueAxis.valueMax = +2
bc.valueAxis.valueStep = 1
bc.categoryAxis.tickLeft = 0
bc.categoryAxis.tickRight = 0
bc.categoryAxis.categoryNames = names
bc.categoryAxis.labels.boxAnchor = 'w'
bc.categoryAxis.labels.dx = -170
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 6
g = Group(bc)
drawing.add(g)
return drawing
def sampleH4a():
"A bar chart showing value axis region starting at *exactly* zero."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH4b():
"A bar chart showing value axis region starting *below* zero."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = -10
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH4c():
"A bar chart showing value axis region starting *above* zero."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 10
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH4d():
"A bar chart showing value axis region entirely *below* zero."
drawing = Drawing(400, 200)
data = [(-13, -20)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = -30
bc.valueAxis.valueMax = -10
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
dataSample5 = [(10, 60), (20, 50), (30, 40), (40, 30)]
def sampleH5a():
"A simple bar chart with no expressed spacing attributes."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH5b():
"A simple bar chart with proportional spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 0
bc.barWidth = 40
bc.groupSpacing = 20
bc.barSpacing = 10
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH5c1():
"A simple bar chart with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 10
bc.groupSpacing = 0
bc.barSpacing = 0
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH5c2():
"Simple bar chart with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 10
bc.groupSpacing = 20
bc.barSpacing = 0
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH5c3():
"Simple bar chart with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 20
bc.height = 155
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 10
bc.groupSpacing = 0
bc.barSpacing = 2
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH5c4():
"Simple bar chart with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 10
bc.groupSpacing = 20
bc.barSpacing = 10
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleSymbol1():
"Simple bar chart using symbol attribute."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.barWidth = 10
bc.groupSpacing = 15
bc.barSpacing = 3
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
from reportlab.graphics.widgets.grids import ShadedRect
sym1 = ShadedRect()
sym1.fillColorStart = colors.black
sym1.fillColorEnd = colors.blue
sym1.orientation = 'horizontal'
sym1.strokeWidth = 0
sym2 = ShadedRect()
sym2.fillColorStart = colors.black
sym2.fillColorEnd = colors.pink
sym2.orientation = 'horizontal'
sym2.strokeWidth = 0
sym3 = ShadedRect()
sym3.fillColorStart = colors.blue
sym3.fillColorEnd = colors.white
sym3.orientation = 'vertical'
sym3.cylinderMode = 1
sym3.strokeWidth = 0
bc.bars.symbol = sym1
bc.bars[2].symbol = sym2
bc.bars[3].symbol = sym3
drawing.add(bc)
return drawing
def sampleStacked1():
"Simple bar chart using symbol attribute."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.categoryAxis.style = 'stacked'
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.barWidth = 10
bc.groupSpacing = 15
bc.valueAxis.valueMin = 0
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
from reportlab.graphics.widgets.grids import ShadedRect
bc.bars.symbol = ShadedRect()
bc.bars.symbol.fillColorStart = colors.red
bc.bars.symbol.fillColorEnd = colors.white
bc.bars.symbol.orientation = 'vertical'
bc.bars.symbol.cylinderMode = 1
bc.bars.symbol.strokeWidth = 0
bc.bars[1].symbol = ShadedRect()
bc.bars[1].symbol.fillColorStart = colors.magenta
bc.bars[1].symbol.fillColorEnd = colors.white
bc.bars[1].symbol.orientation = 'vertical'
bc.bars[1].symbol.cylinderMode = 1
bc.bars[1].symbol.strokeWidth = 0
bc.bars[2].symbol = ShadedRect()
bc.bars[2].symbol.fillColorStart = colors.green
bc.bars[2].symbol.fillColorEnd = colors.white
bc.bars[2].symbol.orientation = 'vertical'
bc.bars[2].symbol.cylinderMode = 1
bc.bars[2].symbol.strokeWidth = 0
bc.bars[3].symbol = ShadedRect()
bc.bars[3].symbol.fillColorStart = colors.blue
bc.bars[3].symbol.fillColorEnd = colors.white
bc.bars[3].symbol.orientation = 'vertical'
bc.bars[3].symbol.cylinderMode = 1
bc.bars[3].symbol.strokeWidth = 0
drawing.add(bc)
return drawing
#class version of function sampleH5c4 above
class SampleH5c4(Drawing):
    "Simple bar chart with absolute spacing."

    def __init__(self, width=400, height=200, *args, **kw):
        # Build the same chart as sampleH5c4() but as a reusable Drawing
        # subclass, so callers can choose their own canvas size.
        Drawing.__init__(self, width, height, *args, **kw)
        bc = HorizontalBarChart()
        bc.x = 50
        bc.y = 50
        bc.height = 125
        bc.width = 300
        bc.data = dataSample5
        bc.strokeColor = colors.black

        # Absolute spacing measured in points rather than proportions.
        bc.useAbsolute = 1
        bc.barWidth = 10
        bc.groupSpacing = 20
        bc.barSpacing = 10

        bc.valueAxis.valueMin = 0
        bc.valueAxis.valueMax = 60
        bc.valueAxis.valueStep = 15

        bc.categoryAxis.labels.boxAnchor = 'e'
        bc.categoryAxis.categoryNames = ['Ying', 'Yang']

        # Register under a name so the chart is reachable as self.HBC.
        self.add(bc, name='HBC')
| gpl-3.0 |
harry159821/Tickeys-linux | tickeys/kivy/core/camera/__init__.py | 14 | 4426 | '''
Camera
======
Core class for acquiring the camera and converting its input into a
:class:`~kivy.graphics.texture.Texture`.
.. versionchanged:: 1.8.0
There is now 2 distinct Gstreamer implementation: one using Gi/Gst
working for both Python 2+3 with Gstreamer 1.0, and one using PyGST
working only for Python 2 + Gstreamer 0.10.
If you have issue with GStreamer, have a look at
:ref:`gstreamer-compatibility`
'''
__all__ = ('CameraBase', 'Camera')
import sys
from kivy.event import EventDispatcher
from kivy.logger import Logger
from kivy.core import core_select_lib
class CameraBase(EventDispatcher):
    '''Abstract Camera Widget class.

    Concrete camera classes must implement initialization and
    frame capturing to a buffer that can be uploaded to the gpu.

    :Parameters:
        `index`: int
            Source index of the camera.
        `size` : tuple (int, int)
            Size at which the image is drawn. If no size is specified,
            it defaults to the resolution of the camera image.
        `resolution` : tuple (int, int)
            Resolution to try to request from the camera.
            Used in the gstreamer pipeline by forcing the appsink caps
            to this resolution. If the camera doesnt support the resolution,
            a negotiation error might be thrown.

    :Events:
        `on_load`
            Fired when the camera is loaded and the texture has become
            available.
        `on_texture`
            Fired each time the camera texture is updated.
    '''

    __events__ = ('on_load', 'on_texture')

    def __init__(self, **kwargs):
        kwargs.setdefault('stopped', False)
        kwargs.setdefault('resolution', (640, 480))
        kwargs.setdefault('index', 0)
        self.stopped = kwargs.get('stopped')
        self._resolution = kwargs.get('resolution')
        self._index = kwargs.get('index')
        self._buffer = None
        self._format = 'rgb'
        self._texture = None
        self.capture_device = None
        kwargs.setdefault('size', self._resolution)

        super(CameraBase, self).__init__()

        self.init_camera()

        if not self.stopped:
            self.start()

    def _set_resolution(self, res):
        # Changing the resolution requires re-initialising the device.
        self._resolution = res
        self.init_camera()

    def _get_resolution(self):
        return self._resolution

    resolution = property(lambda self: self._get_resolution(),
                          lambda self, x: self._set_resolution(x),
                          doc='Resolution of camera capture (width, height)')

    def _set_index(self, x):
        if x == self._index:
            return
        self._index = x
        self.init_camera()

    def _get_index(self):
        # BUGFIX: this previously returned self._x, an attribute that is
        # never assigned, so reading the `index` property always raised
        # AttributeError. Return the stored source index instead.
        return self._index

    index = property(lambda self: self._get_index(),
                     lambda self, x: self._set_index(x),
                     doc='Source index of the camera')

    def _get_texture(self):
        return self._texture

    texture = property(lambda self: self._get_texture(),
                       doc='Return the camera texture with the latest capture')

    def init_camera(self):
        '''Initialise the camera (internal)'''
        pass

    def start(self):
        '''Start the camera acquire'''
        self.stopped = False

    def stop(self):
        '''Release the camera'''
        self.stopped = True

    def _update(self, dt):
        '''Update the camera (internal)'''
        pass

    def _copy_to_gpu(self):
        '''Copy the the buffer into the texture'''
        if self._texture is None:
            Logger.debug('Camera: copy_to_gpu() failed, _texture is None !')
            return
        self._texture.blit_buffer(self._buffer, colorfmt=self._format)
        self._buffer = None
        self.dispatch('on_texture')

    def on_texture(self):
        pass

    def on_load(self):
        pass
# Load the appropriate providers
# Each entry is (provider name, module name, class name); core_select_lib
# tries them in order and binds Camera to the first one that imports.
providers = ()

if sys.platform == 'win32':
    providers += (('videocapture', 'camera_videocapture',
                   'CameraVideoCapture'), )
elif sys.platform == 'darwin':
    providers += (('avfoundation', 'camera_avfoundation',
                   'CameraAVFoundation'), )
else:
    # The Gi/Gst provider is currently disabled; PyGST (Python 2 +
    # Gstreamer 0.10, per the module docstring) is tried first, with
    # OpenCV as the fallback.
    #providers += (('gi', 'camera_gi', 'CameraGi'), )
    providers += (('pygst', 'camera_pygst', 'CameraPyGst'), )
    providers += (('opencv', 'camera_opencv', 'CameraOpenCV'), )

Camera = core_select_lib('camera', (providers))
| mit |
stephane-martin/salt-debian-packaging | salt-2016.3.2/tests/unit/pillar/hg_test.py | 2 | 2302 | # -*- coding: utf-8 -*-
'''test for pillar hg_pillar.py'''
# Import python libs
from __future__ import absolute_import
import os
import tempfile
import shutil
import subprocess
import yaml
# Import Salt Testing libs
from salttesting import TestCase, skipIf
from salttesting.mock import NO_MOCK, NO_MOCK_REASON
import integration
COMMIT_USER_NAME = 'test_user'
# file contents
PILLAR_CONTENT = {'gna': 'hello'}
FILE_DATA = {
'top.sls': {'base': {'*': ['user']}},
'user.sls': PILLAR_CONTENT
}
# Import Salt Libs
from salt.pillar import hg_pillar
HGLIB = hg_pillar.hglib
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(HGLIB is None, 'python-hglib no')
class HgPillarTestCase(TestCase, integration.AdaptedConfigurationTestCaseMixIn):
    'test hg_pillar pillar'
    maxDiff = None

    def setUp(self):
        """Create a scratch hg repository and point hg_pillar's module
        globals at a per-test cachedir."""
        super(HgPillarTestCase, self).setUp()
        self.tmpdir = tempfile.mkdtemp(dir=integration.SYS_TMP_DIR)
        cachedir = os.path.join(self.tmpdir, 'cachedir')
        os.makedirs(os.path.join(cachedir, 'hg_pillar'))
        self.hg_repo_path = self._create_hg_repo()
        # Minimal master opts required by the hg_pillar external pillar.
        hg_pillar.__opts__ = {
            'cachedir': cachedir,
            'pillar_roots': {},
            'file_roots': {},
            'state_top': 'top.sls',
            'extension_modules': '',
            'renderer': 'yaml_jinja',
            'pillar_opts': False
        }
        hg_pillar.__grains__ = {}

    def tearDown(self):
        # Remove the scratch repo and cachedir created in setUp.
        shutil.rmtree(self.tmpdir)
        super(HgPillarTestCase, self).tearDown()

    def _create_hg_repo(self):
        'create repo in tempdir'
        hg_repo = os.path.join(self.tmpdir, 'repo_pillar')
        os.makedirs(hg_repo)
        subprocess.check_call(["hg", "init", hg_repo])
        # Dump each fixture (top.sls / user.sls) as YAML, then commit
        # everything in a single changeset.
        for filename in FILE_DATA:
            with open(os.path.join(hg_repo, filename), 'w') as data_file:
                yaml.dump(FILE_DATA[filename], data_file)
        subprocess.check_call(['hg', 'ci', '-A', '-R', hg_repo, '-m', 'first commit', '-u', COMMIT_USER_NAME])
        return hg_repo

    def test_base(self):
        'check hg repo is imported correctly'
        mypillar = hg_pillar.ext_pillar('*', None, 'file://{0}'.format(self.hg_repo_path))
        self.assertEqual(PILLAR_CONTENT, mypillar)
| apache-2.0 |
ag-wood/ansible-modules-extras | packaging/os/swdepot.py | 102 | 6139 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2013, Raul Melo
# Written by Raul Melo <raulmelo@gmail.com>
# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import re
import pipes
DOCUMENTATION = '''
---
module: swdepot
short_description: Manage packages with swdepot package manager (HP-UX)
description:
- Will install, upgrade and remove packages with swdepot package manager (HP-UX)
version_added: "1.4"
notes: []
author: "Raul Melo (@melodous)"
options:
name:
description:
- package name.
required: true
default: null
choices: []
aliases: []
version_added: 1.4
state:
description:
- whether to install (C(present), C(latest)), or remove (C(absent)) a package.
required: true
default: null
choices: [ 'present', 'latest', 'absent']
aliases: []
version_added: 1.4
depot:
description:
- The source repository from which install or upgrade a package.
required: false
default: null
choices: []
aliases: []
version_added: 1.4
'''
EXAMPLES = '''
- swdepot: name=unzip-6.0 state=installed depot=repository:/path
- swdepot: name=unzip state=latest depot=repository:/path
- swdepot: name=unzip state=absent
'''
def compare_package(version1, version2):
    """ Compare two dotted package version strings.

    Trailing ".0" components are ignored, so "1.0" compares equal to "1".

    Return values:
        -1 first minor
        0 equal
        1 fisrt greater """
    def normalize(v):
        # Strip trailing zero components before comparing numerically so
        # that zero padding does not affect the result.
        return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
    a = normalize(version1)
    b = normalize(version2)
    # cmp() was removed in Python 3; (a > b) - (a < b) reproduces its
    # -1/0/1 contract for lexicographically compared lists, keeping the
    # module usable on both interpreter generations.
    return (a > b) - (a < b)
def query_package(module, name, depot=None):
    """ Returns whether a package is installed or not and version. """

    cmd_list = '/usr/sbin/swlist -a revision -l product'
    safe_name = pipes.quote(name)
    if depot:
        shell_cmd = "%s -s %s %s | grep %s" % (cmd_list, pipes.quote(depot),
                                               safe_name, safe_name)
    else:
        shell_cmd = "%s %s | grep %s" % (cmd_list, safe_name, safe_name)
    rc, stdout, stderr = module.run_command(shell_cmd, use_unsafe_shell=True)

    if rc != 0:
        return rc, None

    # Collapse runs of whitespace so the revision is the second column.
    version = re.sub("\s\s+|\t", " ", stdout).strip().split()[1]
    return rc, version
def remove_package(module, name):
    """ Uninstall package if installed. """

    rc, stdout, stderr = module.run_command("%s %s" % ('/usr/sbin/swremove', name))

    # On success report swremove's stdout, otherwise surface stderr.
    return (rc, stdout) if rc == 0 else (rc, stderr)
def install_package(module, depot, name):
    """ Install package if not already installed """

    cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false'
    rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name))

    # On failure surface swinstall's stderr instead of its stdout.
    if rc != 0:
        return rc, stderr
    return rc, stdout
def main():
    # Ansible module boilerplate: name is mandatory, state picks the
    # action, depot points at the SD-UX software source.
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(aliases=['pkg'], required=True),
            state = dict(choices=['present', 'absent', 'latest'], required=True),
            depot = dict(default=None, required=False)
        ),
        supports_check_mode=True
    )
    name = module.params['name']
    state = module.params['state']
    depot = module.params['depot']

    changed = False
    msg = "No changed"
    rc = 0

    # Installing or upgrading needs a source repository to pull from.
    if ( state == 'present' or state == 'latest' ) and depot == None:
        output = "depot parameter is mandatory in present or latest task"
        module.fail_json(name=name, msg=output, rc=rc)

    #Check local version
    rc, version_installed = query_package(module, name)
    if not rc:
        installed = True
        msg = "Already installed"
    else:
        installed = False

    if ( state == 'present' or state == 'latest' ) and installed == False:
        if module.check_mode:
            module.exit_json(changed=True)
        rc, output = install_package(module, depot, name)

        if not rc:
            changed = True
            msg = "Package installed"
        else:
            module.fail_json(name=name, msg=output, rc=rc)

    elif state == 'latest' and installed == True:
        #Check depot version
        rc, version_depot = query_package(module, name, depot)

        if not rc:
            # Upgrade only when the depot revision is strictly newer.
            if compare_package(version_installed,version_depot) == -1:
                if module.check_mode:
                    module.exit_json(changed=True)
                #Install new version
                rc, output = install_package(module, depot, name)

                if not rc:
                    msg = "Packge upgraded, Before " + version_installed + " Now " + version_depot
                    changed = True
                else:
                    module.fail_json(name=name, msg=output, rc=rc)
        else:
            output = "Software package not in repository " + depot
            module.fail_json(name=name, msg=output, rc=rc)

    elif state == 'absent' and installed == True:
        if module.check_mode:
            module.exit_json(changed=True)
        rc, output = remove_package(module, name)
        if not rc:
            changed = True
            msg = "Package removed"
        else:
            module.fail_json(name=name, msg=output, rc=rc)

    # In check mode, falling through to here means nothing would change.
    if module.check_mode:
        module.exit_json(changed=False)

    module.exit_json(changed=changed, name=name, state=state, msg=msg)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
edina/lockss-daemon | test/frameworks/clean_up_daemon/clean_cache.py | 1 | 7087 | #!/usr/bin/env python
# $Id$
# Copyright (c) 2000-2015 Board of Trustees of Leland Stanford Jr. University,
# all rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# STANFORD UNIVERSITY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
# IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name of Stanford University shall not
# be used in advertising or otherwise to promote the sale, use or other dealings
# in this Software without prior written authorization from Stanford University.
########
#
# This script is no longer supported and may be removed in the future.
#
########
import optparse
import ConfigParser
import os
import sys
import urllib2
import fix_auth_failure
import lockss_daemon
__author__ = "Barry Hayes"
__maintainer__ = "Barry Hayes"
__version__ = "1.0.3"
class _SectionAdder(object):
"""Wrap a python configuration section around a file that doesn't
have one."""
def __init__(self, section, fp):
self.section_done = False
self.section = section
self.fp = fp
def readline(self):
if not self.section_done:
self.section_done = True
return '[%s]' % self.section
else:
return self.fp.readline()
def _parser():
"""Make a parser for the arguments."""
parser = optparse.OptionParser(
description='Move cache directories on a LOCKSS daemon')
parser.add_option('-u', '--username', metavar='U', help='UI username')
parser.add_option('-p', '--password', metavar='P', help='UI password')
parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
default=False)
parser.add_option('-q', '--quiet', dest='verbose', action='store_false')
parser.add_option('-f', '--force', dest='verify', action='store_false',
help='ignore auids not present on the daemon, '
'never prompt')
parser.add_option('-i', '--verify', dest='verify', action='store_true',
default=False, help='prompt before each move')
parser.add_option('-c', '--commands', action='store_true', default=False,
help='print mv commands, but do not move files')
parser.add_option('-d', '--directory', default='.',
help='the daemon directory where ./cache is '
'(default: \'%default\')')
parser.add_option('--dest', default='deleted',
help='where under the daemon directory the cache '
'entries are moved to (default: \'%default\')')
return parser
def _process_args():
    # Parse sys.argv; this script accepts options only, no positionals.
    parser = _parser()
    options, arguments = parser.parse_args()
    if arguments:
        # parser.error() prints the message and exits the process.
        parser.error('There should be no arguments. Try --help')
    return options
def _auid(cache_dir):
    """Return the AUID for the given cache dir."""
    # If the #au_id_file isn't present, or doesn't contain an au.id
    # entry, the daemon doesn't list the directory in the table, so no
    # need to check either condition.
    path = os.path.join(cache_dir, '#au_id_file')
    parser = ConfigParser.ConfigParser()
    id_file = open(os.path.join(path))
    try:
        # The file has no section header, so wrap it in a dummy one.
        parser.readfp(_SectionAdder('foo', id_file))
        auid = parser.get('foo', 'au.id')
        # An empty AUID would mean something very odd is going on, and a
        # human should check.
        assert auid
        return auid
    finally:
        id_file.close()
def main():
    options = _process_args()
    src = options.directory
    local_txt = os.path.join(src, 'local.txt')
    # Sanity-check that we were pointed at a daemon directory.
    if not os.path.isdir(os.path.join(src, 'cache')):
        raise Exception('%s doesn\'t look like a daemon directory. '
                        'Try --directory.' % src)
    # UI address/port come from the environment when set; otherwise the
    # port is read from the daemon's local.txt configuration file.
    if 'LOCKSS_IPADDR' in os.environ: ipAddr = os.environ['LOCKSS_IPADDR']
    else: ipAddr = '127.0.0.1'
    if 'LOCKSS_UI_PORT' in os.environ:
        port = os.environ['LOCKSS_UI_PORT']
    else:
        if not os.path.isfile(local_txt):
            raise Exception('LOCKSS_UI_PORT is not set but there is no'
                            '%s' % (local_txt,))
        config = ConfigParser.ConfigParser()
        local_config = open(local_txt)
        try:
            # local.txt has no section header, so wrap it in a dummy one.
            config.readfp(_SectionAdder('foo', local_config))
            port = config.get('foo', 'org.lockss.ui.port')
        finally:
            local_config.close()
    fix_auth_failure.fix_auth_failure()
    client = lockss_daemon.Client(ipAddr, port,
                                  options.username, options.password)
    # _getStatusTable returns a tuple; element [1] holds the row dicts
    # (each with at least 'status' and 'dir' keys, used below).
    repos = client._getStatusTable( 'RepositoryTable' )[ 1 ]
    no_auid = [r for r in repos if r['status'] == 'No AUID']
    if no_auid:
        print 'Warning: These cache directories have no AUID:'
        for r in no_auid:
            print r['dir']
        print
    # Collect entries the daemon reports as deleted and sort by AUID.
    deleted = [r for r in repos if r['status'] == 'Deleted']
    for r in deleted:
        r['auid'] = _auid(os.path.join(src, r['dir']))
    deleted.sort(key=lambda r: r['auid'])
    move_all = False
    if options.verbose:
        if deleted:
            print 'These AUs have been deleted on the daemon:'
            for r in deleted:
                print r['auid']
            if options.verify:
                move_all = raw_input('move all [y]? ').startswith('y')
        else:
            print 'No deleted AUs.'
    # With --verify, prompt per entry unless the user already answered
    # "move all" above.
    verify_each = options.verify and not move_all
    dst = os.path.join(options.directory, options.dest)
    for r in deleted:
        dir = r['dir']
        if not verify_each or \
           verify_each and \
           raw_input('move %s [n]? ' % r['auid']).startswith('y'):
            src_r = os.path.join(src, dir)
            if os.path.isabs(dir):
                # Absolute cache paths are re-rooted under dst; warn when
                # they do not live under the daemon directory.
                if not dir.startswith(options.directory): print 'Absolute/relative path mismatch: %s' % (dir,)
                dst_r = os.path.join(dst, dir[len(options.directory)+1:])
            else: dst_r = os.path.join(dst, dir)
            if options.commands:
                print "mv %s %s # %s" % (src_r, dst_r, r['auid'])
            else:
                # os.renames creates intermediate dst directories and
                # prunes emptied src directories.
                os.renames(src_r, dst_r)
if __name__ == '__main__':
print 'Warning: This script is no longer supported.'
main()
| bsd-3-clause |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/libxml2-2.9.1/python/libxml2class.py | 10 | 321118 | #
# Functions from module HTMLparser
#
def htmlCreateMemoryParserCtxt(buffer, size):
    """Build a parser context for an in-memory HTML document. """
    rv = libxml2mod.htmlCreateMemoryParserCtxt(buffer, size)
    if rv is None:
        raise parserError('htmlCreateMemoryParserCtxt() failed')
    return parserCtxt(_obj=rv)
def htmlHandleOmittedElem(val):
    """Enable or disable handling of HTML omitted tags; returns the
    previous setting. """
    return libxml2mod.htmlHandleOmittedElem(val)
def htmlIsScriptAttribute(name):
    """Tell whether an attribute has Script content type. """
    return libxml2mod.htmlIsScriptAttribute(name)
def htmlNewParserCtxt():
    """Allocate and set up a fresh HTML parser context. """
    rv = libxml2mod.htmlNewParserCtxt()
    if rv is None:
        raise parserError('htmlNewParserCtxt() failed')
    return parserCtxt(_obj=rv)
def htmlParseDoc(cur, encoding):
    """Parse an in-memory HTML document and return the tree. """
    rv = libxml2mod.htmlParseDoc(cur, encoding)
    if rv is None:
        raise parserError('htmlParseDoc() failed')
    return xmlDoc(_obj=rv)
def htmlParseFile(filename, encoding):
    """Parse an HTML file and return the tree; compressed input is
    handled transparently when support was compiled in. """
    rv = libxml2mod.htmlParseFile(filename, encoding)
    if rv is None:
        raise parserError('htmlParseFile() failed')
    return xmlDoc(_obj=rv)
def htmlReadDoc(cur, URL, encoding, options):
    """Parse an in-memory document with parser options and return the
    tree. """
    rv = libxml2mod.htmlReadDoc(cur, URL, encoding, options)
    if rv is None:
        raise treeError('htmlReadDoc() failed')
    return xmlDoc(_obj=rv)
def htmlReadFd(fd, URL, encoding, options):
    """Parse a document from a file descriptor and return the tree. """
    rv = libxml2mod.htmlReadFd(fd, URL, encoding, options)
    if rv is None:
        raise treeError('htmlReadFd() failed')
    return xmlDoc(_obj=rv)
def htmlReadFile(filename, encoding, options):
    """Parse a document from the filesystem or network and return the
    tree. """
    rv = libxml2mod.htmlReadFile(filename, encoding, options)
    if rv is None:
        raise treeError('htmlReadFile() failed')
    return xmlDoc(_obj=rv)
def htmlReadMemory(buffer, size, URL, encoding, options):
    """Parse an in-memory document with parser options and return the
    tree. """
    rv = libxml2mod.htmlReadMemory(buffer, size, URL, encoding, options)
    if rv is None:
        raise treeError('htmlReadMemory() failed')
    return xmlDoc(_obj=rv)
#
# Functions from module HTMLtree
#
def htmlIsBooleanAttr(name):
    """Tell whether the given attribute is a boolean attribute. """
    return libxml2mod.htmlIsBooleanAttr(name)
def htmlNewDoc(URI, ExternalID):
    """Create a new HTML document. """
    rv = libxml2mod.htmlNewDoc(URI, ExternalID)
    if rv is None:
        raise treeError('htmlNewDoc() failed')
    return xmlDoc(_obj=rv)
def htmlNewDocNoDtD(URI, ExternalID):
    """Create a new HTML document; no DTD node is added when both @URI
    and @ExternalID are None. """
    rv = libxml2mod.htmlNewDocNoDtD(URI, ExternalID)
    if rv is None:
        raise treeError('htmlNewDocNoDtD() failed')
    return xmlDoc(_obj=rv)
#
# Functions from module SAX2
#
def SAXDefaultVersion(version):
    """Set the SAX version used globally by the library (default 2 at
    initialization); prefer xmlSAXVersion() on a specific parsing
    context. """
    return libxml2mod.xmlSAXDefaultVersion(version)
def defaultSAXHandlerInit():
    """Set up the default SAX2 handler. """
    libxml2mod.xmlDefaultSAXHandlerInit()
def docbDefaultSAXHandlerInit():
    """Set up the default DocBook SAX handler. """
    libxml2mod.docbDefaultSAXHandlerInit()
def htmlDefaultSAXHandlerInit():
    """Set up the default HTML SAX handler. """
    libxml2mod.htmlDefaultSAXHandlerInit()
#
# Functions from module catalog
#
def catalogAdd(type, orig, replace):
    """Add (possibly overwriting) an entry in the catalog; called
    before any other catalog routine it overrides the default shared
    catalog set up by xmlInitializeCatalog(). """
    return libxml2mod.xmlCatalogAdd(type, orig, replace)
def catalogCleanup():
    """Release all memory held by the catalogs. """
    libxml2mod.xmlCatalogCleanup()
def catalogConvert():
    """Convert every SGML catalog entry to an XML one. """
    return libxml2mod.xmlCatalogConvert()
def catalogDump(out):
    """Write the whole global catalog content to the given file. """
    if out is not None:
        out.flush()
    libxml2mod.xmlCatalogDump(out)
def catalogGetPublic(pubID):
    """Look up the catalog reference for a public ID. DEPRECATED, use
    xmlCatalogResolvePublic(). """
    return libxml2mod.xmlCatalogGetPublic(pubID)
def catalogGetSystem(sysID):
    """Look up the catalog reference for a system ID. DEPRECATED, use
    xmlCatalogResolveSystem(). """
    return libxml2mod.xmlCatalogGetSystem(sysID)
def catalogRemove(value):
    """Remove one entry from the catalog. """
    return libxml2mod.xmlCatalogRemove(value)
def catalogResolve(pubID, sysID):
    """Perform a full resolution lookup of an external identifier. """
    return libxml2mod.xmlCatalogResolve(pubID, sysID)
def catalogResolvePublic(pubID):
    """Look up the catalog reference for a public ID. """
    return libxml2mod.xmlCatalogResolvePublic(pubID)
def catalogResolveSystem(sysID):
    """Look up the catalog resource for a system ID. """
    return libxml2mod.xmlCatalogResolveSystem(sysID)
def catalogResolveURI(URI):
    """Perform a full resolution lookup of an URI. """
    return libxml2mod.xmlCatalogResolveURI(URI)
def catalogSetDebug(level):
    """Set the catalog debug level: 0 disables debugging, 1 enables
    it. """
    return libxml2mod.xmlCatalogSetDebug(level)
def initializeCatalog():
    """Initialize the catalog support; not thread safe, so preferably
    call once at startup. """
    libxml2mod.xmlInitializeCatalog()
def loadACatalog(filename):
    """Load a catalog (XML or SGML) and build its data structures;
    SGML CATALOG entries are followed recursively, XML catalogs are
    not. """
    rv = libxml2mod.xmlLoadACatalog(filename)
    if rv is None:
        raise treeError('xmlLoadACatalog() failed')
    return catalog(_obj=rv)
def loadCatalog(filename):
    """Load a catalog and make its definitions effective for the
    default external entity loader; recurses into SGML CATALOG
    entries. Not thread safe - call once at startup. """
    return libxml2mod.xmlLoadCatalog(filename)
def loadCatalogs(pathss):
    """Load several catalogs and make their definitions effective for
    the default external entity loader. Not thread safe - call once
    at startup. """
    libxml2mod.xmlLoadCatalogs(pathss)
def loadSGMLSuperCatalog(filename):
    """Load an SGML super catalog without expanding CATALOG or
    DELEGATE references; only needed to manipulate SGML Super
    Catalogs (adding/removing CATALOG or DELEGATE entries). """
    rv = libxml2mod.xmlLoadSGMLSuperCatalog(filename)
    if rv is None:
        raise treeError('xmlLoadSGMLSuperCatalog() failed')
    return catalog(_obj=rv)
def newCatalog(sgml):
    """Create a fresh, empty catalog. """
    rv = libxml2mod.xmlNewCatalog(sgml)
    if rv is None:
        raise treeError('xmlNewCatalog() failed')
    return catalog(_obj=rv)
def parseCatalogFile(filename):
    """Parse an XML file and build a tree, bypassing all catalog
    lookups (unlike xmlParseFile()). """
    rv = libxml2mod.xmlParseCatalogFile(filename)
    if rv is None:
        raise parserError('xmlParseCatalogFile() failed')
    return xmlDoc(_obj=rv)
#
# Functions from module chvalid
#
def isBaseChar(ch):
    """DEPRECATED: prefer xmlIsBaseChar_ch or xmlIsBaseCharQ. """
    return libxml2mod.xmlIsBaseChar(ch)
def isBlank(ch):
    """DEPRECATED: prefer xmlIsBlank_ch or xmlIsBlankQ. """
    return libxml2mod.xmlIsBlank(ch)
def isChar(ch):
    """DEPRECATED: prefer xmlIsChar_ch or xmlIsCharQ. """
    return libxml2mod.xmlIsChar(ch)
def isCombining(ch):
    """DEPRECATED: prefer xmlIsCombiningQ. """
    return libxml2mod.xmlIsCombining(ch)
def isDigit(ch):
    """DEPRECATED: prefer xmlIsDigit_ch or xmlIsDigitQ. """
    return libxml2mod.xmlIsDigit(ch)
def isExtender(ch):
    """DEPRECATED: prefer xmlIsExtender_ch or xmlIsExtenderQ. """
    return libxml2mod.xmlIsExtender(ch)
def isIdeographic(ch):
    """DEPRECATED: prefer xmlIsIdeographicQ. """
    return libxml2mod.xmlIsIdeographic(ch)
def isPubidChar(ch):
    """DEPRECATED: prefer xmlIsPubidChar_ch or xmlIsPubidCharQ. """
    return libxml2mod.xmlIsPubidChar(ch)
#
# Functions from module debugXML
#
def boolToText(boolval):
    """Turn a boolean into a readable text form. """
    return libxml2mod.xmlBoolToText(boolval)
def debugDumpString(output, str):
    """Dump information about the string, shortening it if needed. """
    if output is not None:
        output.flush()
    libxml2mod.xmlDebugDumpString(output, str)
def shellPrintXPathError(errorType, arg):
    """Print the XPath error on libxml's default error channel. """
    libxml2mod.xmlShellPrintXPathError(errorType, arg)
#
# Functions from module dict
#
def dictCleanup():
    """Free the dictionary mutex; only call once the library is
    definitely no longer in use. """
    libxml2mod.xmlDictCleanup()
def initializeDict():
    """Initialize the dictionary mutex (deprecated). """
    return libxml2mod.xmlInitializeDict()
#
# Functions from module encoding
#
def addEncodingAlias(name, alias):
    """Register @alias as an alias for the encoding @name, replacing
    any existing alias. """
    return libxml2mod.xmlAddEncodingAlias(name, alias)
def cleanupCharEncodingHandlers():
    """Free the character-encoding support: unregister all handlers
    and aliases. """
    libxml2mod.xmlCleanupCharEncodingHandlers()
def cleanupEncodingAliases():
    """Unregister every encoding alias. """
    libxml2mod.xmlCleanupEncodingAliases()
def delEncodingAlias(alias):
    """Unregister the encoding alias @alias. """
    return libxml2mod.xmlDelEncodingAlias(alias)
def encodingAlias(alias):
    """Return the encoding name registered for the given alias. """
    return libxml2mod.xmlGetEncodingAlias(alias)
def initCharEncodingHandlers():
    """Initialize character-encoding support and register the default
    encodings; rarely needs an explicit call in normal processing. """
    libxml2mod.xmlInitCharEncodingHandlers()
#
# Functions from module entities
#
def cleanupPredefinedEntities():
    """Tear down the predefined entities table (deprecated). """
    libxml2mod.xmlCleanupPredefinedEntities()
def initializePredefinedEntities():
    """Set up the predefined entities table (deprecated). """
    libxml2mod.xmlInitializePredefinedEntities()
def predefinedEntity(name):
    """Return the predefined entity with this name, if any. """
    rv = libxml2mod.xmlGetPredefinedEntity(name)
    if rv is None:
        raise treeError('xmlGetPredefinedEntity() failed')
    return xmlEntity(_obj=rv)
#
# Functions from module globals
#
def cleanupGlobals():
    """Extra cleanup needed for multi-threading. """
    libxml2mod.xmlCleanupGlobals()
def initGlobals():
    """Extra initialisation needed for multi-threading. """
    libxml2mod.xmlInitGlobals()
def thrDefDefaultBufferSize(v):
    """Set the thread-default value of xmlDefaultBufferSize. """
    return libxml2mod.xmlThrDefDefaultBufferSize(v)
def thrDefDoValidityCheckingDefaultValue(v):
    """Set the thread-default value of xmlDoValidityCheckingDefaultValue. """
    return libxml2mod.xmlThrDefDoValidityCheckingDefaultValue(v)
def thrDefGetWarningsDefaultValue(v):
    """Set the thread-default value of xmlGetWarningsDefaultValue. """
    return libxml2mod.xmlThrDefGetWarningsDefaultValue(v)
def thrDefIndentTreeOutput(v):
    """Set the thread-default value of xmlIndentTreeOutput. """
    return libxml2mod.xmlThrDefIndentTreeOutput(v)
def thrDefKeepBlanksDefaultValue(v):
    """Set the thread-default value of xmlKeepBlanksDefaultValue. """
    return libxml2mod.xmlThrDefKeepBlanksDefaultValue(v)
def thrDefLineNumbersDefaultValue(v):
    """Set the thread-default value of xmlLineNumbersDefaultValue. """
    return libxml2mod.xmlThrDefLineNumbersDefaultValue(v)
def thrDefLoadExtDtdDefaultValue(v):
    """Set the thread-default value of xmlLoadExtDtdDefaultValue. """
    return libxml2mod.xmlThrDefLoadExtDtdDefaultValue(v)
def thrDefParserDebugEntities(v):
    """Set the thread-default value of xmlParserDebugEntities. """
    return libxml2mod.xmlThrDefParserDebugEntities(v)
def thrDefPedanticParserDefaultValue(v):
    """Set the thread-default value of xmlPedanticParserDefaultValue. """
    return libxml2mod.xmlThrDefPedanticParserDefaultValue(v)
def thrDefSaveNoEmptyTags(v):
    """Set the thread-default value of xmlSaveNoEmptyTags. """
    return libxml2mod.xmlThrDefSaveNoEmptyTags(v)
def thrDefSubstituteEntitiesDefaultValue(v):
    """Set the thread-default value of xmlSubstituteEntitiesDefaultValue. """
    return libxml2mod.xmlThrDefSubstituteEntitiesDefaultValue(v)
def thrDefTreeIndentString(v):
    """Set the thread-default value of xmlTreeIndentString. """
    return libxml2mod.xmlThrDefTreeIndentString(v)
#
# Functions from module nanoftp
#
def nanoFTPCleanup():
    """Tear down the FTP protocol layer, including proxy
    information. """
    libxml2mod.xmlNanoFTPCleanup()
def nanoFTPInit():
    """Initialize the FTP protocol layer: read proxy information and
    get the hostname. """
    libxml2mod.xmlNanoFTPInit()
def nanoFTPProxy(host, port, user, passwd, type):
    """Configure the FTP proxy; the ftp_proxy, ftp_proxy_user and
    ftp_proxy_password environment variables achieve the same. """
    libxml2mod.xmlNanoFTPProxy(host, port, user, passwd, type)
def nanoFTPScanProxy(URL):
    """(Re)initialize the FTP proxy context from an URL such as
    ftp://myproxy/ or ftp://myproxy:3128/; a None URL clears the
    proxy information. """
    libxml2mod.xmlNanoFTPScanProxy(URL)
#
# Functions from module nanohttp
#
def nanoHTTPCleanup():
    """Tear down the HTTP protocol layer. """
    libxml2mod.xmlNanoHTTPCleanup()
def nanoHTTPInit():
    """Initialize the HTTP protocol layer: read proxy information. """
    libxml2mod.xmlNanoHTTPInit()
def nanoHTTPScanProxy(URL):
    """(Re)initialize the HTTP proxy context from an URL such as
    http://myproxy/ or http://myproxy:3128/; a None URL clears the
    proxy information. """
    libxml2mod.xmlNanoHTTPScanProxy(URL)
#
# Functions from module parser
#
def createDocParserCtxt(cur):
    """Build a parser context for an in-memory XML document. """
    rv = libxml2mod.xmlCreateDocParserCtxt(cur)
    if rv is None:
        raise parserError('xmlCreateDocParserCtxt() failed')
    return parserCtxt(_obj=rv)
def initParser():
    """Initialize the XML parser; not reentrant, so call once before
    processing in multithreaded programs. """
    libxml2mod.xmlInitParser()
def keepBlanksDefault(val):
    """Set and return the previous default for keeping blank text
    nodes. The 1.x parser heuristically dropped ignorable white
    space (SAX ignorableWhitespace() instead of characters(), no
    blank text nodes in the DOM); 2.x follows the XML standard and
    only reports ignorableWhitespace() in validating mode when the
    element forbids CDATA or mixed content. Use this to force the
    standard behavior on 1.x or to restore the old mode for 1.x
    client code on 2.x (xmlIsBlankNode() helps detect the "empty"
    nodes generated). Also affects indentation on save: with blanks
    kept, no indentation is generated. """
    return libxml2mod.xmlKeepBlanksDefault(val)
def lineNumbersDefault(val):
    """Set and return the previous default for storing line numbers
    in element contents; off by default as it may break old
    applications. """
    return libxml2mod.xmlLineNumbersDefault(val)
def newParserCtxt():
    """Allocate and set up a fresh parser context. """
    rv = libxml2mod.xmlNewParserCtxt()
    if rv is None:
        raise parserError('xmlNewParserCtxt() failed')
    return parserCtxt(_obj=rv)
def parseDTD(ExternalID, SystemID):
    """Load and parse an external subset. """
    rv = libxml2mod.xmlParseDTD(ExternalID, SystemID)
    if rv is None:
        raise parserError('xmlParseDTD() failed')
    return xmlDtd(_obj=rv)
def parseDoc(cur):
    """Parse an in-memory XML document and return the tree. """
    rv = libxml2mod.xmlParseDoc(cur)
    if rv is None:
        raise parserError('xmlParseDoc() failed')
    return xmlDoc(_obj=rv)
def parseEntity(filename):
    """Parse an external XML entity out of context and build a tree:
    [78] extParsedEnt ::= TextDecl? content - a "Well Balanced"
    chunk. """
    rv = libxml2mod.xmlParseEntity(filename)
    if rv is None:
        raise parserError('xmlParseEntity() failed')
    return xmlDoc(_obj=rv)
def parseFile(filename):
    """Parse an XML file and return the tree; compressed input is
    handled transparently when support was compiled in. """
    rv = libxml2mod.xmlParseFile(filename)
    if rv is None:
        raise parserError('xmlParseFile() failed')
    return xmlDoc(_obj=rv)
def parseMemory(buffer, size):
    """Parse an in-memory XML block and return the tree. """
    rv = libxml2mod.xmlParseMemory(buffer, size)
    if rv is None:
        raise parserError('xmlParseMemory() failed')
    return xmlDoc(_obj=rv)
def pedanticParserDefault(val):
    """Set and return the previous default for pedantic warnings. """
    return libxml2mod.xmlPedanticParserDefault(val)
def readDoc(cur, URL, encoding, options):
    """Parse an in-memory XML document with parser options and return
    the tree. """
    rv = libxml2mod.xmlReadDoc(cur, URL, encoding, options)
    if rv is None:
        raise treeError('xmlReadDoc() failed')
    return xmlDoc(_obj=rv)
def readFd(fd, URL, encoding, options):
    """Parse XML from a file descriptor and return the tree; the
    descriptor is NOT closed when the reader is closed or reset. """
    rv = libxml2mod.xmlReadFd(fd, URL, encoding, options)
    if rv is None:
        raise treeError('xmlReadFd() failed')
    return xmlDoc(_obj=rv)
def readFile(filename, encoding, options):
    """Parse an XML file from the filesystem or network and return
    the tree. """
    rv = libxml2mod.xmlReadFile(filename, encoding, options)
    if rv is None:
        raise treeError('xmlReadFile() failed')
    return xmlDoc(_obj=rv)
def readMemory(buffer, size, URL, encoding, options):
    """Parse an in-memory XML document with parser options and return
    the tree. """
    rv = libxml2mod.xmlReadMemory(buffer, size, URL, encoding, options)
    if rv is None:
        raise treeError('xmlReadMemory() failed')
    return xmlDoc(_obj=rv)
def recoverDoc(cur):
    """Parse an in-memory XML document, attempting to build a tree
    even when the document is not Well Formed. """
    rv = libxml2mod.xmlRecoverDoc(cur)
    if rv is None:
        raise treeError('xmlRecoverDoc() failed')
    return xmlDoc(_obj=rv)
def recoverFile(filename):
    """Parse an XML file (compressed input handled transparently when
    support was compiled in), attempting to build a tree even when
    the document is not Well Formed. """
    rv = libxml2mod.xmlRecoverFile(filename)
    if rv is None:
        raise treeError('xmlRecoverFile() failed')
    return xmlDoc(_obj=rv)
def recoverMemory(buffer, size):
    """Parse an in-memory XML block, attempting to build a tree even
    when the document is not Well Formed. """
    rv = libxml2mod.xmlRecoverMemory(buffer, size)
    if rv is None:
        raise treeError('xmlRecoverMemory() failed')
    return xmlDoc(_obj=rv)
def substituteEntitiesDefault(val):
    """Set and return the previous default for entity substitution.
    By default the parser keeps entity references rather than
    substituting their values in the output; use this to change the
    default behavior, or SAX::substituteEntities() for a per-file
    setting. """
    return libxml2mod.xmlSubstituteEntitiesDefault(val)
#
# Functions from module parserInternals
#
def checkLanguageID(lang):
    """Check that the value conforms to the (somewhat deprecated)
    LanguageID production of XML 1.0, extended per RFC 5646 language
    tags (language ["-" script] ["-" region] *("-" variant)
    *("-" extension) ["-" privateuse]); irregular i-xxx IANA and
    x-/X- user codes are still accepted. Extension and privateuse
    subtags beyond this are not handled. """
    return libxml2mod.xmlCheckLanguageID(lang)
def copyChar(len, out, val):
    """Append the char value to the array. """
    return libxml2mod.xmlCopyChar(len, out, val)
def copyCharMultiByte(out, val):
    """Append the char value to the array. """
    return libxml2mod.xmlCopyCharMultiByte(out, val)
def createEntityParserCtxt(URL, ID, base):
    """Build a parser context for an external entity; compressed
    input is handled transparently when support was compiled in. """
    rv = libxml2mod.xmlCreateEntityParserCtxt(URL, ID, base)
    if rv is None:
        raise parserError('xmlCreateEntityParserCtxt() failed')
    return parserCtxt(_obj=rv)
def createFileParserCtxt(filename):
    """Build a parser context for a file; compressed input is handled
    transparently when support was compiled in. """
    rv = libxml2mod.xmlCreateFileParserCtxt(filename)
    if rv is None:
        raise parserError('xmlCreateFileParserCtxt() failed')
    return parserCtxt(_obj=rv)
def createMemoryParserCtxt(buffer, size):
    """Build a parser context for an in-memory XML document. """
    rv = libxml2mod.xmlCreateMemoryParserCtxt(buffer, size)
    if rv is None:
        raise parserError('xmlCreateMemoryParserCtxt() failed')
    return parserCtxt(_obj=rv)
def createURLParserCtxt(filename, options):
    """Build a parser context for a file or URL; compressed input is
    handled transparently for file accesses when support was
    compiled in. """
    rv = libxml2mod.xmlCreateURLParserCtxt(filename, options)
    if rv is None:
        raise parserError('xmlCreateURLParserCtxt() failed')
    return parserCtxt(_obj=rv)
def htmlCreateFileParserCtxt(filename, encoding):
    """Build an HTML parser context for a file; compressed input is
    handled transparently when support was compiled in. """
    rv = libxml2mod.htmlCreateFileParserCtxt(filename, encoding)
    if rv is None:
        raise parserError('htmlCreateFileParserCtxt() failed')
    return parserCtxt(_obj=rv)
def htmlInitAutoClose():
    """Build htmlStartCloseIndex for fast closing-tag lookup; not
    reentrant - call xmlInitParser() once first in multithreaded
    programs. """
    libxml2mod.htmlInitAutoClose()
def isLetter(c):
    """Check the character against production [84] Letter ::=
    BaseChar | Ideographic. """
    return libxml2mod.xmlIsLetter(c)
def namePop(ctxt):
    """Pop the top element name off the name stack. """
    ctxt__o = None if ctxt is None else ctxt._o
    return libxml2mod.namePop(ctxt__o)
def namePush(ctxt, value):
    """Push a new element name onto the name stack. """
    ctxt__o = None if ctxt is None else ctxt._o
    return libxml2mod.namePush(ctxt__o, value)
def nodePop(ctxt):
    """Pop the top element node off the node stack. """
    ctxt__o = None if ctxt is None else ctxt._o
    rv = libxml2mod.nodePop(ctxt__o)
    if rv is None:
        raise treeError('nodePop() failed')
    return xmlNode(_obj=rv)
def nodePush(ctxt, value):
    """Push a new element node onto the node stack. """
    ctxt__o = None if ctxt is None else ctxt._o
    value__o = None if value is None else value._o
    return libxml2mod.nodePush(ctxt__o, value__o)
#
# Functions from module python
#
def SAXParseFile(SAX, URI, recover):
    """Parse the XML file or resource at @URI, feeding the event flow
    to the SAX object. """
    libxml2mod.xmlSAXParseFile(SAX, URI, recover)
def createInputBuffer(file, encoding):
    """Wrap a Python file in a libxml2 input buffer. """
    rv = libxml2mod.xmlCreateInputBuffer(file, encoding)
    if rv is None:
        raise treeError('xmlCreateInputBuffer() failed')
    return inputBuffer(_obj=rv)
def createOutputBuffer(file, encoding):
    """Wrap a Python file in a libxml2 output buffer. """
    rv = libxml2mod.xmlCreateOutputBuffer(file, encoding)
    if rv is None:
        raise treeError('xmlCreateOutputBuffer() failed')
    return outputBuffer(_obj=rv)
def createPushParser(SAX, chunk, size, URI):
    """Create a progressive XML parser context producing either an
    event flow (SAX object given) or a DOM tree (SAX is None). """
    rv = libxml2mod.xmlCreatePushParser(SAX, chunk, size, URI)
    if rv is None:
        raise parserError('xmlCreatePushParser() failed')
    return parserCtxt(_obj=rv)
def debugMemory(activate):
    """Toggle memory debugging; returns the number of bytes allocated
    and not yet freed by libxml2 since debugging was switched on. """
    return libxml2mod.xmlDebugMemory(activate)
def dumpMemory():
    """Dump the allocated memory into the file .memdump. """
    libxml2mod.xmlDumpMemory()
def htmlCreatePushParser(SAX, chunk, size, URI):
    """Create a progressive HTML parser context producing either an
    event flow (SAX object given) or a DOM tree (SAX is None). """
    rv = libxml2mod.htmlCreatePushParser(SAX, chunk, size, URI)
    if rv is None:
        raise parserError('htmlCreatePushParser() failed')
    return parserCtxt(_obj=rv)
def htmlSAXParseFile(SAX, URI, encoding):
    """Parse the HTML file or resource at @URI, feeding the event
    flow to the SAX object. """
    libxml2mod.htmlSAXParseFile(SAX, URI, encoding)
def memoryUsed():
    """Return the total amount of memory allocated by libxml2. """
    return libxml2mod.xmlMemoryUsed()
def newNode(name):
    """Create a new node. """
    rv = libxml2mod.xmlNewNode(name)
    if rv is None:
        raise treeError('xmlNewNode() failed')
    return xmlNode(_obj=rv)
def pythonCleanupParser():
    """Reclaim the library's parsing-related global memory (not
    document memory); only call once the process is done using the
    library or XML documents built with it. """
    libxml2mod.xmlPythonCleanupParser()
def setEntityLoader(resolver):
    """Install a Python function as the entity resolver. """
    return libxml2mod.xmlSetEntityLoader(resolver)
#
# Functions from module relaxng
#
def relaxNGCleanupTypes():
    """Release the default Schemas type library used by RelaxNG. """
    libxml2mod.xmlRelaxNGCleanupTypes()
def relaxNGInitTypes():
    """Initialize the default type libraries. """
    return libxml2mod.xmlRelaxNGInitTypes()
def relaxNGNewMemParserCtxt(buffer, size):
    """Build a RelaxNG parse context for a memory buffer expected to
    hold an XML RelaxNG file. """
    rv = libxml2mod.xmlRelaxNGNewMemParserCtxt(buffer, size)
    if rv is None:
        raise parserError('xmlRelaxNGNewMemParserCtxt() failed')
    return relaxNgParserCtxt(_obj=rv)
def relaxNGNewParserCtxt(URL):
    """Build a RelaxNG parse context for a file/resource expected to
    hold an XML RelaxNG file. """
    rv = libxml2mod.xmlRelaxNGNewParserCtxt(URL)
    if rv is None:
        raise parserError('xmlRelaxNGNewParserCtxt() failed')
    return relaxNgParserCtxt(_obj=rv)
#
# Functions from module tree
#
def buildQName(ncname, prefix, memory, len):
    """Build the QName @prefix:@ncname in @memory when there is room
    and prefix is neither None nor empty, otherwise allocate a new
    string; returns ncname when prefix is None or empty. """
    return libxml2mod.xmlBuildQName(ncname, prefix, memory, len)
def compressMode():
    """Return the default (ZLIB-based) compression mode in use. """
    return libxml2mod.xmlGetCompressMode()
def isXHTML(systemID, publicID):
    """Tell whether the document matches an XHTML DTD. """
    return libxml2mod.xmlIsXHTML(systemID, publicID)
def newComment(content):
    """Create a new comment node. """
    rv = libxml2mod.xmlNewComment(content)
    if rv is None:
        raise treeError('xmlNewComment() failed')
    return xmlNode(_obj=rv)
def newDoc(version):
    """Create a new XML document. """
    rv = libxml2mod.xmlNewDoc(version)
    if rv is None:
        raise treeError('xmlNewDoc() failed')
    return xmlDoc(_obj=rv)
def newPI(name, content):
    """Create a processing-instruction element; prefer xmlDocNewPI to
    get string interning. """
    rv = libxml2mod.xmlNewPI(name, content)
    if rv is None:
        raise treeError('xmlNewPI() failed')
    return xmlNode(_obj=rv)
def newText(content):
    """Create a new text node. """
    rv = libxml2mod.xmlNewText(content)
    if rv is None:
        raise treeError('xmlNewText() failed')
    return xmlNode(_obj=rv)
def newTextLen(content, len):
    """Create a new text node, passing the content's length
    explicitly. """
    rv = libxml2mod.xmlNewTextLen(content, len)
    if rv is None:
        raise treeError('xmlNewTextLen() failed')
    return xmlNode(_obj=rv)
def setCompressMode(mode):
    """Set the default (ZLIB-based) compression mode; valid values
    run from 0 (uncompressed) to 9 (max compression). """
    libxml2mod.xmlSetCompressMode(mode)
def validateNCName(value, space):
    """Check that a value lies in the lexical space of NCName. """
    return libxml2mod.xmlValidateNCName(value, space)
def validateNMToken(value, space):
    """Check that a value lies in the lexical space of NMToken. """
    return libxml2mod.xmlValidateNMToken(value, space)
def validateName(value, space):
    """Check that a value lies in the lexical space of Name. """
    return libxml2mod.xmlValidateName(value, space)
def validateQName(value, space):
    """Check that a value lies in the lexical space of QName. """
    return libxml2mod.xmlValidateQName(value, space)
#
# Functions from module uri
#
def URIEscape(str):
"""Escaping routine, does not do validity checks ! It will try
to escape the chars needing this, but this is heuristic
based it's impossible to be sure. """
ret = libxml2mod.xmlURIEscape(str)
return ret
def URIEscapeStr(str, list):
"""This routine escapes a string to hex, ignoring reserved
characters (a-z) and the characters in the exception list. """
ret = libxml2mod.xmlURIEscapeStr(str, list)
return ret
def URIUnescapeString(str, len, target):
"""Unescaping routine, but does not check that the string is
an URI. The output is a direct unsigned char translation of
%XX values (no encoding) Note that the length of the result
can only be smaller or same size as the input string. """
ret = libxml2mod.xmlURIUnescapeString(str, len, target)
return ret
def buildRelativeURI(URI, base):
    """Express the URI of the reference in terms relative to the
       base.  Examples, with base =
       "http://site1.com/docs/book1.html":
         URI input                        URI returned
         docs/pic1.gif                    pic1.gif
         docs/img/pic1.gif                img/pic1.gif
         img/pic1.gif                     ../img/pic1.gif
         http://site2.com/docs/pic1.gif   http://site2.com/docs/pic1.gif
       and with base = "docs/book1.html":
         URI input                        URI returned
         docs/pic1.gif                    pic1.gif
         docs/img/pic1.gif                img/pic1.gif
         img/pic1.gif                     ../img/pic1.gif
         http://site1.com/docs/pic1.gif   http://site1.com/docs/pic1.gif
       Note: if the URI reference is really weird or complicated,
       it may be worthwhile to first convert it into a "nice" one
       by calling xmlBuildURI (using 'base') before calling this
       routine, since this routine (for reasonable efficiency)
       assumes the URI has already been through some validation. """
    ret = libxml2mod.xmlBuildRelativeURI(URI, base)
    return ret
def buildURI(URI, base):
    """Compute the final URI of the reference done by checking
       that the given URI is valid, and building the final URI
       using the base URI.  This is processed according to
       section 5.2 of RFC 2396: Resolving Relative References to
       Absolute Form. """
    ret = libxml2mod.xmlBuildURI(URI, base)
    return ret
def canonicPath(path):
    """Build a canonical path out of the given @path. """
    return libxml2mod.xmlCanonicPath(path)
def createURI():
    """Allocate an empty xmlURI object. """
    obj = libxml2mod.xmlCreateURI()
    if obj is None:
        raise uriError('xmlCreateURI() failed')
    return URI(_obj=obj)
def normalizeURIPath(path):
    """Apply the five normalization steps of RFC 2396 Section
       5.2 (steps 6.c through 6.g) to @path.  Normalization
       occurs directly on the string; no new allocation is
       done. """
    return libxml2mod.xmlNormalizeURIPath(path)
def parseURI(str):
    """Parse @str as an URI based on RFC 3986: URI-reference =
       [ absoluteURI | relativeURI ] [ "#" fragment ] """
    obj = libxml2mod.xmlParseURI(str)
    if obj is None:
        raise uriError('xmlParseURI() failed')
    return URI(_obj=obj)
def parseURIRaw(str, raw):
    """Parse an URI while keeping the original fragments intact:
       URI-reference = URI / relative-ref """
    obj = libxml2mod.xmlParseURIRaw(str, raw)
    if obj is None:
        raise uriError('xmlParseURIRaw() failed')
    return URI(_obj=obj)
def pathToURI(path):
    """Build an URI expressing the existing @path. """
    return libxml2mod.xmlPathToURI(path)
#
# Functions from module valid
#
def newValidCtxt():
    """Allocate a fresh validation context structure. """
    ctxt = libxml2mod.xmlNewValidCtxt()
    if ctxt is None:
        raise treeError('xmlNewValidCtxt() failed')
    return ValidCtxt(_obj=ctxt)
def validateNameValue(value):
    """Validate that @value matches the Name production. """
    return libxml2mod.xmlValidateNameValue(value)
def validateNamesValue(value):
    """Validate that @value matches the Names production. """
    return libxml2mod.xmlValidateNamesValue(value)
def validateNmtokenValue(value):
    """Validate that @value matches the Nmtoken production
       [ VC: Name Token ]. """
    return libxml2mod.xmlValidateNmtokenValue(value)
def validateNmtokensValue(value):
    """Validate that @value matches the Nmtokens production
       [ VC: Name Token ]. """
    return libxml2mod.xmlValidateNmtokensValue(value)
#
# Functions from module xmlIO
#
def checkFilename(path):
    """Check whether @path is a valid source (file, socket...)
       for XML.  If stat() is not available on the target
       machine the check is reduced.
       NOTE(review): the upstream C doc comment is truncated at
       this point; the exact reduced behaviour should be
       confirmed against the libxml2 sources. """
    ret = libxml2mod.xmlCheckFilename(path)
    return ret
def cleanupInputCallbacks():
    """Clear the entire input callback table, including the
       compiled-in I/O handlers. """
    libxml2mod.xmlCleanupInputCallbacks()
def cleanupOutputCallbacks():
    """Clear the entire output callback table, including the
       compiled-in I/O callbacks. """
    libxml2mod.xmlCleanupOutputCallbacks()
def fileMatch(filename):
    """Check whether @filename can be handled as input from a
       FILE *. """
    return libxml2mod.xmlFileMatch(filename)
def iOFTPMatch(filename):
    """Check whether the URI @filename matches an FTP one. """
    return libxml2mod.xmlIOFTPMatch(filename)
def iOHTTPMatch(filename):
    """Check whether the URI @filename matches an HTTP one. """
    return libxml2mod.xmlIOHTTPMatch(filename)
def normalizeWindowsPath(path):
    """Obsolete helper; see xmlURIFromPath in uri.c for a better
       solution. """
    return libxml2mod.xmlNormalizeWindowsPath(path)
def parserGetDirectory(filename):
    """Look up the directory containing @filename. """
    return libxml2mod.xmlParserGetDirectory(filename)
def registerDefaultInputCallbacks():
    """Register the default compiled-in input I/O handlers. """
    libxml2mod.xmlRegisterDefaultInputCallbacks()
def registerDefaultOutputCallbacks():
    """Register the default compiled-in output I/O handlers. """
    libxml2mod.xmlRegisterDefaultOutputCallbacks()
def registerHTTPPostCallbacks():
    """By default libxml submits HTTP output requests with the
       "PUT" method; calling this switches HTTP output to the
       "POST" method instead. """
    libxml2mod.xmlRegisterHTTPPostCallbacks()
#
# Functions from module xmlerror
#
def lastError():
    """Fetch the last global error registered; this is
       per-thread when compiled with thread support. """
    err = libxml2mod.xmlGetLastError()
    if err is None:
        raise treeError('xmlGetLastError() failed')
    return Error(_obj=err)
def resetLastError():
    """Clean up the last global error registered.  For a parsing
       error this does not change the well-formedness result. """
    libxml2mod.xmlResetLastError()
#
# Functions from module xmlreader
#
def newTextReaderFilename(URI):
    """Create an xmlTextReader structure fed with the resource
       found at @URI. """
    reader = libxml2mod.xmlNewTextReaderFilename(URI)
    if reader is None:
        raise treeError('xmlNewTextReaderFilename() failed')
    return xmlTextReader(_obj=reader)
def readerForDoc(cur, URL, encoding, options):
    """Create an xmltextReader for an in-memory XML document;
       the parsing flags @options are a combination of
       xmlParserOption. """
    reader = libxml2mod.xmlReaderForDoc(cur, URL, encoding, options)
    if reader is None:
        raise treeError('xmlReaderForDoc() failed')
    return xmlTextReader(_obj=reader)
def readerForFd(fd, URL, encoding, options):
    """Create an xmltextReader reading XML from the file
       descriptor @fd; the parsing flags @options are a
       combination of xmlParserOption.  NOTE: the descriptor is
       NOT closed when the reader is closed or reset. """
    reader = libxml2mod.xmlReaderForFd(fd, URL, encoding, options)
    if reader is None:
        raise treeError('xmlReaderForFd() failed')
    return xmlTextReader(_obj=reader)
def readerForFile(filename, encoding, options):
    """Parse an XML file from the filesystem or the network; the
       parsing flags @options are a combination of
       xmlParserOption. """
    reader = libxml2mod.xmlReaderForFile(filename, encoding, options)
    if reader is None:
        raise treeError('xmlReaderForFile() failed')
    return xmlTextReader(_obj=reader)
def readerForMemory(buffer, size, URL, encoding, options):
    """Create an xmltextReader for an in-memory XML document;
       the parsing flags @options are a combination of
       xmlParserOption. """
    reader = libxml2mod.xmlReaderForMemory(buffer, size, URL, encoding, options)
    if reader is None:
        raise treeError('xmlReaderForMemory() failed')
    return xmlTextReader(_obj=reader)
#
# Functions from module xmlregexp
#
def regexpCompile(regexp):
    """Parse a regular expression conforming to XML Schemas Part
       2 Datatype Appendix F and build an automata suitable for
       testing strings against it. """
    compiled = libxml2mod.xmlRegexpCompile(regexp)
    if compiled is None:
        raise treeError('xmlRegexpCompile() failed')
    return xmlReg(_obj=compiled)
#
# Functions from module xmlschemas
#
def schemaNewMemParserCtxt(buffer, size):
    """Create an XML Schemas parse context for the given memory
       buffer, expected to contain an XML Schemas file. """
    ctxt = libxml2mod.xmlSchemaNewMemParserCtxt(buffer, size)
    if ctxt is None:
        raise parserError('xmlSchemaNewMemParserCtxt() failed')
    return SchemaParserCtxt(_obj=ctxt)
def schemaNewParserCtxt(URL):
    """Create an XML Schemas parse context for the file or
       resource at @URL, expected to contain an XML Schemas
       file. """
    ctxt = libxml2mod.xmlSchemaNewParserCtxt(URL)
    if ctxt is None:
        raise parserError('xmlSchemaNewParserCtxt() failed')
    return SchemaParserCtxt(_obj=ctxt)
#
# Functions from module xmlschemastypes
#
def schemaCleanupTypes():
    """Clean up the default XML Schemas type library. """
    libxml2mod.xmlSchemaCleanupTypes()
def schemaCollapseString(value):
    """Remove and normalize white space in @value. """
    return libxml2mod.xmlSchemaCollapseString(value)
def schemaInitTypes():
    """Initialize the default XML Schemas type library. """
    libxml2mod.xmlSchemaInitTypes()
def schemaWhiteSpaceReplace(value):
    """Replace 0xd, 0x9 and 0xa characters with a space. """
    return libxml2mod.xmlSchemaWhiteSpaceReplace(value)
#
# Functions from module xmlstring
#
def UTF8Charcmp(utf1, utf2):
    """Compare the two UCS4 values. """
    return libxml2mod.xmlUTF8Charcmp(utf1, utf2)
def UTF8Size(utf):
    """Calculate the internal size of a UTF8 character. """
    return libxml2mod.xmlUTF8Size(utf)
def UTF8Strlen(utf):
    """Compute the length of an UTF8 string; no full UTF8
       validation of the content is performed. """
    return libxml2mod.xmlUTF8Strlen(utf)
def UTF8Strloc(utf, utfchar):
    """Provide the relative location of the UTF8 char @utfchar
       within @utf. """
    return libxml2mod.xmlUTF8Strloc(utf, utfchar)
def UTF8Strndup(utf, len):
    """A strndup equivalent for arrays of UTF8 characters. """
    return libxml2mod.xmlUTF8Strndup(utf, len)
def UTF8Strpos(utf, pos):
    """Provide the equivalent of fetching the character at index
       @pos from a string array. """
    return libxml2mod.xmlUTF8Strpos(utf, pos)
def UTF8Strsize(utf, len):
    """Compute the storage size of an UTF8 string.  The
       behaviour is not guaranteed if the input string is not
       valid UTF-8. """
    ret = libxml2mod.xmlUTF8Strsize(utf, len)
    return ret
def UTF8Strsub(utf, start, len):
    """Create a substring of the given UTF-8 string.  Note:
       positions are given in units of UTF-8 characters. """
    return libxml2mod.xmlUTF8Strsub(utf, start, len)
def checkUTF8(utf):
    """Check @utf (assumed null-terminated) for being valid
       UTF-8.  This function is not super-strict: it allows
       longer UTF-8 sequences than necessary (note that Java is
       capable of producing such sequences if provoked).  It
       checks the 4-byte maximum size but not the 0x10ffff
       maximum value. """
    return libxml2mod.xmlCheckUTF8(utf)
#
# Functions from module xmlunicode
#
def uCSIsAegeanNumbers(code):
"""Check whether the character is part of AegeanNumbers UCS
Block """
ret = libxml2mod.xmlUCSIsAegeanNumbers(code)
return ret
def uCSIsAlphabeticPresentationForms(code):
"""Check whether the character is part of
AlphabeticPresentationForms UCS Block """
ret = libxml2mod.xmlUCSIsAlphabeticPresentationForms(code)
return ret
def uCSIsArabic(code):
"""Check whether the character is part of Arabic UCS Block """
ret = libxml2mod.xmlUCSIsArabic(code)
return ret
def uCSIsArabicPresentationFormsA(code):
"""Check whether the character is part of
ArabicPresentationForms-A UCS Block """
ret = libxml2mod.xmlUCSIsArabicPresentationFormsA(code)
return ret
def uCSIsArabicPresentationFormsB(code):
"""Check whether the character is part of
ArabicPresentationForms-B UCS Block """
ret = libxml2mod.xmlUCSIsArabicPresentationFormsB(code)
return ret
def uCSIsArmenian(code):
"""Check whether the character is part of Armenian UCS Block """
ret = libxml2mod.xmlUCSIsArmenian(code)
return ret
def uCSIsArrows(code):
"""Check whether the character is part of Arrows UCS Block """
ret = libxml2mod.xmlUCSIsArrows(code)
return ret
def uCSIsBasicLatin(code):
"""Check whether the character is part of BasicLatin UCS Block """
ret = libxml2mod.xmlUCSIsBasicLatin(code)
return ret
def uCSIsBengali(code):
"""Check whether the character is part of Bengali UCS Block """
ret = libxml2mod.xmlUCSIsBengali(code)
return ret
def uCSIsBlock(code, block):
    """Check whether character @code belongs to the named UCS
       @block. """
    return libxml2mod.xmlUCSIsBlock(code, block)
def uCSIsBlockElements(code):
"""Check whether the character is part of BlockElements UCS
Block """
ret = libxml2mod.xmlUCSIsBlockElements(code)
return ret
def uCSIsBopomofo(code):
"""Check whether the character is part of Bopomofo UCS Block """
ret = libxml2mod.xmlUCSIsBopomofo(code)
return ret
def uCSIsBopomofoExtended(code):
"""Check whether the character is part of BopomofoExtended UCS
Block """
ret = libxml2mod.xmlUCSIsBopomofoExtended(code)
return ret
def uCSIsBoxDrawing(code):
"""Check whether the character is part of BoxDrawing UCS Block """
ret = libxml2mod.xmlUCSIsBoxDrawing(code)
return ret
def uCSIsBraillePatterns(code):
"""Check whether the character is part of BraillePatterns UCS
Block """
ret = libxml2mod.xmlUCSIsBraillePatterns(code)
return ret
def uCSIsBuhid(code):
"""Check whether the character is part of Buhid UCS Block """
ret = libxml2mod.xmlUCSIsBuhid(code)
return ret
def uCSIsByzantineMusicalSymbols(code):
"""Check whether the character is part of
ByzantineMusicalSymbols UCS Block """
ret = libxml2mod.xmlUCSIsByzantineMusicalSymbols(code)
return ret
def uCSIsCJKCompatibility(code):
"""Check whether the character is part of CJKCompatibility UCS
Block """
ret = libxml2mod.xmlUCSIsCJKCompatibility(code)
return ret
def uCSIsCJKCompatibilityForms(code):
"""Check whether the character is part of
CJKCompatibilityForms UCS Block """
ret = libxml2mod.xmlUCSIsCJKCompatibilityForms(code)
return ret
def uCSIsCJKCompatibilityIdeographs(code):
"""Check whether the character is part of
CJKCompatibilityIdeographs UCS Block """
ret = libxml2mod.xmlUCSIsCJKCompatibilityIdeographs(code)
return ret
def uCSIsCJKCompatibilityIdeographsSupplement(code):
"""Check whether the character is part of
CJKCompatibilityIdeographsSupplement UCS Block """
ret = libxml2mod.xmlUCSIsCJKCompatibilityIdeographsSupplement(code)
return ret
def uCSIsCJKRadicalsSupplement(code):
"""Check whether the character is part of
CJKRadicalsSupplement UCS Block """
ret = libxml2mod.xmlUCSIsCJKRadicalsSupplement(code)
return ret
def uCSIsCJKSymbolsandPunctuation(code):
"""Check whether the character is part of
CJKSymbolsandPunctuation UCS Block """
ret = libxml2mod.xmlUCSIsCJKSymbolsandPunctuation(code)
return ret
def uCSIsCJKUnifiedIdeographs(code):
"""Check whether the character is part of CJKUnifiedIdeographs
UCS Block """
ret = libxml2mod.xmlUCSIsCJKUnifiedIdeographs(code)
return ret
def uCSIsCJKUnifiedIdeographsExtensionA(code):
"""Check whether the character is part of
CJKUnifiedIdeographsExtensionA UCS Block """
ret = libxml2mod.xmlUCSIsCJKUnifiedIdeographsExtensionA(code)
return ret
def uCSIsCJKUnifiedIdeographsExtensionB(code):
"""Check whether the character is part of
CJKUnifiedIdeographsExtensionB UCS Block """
ret = libxml2mod.xmlUCSIsCJKUnifiedIdeographsExtensionB(code)
return ret
def uCSIsCat(code, cat):
    """Check whether character @code belongs to the named UCS
       category @cat. """
    return libxml2mod.xmlUCSIsCat(code, cat)
def uCSIsCatC(code):
"""Check whether the character is part of C UCS Category """
ret = libxml2mod.xmlUCSIsCatC(code)
return ret
def uCSIsCatCc(code):
"""Check whether the character is part of Cc UCS Category """
ret = libxml2mod.xmlUCSIsCatCc(code)
return ret
def uCSIsCatCf(code):
"""Check whether the character is part of Cf UCS Category """
ret = libxml2mod.xmlUCSIsCatCf(code)
return ret
def uCSIsCatCo(code):
"""Check whether the character is part of Co UCS Category """
ret = libxml2mod.xmlUCSIsCatCo(code)
return ret
def uCSIsCatCs(code):
"""Check whether the character is part of Cs UCS Category """
ret = libxml2mod.xmlUCSIsCatCs(code)
return ret
def uCSIsCatL(code):
"""Check whether the character is part of L UCS Category """
ret = libxml2mod.xmlUCSIsCatL(code)
return ret
def uCSIsCatLl(code):
"""Check whether the character is part of Ll UCS Category """
ret = libxml2mod.xmlUCSIsCatLl(code)
return ret
def uCSIsCatLm(code):
"""Check whether the character is part of Lm UCS Category """
ret = libxml2mod.xmlUCSIsCatLm(code)
return ret
def uCSIsCatLo(code):
"""Check whether the character is part of Lo UCS Category """
ret = libxml2mod.xmlUCSIsCatLo(code)
return ret
def uCSIsCatLt(code):
"""Check whether the character is part of Lt UCS Category """
ret = libxml2mod.xmlUCSIsCatLt(code)
return ret
def uCSIsCatLu(code):
"""Check whether the character is part of Lu UCS Category """
ret = libxml2mod.xmlUCSIsCatLu(code)
return ret
def uCSIsCatM(code):
"""Check whether the character is part of M UCS Category """
ret = libxml2mod.xmlUCSIsCatM(code)
return ret
def uCSIsCatMc(code):
"""Check whether the character is part of Mc UCS Category """
ret = libxml2mod.xmlUCSIsCatMc(code)
return ret
def uCSIsCatMe(code):
"""Check whether the character is part of Me UCS Category """
ret = libxml2mod.xmlUCSIsCatMe(code)
return ret
def uCSIsCatMn(code):
"""Check whether the character is part of Mn UCS Category """
ret = libxml2mod.xmlUCSIsCatMn(code)
return ret
def uCSIsCatN(code):
"""Check whether the character is part of N UCS Category """
ret = libxml2mod.xmlUCSIsCatN(code)
return ret
def uCSIsCatNd(code):
"""Check whether the character is part of Nd UCS Category """
ret = libxml2mod.xmlUCSIsCatNd(code)
return ret
def uCSIsCatNl(code):
"""Check whether the character is part of Nl UCS Category """
ret = libxml2mod.xmlUCSIsCatNl(code)
return ret
def uCSIsCatNo(code):
"""Check whether the character is part of No UCS Category """
ret = libxml2mod.xmlUCSIsCatNo(code)
return ret
def uCSIsCatP(code):
"""Check whether the character is part of P UCS Category """
ret = libxml2mod.xmlUCSIsCatP(code)
return ret
def uCSIsCatPc(code):
"""Check whether the character is part of Pc UCS Category """
ret = libxml2mod.xmlUCSIsCatPc(code)
return ret
def uCSIsCatPd(code):
"""Check whether the character is part of Pd UCS Category """
ret = libxml2mod.xmlUCSIsCatPd(code)
return ret
def uCSIsCatPe(code):
"""Check whether the character is part of Pe UCS Category """
ret = libxml2mod.xmlUCSIsCatPe(code)
return ret
def uCSIsCatPf(code):
"""Check whether the character is part of Pf UCS Category """
ret = libxml2mod.xmlUCSIsCatPf(code)
return ret
def uCSIsCatPi(code):
"""Check whether the character is part of Pi UCS Category """
ret = libxml2mod.xmlUCSIsCatPi(code)
return ret
def uCSIsCatPo(code):
"""Check whether the character is part of Po UCS Category """
ret = libxml2mod.xmlUCSIsCatPo(code)
return ret
def uCSIsCatPs(code):
"""Check whether the character is part of Ps UCS Category """
ret = libxml2mod.xmlUCSIsCatPs(code)
return ret
def uCSIsCatS(code):
"""Check whether the character is part of S UCS Category """
ret = libxml2mod.xmlUCSIsCatS(code)
return ret
def uCSIsCatSc(code):
"""Check whether the character is part of Sc UCS Category """
ret = libxml2mod.xmlUCSIsCatSc(code)
return ret
def uCSIsCatSk(code):
"""Check whether the character is part of Sk UCS Category """
ret = libxml2mod.xmlUCSIsCatSk(code)
return ret
def uCSIsCatSm(code):
"""Check whether the character is part of Sm UCS Category """
ret = libxml2mod.xmlUCSIsCatSm(code)
return ret
def uCSIsCatSo(code):
"""Check whether the character is part of So UCS Category """
ret = libxml2mod.xmlUCSIsCatSo(code)
return ret
def uCSIsCatZ(code):
"""Check whether the character is part of Z UCS Category """
ret = libxml2mod.xmlUCSIsCatZ(code)
return ret
def uCSIsCatZl(code):
"""Check whether the character is part of Zl UCS Category """
ret = libxml2mod.xmlUCSIsCatZl(code)
return ret
def uCSIsCatZp(code):
"""Check whether the character is part of Zp UCS Category """
ret = libxml2mod.xmlUCSIsCatZp(code)
return ret
def uCSIsCatZs(code):
"""Check whether the character is part of Zs UCS Category """
ret = libxml2mod.xmlUCSIsCatZs(code)
return ret
def uCSIsCherokee(code):
"""Check whether the character is part of Cherokee UCS Block """
ret = libxml2mod.xmlUCSIsCherokee(code)
return ret
def uCSIsCombiningDiacriticalMarks(code):
"""Check whether the character is part of
CombiningDiacriticalMarks UCS Block """
ret = libxml2mod.xmlUCSIsCombiningDiacriticalMarks(code)
return ret
def uCSIsCombiningDiacriticalMarksforSymbols(code):
"""Check whether the character is part of
CombiningDiacriticalMarksforSymbols UCS Block """
ret = libxml2mod.xmlUCSIsCombiningDiacriticalMarksforSymbols(code)
return ret
def uCSIsCombiningHalfMarks(code):
"""Check whether the character is part of CombiningHalfMarks
UCS Block """
ret = libxml2mod.xmlUCSIsCombiningHalfMarks(code)
return ret
def uCSIsCombiningMarksforSymbols(code):
"""Check whether the character is part of
CombiningMarksforSymbols UCS Block """
ret = libxml2mod.xmlUCSIsCombiningMarksforSymbols(code)
return ret
def uCSIsControlPictures(code):
"""Check whether the character is part of ControlPictures UCS
Block """
ret = libxml2mod.xmlUCSIsControlPictures(code)
return ret
def uCSIsCurrencySymbols(code):
"""Check whether the character is part of CurrencySymbols UCS
Block """
ret = libxml2mod.xmlUCSIsCurrencySymbols(code)
return ret
def uCSIsCypriotSyllabary(code):
"""Check whether the character is part of CypriotSyllabary UCS
Block """
ret = libxml2mod.xmlUCSIsCypriotSyllabary(code)
return ret
def uCSIsCyrillic(code):
"""Check whether the character is part of Cyrillic UCS Block """
ret = libxml2mod.xmlUCSIsCyrillic(code)
return ret
def uCSIsCyrillicSupplement(code):
"""Check whether the character is part of CyrillicSupplement
UCS Block """
ret = libxml2mod.xmlUCSIsCyrillicSupplement(code)
return ret
def uCSIsDeseret(code):
"""Check whether the character is part of Deseret UCS Block """
ret = libxml2mod.xmlUCSIsDeseret(code)
return ret
def uCSIsDevanagari(code):
"""Check whether the character is part of Devanagari UCS Block """
ret = libxml2mod.xmlUCSIsDevanagari(code)
return ret
def uCSIsDingbats(code):
"""Check whether the character is part of Dingbats UCS Block """
ret = libxml2mod.xmlUCSIsDingbats(code)
return ret
def uCSIsEnclosedAlphanumerics(code):
"""Check whether the character is part of
EnclosedAlphanumerics UCS Block """
ret = libxml2mod.xmlUCSIsEnclosedAlphanumerics(code)
return ret
def uCSIsEnclosedCJKLettersandMonths(code):
"""Check whether the character is part of
EnclosedCJKLettersandMonths UCS Block """
ret = libxml2mod.xmlUCSIsEnclosedCJKLettersandMonths(code)
return ret
def uCSIsEthiopic(code):
"""Check whether the character is part of Ethiopic UCS Block """
ret = libxml2mod.xmlUCSIsEthiopic(code)
return ret
def uCSIsGeneralPunctuation(code):
"""Check whether the character is part of GeneralPunctuation
UCS Block """
ret = libxml2mod.xmlUCSIsGeneralPunctuation(code)
return ret
def uCSIsGeometricShapes(code):
"""Check whether the character is part of GeometricShapes UCS
Block """
ret = libxml2mod.xmlUCSIsGeometricShapes(code)
return ret
def uCSIsGeorgian(code):
"""Check whether the character is part of Georgian UCS Block """
ret = libxml2mod.xmlUCSIsGeorgian(code)
return ret
def uCSIsGothic(code):
"""Check whether the character is part of Gothic UCS Block """
ret = libxml2mod.xmlUCSIsGothic(code)
return ret
def uCSIsGreek(code):
"""Check whether the character is part of Greek UCS Block """
ret = libxml2mod.xmlUCSIsGreek(code)
return ret
def uCSIsGreekExtended(code):
"""Check whether the character is part of GreekExtended UCS
Block """
ret = libxml2mod.xmlUCSIsGreekExtended(code)
return ret
def uCSIsGreekandCoptic(code):
"""Check whether the character is part of GreekandCoptic UCS
Block """
ret = libxml2mod.xmlUCSIsGreekandCoptic(code)
return ret
def uCSIsGujarati(code):
"""Check whether the character is part of Gujarati UCS Block """
ret = libxml2mod.xmlUCSIsGujarati(code)
return ret
def uCSIsGurmukhi(code):
"""Check whether the character is part of Gurmukhi UCS Block """
ret = libxml2mod.xmlUCSIsGurmukhi(code)
return ret
def uCSIsHalfwidthandFullwidthForms(code):
"""Check whether the character is part of
HalfwidthandFullwidthForms UCS Block """
ret = libxml2mod.xmlUCSIsHalfwidthandFullwidthForms(code)
return ret
def uCSIsHangulCompatibilityJamo(code):
"""Check whether the character is part of
HangulCompatibilityJamo UCS Block """
ret = libxml2mod.xmlUCSIsHangulCompatibilityJamo(code)
return ret
def uCSIsHangulJamo(code):
"""Check whether the character is part of HangulJamo UCS Block """
ret = libxml2mod.xmlUCSIsHangulJamo(code)
return ret
def uCSIsHangulSyllables(code):
"""Check whether the character is part of HangulSyllables UCS
Block """
ret = libxml2mod.xmlUCSIsHangulSyllables(code)
return ret
def uCSIsHanunoo(code):
"""Check whether the character is part of Hanunoo UCS Block """
ret = libxml2mod.xmlUCSIsHanunoo(code)
return ret
def uCSIsHebrew(code):
"""Check whether the character is part of Hebrew UCS Block """
ret = libxml2mod.xmlUCSIsHebrew(code)
return ret
def uCSIsHighPrivateUseSurrogates(code):
"""Check whether the character is part of
HighPrivateUseSurrogates UCS Block """
ret = libxml2mod.xmlUCSIsHighPrivateUseSurrogates(code)
return ret
def uCSIsHighSurrogates(code):
"""Check whether the character is part of HighSurrogates UCS
Block """
ret = libxml2mod.xmlUCSIsHighSurrogates(code)
return ret
def uCSIsHiragana(code):
"""Check whether the character is part of Hiragana UCS Block """
ret = libxml2mod.xmlUCSIsHiragana(code)
return ret
def uCSIsIPAExtensions(code):
"""Check whether the character is part of IPAExtensions UCS
Block """
ret = libxml2mod.xmlUCSIsIPAExtensions(code)
return ret
def uCSIsIdeographicDescriptionCharacters(code):
"""Check whether the character is part of
IdeographicDescriptionCharacters UCS Block """
ret = libxml2mod.xmlUCSIsIdeographicDescriptionCharacters(code)
return ret
def uCSIsKanbun(code):
"""Check whether the character is part of Kanbun UCS Block """
ret = libxml2mod.xmlUCSIsKanbun(code)
return ret
def uCSIsKangxiRadicals(code):
"""Check whether the character is part of KangxiRadicals UCS
Block """
ret = libxml2mod.xmlUCSIsKangxiRadicals(code)
return ret
def uCSIsKannada(code):
"""Check whether the character is part of Kannada UCS Block """
ret = libxml2mod.xmlUCSIsKannada(code)
return ret
def uCSIsKatakana(code):
"""Check whether the character is part of Katakana UCS Block """
ret = libxml2mod.xmlUCSIsKatakana(code)
return ret
def uCSIsKatakanaPhoneticExtensions(code):
"""Check whether the character is part of
KatakanaPhoneticExtensions UCS Block """
ret = libxml2mod.xmlUCSIsKatakanaPhoneticExtensions(code)
return ret
def uCSIsKhmer(code):
"""Check whether the character is part of Khmer UCS Block """
ret = libxml2mod.xmlUCSIsKhmer(code)
return ret
def uCSIsKhmerSymbols(code):
"""Check whether the character is part of KhmerSymbols UCS
Block """
ret = libxml2mod.xmlUCSIsKhmerSymbols(code)
return ret
def uCSIsLao(code):
"""Check whether the character is part of Lao UCS Block """
ret = libxml2mod.xmlUCSIsLao(code)
return ret
def uCSIsLatin1Supplement(code):
"""Check whether the character is part of Latin-1Supplement
UCS Block """
ret = libxml2mod.xmlUCSIsLatin1Supplement(code)
return ret
def uCSIsLatinExtendedA(code):
"""Check whether the character is part of LatinExtended-A UCS
Block """
ret = libxml2mod.xmlUCSIsLatinExtendedA(code)
return ret
def uCSIsLatinExtendedAdditional(code):
"""Check whether the character is part of
LatinExtendedAdditional UCS Block """
ret = libxml2mod.xmlUCSIsLatinExtendedAdditional(code)
return ret
def uCSIsLatinExtendedB(code):
"""Check whether the character is part of LatinExtended-B UCS
Block """
ret = libxml2mod.xmlUCSIsLatinExtendedB(code)
return ret
def uCSIsLetterlikeSymbols(code):
"""Check whether the character is part of LetterlikeSymbols
UCS Block """
ret = libxml2mod.xmlUCSIsLetterlikeSymbols(code)
return ret
def uCSIsLimbu(code):
"""Check whether the character is part of Limbu UCS Block """
ret = libxml2mod.xmlUCSIsLimbu(code)
return ret
def uCSIsLinearBIdeograms(code):
"""Check whether the character is part of LinearBIdeograms UCS
Block """
ret = libxml2mod.xmlUCSIsLinearBIdeograms(code)
return ret
def uCSIsLinearBSyllabary(code):
"""Check whether the character is part of LinearBSyllabary UCS
Block """
ret = libxml2mod.xmlUCSIsLinearBSyllabary(code)
return ret
def uCSIsLowSurrogates(code):
"""Check whether the character is part of LowSurrogates UCS
Block """
ret = libxml2mod.xmlUCSIsLowSurrogates(code)
return ret
def uCSIsMalayalam(code):
"""Check whether the character is part of Malayalam UCS Block """
ret = libxml2mod.xmlUCSIsMalayalam(code)
return ret
def uCSIsMathematicalAlphanumericSymbols(code):
"""Check whether the character is part of
MathematicalAlphanumericSymbols UCS Block """
ret = libxml2mod.xmlUCSIsMathematicalAlphanumericSymbols(code)
return ret
def uCSIsMathematicalOperators(code):
"""Check whether the character is part of
MathematicalOperators UCS Block """
ret = libxml2mod.xmlUCSIsMathematicalOperators(code)
return ret
def uCSIsMiscellaneousMathematicalSymbolsA(code):
"""Check whether the character is part of
MiscellaneousMathematicalSymbols-A UCS Block """
ret = libxml2mod.xmlUCSIsMiscellaneousMathematicalSymbolsA(code)
return ret
def uCSIsMiscellaneousMathematicalSymbolsB(code):
"""Check whether the character is part of
MiscellaneousMathematicalSymbols-B UCS Block """
ret = libxml2mod.xmlUCSIsMiscellaneousMathematicalSymbolsB(code)
return ret
def uCSIsMiscellaneousSymbols(code):
"""Check whether the character is part of MiscellaneousSymbols
UCS Block """
ret = libxml2mod.xmlUCSIsMiscellaneousSymbols(code)
return ret
def uCSIsMiscellaneousSymbolsandArrows(code):
"""Check whether the character is part of
MiscellaneousSymbolsandArrows UCS Block """
ret = libxml2mod.xmlUCSIsMiscellaneousSymbolsandArrows(code)
return ret
def uCSIsMiscellaneousTechnical(code):
"""Check whether the character is part of
MiscellaneousTechnical UCS Block """
ret = libxml2mod.xmlUCSIsMiscellaneousTechnical(code)
return ret
def uCSIsMongolian(code):
"""Check whether the character is part of Mongolian UCS Block """
ret = libxml2mod.xmlUCSIsMongolian(code)
return ret
def uCSIsMusicalSymbols(code):
"""Check whether the character is part of MusicalSymbols UCS
Block """
ret = libxml2mod.xmlUCSIsMusicalSymbols(code)
return ret
# Generated UCS-block predicates: each asks libxml2 whether the character
# code @code lies inside the named Unicode block.
def uCSIsMyanmar(code):
    """Return non-zero if @code is in the Myanmar UCS block."""
    return libxml2mod.xmlUCSIsMyanmar(code)
def uCSIsNumberForms(code):
    """Return non-zero if @code is in the NumberForms UCS block."""
    return libxml2mod.xmlUCSIsNumberForms(code)
def uCSIsOgham(code):
    """Return non-zero if @code is in the Ogham UCS block."""
    return libxml2mod.xmlUCSIsOgham(code)
def uCSIsOldItalic(code):
    """Return non-zero if @code is in the OldItalic UCS block."""
    return libxml2mod.xmlUCSIsOldItalic(code)
def uCSIsOpticalCharacterRecognition(code):
    """Return non-zero if @code is in the OpticalCharacterRecognition
    UCS block."""
    return libxml2mod.xmlUCSIsOpticalCharacterRecognition(code)
def uCSIsOriya(code):
    """Return non-zero if @code is in the Oriya UCS block."""
    return libxml2mod.xmlUCSIsOriya(code)
def uCSIsOsmanya(code):
    """Return non-zero if @code is in the Osmanya UCS block."""
    return libxml2mod.xmlUCSIsOsmanya(code)
def uCSIsPhoneticExtensions(code):
    """Return non-zero if @code is in the PhoneticExtensions UCS block."""
    return libxml2mod.xmlUCSIsPhoneticExtensions(code)
def uCSIsPrivateUse(code):
    """Return non-zero if @code is in the PrivateUse UCS block."""
    return libxml2mod.xmlUCSIsPrivateUse(code)
def uCSIsPrivateUseArea(code):
    """Return non-zero if @code is in the PrivateUseArea UCS block."""
    return libxml2mod.xmlUCSIsPrivateUseArea(code)
def uCSIsRunic(code):
    """Return non-zero if @code is in the Runic UCS block."""
    return libxml2mod.xmlUCSIsRunic(code)
def uCSIsShavian(code):
    """Return non-zero if @code is in the Shavian UCS block."""
    return libxml2mod.xmlUCSIsShavian(code)
def uCSIsSinhala(code):
    """Return non-zero if @code is in the Sinhala UCS block."""
    return libxml2mod.xmlUCSIsSinhala(code)
def uCSIsSmallFormVariants(code):
    """Return non-zero if @code is in the SmallFormVariants UCS block."""
    return libxml2mod.xmlUCSIsSmallFormVariants(code)
def uCSIsSpacingModifierLetters(code):
    """Return non-zero if @code is in the SpacingModifierLetters UCS
    block."""
    return libxml2mod.xmlUCSIsSpacingModifierLetters(code)
def uCSIsSpecials(code):
    """Return non-zero if @code is in the Specials UCS block."""
    return libxml2mod.xmlUCSIsSpecials(code)
def uCSIsSuperscriptsandSubscripts(code):
    """Return non-zero if @code is in the SuperscriptsandSubscripts UCS
    block."""
    return libxml2mod.xmlUCSIsSuperscriptsandSubscripts(code)
def uCSIsSupplementalArrowsA(code):
    """Return non-zero if @code is in the SupplementalArrows-A UCS block."""
    return libxml2mod.xmlUCSIsSupplementalArrowsA(code)
def uCSIsSupplementalArrowsB(code):
    """Return non-zero if @code is in the SupplementalArrows-B UCS block."""
    return libxml2mod.xmlUCSIsSupplementalArrowsB(code)
def uCSIsSupplementalMathematicalOperators(code):
    """Return non-zero if @code is in the
    SupplementalMathematicalOperators UCS block."""
    return libxml2mod.xmlUCSIsSupplementalMathematicalOperators(code)
def uCSIsSupplementaryPrivateUseAreaA(code):
    """Return non-zero if @code is in the SupplementaryPrivateUseArea-A
    UCS block."""
    return libxml2mod.xmlUCSIsSupplementaryPrivateUseAreaA(code)
def uCSIsSupplementaryPrivateUseAreaB(code):
    """Return non-zero if @code is in the SupplementaryPrivateUseArea-B
    UCS block."""
    return libxml2mod.xmlUCSIsSupplementaryPrivateUseAreaB(code)
def uCSIsSyriac(code):
    """Return non-zero if @code is in the Syriac UCS block."""
    return libxml2mod.xmlUCSIsSyriac(code)
def uCSIsTagalog(code):
    """Return non-zero if @code is in the Tagalog UCS block."""
    return libxml2mod.xmlUCSIsTagalog(code)
def uCSIsTagbanwa(code):
    """Return non-zero if @code is in the Tagbanwa UCS block."""
    return libxml2mod.xmlUCSIsTagbanwa(code)
def uCSIsTags(code):
    """Return non-zero if @code is in the Tags UCS block."""
    return libxml2mod.xmlUCSIsTags(code)
def uCSIsTaiLe(code):
    """Return non-zero if @code is in the TaiLe UCS block."""
    return libxml2mod.xmlUCSIsTaiLe(code)
def uCSIsTaiXuanJingSymbols(code):
    """Return non-zero if @code is in the TaiXuanJingSymbols UCS block."""
    return libxml2mod.xmlUCSIsTaiXuanJingSymbols(code)
def uCSIsTamil(code):
    """Return non-zero if @code is in the Tamil UCS block."""
    return libxml2mod.xmlUCSIsTamil(code)
def uCSIsTelugu(code):
    """Return non-zero if @code is in the Telugu UCS block."""
    return libxml2mod.xmlUCSIsTelugu(code)
def uCSIsThaana(code):
    """Return non-zero if @code is in the Thaana UCS block."""
    return libxml2mod.xmlUCSIsThaana(code)
def uCSIsThai(code):
    """Return non-zero if @code is in the Thai UCS block."""
    return libxml2mod.xmlUCSIsThai(code)
def uCSIsTibetan(code):
    """Return non-zero if @code is in the Tibetan UCS block."""
    return libxml2mod.xmlUCSIsTibetan(code)
def uCSIsUgaritic(code):
    """Return non-zero if @code is in the Ugaritic UCS block."""
    return libxml2mod.xmlUCSIsUgaritic(code)
def uCSIsUnifiedCanadianAboriginalSyllabics(code):
    """Return non-zero if @code is in the
    UnifiedCanadianAboriginalSyllabics UCS block."""
    return libxml2mod.xmlUCSIsUnifiedCanadianAboriginalSyllabics(code)
def uCSIsVariationSelectors(code):
    """Return non-zero if @code is in the VariationSelectors UCS block."""
    return libxml2mod.xmlUCSIsVariationSelectors(code)
def uCSIsVariationSelectorsSupplement(code):
    """Return non-zero if @code is in the VariationSelectorsSupplement
    UCS block."""
    return libxml2mod.xmlUCSIsVariationSelectorsSupplement(code)
def uCSIsYiRadicals(code):
    """Return non-zero if @code is in the YiRadicals UCS block."""
    return libxml2mod.xmlUCSIsYiRadicals(code)
def uCSIsYiSyllables(code):
    """Return non-zero if @code is in the YiSyllables UCS block."""
    return libxml2mod.xmlUCSIsYiSyllables(code)
def uCSIsYijingHexagramSymbols(code):
    """Return non-zero if @code is in the YijingHexagramSymbols UCS
    block."""
    return libxml2mod.xmlUCSIsYijingHexagramSymbols(code)
#
# Functions from module xmlversion
#
def checkVersion(version):
    """Compare the compiled library version against the one expected by
    the caller; a mismatch may warn or immediately kill the application."""
    libxml2mod.xmlCheckVersion(version)
#
# Functions from module xpathInternals
#
def valuePop(ctxt):
    """Pop and return the top XPath object from the value stack of
    @ctxt (an XPath parser context wrapper, or None)."""
    ctxt__o = None if ctxt is None else ctxt._o
    return libxml2mod.valuePop(ctxt__o)
class xmlNode(xmlCore):
def __init__(self, _obj=None):
if checkWrapper(_obj) != 0: raise TypeError('xmlNode got a wrong wrapper object type')
self._o = _obj
xmlCore.__init__(self, _obj=_obj)
def __repr__(self):
return "<xmlNode (%s) object at 0x%x>" % (self.name, int(pos_id (self)))
# accessors for xmlNode
def ns(self):
"""Get the namespace of a node """
ret = libxml2mod.xmlNodeGetNs(self._o)
if ret is None:return None
__tmp = xmlNs(_obj=ret)
return __tmp
def nsDefs(self):
"""Get the namespace of a node """
ret = libxml2mod.xmlNodeGetNsDefs(self._o)
if ret is None:return None
__tmp = xmlNs(_obj=ret)
return __tmp
#
# xmlNode functions from module debugXML
#
def debugDumpNode(self, output, depth):
"""Dumps debug information for the element node, it is
recursive """
libxml2mod.xmlDebugDumpNode(output, self._o, depth)
def debugDumpNodeList(self, output, depth):
"""Dumps debug information for the list of element node, it is
recursive """
libxml2mod.xmlDebugDumpNodeList(output, self._o, depth)
def debugDumpOneNode(self, output, depth):
"""Dumps debug information for the element node, it is not
recursive """
libxml2mod.xmlDebugDumpOneNode(output, self._o, depth)
def lsCountNode(self):
"""Count the children of @node. """
ret = libxml2mod.xmlLsCountNode(self._o)
return ret
def lsOneNode(self, output):
"""Dump to @output the type and name of @node. """
libxml2mod.xmlLsOneNode(output, self._o)
def shellPrintNode(self):
"""Print node to the output FILE """
libxml2mod.xmlShellPrintNode(self._o)
#
# xmlNode functions from module tree
#
def addChild(self, cur):
"""Add a new node to @parent, at the end of the child (or
property) list merging adjacent TEXT nodes (in which case
@cur is freed) If the new node is ATTRIBUTE, it is added
into properties instead of children. If there is an
attribute with equal name, it is first destroyed. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlAddChild(self._o, cur__o)
if ret is None:raise treeError('xmlAddChild() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def addChildList(self, cur):
"""Add a list of node at the end of the child list of the
parent merging adjacent TEXT nodes (@cur may be freed) """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlAddChildList(self._o, cur__o)
if ret is None:raise treeError('xmlAddChildList() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def addContent(self, content):
"""Append the extra substring to the node content. NOTE: In
contrast to xmlNodeSetContent(), @content is supposed to be
raw text, so unescaped XML special chars are allowed,
entity references are not supported. """
libxml2mod.xmlNodeAddContent(self._o, content)
def addContentLen(self, content, len):
"""Append the extra substring to the node content. NOTE: In
contrast to xmlNodeSetContentLen(), @content is supposed to
be raw text, so unescaped XML special chars are allowed,
entity references are not supported. """
libxml2mod.xmlNodeAddContentLen(self._o, content, len)
def addNextSibling(self, elem):
"""Add a new node @elem as the next sibling of @cur If the new
node was already inserted in a document it is first
unlinked from its existing context. As a result of text
merging @elem may be freed. If the new node is ATTRIBUTE,
it is added into properties instead of children. If there
is an attribute with equal name, it is first destroyed. """
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlAddNextSibling(self._o, elem__o)
if ret is None:raise treeError('xmlAddNextSibling() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def addPrevSibling(self, elem):
"""Add a new node @elem as the previous sibling of @cur
merging adjacent TEXT nodes (@elem may be freed) If the new
node was already inserted in a document it is first
unlinked from its existing context. If the new node is
ATTRIBUTE, it is added into properties instead of children.
If there is an attribute with equal name, it is first
destroyed. """
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlAddPrevSibling(self._o, elem__o)
if ret is None:raise treeError('xmlAddPrevSibling() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def addSibling(self, elem):
"""Add a new element @elem to the list of siblings of @cur
merging adjacent TEXT nodes (@elem may be freed) If the new
element was already inserted in a document it is first
unlinked from its existing context. """
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlAddSibling(self._o, elem__o)
if ret is None:raise treeError('xmlAddSibling() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def copyNode(self, extended):
"""Do a copy of the node. """
ret = libxml2mod.xmlCopyNode(self._o, extended)
if ret is None:raise treeError('xmlCopyNode() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def copyNodeList(self):
"""Do a recursive copy of the node list. Use
xmlDocCopyNodeList() if possible to ensure string interning. """
ret = libxml2mod.xmlCopyNodeList(self._o)
if ret is None:raise treeError('xmlCopyNodeList() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def copyProp(self, cur):
"""Do a copy of the attribute. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlCopyProp(self._o, cur__o)
if ret is None:raise treeError('xmlCopyProp() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp
def copyPropList(self, cur):
"""Do a copy of an attribute list. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlCopyPropList(self._o, cur__o)
if ret is None:raise treeError('xmlCopyPropList() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp
def docCopyNode(self, doc, extended):
"""Do a copy of the node to a given document. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlDocCopyNode(self._o, doc__o, extended)
if ret is None:raise treeError('xmlDocCopyNode() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def docCopyNodeList(self, doc):
"""Do a recursive copy of the node list. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlDocCopyNodeList(doc__o, self._o)
if ret is None:raise treeError('xmlDocCopyNodeList() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def docSetRootElement(self, doc):
"""Set the root element of the document (doc->children is a
list containing possibly comments, PIs, etc ...). """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlDocSetRootElement(doc__o, self._o)
if ret is None:return None
__tmp = xmlNode(_obj=ret)
return __tmp
def firstElementChild(self):
"""Finds the first child node of that element which is a
Element node Note the handling of entities references is
different than in the W3C DOM element traversal spec since
we don't have back reference from entities content to
entities references. """
ret = libxml2mod.xmlFirstElementChild(self._o)
if ret is None:return None
__tmp = xmlNode(_obj=ret)
return __tmp
def freeNode(self):
"""Free a node, this is a recursive behaviour, all the
children are freed too. This doesn't unlink the child from
the list, use xmlUnlinkNode() first. """
libxml2mod.xmlFreeNode(self._o)
def freeNodeList(self):
"""Free a node and all its siblings, this is a recursive
behaviour, all the children are freed too. """
libxml2mod.xmlFreeNodeList(self._o)
def getBase(self, doc):
"""Searches for the BASE URL. The code should work on both XML
and HTML document even if base mechanisms are completely
different. It returns the base as defined in RFC 2396
sections 5.1.1. Base URI within Document Content and 5.1.2.
Base URI from the Encapsulating Entity However it does not
return the document base (5.1.3), use doc->URL in this case """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlNodeGetBase(doc__o, self._o)
return ret
def getContent(self):
"""Read the value of a node, this can be either the text
carried directly by this node if it's a TEXT node or the
aggregate string of the values carried by this node child's
(TEXT and ENTITY_REF). Entity references are substituted. """
ret = libxml2mod.xmlNodeGetContent(self._o)
return ret
def getLang(self):
"""Searches the language of a node, i.e. the values of the
xml:lang attribute or the one carried by the nearest
ancestor. """
ret = libxml2mod.xmlNodeGetLang(self._o)
return ret
def getSpacePreserve(self):
"""Searches the space preserving behaviour of a node, i.e. the
values of the xml:space attribute or the one carried by the
nearest ancestor. """
ret = libxml2mod.xmlNodeGetSpacePreserve(self._o)
return ret
def hasNsProp(self, name, nameSpace):
"""Search for an attribute associated to a node This attribute
has to be anchored in the namespace specified. This does
the entity substitution. This function looks in DTD
attribute declaration for #FIXED or default declaration
values unless DTD use has been turned off. Note that a
namespace of None indicates to use the default namespace. """
ret = libxml2mod.xmlHasNsProp(self._o, name, nameSpace)
if ret is None:return None
__tmp = xmlAttr(_obj=ret)
return __tmp
def hasProp(self, name):
"""Search an attribute associated to a node This function also
looks in DTD attribute declaration for #FIXED or default
declaration values unless DTD use has been turned off. """
ret = libxml2mod.xmlHasProp(self._o, name)
if ret is None:return None
__tmp = xmlAttr(_obj=ret)
return __tmp
def isBlankNode(self):
"""Checks whether this node is an empty or whitespace only
(and possibly ignorable) text-node. """
ret = libxml2mod.xmlIsBlankNode(self._o)
return ret
def isText(self):
"""Is this node a Text node ? """
ret = libxml2mod.xmlNodeIsText(self._o)
return ret
def lastChild(self):
"""Search the last child of a node. """
ret = libxml2mod.xmlGetLastChild(self._o)
if ret is None:raise treeError('xmlGetLastChild() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def lastElementChild(self):
"""Finds the last child node of that element which is a
Element node Note the handling of entities references is
different than in the W3C DOM element traversal spec since
we don't have back reference from entities content to
entities references. """
ret = libxml2mod.xmlLastElementChild(self._o)
if ret is None:return None
__tmp = xmlNode(_obj=ret)
return __tmp
def lineNo(self):
"""Get line number of @node. Try to override the limitation of
lines being store in 16 bits ints if XML_PARSE_BIG_LINES
parser option was used """
ret = libxml2mod.xmlGetLineNo(self._o)
return ret
def listGetRawString(self, doc, inLine):
"""Builds the string equivalent to the text contained in the
Node list made of TEXTs and ENTITY_REFs, contrary to
xmlNodeListGetString() this function doesn't do any
character encoding handling. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlNodeListGetRawString(doc__o, self._o, inLine)
return ret
def listGetString(self, doc, inLine):
"""Build the string equivalent to the text contained in the
Node list made of TEXTs and ENTITY_REFs """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlNodeListGetString(doc__o, self._o, inLine)
return ret
def newChild(self, ns, name, content):
"""Creation of a new child element, added at the end of
@parent children list. @ns and @content parameters are
optional (None). If @ns is None, the newly created element
inherits the namespace of @parent. If @content is non None,
a child list containing the TEXTs and ENTITY_REFs node will
be created. NOTE: @content is supposed to be a piece of XML
CDATA, so it allows entity references. XML special chars
must be escaped first by using
xmlEncodeEntitiesReentrant(), or xmlNewTextChild() should
be used. """
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlNewChild(self._o, ns__o, name, content)
if ret is None:raise treeError('xmlNewChild() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def newNs(self, href, prefix):
"""Creation of a new Namespace. This function will refuse to
create a namespace with a similar prefix than an existing
one present on this node. We use href==None in the case of
an element creation where the namespace was not defined. """
ret = libxml2mod.xmlNewNs(self._o, href, prefix)
if ret is None:raise treeError('xmlNewNs() failed')
__tmp = xmlNs(_obj=ret)
return __tmp
def newNsProp(self, ns, name, value):
"""Create a new property tagged with a namespace and carried
by a node. """
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlNewNsProp(self._o, ns__o, name, value)
if ret is None:raise treeError('xmlNewNsProp() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp
def newNsPropEatName(self, ns, name, value):
"""Create a new property tagged with a namespace and carried
by a node. """
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlNewNsPropEatName(self._o, ns__o, name, value)
if ret is None:raise treeError('xmlNewNsPropEatName() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp
def newProp(self, name, value):
"""Create a new property carried by a node. """
ret = libxml2mod.xmlNewProp(self._o, name, value)
if ret is None:raise treeError('xmlNewProp() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp
def newTextChild(self, ns, name, content):
"""Creation of a new child element, added at the end of
@parent children list. @ns and @content parameters are
optional (None). If @ns is None, the newly created element
inherits the namespace of @parent. If @content is non None,
a child TEXT node will be created containing the string
@content. NOTE: Use xmlNewChild() if @content will contain
entities that need to be preserved. Use this function,
xmlNewTextChild(), if you need to ensure that reserved XML
chars that might appear in @content, such as the ampersand,
greater-than or less-than signs, are automatically replaced
by their XML escaped entity representations. """
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlNewTextChild(self._o, ns__o, name, content)
if ret is None:raise treeError('xmlNewTextChild() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def nextElementSibling(self):
"""Finds the first closest next sibling of the node which is
an element node. Note the handling of entities references
is different than in the W3C DOM element traversal spec
since we don't have back reference from entities content to
entities references. """
ret = libxml2mod.xmlNextElementSibling(self._o)
if ret is None:return None
__tmp = xmlNode(_obj=ret)
return __tmp
def noNsProp(self, name):
"""Search and get the value of an attribute associated to a
node This does the entity substitution. This function looks
in DTD attribute declaration for #FIXED or default
declaration values unless DTD use has been turned off. This
function is similar to xmlGetProp except it will accept
only an attribute in no namespace. """
ret = libxml2mod.xmlGetNoNsProp(self._o, name)
return ret
def nodePath(self):
"""Build a structure based Path for the given node """
ret = libxml2mod.xmlGetNodePath(self._o)
return ret
def nsProp(self, name, nameSpace):
"""Search and get the value of an attribute associated to a
node This attribute has to be anchored in the namespace
specified. This does the entity substitution. This function
looks in DTD attribute declaration for #FIXED or default
declaration values unless DTD use has been turned off. """
ret = libxml2mod.xmlGetNsProp(self._o, name, nameSpace)
return ret
def previousElementSibling(self):
"""Finds the first closest previous sibling of the node which
is an element node. Note the handling of entities
references is different than in the W3C DOM element
traversal spec since we don't have back reference from
entities content to entities references. """
ret = libxml2mod.xmlPreviousElementSibling(self._o)
if ret is None:return None
__tmp = xmlNode(_obj=ret)
return __tmp
def prop(self, name):
"""Search and get the value of an attribute associated to a
node This does the entity substitution. This function looks
in DTD attribute declaration for #FIXED or default
declaration values unless DTD use has been turned off.
NOTE: this function acts independently of namespaces
associated to the attribute. Use xmlGetNsProp() or
xmlGetNoNsProp() for namespace aware processing. """
ret = libxml2mod.xmlGetProp(self._o, name)
return ret
def reconciliateNs(self, doc):
"""This function checks that all the namespaces declared
within the given tree are properly declared. This is needed
for example after Copy or Cut and then paste operations.
The subtree may still hold pointers to namespace
declarations outside the subtree or invalid/masked. As much
as possible the function try to reuse the existing
namespaces found in the new environment. If not possible
the new namespaces are redeclared on @tree at the top of
the given subtree. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlReconciliateNs(doc__o, self._o)
return ret
def replaceNode(self, cur):
"""Unlink the old node from its current context, prune the new
one at the same place. If @cur was already inserted in a
document it is first unlinked from its existing context. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlReplaceNode(self._o, cur__o)
if ret is None:raise treeError('xmlReplaceNode() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def searchNs(self, doc, nameSpace):
"""Search a Ns registered under a given name space for a
document. recurse on the parents until it finds the defined
namespace or return None otherwise. @nameSpace can be None,
this is a search for the default namespace. We don't allow
to cross entities boundaries. If you don't declare the
namespace within those you will be in troubles !!! A
warning is generated to cover this case. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlSearchNs(doc__o, self._o, nameSpace)
if ret is None:raise treeError('xmlSearchNs() failed')
__tmp = xmlNs(_obj=ret)
return __tmp
def searchNsByHref(self, doc, href):
"""Search a Ns aliasing a given URI. Recurse on the parents
until it finds the defined namespace or return None
otherwise. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlSearchNsByHref(doc__o, self._o, href)
if ret is None:raise treeError('xmlSearchNsByHref() failed')
__tmp = xmlNs(_obj=ret)
return __tmp
def setBase(self, uri):
"""Set (or reset) the base URI of a node, i.e. the value of
the xml:base attribute. """
libxml2mod.xmlNodeSetBase(self._o, uri)
def setContent(self, content):
"""Replace the content of a node. NOTE: @content is supposed
to be a piece of XML CDATA, so it allows entity references,
but XML special chars need to be escaped first by using
xmlEncodeEntitiesReentrant() resp. xmlEncodeSpecialChars(). """
libxml2mod.xmlNodeSetContent(self._o, content)
def setContentLen(self, content, len):
"""Replace the content of a node. NOTE: @content is supposed
to be a piece of XML CDATA, so it allows entity references,
but XML special chars need to be escaped first by using
xmlEncodeEntitiesReentrant() resp. xmlEncodeSpecialChars(). """
libxml2mod.xmlNodeSetContentLen(self._o, content, len)
def setLang(self, lang):
"""Set the language of a node, i.e. the values of the xml:lang
attribute. """
libxml2mod.xmlNodeSetLang(self._o, lang)
def setListDoc(self, doc):
"""update all nodes in the list to point to the right document """
if doc is None: doc__o = None
else: doc__o = doc._o
libxml2mod.xmlSetListDoc(self._o, doc__o)
def setName(self, name):
"""Set (or reset) the name of a node. """
libxml2mod.xmlNodeSetName(self._o, name)
def setNs(self, ns):
"""Associate a namespace to a node, a posteriori. """
if ns is None: ns__o = None
else: ns__o = ns._o
libxml2mod.xmlSetNs(self._o, ns__o)
def setNsProp(self, ns, name, value):
"""Set (or reset) an attribute carried by a node. The ns
structure must be in scope, this is not checked """
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlSetNsProp(self._o, ns__o, name, value)
if ret is None:raise treeError('xmlSetNsProp() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp
def setProp(self, name, value):
"""Set (or reset) an attribute carried by a node. If @name has
a prefix, then the corresponding namespace-binding will be
used, if in scope; it is an error it there's no such
ns-binding for the prefix in scope. """
ret = libxml2mod.xmlSetProp(self._o, name, value)
if ret is None:raise treeError('xmlSetProp() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp
def setSpacePreserve(self, val):
"""Set (or reset) the space preserving behaviour of a node,
i.e. the value of the xml:space attribute. """
libxml2mod.xmlNodeSetSpacePreserve(self._o, val)
def setTreeDoc(self, doc):
"""update all nodes under the tree to point to the right
document """
if doc is None: doc__o = None
else: doc__o = doc._o
libxml2mod.xmlSetTreeDoc(self._o, doc__o)
def textConcat(self, content, len):
"""Concat the given string at the end of the existing node
content """
ret = libxml2mod.xmlTextConcat(self._o, content, len)
return ret
def textMerge(self, second):
"""Merge two text nodes into one """
if second is None: second__o = None
else: second__o = second._o
ret = libxml2mod.xmlTextMerge(self._o, second__o)
if ret is None:raise treeError('xmlTextMerge() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def unlinkNode(self):
"""Unlink a node from it's current context, the node is not
freed If one need to free the node, use xmlFreeNode()
routine after the unlink to discard it. Note that namespace
nodes can't be unlinked as they do not have pointer to
their parent. """
libxml2mod.xmlUnlinkNode(self._o)
def unsetNsProp(self, ns, name):
"""Remove an attribute carried by a node. """
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlUnsetNsProp(self._o, ns__o, name)
return ret
def unsetProp(self, name):
"""Remove an attribute carried by a node. This handles only
attributes in no namespace. """
ret = libxml2mod.xmlUnsetProp(self._o, name)
return ret
#
# xmlNode functions from module valid
#
def isID(self, doc, attr):
"""Determine whether an attribute is of type ID. In case we
have DTD(s) then this is done if DTD loading has been
requested. In the case of HTML documents parsed with the
HTML parser, then ID detection is done systematically. """
if doc is None: doc__o = None
else: doc__o = doc._o
if attr is None: attr__o = None
else: attr__o = attr._o
ret = libxml2mod.xmlIsID(doc__o, self._o, attr__o)
return ret
def isRef(self, doc, attr):
"""Determine whether an attribute is of type Ref. In case we
have DTD(s) then this is simple, otherwise we use an
heuristic: name Ref (upper or lowercase). """
if doc is None: doc__o = None
else: doc__o = doc._o
if attr is None: attr__o = None
else: attr__o = attr._o
ret = libxml2mod.xmlIsRef(doc__o, self._o, attr__o)
return ret
def validNormalizeAttributeValue(self, doc, name, value):
"""Does the validation related extra step of the normalization
of attribute values: If the declared value is not CDATA,
then the XML processor must further process the normalized
attribute value by discarding any leading and trailing
space (#x20) characters, and by replacing sequences of
space (#x20) characters by single space (#x20) character. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlValidNormalizeAttributeValue(doc__o, self._o, name, value)
return ret
#
# xmlNode functions from module xinclude
#
def xincludeProcessTree(self):
"""Implement the XInclude substitution for the given subtree """
ret = libxml2mod.xmlXIncludeProcessTree(self._o)
return ret
def xincludeProcessTreeFlags(self, flags):
"""Implement the XInclude substitution for the given subtree """
ret = libxml2mod.xmlXIncludeProcessTreeFlags(self._o, flags)
return ret
#
# xmlNode functions from module xmlschemas
#
def schemaValidateOneElement(self, ctxt):
"""Validate a branch of a tree, starting with the given @elem. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlSchemaValidateOneElement(ctxt__o, self._o)
return ret
#
# xmlNode functions from module xpath
#
def xpathCastNodeToNumber(self):
"""Converts a node to its number value """
ret = libxml2mod.xmlXPathCastNodeToNumber(self._o)
return ret
def xpathCastNodeToString(self):
"""Converts a node to its string value. """
ret = libxml2mod.xmlXPathCastNodeToString(self._o)
return ret
def xpathCmpNodes(self, node2):
"""Compare two nodes w.r.t document order """
if node2 is None: node2__o = None
else: node2__o = node2._o
ret = libxml2mod.xmlXPathCmpNodes(self._o, node2__o)
return ret
def xpathNodeEval(self, str, ctx):
"""Evaluate the XPath Location Path in the given context. The
node 'node' is set as the context node. The context node is
not restored. """
if ctx is None: ctx__o = None
else: ctx__o = ctx._o
ret = libxml2mod.xmlXPathNodeEval(self._o, str, ctx__o)
if ret is None:raise xpathError('xmlXPathNodeEval() failed')
return xpathObjectRet(ret)
#
# xmlNode functions from module xpathInternals
#
def xpathNewNodeSet(self):
"""Create a new xmlXPathObjectPtr of type NodeSet and
initialize it with the single Node @val """
ret = libxml2mod.xmlXPathNewNodeSet(self._o)
if ret is None:raise xpathError('xmlXPathNewNodeSet() failed')
return xpathObjectRet(ret)
def xpathNewValueTree(self):
"""Create a new xmlXPathObjectPtr of type Value Tree (XSLT)
and initialize it with the tree root @val """
ret = libxml2mod.xmlXPathNewValueTree(self._o)
if ret is None:raise xpathError('xmlXPathNewValueTree() failed')
return xpathObjectRet(ret)
def xpathNextAncestor(self, ctxt):
"""Traversal function for the "ancestor" direction the
ancestor axis contains the ancestors of the context node;
the ancestors of the context node consist of the parent of
context node and the parent's parent and so on; the nodes
are ordered in reverse document order; thus the parent is
the first node on the axis, and the parent's parent is the
second node on the axis """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlXPathNextAncestor(ctxt__o, self._o)
if ret is None:raise xpathError('xmlXPathNextAncestor() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextAncestorOrSelf(self, ctxt):
"""Traversal function for the "ancestor-or-self" direction he
ancestor-or-self axis contains the context node and
ancestors of the context node in reverse document order;
thus the context node is the first node on the axis, and
the context node's parent the second; parent here is
defined the same as with the parent axis. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlXPathNextAncestorOrSelf(ctxt__o, self._o)
if ret is None:raise xpathError('xmlXPathNextAncestorOrSelf() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextAttribute(self, ctxt):
"""Traversal function for the "attribute" direction TODO:
support DTD inherited default attributes """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlXPathNextAttribute(ctxt__o, self._o)
if ret is None:raise xpathError('xmlXPathNextAttribute() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextChild(self, ctxt):
"""Traversal function for the "child" direction The child axis
contains the children of the context node in document order. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlXPathNextChild(ctxt__o, self._o)
if ret is None:raise xpathError('xmlXPathNextChild() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextDescendant(self, ctxt):
"""Traversal function for the "descendant" direction the
descendant axis contains the descendants of the context
node in document order; a descendant is a child or a child
of a child and so on. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlXPathNextDescendant(ctxt__o, self._o)
if ret is None:raise xpathError('xmlXPathNextDescendant() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextDescendantOrSelf(self, ctxt):
"""Traversal function for the "descendant-or-self" direction
the descendant-or-self axis contains the context node and
the descendants of the context node in document order; thus
the context node is the first node on the axis, and the
first child of the context node is the second node on the
axis """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlXPathNextDescendantOrSelf(ctxt__o, self._o)
if ret is None:raise xpathError('xmlXPathNextDescendantOrSelf() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextFollowing(self, ctxt):
"""Traversal function for the "following" direction The
following axis contains all nodes in the same document as
the context node that are after the context node in
document order, excluding any descendants and excluding
attribute nodes and namespace nodes; the nodes are ordered
in document order """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlXPathNextFollowing(ctxt__o, self._o)
if ret is None:raise xpathError('xmlXPathNextFollowing() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextFollowingSibling(self, ctxt):
"""Traversal function for the "following-sibling" direction
The following-sibling axis contains the following siblings
of the context node in document order. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlXPathNextFollowingSibling(ctxt__o, self._o)
if ret is None:raise xpathError('xmlXPathNextFollowingSibling() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextNamespace(self, ctxt):
"""Traversal function for the "namespace" direction the
namespace axis contains the namespace nodes of the context
node; the order of nodes on this axis is
implementation-defined; the axis will be empty unless the
context node is an element We keep the XML namespace node
at the end of the list. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlXPathNextNamespace(ctxt__o, self._o)
if ret is None:raise xpathError('xmlXPathNextNamespace() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextParent(self, ctxt):
"""Traversal function for the "parent" direction The parent
axis contains the parent of the context node, if there is
one. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlXPathNextParent(ctxt__o, self._o)
if ret is None:raise xpathError('xmlXPathNextParent() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextPreceding(self, ctxt):
"""Traversal function for the "preceding" direction the
preceding axis contains all nodes in the same document as
the context node that are before the context node in
document order, excluding any ancestors and excluding
attribute nodes and namespace nodes; the nodes are ordered
in reverse document order """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlXPathNextPreceding(ctxt__o, self._o)
if ret is None:raise xpathError('xmlXPathNextPreceding() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextPrecedingSibling(self, ctxt):
"""Traversal function for the "preceding-sibling" direction
The preceding-sibling axis contains the preceding siblings
of the context node in reverse document order; the first
preceding sibling is first on the axis; the sibling
preceding that node is the second on the axis and so on. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlXPathNextPrecedingSibling(ctxt__o, self._o)
if ret is None:raise xpathError('xmlXPathNextPrecedingSibling() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextSelf(self, ctxt):
"""Traversal function for the "self" direction The self axis
contains just the context node itself """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlXPathNextSelf(ctxt__o, self._o)
if ret is None:raise xpathError('xmlXPathNextSelf() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
#
# xmlNode functions from module xpointer
#
def xpointerNewCollapsedRange(self):
"""Create a new xmlXPathObjectPtr of type range using a single
nodes """
ret = libxml2mod.xmlXPtrNewCollapsedRange(self._o)
if ret is None:raise treeError('xmlXPtrNewCollapsedRange() failed')
return xpathObjectRet(ret)
def xpointerNewContext(self, doc, origin):
"""Create a new XPointer context """
if doc is None: doc__o = None
else: doc__o = doc._o
if origin is None: origin__o = None
else: origin__o = origin._o
ret = libxml2mod.xmlXPtrNewContext(doc__o, self._o, origin__o)
if ret is None:raise treeError('xmlXPtrNewContext() failed')
__tmp = xpathContext(_obj=ret)
return __tmp
def xpointerNewLocationSetNodes(self, end):
"""Create a new xmlXPathObjectPtr of type LocationSet and
initialize it with the single range made of the two nodes
@start and @end """
if end is None: end__o = None
else: end__o = end._o
ret = libxml2mod.xmlXPtrNewLocationSetNodes(self._o, end__o)
if ret is None:raise treeError('xmlXPtrNewLocationSetNodes() failed')
return xpathObjectRet(ret)
def xpointerNewRange(self, startindex, end, endindex):
"""Create a new xmlXPathObjectPtr of type range """
if end is None: end__o = None
else: end__o = end._o
ret = libxml2mod.xmlXPtrNewRange(self._o, startindex, end__o, endindex)
if ret is None:raise treeError('xmlXPtrNewRange() failed')
return xpathObjectRet(ret)
def xpointerNewRangeNodes(self, end):
"""Create a new xmlXPathObjectPtr of type range using 2 nodes """
if end is None: end__o = None
else: end__o = end._o
ret = libxml2mod.xmlXPtrNewRangeNodes(self._o, end__o)
if ret is None:raise treeError('xmlXPtrNewRangeNodes() failed')
return xpathObjectRet(ret)
class xmlDoc(xmlNode):
def __init__(self, _obj=None):
if checkWrapper(_obj) != 0: raise TypeError('xmlDoc got a wrong wrapper object type')
self._o = _obj
xmlNode.__init__(self, _obj=_obj)
def __repr__(self):
return "<xmlDoc (%s) object at 0x%x>" % (self.name, int(pos_id (self)))
#
# xmlDoc functions from module HTMLparser
#
def htmlAutoCloseTag(self, name, elem):
"""The HTML DTD allows a tag to implicitly close other tags.
The list is kept in htmlStartClose array. This function
checks if the element or one of it's children would
autoclose the given tag. """
ret = libxml2mod.htmlAutoCloseTag(self._o, name, elem)
return ret
def htmlIsAutoClosed(self, elem):
"""The HTML DTD allows a tag to implicitly close other tags.
The list is kept in htmlStartClose array. This function
checks if a tag is autoclosed by one of it's child """
ret = libxml2mod.htmlIsAutoClosed(self._o, elem)
return ret
#
# xmlDoc functions from module HTMLtree
#
def htmlDocContentDumpFormatOutput(self, buf, encoding, format):
"""Dump an HTML document. """
if buf is None: buf__o = None
else: buf__o = buf._o
libxml2mod.htmlDocContentDumpFormatOutput(buf__o, self._o, encoding, format)
def htmlDocContentDumpOutput(self, buf, encoding):
"""Dump an HTML document. Formating return/spaces are added. """
if buf is None: buf__o = None
else: buf__o = buf._o
libxml2mod.htmlDocContentDumpOutput(buf__o, self._o, encoding)
def htmlDocDump(self, f):
"""Dump an HTML document to an open FILE. """
ret = libxml2mod.htmlDocDump(f, self._o)
return ret
def htmlGetMetaEncoding(self):
"""Encoding definition lookup in the Meta tags """
ret = libxml2mod.htmlGetMetaEncoding(self._o)
return ret
def htmlNodeDumpFile(self, out, cur):
"""Dump an HTML node, recursive behaviour,children are printed
too, and formatting returns are added. """
if cur is None: cur__o = None
else: cur__o = cur._o
libxml2mod.htmlNodeDumpFile(out, self._o, cur__o)
def htmlNodeDumpFileFormat(self, out, cur, encoding, format):
"""Dump an HTML node, recursive behaviour,children are printed
too. TODO: if encoding == None try to save in the doc
encoding """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.htmlNodeDumpFileFormat(out, self._o, cur__o, encoding, format)
return ret
def htmlNodeDumpFormatOutput(self, buf, cur, encoding, format):
"""Dump an HTML node, recursive behaviour,children are printed
too. """
if buf is None: buf__o = None
else: buf__o = buf._o
if cur is None: cur__o = None
else: cur__o = cur._o
libxml2mod.htmlNodeDumpFormatOutput(buf__o, self._o, cur__o, encoding, format)
def htmlNodeDumpOutput(self, buf, cur, encoding):
"""Dump an HTML node, recursive behaviour,children are printed
too, and formatting returns/spaces are added. """
if buf is None: buf__o = None
else: buf__o = buf._o
if cur is None: cur__o = None
else: cur__o = cur._o
libxml2mod.htmlNodeDumpOutput(buf__o, self._o, cur__o, encoding)
def htmlSaveFile(self, filename):
"""Dump an HTML document to a file. If @filename is "-" the
stdout file is used. """
ret = libxml2mod.htmlSaveFile(filename, self._o)
return ret
def htmlSaveFileEnc(self, filename, encoding):
"""Dump an HTML document to a file using a given encoding and
formatting returns/spaces are added. """
ret = libxml2mod.htmlSaveFileEnc(filename, self._o, encoding)
return ret
def htmlSaveFileFormat(self, filename, encoding, format):
"""Dump an HTML document to a file using a given encoding. """
ret = libxml2mod.htmlSaveFileFormat(filename, self._o, encoding, format)
return ret
def htmlSetMetaEncoding(self, encoding):
"""Sets the current encoding in the Meta tags NOTE: this will
not change the document content encoding, just the META
flag associated. """
ret = libxml2mod.htmlSetMetaEncoding(self._o, encoding)
return ret
#
# xmlDoc functions from module debugXML
#
def debugCheckDocument(self, output):
"""Check the document for potential content problems, and
output the errors to @output """
ret = libxml2mod.xmlDebugCheckDocument(output, self._o)
return ret
def debugDumpDocument(self, output):
"""Dumps debug information for the document, it's recursive """
libxml2mod.xmlDebugDumpDocument(output, self._o)
def debugDumpDocumentHead(self, output):
"""Dumps debug information cncerning the document, not
recursive """
libxml2mod.xmlDebugDumpDocumentHead(output, self._o)
def debugDumpEntities(self, output):
"""Dumps debug information for all the entities in use by the
document """
libxml2mod.xmlDebugDumpEntities(output, self._o)
#
# xmlDoc functions from module entities
#
def addDocEntity(self, name, type, ExternalID, SystemID, content):
"""Register a new entity for this document. """
ret = libxml2mod.xmlAddDocEntity(self._o, name, type, ExternalID, SystemID, content)
if ret is None:raise treeError('xmlAddDocEntity() failed')
__tmp = xmlEntity(_obj=ret)
return __tmp
def addDtdEntity(self, name, type, ExternalID, SystemID, content):
"""Register a new entity for this document DTD external subset. """
ret = libxml2mod.xmlAddDtdEntity(self._o, name, type, ExternalID, SystemID, content)
if ret is None:raise treeError('xmlAddDtdEntity() failed')
__tmp = xmlEntity(_obj=ret)
return __tmp
def docEntity(self, name):
"""Do an entity lookup in the document entity hash table and """
ret = libxml2mod.xmlGetDocEntity(self._o, name)
if ret is None:raise treeError('xmlGetDocEntity() failed')
__tmp = xmlEntity(_obj=ret)
return __tmp
def dtdEntity(self, name):
"""Do an entity lookup in the DTD entity hash table and """
ret = libxml2mod.xmlGetDtdEntity(self._o, name)
if ret is None:raise treeError('xmlGetDtdEntity() failed')
__tmp = xmlEntity(_obj=ret)
return __tmp
def encodeEntities(self, input):
"""TODO: remove xmlEncodeEntities, once we are not afraid of
breaking binary compatibility People must migrate their
code to xmlEncodeEntitiesReentrant ! This routine will
issue a warning when encountered. """
ret = libxml2mod.xmlEncodeEntities(self._o, input)
return ret
def encodeEntitiesReentrant(self, input):
"""Do a global encoding of a string, replacing the predefined
entities and non ASCII values with their entities and
CharRef counterparts. Contrary to xmlEncodeEntities, this
routine is reentrant, and result must be deallocated. """
ret = libxml2mod.xmlEncodeEntitiesReentrant(self._o, input)
return ret
def encodeSpecialChars(self, input):
"""Do a global encoding of a string, replacing the predefined
entities this routine is reentrant, and result must be
deallocated. """
ret = libxml2mod.xmlEncodeSpecialChars(self._o, input)
return ret
def newEntity(self, name, type, ExternalID, SystemID, content):
"""Create a new entity, this differs from xmlAddDocEntity()
that if the document is None or has no internal subset
defined, then an unlinked entity structure will be
returned, it is then the responsability of the caller to
link it to the document later or free it when not needed
anymore. """
ret = libxml2mod.xmlNewEntity(self._o, name, type, ExternalID, SystemID, content)
if ret is None:raise treeError('xmlNewEntity() failed')
__tmp = xmlEntity(_obj=ret)
return __tmp
def parameterEntity(self, name):
"""Do an entity lookup in the internal and external subsets and """
ret = libxml2mod.xmlGetParameterEntity(self._o, name)
if ret is None:raise treeError('xmlGetParameterEntity() failed')
__tmp = xmlEntity(_obj=ret)
return __tmp
#
# xmlDoc functions from module relaxng
#
def relaxNGNewDocParserCtxt(self):
"""Create an XML RelaxNGs parser context for that document.
Note: since the process of compiling a RelaxNG schemas
modifies the document, the @doc parameter is duplicated
internally. """
ret = libxml2mod.xmlRelaxNGNewDocParserCtxt(self._o)
if ret is None:raise parserError('xmlRelaxNGNewDocParserCtxt() failed')
__tmp = relaxNgParserCtxt(_obj=ret)
return __tmp
def relaxNGValidateDoc(self, ctxt):
"""Validate a document tree in memory. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlRelaxNGValidateDoc(ctxt__o, self._o)
return ret
def relaxNGValidateFullElement(self, ctxt, elem):
"""Validate a full subtree when
xmlRelaxNGValidatePushElement() returned 0 and the content
of the node has been expanded. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlRelaxNGValidateFullElement(ctxt__o, self._o, elem__o)
return ret
def relaxNGValidatePopElement(self, ctxt, elem):
"""Pop the element end from the RelaxNG validation stack. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlRelaxNGValidatePopElement(ctxt__o, self._o, elem__o)
return ret
def relaxNGValidatePushElement(self, ctxt, elem):
"""Push a new element start on the RelaxNG validation stack. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlRelaxNGValidatePushElement(ctxt__o, self._o, elem__o)
return ret
#
# xmlDoc functions from module tree
#
def copyDoc(self, recursive):
"""Do a copy of the document info. If recursive, the content
tree will be copied too as well as DTD, namespaces and
entities. """
ret = libxml2mod.xmlCopyDoc(self._o, recursive)
if ret is None:raise treeError('xmlCopyDoc() failed')
__tmp = xmlDoc(_obj=ret)
return __tmp
def copyNode(self, node, extended):
"""Do a copy of the node to a given document. """
if node is None: node__o = None
else: node__o = node._o
ret = libxml2mod.xmlDocCopyNode(node__o, self._o, extended)
if ret is None:raise treeError('xmlDocCopyNode() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def copyNodeList(self, node):
"""Do a recursive copy of the node list. """
if node is None: node__o = None
else: node__o = node._o
ret = libxml2mod.xmlDocCopyNodeList(self._o, node__o)
if ret is None:raise treeError('xmlDocCopyNodeList() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def createIntSubset(self, name, ExternalID, SystemID):
"""Create the internal subset of a document """
ret = libxml2mod.xmlCreateIntSubset(self._o, name, ExternalID, SystemID)
if ret is None:raise treeError('xmlCreateIntSubset() failed')
__tmp = xmlDtd(_obj=ret)
return __tmp
def docCompressMode(self):
"""get the compression ratio for a document, ZLIB based """
ret = libxml2mod.xmlGetDocCompressMode(self._o)
return ret
def dump(self, f):
"""Dump an XML document to an open FILE. """
ret = libxml2mod.xmlDocDump(f, self._o)
return ret
def elemDump(self, f, cur):
"""Dump an XML/HTML node, recursive behaviour, children are
printed too. """
if cur is None: cur__o = None
else: cur__o = cur._o
libxml2mod.xmlElemDump(f, self._o, cur__o)
def formatDump(self, f, format):
"""Dump an XML document to an open FILE. """
ret = libxml2mod.xmlDocFormatDump(f, self._o, format)
return ret
def freeDoc(self):
"""Free up all the structures used by a document, tree
included. """
libxml2mod.xmlFreeDoc(self._o)
def getRootElement(self):
"""Get the root element of the document (doc->children is a
list containing possibly comments, PIs, etc ...). """
ret = libxml2mod.xmlDocGetRootElement(self._o)
if ret is None:raise treeError('xmlDocGetRootElement() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def intSubset(self):
"""Get the internal subset of a document """
ret = libxml2mod.xmlGetIntSubset(self._o)
if ret is None:raise treeError('xmlGetIntSubset() failed')
__tmp = xmlDtd(_obj=ret)
return __tmp
def newCDataBlock(self, content, len):
"""Creation of a new node containing a CDATA block. """
ret = libxml2mod.xmlNewCDataBlock(self._o, content, len)
if ret is None:raise treeError('xmlNewCDataBlock() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def newCharRef(self, name):
"""Creation of a new character reference node. """
ret = libxml2mod.xmlNewCharRef(self._o, name)
if ret is None:raise treeError('xmlNewCharRef() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def newDocComment(self, content):
"""Creation of a new node containing a comment within a
document. """
ret = libxml2mod.xmlNewDocComment(self._o, content)
if ret is None:raise treeError('xmlNewDocComment() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def newDocFragment(self):
"""Creation of a new Fragment node. """
ret = libxml2mod.xmlNewDocFragment(self._o)
if ret is None:raise treeError('xmlNewDocFragment() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def newDocNode(self, ns, name, content):
"""Creation of a new node element within a document. @ns and
@content are optional (None). NOTE: @content is supposed to
be a piece of XML CDATA, so it allow entities references,
but XML special chars need to be escaped first by using
xmlEncodeEntitiesReentrant(). Use xmlNewDocRawNode() if you
don't need entities support. """
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlNewDocNode(self._o, ns__o, name, content)
if ret is None:raise treeError('xmlNewDocNode() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def newDocNodeEatName(self, ns, name, content):
"""Creation of a new node element within a document. @ns and
@content are optional (None). NOTE: @content is supposed to
be a piece of XML CDATA, so it allow entities references,
but XML special chars need to be escaped first by using
xmlEncodeEntitiesReentrant(). Use xmlNewDocRawNode() if you
don't need entities support. """
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlNewDocNodeEatName(self._o, ns__o, name, content)
if ret is None:raise treeError('xmlNewDocNodeEatName() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def newDocPI(self, name, content):
"""Creation of a processing instruction element. """
ret = libxml2mod.xmlNewDocPI(self._o, name, content)
if ret is None:raise treeError('xmlNewDocPI() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def newDocProp(self, name, value):
"""Create a new property carried by a document. """
ret = libxml2mod.xmlNewDocProp(self._o, name, value)
if ret is None:raise treeError('xmlNewDocProp() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp
def newDocRawNode(self, ns, name, content):
"""Creation of a new node element within a document. @ns and
@content are optional (None). """
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlNewDocRawNode(self._o, ns__o, name, content)
if ret is None:raise treeError('xmlNewDocRawNode() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def newDocText(self, content):
"""Creation of a new text node within a document. """
ret = libxml2mod.xmlNewDocText(self._o, content)
if ret is None:raise treeError('xmlNewDocText() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def newDocTextLen(self, content, len):
"""Creation of a new text node with an extra content length
parameter. The text node pertain to a given document. """
ret = libxml2mod.xmlNewDocTextLen(self._o, content, len)
if ret is None:raise treeError('xmlNewDocTextLen() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def newDtd(self, name, ExternalID, SystemID):
"""Creation of a new DTD for the external subset. To create an
internal subset, use xmlCreateIntSubset(). """
ret = libxml2mod.xmlNewDtd(self._o, name, ExternalID, SystemID)
if ret is None:raise treeError('xmlNewDtd() failed')
__tmp = xmlDtd(_obj=ret)
return __tmp
def newGlobalNs(self, href, prefix):
"""Creation of a Namespace, the old way using PI and without
scoping DEPRECATED !!! """
ret = libxml2mod.xmlNewGlobalNs(self._o, href, prefix)
if ret is None:raise treeError('xmlNewGlobalNs() failed')
__tmp = xmlNs(_obj=ret)
return __tmp
def newReference(self, name):
"""Creation of a new reference node. """
ret = libxml2mod.xmlNewReference(self._o, name)
if ret is None:raise treeError('xmlNewReference() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def nodeDumpOutput(self, buf, cur, level, format, encoding):
"""Dump an XML node, recursive behaviour, children are printed
too. Note that @format = 1 provide node indenting only if
xmlIndentTreeOutput = 1 or xmlKeepBlanksDefault(0) was
called """
if buf is None: buf__o = None
else: buf__o = buf._o
if cur is None: cur__o = None
else: cur__o = cur._o
libxml2mod.xmlNodeDumpOutput(buf__o, self._o, cur__o, level, format, encoding)
def nodeGetBase(self, cur):
"""Searches for the BASE URL. The code should work on both XML
and HTML document even if base mechanisms are completely
different. It returns the base as defined in RFC 2396
sections 5.1.1. Base URI within Document Content and 5.1.2.
Base URI from the Encapsulating Entity However it does not
return the document base (5.1.3), use doc->URL in this case """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlNodeGetBase(self._o, cur__o)
return ret
def nodeListGetRawString(self, list, inLine):
"""Builds the string equivalent to the text contained in the
Node list made of TEXTs and ENTITY_REFs, contrary to
xmlNodeListGetString() this function doesn't do any
character encoding handling. """
if list is None: list__o = None
else: list__o = list._o
ret = libxml2mod.xmlNodeListGetRawString(self._o, list__o, inLine)
return ret
def nodeListGetString(self, list, inLine):
"""Build the string equivalent to the text contained in the
Node list made of TEXTs and ENTITY_REFs """
if list is None: list__o = None
else: list__o = list._o
ret = libxml2mod.xmlNodeListGetString(self._o, list__o, inLine)
return ret
def reconciliateNs(self, tree):
"""This function checks that all the namespaces declared
within the given tree are properly declared. This is needed
for example after Copy or Cut and then paste operations.
The subtree may still hold pointers to namespace
declarations outside the subtree or invalid/masked. As much
as possible the function try to reuse the existing
namespaces found in the new environment. If not possible
the new namespaces are redeclared on @tree at the top of
the given subtree. """
if tree is None: tree__o = None
else: tree__o = tree._o
ret = libxml2mod.xmlReconciliateNs(self._o, tree__o)
return ret
def saveFile(self, filename):
"""Dump an XML document to a file. Will use compression if
compiled in and enabled. If @filename is "-" the stdout
file is used. """
ret = libxml2mod.xmlSaveFile(filename, self._o)
return ret
def saveFileEnc(self, filename, encoding):
"""Dump an XML document, converting it to the given encoding """
ret = libxml2mod.xmlSaveFileEnc(filename, self._o, encoding)
return ret
def saveFileTo(self, buf, encoding):
"""Dump an XML document to an I/O buffer. Warning ! This call
xmlOutputBufferClose() on buf which is not available after
this call. """
if buf is None: buf__o = None
else: buf__o = buf._o
ret = libxml2mod.xmlSaveFileTo(buf__o, self._o, encoding)
return ret
def saveFormatFile(self, filename, format):
"""Dump an XML document to a file. Will use compression if
compiled in and enabled. If @filename is "-" the stdout
file is used. If @format is set then the document will be
indented on output. Note that @format = 1 provide node
indenting only if xmlIndentTreeOutput = 1 or
xmlKeepBlanksDefault(0) was called """
ret = libxml2mod.xmlSaveFormatFile(filename, self._o, format)
return ret
def saveFormatFileEnc(self, filename, encoding, format):
"""Dump an XML document to a file or an URL. """
ret = libxml2mod.xmlSaveFormatFileEnc(filename, self._o, encoding, format)
return ret
def saveFormatFileTo(self, buf, encoding, format):
"""Dump an XML document to an I/O buffer. Warning ! This call
xmlOutputBufferClose() on buf which is not available after
this call. """
if buf is None: buf__o = None
else: buf__o = buf._o
ret = libxml2mod.xmlSaveFormatFileTo(buf__o, self._o, encoding, format)
return ret
def searchNs(self, node, nameSpace):
"""Search a Ns registered under a given name space for a
document. recurse on the parents until it finds the defined
namespace or return None otherwise. @nameSpace can be None,
this is a search for the default namespace. We don't allow
to cross entities boundaries. If you don't declare the
namespace within those you will be in troubles !!! A
warning is generated to cover this case. """
if node is None: node__o = None
else: node__o = node._o
ret = libxml2mod.xmlSearchNs(self._o, node__o, nameSpace)
if ret is None:raise treeError('xmlSearchNs() failed')
__tmp = xmlNs(_obj=ret)
return __tmp
def searchNsByHref(self, node, href):
"""Search a Ns aliasing a given URI. Recurse on the parents
until it finds the defined namespace or return None
otherwise. """
if node is None: node__o = None
else: node__o = node._o
ret = libxml2mod.xmlSearchNsByHref(self._o, node__o, href)
if ret is None:raise treeError('xmlSearchNsByHref() failed')
__tmp = xmlNs(_obj=ret)
return __tmp
def setDocCompressMode(self, mode):
"""set the compression ratio for a document, ZLIB based
Correct values: 0 (uncompressed) to 9 (max compression) """
libxml2mod.xmlSetDocCompressMode(self._o, mode)
def setListDoc(self, list):
"""update all nodes in the list to point to the right document """
if list is None: list__o = None
else: list__o = list._o
libxml2mod.xmlSetListDoc(list__o, self._o)
def setRootElement(self, root):
"""Set the root element of the document (doc->children is a
list containing possibly comments, PIs, etc ...). """
if root is None: root__o = None
else: root__o = root._o
ret = libxml2mod.xmlDocSetRootElement(self._o, root__o)
if ret is None:return None
__tmp = xmlNode(_obj=ret)
return __tmp
def setTreeDoc(self, tree):
"""update all nodes under the tree to point to the right
document """
if tree is None: tree__o = None
else: tree__o = tree._o
libxml2mod.xmlSetTreeDoc(tree__o, self._o)
def stringGetNodeList(self, value):
"""Parse the value string and build the node list associated.
Should produce a flat tree with only TEXTs and ENTITY_REFs. """
ret = libxml2mod.xmlStringGetNodeList(self._o, value)
if ret is None:raise treeError('xmlStringGetNodeList() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def stringLenGetNodeList(self, value, len):
"""Parse the value string and build the node list associated.
Should produce a flat tree with only TEXTs and ENTITY_REFs. """
ret = libxml2mod.xmlStringLenGetNodeList(self._o, value, len)
if ret is None:raise treeError('xmlStringLenGetNodeList() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
#
# xmlDoc functions from module valid
#
def ID(self, ID):
"""Search the attribute declaring the given ID """
ret = libxml2mod.xmlGetID(self._o, ID)
if ret is None:raise treeError('xmlGetID() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp
def isID(self, elem, attr):
"""Determine whether an attribute is of type ID. In case we
have DTD(s) then this is done if DTD loading has been
requested. In the case of HTML documents parsed with the
HTML parser, then ID detection is done systematically. """
if elem is None: elem__o = None
else: elem__o = elem._o
if attr is None: attr__o = None
else: attr__o = attr._o
ret = libxml2mod.xmlIsID(self._o, elem__o, attr__o)
return ret
def isMixedElement(self, name):
"""Search in the DtDs whether an element accept Mixed content
(or ANY) basically if it is supposed to accept text childs """
ret = libxml2mod.xmlIsMixedElement(self._o, name)
return ret
def isRef(self, elem, attr):
"""Determine whether an attribute is of type Ref. In case we
have DTD(s) then this is simple, otherwise we use an
heuristic: name Ref (upper or lowercase). """
if elem is None: elem__o = None
else: elem__o = elem._o
if attr is None: attr__o = None
else: attr__o = attr._o
ret = libxml2mod.xmlIsRef(self._o, elem__o, attr__o)
return ret
def removeID(self, attr):
"""Remove the given attribute from the ID table maintained
internally. """
if attr is None: attr__o = None
else: attr__o = attr._o
ret = libxml2mod.xmlRemoveID(self._o, attr__o)
return ret
def removeRef(self, attr):
"""Remove the given attribute from the Ref table maintained
internally. """
if attr is None: attr__o = None
else: attr__o = attr._o
ret = libxml2mod.xmlRemoveRef(self._o, attr__o)
return ret
def validCtxtNormalizeAttributeValue(self, ctxt, elem, name, value):
"""Does the validation related extra step of the normalization
of attribute values: If the declared value is not CDATA,
then the XML processor must further process the normalized
attribute value by discarding any leading and trailing
space (#x20) characters, and by replacing sequences of
space (#x20) characters by single space (#x20) character.
Also check VC: Standalone Document Declaration in P32, and
update ctxt->valid accordingly """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlValidCtxtNormalizeAttributeValue(ctxt__o, self._o, elem__o, name, value)
return ret
def validNormalizeAttributeValue(self, elem, name, value):
"""Does the validation related extra step of the normalization
of attribute values: If the declared value is not CDATA,
then the XML processor must further process the normalized
attribute value by discarding any leading and trailing
space (#x20) characters, and by replacing sequences of
space (#x20) characters by single space (#x20) character. """
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlValidNormalizeAttributeValue(self._o, elem__o, name, value)
return ret
def validateDocument(self, ctxt):
"""Try to validate the document instance basically it does
the all the checks described by the XML Rec i.e. validates
the internal and external subset (if present) and validate
the document tree. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlValidateDocument(ctxt__o, self._o)
return ret
def validateDocumentFinal(self, ctxt):
"""Does the final step for the document validation once all
the incremental validation steps have been completed
basically it does the following checks described by the XML
Rec Check all the IDREF/IDREFS attributes definition for
validity """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlValidateDocumentFinal(ctxt__o, self._o)
return ret
def validateDtd(self, ctxt, dtd):
"""Try to validate the document against the dtd instance
Basically it does check all the definitions in the DtD.
Note the the internal subset (if present) is de-coupled
(i.e. not used), which could give problems if ID or IDREF
is present. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
if dtd is None: dtd__o = None
else: dtd__o = dtd._o
ret = libxml2mod.xmlValidateDtd(ctxt__o, self._o, dtd__o)
return ret
def validateDtdFinal(self, ctxt):
"""Does the final step for the dtds validation once all the
subsets have been parsed basically it does the following
checks described by the XML Rec - check that ENTITY and
ENTITIES type attributes default or possible values matches
one of the defined entities. - check that NOTATION type
attributes default or possible values matches one of the
defined notations. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlValidateDtdFinal(ctxt__o, self._o)
return ret
def validateElement(self, ctxt, elem):
"""Try to validate the subtree under an element """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlValidateElement(ctxt__o, self._o, elem__o)
return ret
def validateNotationUse(self, ctxt, notationName):
"""Validate that the given name match a notation declaration.
- [ VC: Notation Declared ] """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlValidateNotationUse(ctxt__o, self._o, notationName)
return ret
def validateOneAttribute(self, ctxt, elem, attr, value):
"""Try to validate a single attribute for an element basically
it does the following checks as described by the XML-1.0
recommendation: - [ VC: Attribute Value Type ] - [ VC:
Fixed Attribute Default ] - [ VC: Entity Name ] - [ VC:
Name Token ] - [ VC: ID ] - [ VC: IDREF ] - [ VC: Entity
Name ] - [ VC: Notation Attributes ] The ID/IDREF
uniqueness and matching are done separately """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
if elem is None: elem__o = None
else: elem__o = elem._o
if attr is None: attr__o = None
else: attr__o = attr._o
ret = libxml2mod.xmlValidateOneAttribute(ctxt__o, self._o, elem__o, attr__o, value)
return ret
def validateOneElement(self, ctxt, elem):
"""Try to validate a single element and it's attributes,
basically it does the following checks as described by the
XML-1.0 recommendation: - [ VC: Element Valid ] - [ VC:
Required Attribute ] Then call xmlValidateOneAttribute()
for each attribute present. The ID/IDREF checkings are
done separately """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlValidateOneElement(ctxt__o, self._o, elem__o)
return ret
def validateOneNamespace(self, ctxt, elem, prefix, ns, value):
"""Try to validate a single namespace declaration for an
element basically it does the following checks as described
by the XML-1.0 recommendation: - [ VC: Attribute Value Type
] - [ VC: Fixed Attribute Default ] - [ VC: Entity Name ] -
[ VC: Name Token ] - [ VC: ID ] - [ VC: IDREF ] - [ VC:
Entity Name ] - [ VC: Notation Attributes ] The ID/IDREF
uniqueness and matching are done separately """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
if elem is None: elem__o = None
else: elem__o = elem._o
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlValidateOneNamespace(ctxt__o, self._o, elem__o, prefix, ns__o, value)
return ret
def validatePopElement(self, ctxt, elem, qname):
"""Pop the element end from the validation stack. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlValidatePopElement(ctxt__o, self._o, elem__o, qname)
return ret
def validatePushElement(self, ctxt, elem, qname):
"""Push a new element start on the validation stack. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlValidatePushElement(ctxt__o, self._o, elem__o, qname)
return ret
def validateRoot(self, ctxt):
"""Try to validate a the root element basically it does the
following check as described by the XML-1.0 recommendation:
- [ VC: Root Element Type ] it doesn't try to recurse or
apply other check to the element """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlValidateRoot(ctxt__o, self._o)
return ret
#
# xmlDoc functions from module xinclude
#
def xincludeProcess(self):
"""Implement the XInclude substitution on the XML document @doc """
ret = libxml2mod.xmlXIncludeProcess(self._o)
return ret
def xincludeProcessFlags(self, flags):
"""Implement the XInclude substitution on the XML document @doc """
ret = libxml2mod.xmlXIncludeProcessFlags(self._o, flags)
return ret
#
# xmlDoc functions from module xmlreader
#
def NewWalker(self, reader):
"""Setup an xmltextReader to parse a preparsed XML document.
This reuses the existing @reader xmlTextReader. """
if reader is None: reader__o = None
else: reader__o = reader._o
ret = libxml2mod.xmlReaderNewWalker(reader__o, self._o)
return ret
def readerWalker(self):
"""Create an xmltextReader for a preparsed document. """
ret = libxml2mod.xmlReaderWalker(self._o)
if ret is None:raise treeError('xmlReaderWalker() failed')
__tmp = xmlTextReader(_obj=ret)
return __tmp
#
# xmlDoc functions from module xmlschemas
#
def schemaNewDocParserCtxt(self):
"""Create an XML Schemas parse context for that document. NB.
The document may be modified during the parsing process. """
ret = libxml2mod.xmlSchemaNewDocParserCtxt(self._o)
if ret is None:raise parserError('xmlSchemaNewDocParserCtxt() failed')
__tmp = SchemaParserCtxt(_obj=ret)
return __tmp
def schemaValidateDoc(self, ctxt):
"""Validate a document tree in memory. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlSchemaValidateDoc(ctxt__o, self._o)
return ret
#
# xmlDoc functions from module xpath
#
def xpathNewContext(self):
"""Create a new xmlXPathContext """
ret = libxml2mod.xmlXPathNewContext(self._o)
if ret is None:raise xpathError('xmlXPathNewContext() failed')
__tmp = xpathContext(_obj=ret)
return __tmp
def xpathOrderDocElems(self):
"""Call this routine to speed up XPath computation on static
documents. This stamps all the element nodes with the
document order Like for line information, the order is kept
in the element->content field, the value stored is actually
- the node number (starting at -1) to be able to
differentiate from line numbers. """
ret = libxml2mod.xmlXPathOrderDocElems(self._o)
return ret
#
# xmlDoc functions from module xpointer
#
def xpointerNewContext(self, here, origin):
"""Create a new XPointer context """
if here is None: here__o = None
else: here__o = here._o
if origin is None: origin__o = None
else: origin__o = origin._o
ret = libxml2mod.xmlXPtrNewContext(self._o, here__o, origin__o)
if ret is None:raise treeError('xmlXPtrNewContext() failed')
__tmp = xpathContext(_obj=ret)
return __tmp
class parserCtxt(parserCtxtCore):
def __init__(self, _obj=None):
self._o = _obj
parserCtxtCore.__init__(self, _obj=_obj)
def __del__(self):
if self._o != None:
libxml2mod.xmlFreeParserCtxt(self._o)
self._o = None
# accessors for parserCtxt
def doc(self):
"""Get the document tree from a parser context. """
ret = libxml2mod.xmlParserGetDoc(self._o)
if ret is None:raise parserError('xmlParserGetDoc() failed')
__tmp = xmlDoc(_obj=ret)
return __tmp
def isValid(self):
"""Get the validity information from a parser context. """
ret = libxml2mod.xmlParserGetIsValid(self._o)
return ret
def lineNumbers(self, linenumbers):
"""Switch on the generation of line number for elements nodes. """
libxml2mod.xmlParserSetLineNumbers(self._o, linenumbers)
def loadSubset(self, loadsubset):
"""Switch the parser to load the DTD without validating. """
libxml2mod.xmlParserSetLoadSubset(self._o, loadsubset)
def pedantic(self, pedantic):
"""Switch the parser to be pedantic. """
libxml2mod.xmlParserSetPedantic(self._o, pedantic)
def replaceEntities(self, replaceEntities):
"""Switch the parser to replace entities. """
libxml2mod.xmlParserSetReplaceEntities(self._o, replaceEntities)
def validate(self, validate):
"""Switch the parser to validation mode. """
libxml2mod.xmlParserSetValidate(self._o, validate)
def wellFormed(self):
"""Get the well formed information from a parser context. """
ret = libxml2mod.xmlParserGetWellFormed(self._o)
return ret
#
# parserCtxt functions from module HTMLparser
#
def htmlCtxtReadDoc(self, cur, URL, encoding, options):
"""parse an XML in-memory document and build a tree. This
reuses the existing @ctxt parser context """
ret = libxml2mod.htmlCtxtReadDoc(self._o, cur, URL, encoding, options)
if ret is None:raise treeError('htmlCtxtReadDoc() failed')
__tmp = xmlDoc(_obj=ret)
return __tmp
def htmlCtxtReadFd(self, fd, URL, encoding, options):
"""parse an XML from a file descriptor and build a tree. This
reuses the existing @ctxt parser context """
ret = libxml2mod.htmlCtxtReadFd(self._o, fd, URL, encoding, options)
if ret is None:raise treeError('htmlCtxtReadFd() failed')
__tmp = xmlDoc(_obj=ret)
return __tmp
def htmlCtxtReadFile(self, filename, encoding, options):
"""parse an XML file from the filesystem or the network. This
reuses the existing @ctxt parser context """
ret = libxml2mod.htmlCtxtReadFile(self._o, filename, encoding, options)
if ret is None:raise treeError('htmlCtxtReadFile() failed')
__tmp = xmlDoc(_obj=ret)
return __tmp
def htmlCtxtReadMemory(self, buffer, size, URL, encoding, options):
"""parse an XML in-memory document and build a tree. This
reuses the existing @ctxt parser context """
ret = libxml2mod.htmlCtxtReadMemory(self._o, buffer, size, URL, encoding, options)
if ret is None:raise treeError('htmlCtxtReadMemory() failed')
__tmp = xmlDoc(_obj=ret)
return __tmp
def htmlCtxtReset(self):
"""Reset a parser context """
libxml2mod.htmlCtxtReset(self._o)
def htmlCtxtUseOptions(self, options):
"""Applies the options to the parser context """
ret = libxml2mod.htmlCtxtUseOptions(self._o, options)
return ret
def htmlFreeParserCtxt(self):
"""Free all the memory used by a parser context. However the
parsed document in ctxt->myDoc is not freed. """
libxml2mod.htmlFreeParserCtxt(self._o)
def htmlParseCharRef(self):
"""parse Reference declarations [66] CharRef ::= '&#' [0-9]+
';' | '&#x' [0-9a-fA-F]+ ';' """
ret = libxml2mod.htmlParseCharRef(self._o)
return ret
def htmlParseChunk(self, chunk, size, terminate):
"""Parse a Chunk of memory """
ret = libxml2mod.htmlParseChunk(self._o, chunk, size, terminate)
return ret
def htmlParseDocument(self):
"""parse an HTML document (and build a tree if using the
standard SAX interface). """
ret = libxml2mod.htmlParseDocument(self._o)
return ret
def htmlParseElement(self):
"""parse an HTML element, this is highly recursive this is
kept for compatibility with previous code versions [39]
element ::= EmptyElemTag | STag content ETag [41]
Attribute ::= Name Eq AttValue """
libxml2mod.htmlParseElement(self._o)
#
# parserCtxt functions from module parser
#
def byteConsumed(self):
"""This function provides the current index of the parser
relative to the start of the current entity. This function
is computed in bytes from the beginning starting at zero
and finishing at the size in byte of the file if parsing a
file. The function is of constant cost if the input is
UTF-8 but can be costly if run on non-UTF-8 input. """
ret = libxml2mod.xmlByteConsumed(self._o)
return ret
def clearParserCtxt(self):
"""Clear (release owned resources) and reinitialize a parser
context """
libxml2mod.xmlClearParserCtxt(self._o)
def ctxtReadDoc(self, cur, URL, encoding, options):
"""parse an XML in-memory document and build a tree. This
reuses the existing @ctxt parser context """
ret = libxml2mod.xmlCtxtReadDoc(self._o, cur, URL, encoding, options)
if ret is None:raise treeError('xmlCtxtReadDoc() failed')
__tmp = xmlDoc(_obj=ret)
return __tmp
def ctxtReadFd(self, fd, URL, encoding, options):
"""parse an XML from a file descriptor and build a tree. This
reuses the existing @ctxt parser context NOTE that the file
descriptor will not be closed when the reader is closed or
reset. """
ret = libxml2mod.xmlCtxtReadFd(self._o, fd, URL, encoding, options)
if ret is None:raise treeError('xmlCtxtReadFd() failed')
__tmp = xmlDoc(_obj=ret)
return __tmp
def ctxtReadFile(self, filename, encoding, options):
"""parse an XML file from the filesystem or the network. This
reuses the existing @ctxt parser context """
ret = libxml2mod.xmlCtxtReadFile(self._o, filename, encoding, options)
if ret is None:raise treeError('xmlCtxtReadFile() failed')
__tmp = xmlDoc(_obj=ret)
return __tmp
def ctxtReadMemory(self, buffer, size, URL, encoding, options):
"""parse an XML in-memory document and build a tree. This
reuses the existing @ctxt parser context """
ret = libxml2mod.xmlCtxtReadMemory(self._o, buffer, size, URL, encoding, options)
if ret is None:raise treeError('xmlCtxtReadMemory() failed')
__tmp = xmlDoc(_obj=ret)
return __tmp
def ctxtReset(self):
"""Reset a parser context """
libxml2mod.xmlCtxtReset(self._o)
def ctxtResetPush(self, chunk, size, filename, encoding):
"""Reset a push parser context """
ret = libxml2mod.xmlCtxtResetPush(self._o, chunk, size, filename, encoding)
return ret
def ctxtUseOptions(self, options):
"""Applies the options to the parser context """
ret = libxml2mod.xmlCtxtUseOptions(self._o, options)
return ret
def initParserCtxt(self):
"""Initialize a parser context """
ret = libxml2mod.xmlInitParserCtxt(self._o)
return ret
def parseChunk(self, chunk, size, terminate):
"""Parse a Chunk of memory """
ret = libxml2mod.xmlParseChunk(self._o, chunk, size, terminate)
return ret
def parseDocument(self):
"""parse an XML document (and build a tree if using the
standard SAX interface). [1] document ::= prolog element
Misc* [22] prolog ::= XMLDecl? Misc* (doctypedecl Misc*)? """
ret = libxml2mod.xmlParseDocument(self._o)
return ret
def parseExtParsedEnt(self):
"""parse a general parsed entity An external general parsed
entity is well-formed if it matches the production labeled
extParsedEnt. [78] extParsedEnt ::= TextDecl? content """
ret = libxml2mod.xmlParseExtParsedEnt(self._o)
return ret
def setupParserForBuffer(self, buffer, filename):
"""Setup the parser context to parse a new buffer; Clears any
prior contents from the parser context. The buffer
parameter must not be None, but the filename parameter can
be """
libxml2mod.xmlSetupParserForBuffer(self._o, buffer, filename)
def stopParser(self):
"""Blocks further parser processing """
libxml2mod.xmlStopParser(self._o)
#
# parserCtxt functions from module parserInternals
#
def decodeEntities(self, len, what, end, end2, end3):
"""This function is deprecated, we now always process entities
content through xmlStringDecodeEntities TODO: remove it in
next major release. [67] Reference ::= EntityRef | CharRef
[69] PEReference ::= '%' Name ';' """
ret = libxml2mod.xmlDecodeEntities(self._o, len, what, end, end2, end3)
return ret
def handleEntity(self, entity):
"""Default handling of defined entities, when should we define
a new input stream ? When do we just handle that as a set
of chars ? OBSOLETE: to be removed at some point. """
if entity is None: entity__o = None
else: entity__o = entity._o
libxml2mod.xmlHandleEntity(self._o, entity__o)
def namespaceParseNCName(self):
"""parse an XML namespace name. TODO: this seems not in use
anymore, the namespace handling is done on top of the SAX
interfaces, i.e. not on raw input. [NS 3] NCName ::=
(Letter | '_') (NCNameChar)* [NS 4] NCNameChar ::= Letter
| Digit | '.' | '-' | '_' | CombiningChar | Extender """
ret = libxml2mod.xmlNamespaceParseNCName(self._o)
return ret
def namespaceParseNSDef(self):
"""parse a namespace prefix declaration TODO: this seems not
in use anymore, the namespace handling is done on top of
the SAX interfaces, i.e. not on raw input. [NS 1] NSDef
::= PrefixDef Eq SystemLiteral [NS 2] PrefixDef ::=
'xmlns' (':' NCName)? """
ret = libxml2mod.xmlNamespaceParseNSDef(self._o)
return ret
def nextChar(self):
"""Skip to the next char input char. """
libxml2mod.xmlNextChar(self._o)
def parseAttValue(self):
"""parse a value for an attribute Note: the parser won't do
substitution of entities here, this will be handled later
in xmlStringGetNodeList [10] AttValue ::= '"' ([^<&"] |
Reference)* '"' | "'" ([^<&'] | Reference)* "'" 3.3.3
Attribute-Value Normalization: Before the value of an
attribute is passed to the application or checked for
validity, the XML processor must normalize it as follows: -
a character reference is processed by appending the
referenced character to the attribute value - an entity
reference is processed by recursively processing the
replacement text of the entity - a whitespace character
(#x20, #xD, #xA, #x9) is processed by appending #x20 to the
normalized value, except that only a single #x20 is
appended for a "#xD#xA" sequence that is part of an
external parsed entity or the literal entity value of an
internal parsed entity - other characters are processed by
appending them to the normalized value If the declared
value is not CDATA, then the XML processor must further
process the normalized attribute value by discarding any
leading and trailing space (#x20) characters, and by
replacing sequences of space (#x20) characters by a single
space (#x20) character. All attributes for which no
declaration has been read should be treated by a
non-validating parser as if declared CDATA. """
ret = libxml2mod.xmlParseAttValue(self._o)
return ret
def parseAttributeListDecl(self):
""": parse the Attribute list def for an element [52]
AttlistDecl ::= '<!ATTLIST' S Name AttDef* S? '>' [53]
AttDef ::= S Name S AttType S DefaultDecl """
libxml2mod.xmlParseAttributeListDecl(self._o)
def parseCDSect(self):
"""Parse escaped pure raw content. [18] CDSect ::= CDStart
CData CDEnd [19] CDStart ::= '<![CDATA[' [20] Data ::=
(Char* - (Char* ']]>' Char*)) [21] CDEnd ::= ']]>' """
libxml2mod.xmlParseCDSect(self._o)
def parseCharData(self, cdata):
"""parse a CharData section. if we are within a CDATA section
']]>' marks an end of section. The right angle bracket (>)
may be represented using the string ">", and must, for
compatibility, be escaped using ">" or a character
reference when it appears in the string "]]>" in content,
when that string is not marking the end of a CDATA section.
[14] CharData ::= [^<&]* - ([^<&]* ']]>' [^<&]*) """
libxml2mod.xmlParseCharData(self._o, cdata)
def parseCharRef(self):
"""parse Reference declarations [66] CharRef ::= '&#' [0-9]+
';' | '&#x' [0-9a-fA-F]+ ';' [ WFC: Legal Character ]
Characters referred to using character references must
match the production for Char. """
ret = libxml2mod.xmlParseCharRef(self._o)
return ret
def parseComment(self):
"""Skip an XML (SGML) comment <!-- .... --> The spec says that
"For compatibility, the string "--" (double-hyphen) must
not occur within comments. " [15] Comment ::= '<!--'
((Char - '-') | ('-' (Char - '-')))* '-->' """
libxml2mod.xmlParseComment(self._o)
def parseContent(self):
"""Parse a content: [43] content ::= (element | CharData |
Reference | CDSect | PI | Comment)* """
libxml2mod.xmlParseContent(self._o)
def parseDocTypeDecl(self):
"""parse a DOCTYPE declaration [28] doctypedecl ::=
'<!DOCTYPE' S Name (S ExternalID)? S? ('[' (markupdecl |
PEReference | S)* ']' S?)? '>' [ VC: Root Element Type ]
The Name in the document type declaration must match the
element type of the root element. """
libxml2mod.xmlParseDocTypeDecl(self._o)
def parseElement(self):
"""parse an XML element, this is highly recursive [39]
element ::= EmptyElemTag | STag content ETag [ WFC:
Element Type Match ] The Name in an element's end-tag must
match the element type in the start-tag. """
libxml2mod.xmlParseElement(self._o)
def parseElementDecl(self):
"""parse an Element declaration. [45] elementdecl ::=
'<!ELEMENT' S Name S contentspec S? '>' [ VC: Unique
Element Type Declaration ] No element type may be declared
more than once """
ret = libxml2mod.xmlParseElementDecl(self._o)
return ret
def parseEncName(self):
"""parse the XML encoding name [81] EncName ::= [A-Za-z]
([A-Za-z0-9._] | '-')* """
ret = libxml2mod.xmlParseEncName(self._o)
return ret
def parseEncodingDecl(self):
"""parse the XML encoding declaration [80] EncodingDecl ::= S
'encoding' Eq ('"' EncName '"' | "'" EncName "'") this
setups the conversion filters. """
ret = libxml2mod.xmlParseEncodingDecl(self._o)
return ret
def parseEndTag(self):
"""parse an end of tag [42] ETag ::= '</' Name S? '>' With
namespace [NS 9] ETag ::= '</' QName S? '>' """
libxml2mod.xmlParseEndTag(self._o)
def parseEntityDecl(self):
"""parse <!ENTITY declarations [70] EntityDecl ::= GEDecl |
PEDecl [71] GEDecl ::= '<!ENTITY' S Name S EntityDef S?
'>' [72] PEDecl ::= '<!ENTITY' S '%' S Name S PEDef S? '>'
[73] EntityDef ::= EntityValue | (ExternalID NDataDecl?)
[74] PEDef ::= EntityValue | ExternalID [76] NDataDecl ::=
S 'NDATA' S Name [ VC: Notation Declared ] The Name must
match the declared name of a notation. """
libxml2mod.xmlParseEntityDecl(self._o)
def parseEntityRef(self):
"""parse ENTITY references declarations [68] EntityRef ::=
'&' Name ';' [ WFC: Entity Declared ] In a document
without any DTD, a document with only an internal DTD
subset which contains no parameter entity references, or a
document with "standalone='yes'", the Name given in the
entity reference must match that in an entity declaration,
except that well-formed documents need not declare any of
the following entities: amp, lt, gt, apos, quot. The
declaration of a parameter entity must precede any
reference to it. Similarly, the declaration of a general
entity must precede any reference to it which appears in a
default value in an attribute-list declaration. Note that
if entities are declared in the external subset or in
external parameter entities, a non-validating processor is
not obligated to read and process their declarations; for
such documents, the rule that an entity must be declared is
a well-formedness constraint only if standalone='yes'. [
WFC: Parsed Entity ] An entity reference must not contain
the name of an unparsed entity """
ret = libxml2mod.xmlParseEntityRef(self._o)
if ret is None:raise parserError('xmlParseEntityRef() failed')
__tmp = xmlEntity(_obj=ret)
return __tmp
def parseExternalSubset(self, ExternalID, SystemID):
"""parse Markup declarations from an external subset [30]
extSubset ::= textDecl? extSubsetDecl [31] extSubsetDecl
::= (markupdecl | conditionalSect | PEReference | S) * """
libxml2mod.xmlParseExternalSubset(self._o, ExternalID, SystemID)
def parseMarkupDecl(self):
"""parse Markup declarations [29] markupdecl ::= elementdecl
| AttlistDecl | EntityDecl | NotationDecl | PI | Comment [
VC: Proper Declaration/PE Nesting ] Parameter-entity
replacement text must be properly nested with markup
declarations. That is to say, if either the first character
or the last character of a markup declaration (markupdecl
above) is contained in the replacement text for a
parameter-entity reference, both must be contained in the
same replacement text. [ WFC: PEs in Internal Subset ] In
the internal DTD subset, parameter-entity references can
occur only where markup declarations can occur, not within
markup declarations. (This does not apply to references
that occur in external parameter entities or to the
external subset.) """
libxml2mod.xmlParseMarkupDecl(self._o)
def parseMisc(self):
"""parse an XML Misc* optional field. [27] Misc ::= Comment |
PI | S """
libxml2mod.xmlParseMisc(self._o)
def parseName(self):
"""parse an XML name. [4] NameChar ::= Letter | Digit | '.' |
'-' | '_' | ':' | CombiningChar | Extender [5] Name ::=
(Letter | '_' | ':') (NameChar)* [6] Names ::= Name (#x20
Name)* """
ret = libxml2mod.xmlParseName(self._o)
return ret
def parseNamespace(self):
"""xmlParseNamespace: parse specific PI '<?namespace ...'
constructs. This is what the older xml-name Working Draft
specified, a bunch of other stuff may still rely on it, so
support is still here as if it was declared on the root of
the Tree:-( TODO: remove from library To be removed at
next drop of binary compatibility """
libxml2mod.xmlParseNamespace(self._o)
def parseNmtoken(self):
"""parse an XML Nmtoken. [7] Nmtoken ::= (NameChar)+ [8]
Nmtokens ::= Nmtoken (#x20 Nmtoken)* """
ret = libxml2mod.xmlParseNmtoken(self._o)
return ret
def parseNotationDecl(self):
"""parse a notation declaration [82] NotationDecl ::=
'<!NOTATION' S Name S (ExternalID | PublicID) S? '>'
Hence there is actually 3 choices: 'PUBLIC' S PubidLiteral
'PUBLIC' S PubidLiteral S SystemLiteral and 'SYSTEM' S
SystemLiteral See the NOTE on xmlParseExternalID(). """
libxml2mod.xmlParseNotationDecl(self._o)
def parsePEReference(self):
"""parse PEReference declarations The entity content is
handled directly by pushing it's content as a new input
stream. [69] PEReference ::= '%' Name ';' [ WFC: No
Recursion ] A parsed entity must not contain a recursive
reference to itself, either directly or indirectly. [ WFC:
Entity Declared ] In a document without any DTD, a document
with only an internal DTD subset which contains no
parameter entity references, or a document with
"standalone='yes'", ... ... The declaration of a parameter
entity must precede any reference to it... [ VC: Entity
Declared ] In a document with an external subset or
external parameter entities with "standalone='no'", ...
... The declaration of a parameter entity must precede any
reference to it... [ WFC: In DTD ] Parameter-entity
references may only appear in the DTD. NOTE: misleading but
this is handled. """
libxml2mod.xmlParsePEReference(self._o)
def parsePI(self):
"""parse an XML Processing Instruction. [16] PI ::= '<?'
PITarget (S (Char* - (Char* '?>' Char*)))? '?>' The
processing is transfered to SAX once parsed. """
libxml2mod.xmlParsePI(self._o)
def parsePITarget(self):
"""parse the name of a PI [17] PITarget ::= Name - (('X' |
'x') ('M' | 'm') ('L' | 'l')) """
ret = libxml2mod.xmlParsePITarget(self._o)
return ret
def parsePubidLiteral(self):
"""parse an XML public literal [12] PubidLiteral ::= '"'
PubidChar* '"' | "'" (PubidChar - "'")* "'" """
ret = libxml2mod.xmlParsePubidLiteral(self._o)
return ret
def parseQuotedString(self):
"""Parse and return a string between quotes or doublequotes
TODO: Deprecated, to be removed at next drop of binary
compatibility """
ret = libxml2mod.xmlParseQuotedString(self._o)
return ret
def parseReference(self):
"""parse and handle entity references in content, depending on
the SAX interface, this may end-up in a call to character()
if this is a CharRef, a predefined entity, if there is no
reference() callback. or if the parser was asked to switch
to that mode. [67] Reference ::= EntityRef | CharRef """
libxml2mod.xmlParseReference(self._o)
def parseSDDecl(self):
"""parse the XML standalone declaration [32] SDDecl ::= S
'standalone' Eq (("'" ('yes' | 'no') "'") | ('"' ('yes' |
'no')'"')) [ VC: Standalone Document Declaration ] TODO
The standalone document declaration must have the value
"no" if any external markup declarations contain
declarations of: - attributes with default values, if
elements to which these attributes apply appear in the
document without specifications of values for these
attributes, or - entities (other than amp, lt, gt, apos,
quot), if references to those entities appear in the
document, or - attributes with values subject to
normalization, where the attribute appears in the document
with a value which will change as a result of
normalization, or - element types with element content, if
white space occurs directly within any instance of those
types. """
ret = libxml2mod.xmlParseSDDecl(self._o)
return ret
def parseStartTag(self):
"""parse a start of tag either for rule element or
EmptyElement. In both case we don't parse the tag closing
chars. [40] STag ::= '<' Name (S Attribute)* S? '>' [
WFC: Unique Att Spec ] No attribute name may appear more
than once in the same start-tag or empty-element tag. [44]
EmptyElemTag ::= '<' Name (S Attribute)* S? '/>' [ WFC:
Unique Att Spec ] No attribute name may appear more than
once in the same start-tag or empty-element tag. With
namespace: [NS 8] STag ::= '<' QName (S Attribute)* S? '>'
[NS 10] EmptyElement ::= '<' QName (S Attribute)* S? '/>' """
ret = libxml2mod.xmlParseStartTag(self._o)
return ret
def parseSystemLiteral(self):
"""parse an XML Literal [11] SystemLiteral ::= ('"' [^"]*
'"') | ("'" [^']* "'") """
ret = libxml2mod.xmlParseSystemLiteral(self._o)
return ret
def parseTextDecl(self):
"""parse an XML declaration header for external entities [77]
TextDecl ::= '<?xml' VersionInfo? EncodingDecl S? '?>' """
libxml2mod.xmlParseTextDecl(self._o)
def parseVersionInfo(self):
"""parse the XML version. [24] VersionInfo ::= S 'version' Eq
(' VersionNum ' | " VersionNum ") [25] Eq ::= S? '=' S? """
ret = libxml2mod.xmlParseVersionInfo(self._o)
return ret
def parseVersionNum(self):
"""parse the XML version value. [26] VersionNum ::= '1.'
[0-9]+ In practice allow [0-9].[0-9]+ at that level """
ret = libxml2mod.xmlParseVersionNum(self._o)
return ret
def parseXMLDecl(self):
"""parse an XML declaration header [23] XMLDecl ::= '<?xml'
VersionInfo EncodingDecl? SDDecl? S? '?>' """
libxml2mod.xmlParseXMLDecl(self._o)
def parserHandlePEReference(self):
"""[69] PEReference ::= '%' Name ';' [ WFC: No Recursion ] A
parsed entity must not contain a recursive reference to
itself, either directly or indirectly. [ WFC: Entity
Declared ] In a document without any DTD, a document with
only an internal DTD subset which contains no parameter
entity references, or a document with "standalone='yes'",
... ... The declaration of a parameter entity must precede
any reference to it... [ VC: Entity Declared ] In a
document with an external subset or external parameter
entities with "standalone='no'", ... ... The declaration
of a parameter entity must precede any reference to it...
[ WFC: In DTD ] Parameter-entity references may only appear
in the DTD. NOTE: misleading but this is handled. A
PEReference may have been detected in the current input
stream the handling is done accordingly to
http://www.w3.org/TR/REC-xml#entproc i.e. - Included in
literal in entity values - Included as Parameter Entity
reference within DTDs """
libxml2mod.xmlParserHandlePEReference(self._o)
def parserHandleReference(self):
"""TODO: Remove, now deprecated ... the test is done directly
in the content parsing routines. [67] Reference ::=
EntityRef | CharRef [68] EntityRef ::= '&' Name ';' [
WFC: Entity Declared ] the Name given in the entity
reference must match that in an entity declaration, except
that well-formed documents need not declare any of the
following entities: amp, lt, gt, apos, quot. [ WFC: Parsed
Entity ] An entity reference must not contain the name of
an unparsed entity [66] CharRef ::= '&#' [0-9]+ ';' |
'&#x' [0-9a-fA-F]+ ';' A PEReference may have been
detected in the current input stream the handling is done
accordingly to http://www.w3.org/TR/REC-xml#entproc """
libxml2mod.xmlParserHandleReference(self._o)
def popInput(self):
"""xmlPopInput: the current input pointed by ctxt->input came
to an end pop it and return the next char. """
ret = libxml2mod.xmlPopInput(self._o)
return ret
def scanName(self):
"""Trickery: parse an XML name but without consuming the input
flow Needed for rollback cases. Used only when parsing
entities references. TODO: seems deprecated now, only used
in the default part of xmlParserHandleReference [4]
NameChar ::= Letter | Digit | '.' | '-' | '_' | ':' |
CombiningChar | Extender [5] Name ::= (Letter | '_' | ':')
(NameChar)* [6] Names ::= Name (S Name)* """
ret = libxml2mod.xmlScanName(self._o)
return ret
def skipBlankChars(self):
"""skip all blanks character found at that point in the input
streams. It pops up finished entities in the process if
allowable at that point. """
ret = libxml2mod.xmlSkipBlankChars(self._o)
return ret
def stringDecodeEntities(self, str, what, end, end2, end3):
    """Process an entity string content, performing the adequate
       substitutions for [67] Reference ::= EntityRef | CharRef
       and [69] PEReference ::= '%' Name ';'. """
    return libxml2mod.xmlStringDecodeEntities(
        self._o, str, what, end, end2, end3)
def stringLenDecodeEntities(self, str, len, what, end, end2, end3):
    """Length-bounded variant: process an entity string content,
       performing the adequate substitutions for [67] Reference
       and [69] PEReference. """
    return libxml2mod.xmlStringLenDecodeEntities(
        self._o, str, len, what, end, end2, end3)
class xmlAttr(xmlNode):
    """Wrapper for an xmlAttr C structure (an attribute node)."""

    def __init__(self, _obj=None):
        if checkWrapper(_obj) != 0:
            raise TypeError('xmlAttr got a wrong wrapper object type')
        self._o = _obj
        xmlNode.__init__(self, _obj=_obj)

    def __repr__(self):
        return "<xmlAttr (%s) object at 0x%x>" % (self.name, int(pos_id (self)))

    # xmlAttr functions from module debugXML

    def debugDumpAttr(self, output, depth):
        """Write debug information for the attribute. """
        libxml2mod.xmlDebugDumpAttr(output, self._o, depth)

    def debugDumpAttrList(self, output, depth):
        """Write debug information for the attribute list. """
        libxml2mod.xmlDebugDumpAttrList(output, self._o, depth)

    # xmlAttr functions from module tree

    def copyProp(self, target):
        """Copy the attribute. """
        target__o = target._o if target is not None else None
        result = libxml2mod.xmlCopyProp(target__o, self._o)
        if result is None:
            raise treeError('xmlCopyProp() failed')
        return xmlAttr(_obj=result)

    def copyPropList(self, target):
        """Copy an attribute list. """
        target__o = target._o if target is not None else None
        result = libxml2mod.xmlCopyPropList(target__o, self._o)
        if result is None:
            raise treeError('xmlCopyPropList() failed')
        return xmlAttr(_obj=result)

    def freeProp(self):
        """Free this attribute; all of its content is freed too. """
        libxml2mod.xmlFreeProp(self._o)

    def freePropList(self):
        """Free this property and all its siblings; all the
           children are freed too. """
        libxml2mod.xmlFreePropList(self._o)

    def removeProp(self):
        """Unlink and free this attribute; all the content is freed
           too.  Note: this doesn't work for namespace definition
           attributes. """
        return libxml2mod.xmlRemoveProp(self._o)

    # xmlAttr functions from module valid

    def removeID(self, doc):
        """Remove this attribute from the internally maintained ID
           table. """
        doc__o = doc._o if doc is not None else None
        return libxml2mod.xmlRemoveID(doc__o, self._o)

    def removeRef(self, doc):
        """Remove this attribute from the internally maintained Ref
           table. """
        doc__o = doc._o if doc is not None else None
        return libxml2mod.xmlRemoveRef(doc__o, self._o)
class xmlAttribute(xmlNode):
    """Wrapper for an xmlAttribute C structure (a DTD attribute
       declaration)."""

    def __init__(self, _obj=None):
        if checkWrapper(_obj) != 0:
            raise TypeError('xmlAttribute got a wrong wrapper object type')
        self._o = _obj
        xmlNode.__init__(self, _obj=_obj)

    def __repr__(self):
        return "<xmlAttribute (%s) object at 0x%x>" % (self.name, int(pos_id (self)))
class catalog:
    """Wrapper for an xmlCatalog C structure; frees it on
       destruction."""

    def __init__(self, _obj=None):
        # Hold the C-level wrapper object (None when unattached).
        self._o = _obj

    def __del__(self):
        if self._o is not None:
            libxml2mod.xmlFreeCatalog(self._o)
            self._o = None

    # catalog functions from module catalog

    def add(self, type, orig, replace):
        """Add an entry to the catalog; may overwrite existing but
           different entries. """
        return libxml2mod.xmlACatalogAdd(self._o, type, orig, replace)

    def catalogIsEmpty(self):
        """Check whether the catalog is empty. """
        return libxml2mod.xmlCatalogIsEmpty(self._o)

    def convertSGMLCatalog(self):
        """Convert all the SGML catalog entries to XML ones. """
        return libxml2mod.xmlConvertSGMLCatalog(self._o)

    def dump(self, out):
        """Dump this catalog to the given file. """
        libxml2mod.xmlACatalogDump(self._o, out)

    def remove(self, value):
        """Remove an entry from the catalog. """
        return libxml2mod.xmlACatalogRemove(self._o, value)

    def resolve(self, pubID, sysID):
        """Do a complete resolution lookup of an External
           Identifier. """
        return libxml2mod.xmlACatalogResolve(self._o, pubID, sysID)

    def resolvePublic(self, pubID):
        """Try to look up the catalog local reference associated to
           a public ID in this catalog. """
        return libxml2mod.xmlACatalogResolvePublic(self._o, pubID)

    def resolveSystem(self, sysID):
        """Try to look up the catalog resource for a system ID. """
        return libxml2mod.xmlACatalogResolveSystem(self._o, sysID)

    def resolveURI(self, URI):
        """Do a complete resolution lookup of an URI. """
        return libxml2mod.xmlACatalogResolveURI(self._o, URI)
class xmlDtd(xmlNode):
    """Wrapper for an xmlDtd C structure (a DTD node)."""

    def __init__(self, _obj=None):
        if checkWrapper(_obj) != 0:
            raise TypeError('xmlDtd got a wrong wrapper object type')
        self._o = _obj
        xmlNode.__init__(self, _obj=_obj)

    def __repr__(self):
        return "<xmlDtd (%s) object at 0x%x>" % (self.name, int(pos_id (self)))

    # xmlDtd functions from module debugXML

    def debugDumpDTD(self, output):
        """Write debug information for the DTD. """
        libxml2mod.xmlDebugDumpDTD(output, self._o)

    # xmlDtd functions from module tree

    def copyDtd(self):
        """Copy the dtd. """
        result = libxml2mod.xmlCopyDtd(self._o)
        if result is None:
            raise treeError('xmlCopyDtd() failed')
        return xmlDtd(_obj=result)

    def freeDtd(self):
        """Free the DTD structure. """
        libxml2mod.xmlFreeDtd(self._o)

    # xmlDtd functions from module valid

    def dtdAttrDesc(self, elem, name):
        """Search the DTD for the description of this attribute on
           this element. """
        result = libxml2mod.xmlGetDtdAttrDesc(self._o, elem, name)
        if result is None:
            raise treeError('xmlGetDtdAttrDesc() failed')
        return xmlAttribute(_obj=result)

    def dtdElementDesc(self, name):
        """Search the DTD for the description of this element. """
        result = libxml2mod.xmlGetDtdElementDesc(self._o, name)
        if result is None:
            raise treeError('xmlGetDtdElementDesc() failed')
        return xmlElement(_obj=result)

    def dtdQAttrDesc(self, elem, name, prefix):
        """Search the DTD for the description of this qualified
           attribute on this element. """
        result = libxml2mod.xmlGetDtdQAttrDesc(self._o, elem, name, prefix)
        if result is None:
            raise treeError('xmlGetDtdQAttrDesc() failed')
        return xmlAttribute(_obj=result)

    def dtdQElementDesc(self, name, prefix):
        """Search the DTD for the description of this qualified
           element. """
        result = libxml2mod.xmlGetDtdQElementDesc(self._o, name, prefix)
        if result is None:
            raise treeError('xmlGetDtdQElementDesc() failed')
        return xmlElement(_obj=result)
class xmlElement(xmlNode):
    """Wrapper for an xmlElement C structure (a DTD element
       declaration)."""

    def __init__(self, _obj=None):
        if checkWrapper(_obj) != 0:
            raise TypeError('xmlElement got a wrong wrapper object type')
        self._o = _obj
        xmlNode.__init__(self, _obj=_obj)

    def __repr__(self):
        return "<xmlElement (%s) object at 0x%x>" % (self.name, int(pos_id (self)))
class xmlEntity(xmlNode):
    """Wrapper for an xmlEntity C structure (an entity node)."""

    def __init__(self, _obj=None):
        if checkWrapper(_obj) != 0:
            raise TypeError('xmlEntity got a wrong wrapper object type')
        self._o = _obj
        xmlNode.__init__(self, _obj=_obj)

    def __repr__(self):
        return "<xmlEntity (%s) object at 0x%x>" % (self.name, int(pos_id (self)))

    # xmlEntity functions from module parserInternals

    def handleEntity(self, ctxt):
        """Default handling of defined entities.  When should we
           define a new input stream?  When do we just handle that
           as a set of chars?  OBSOLETE: to be removed at some
           point. """
        ctxt__o = ctxt._o if ctxt is not None else None
        libxml2mod.xmlHandleEntity(ctxt__o, self._o)
class Error:
    """Wrapper for an xmlError C structure describing a parsing or
       validation error."""

    def __init__(self, _obj=None):
        # Hold the C-level wrapper object (None when unattached).
        self._o = _obj

    # accessors for Error

    def code(self):
        """The error code, e.g. an xmlParserError. """
        return libxml2mod.xmlErrorGetCode(self._o)

    def domain(self):
        """What part of the library raised this error. """
        return libxml2mod.xmlErrorGetDomain(self._o)

    def file(self):
        """The filename. """
        return libxml2mod.xmlErrorGetFile(self._o)

    def level(self):
        """How consequent the error is. """
        return libxml2mod.xmlErrorGetLevel(self._o)

    def line(self):
        """The line number, if available. """
        return libxml2mod.xmlErrorGetLine(self._o)

    def message(self):
        """Human-readable informative error message. """
        return libxml2mod.xmlErrorGetMessage(self._o)

    # Error functions from module xmlerror

    def copyError(self, to):
        """Save the original error to the new place. """
        to__o = to._o if to is not None else None
        return libxml2mod.xmlCopyError(self._o, to__o)

    def resetError(self):
        """Clean up the error. """
        libxml2mod.xmlResetError(self._o)
class xmlNs(xmlNode):
    """Wrapper for an xmlNs C structure (a namespace node)."""

    def __init__(self, _obj=None):
        if checkWrapper(_obj) != 0:
            raise TypeError('xmlNs got a wrong wrapper object type')
        self._o = _obj
        xmlNode.__init__(self, _obj=_obj)

    def __repr__(self):
        return "<xmlNs (%s) object at 0x%x>" % (self.name, int(pos_id (self)))

    # xmlNs functions from module tree

    def copyNamespace(self):
        """Copy the namespace. """
        result = libxml2mod.xmlCopyNamespace(self._o)
        if result is None:
            raise treeError('xmlCopyNamespace() failed')
        return xmlNs(_obj=result)

    def copyNamespaceList(self):
        """Copy a namespace list. """
        result = libxml2mod.xmlCopyNamespaceList(self._o)
        if result is None:
            raise treeError('xmlCopyNamespaceList() failed')
        return xmlNs(_obj=result)

    def freeNs(self):
        """Free the structures associated to this namespace. """
        libxml2mod.xmlFreeNs(self._o)

    def freeNsList(self):
        """Free all the structures associated to the chained
           namespaces. """
        libxml2mod.xmlFreeNsList(self._o)

    def newChild(self, parent, name, content):
        """Create a new child element, added at the end of @parent's
           children list.  @ns and @content are optional (None);
           if @ns is None the new element inherits @parent's
           namespace.  If @content is non-None, a child list of
           TEXTs and ENTITY_REFs nodes is created.  NOTE: @content
           is treated as XML CDATA, so entity references are
           allowed but special chars must be escaped first with
           xmlEncodeEntitiesReentrant() — or use
           xmlNewTextChild() instead. """
        parent__o = parent._o if parent is not None else None
        result = libxml2mod.xmlNewChild(parent__o, self._o, name, content)
        if result is None:
            raise treeError('xmlNewChild() failed')
        return xmlNode(_obj=result)

    def newDocNode(self, doc, name, content):
        """Create a new element node within a document.  @ns and
           @content are optional (None).  NOTE: @content is treated
           as XML CDATA, so entity references are allowed but
           special chars must be escaped first with
           xmlEncodeEntitiesReentrant(); use xmlNewDocRawNode() if
           entities support is not needed. """
        doc__o = doc._o if doc is not None else None
        result = libxml2mod.xmlNewDocNode(doc__o, self._o, name, content)
        if result is None:
            raise treeError('xmlNewDocNode() failed')
        return xmlNode(_obj=result)

    def newDocNodeEatName(self, doc, name, content):
        """Like newDocNode() but takes ownership of @name.  @ns and
           @content are optional (None); @content is treated as
           XML CDATA (escape special chars first, or use
           xmlNewDocRawNode()). """
        doc__o = doc._o if doc is not None else None
        result = libxml2mod.xmlNewDocNodeEatName(doc__o, self._o, name, content)
        if result is None:
            raise treeError('xmlNewDocNodeEatName() failed')
        return xmlNode(_obj=result)

    def newDocRawNode(self, doc, name, content):
        """Create a new element node within a document, with raw
           (unparsed) content.  @ns and @content are optional
           (None). """
        doc__o = doc._o if doc is not None else None
        result = libxml2mod.xmlNewDocRawNode(doc__o, self._o, name, content)
        if result is None:
            raise treeError('xmlNewDocRawNode() failed')
        return xmlNode(_obj=result)

    def newNodeEatName(self, name):
        """Create a new element node, taking ownership of @name.
           @ns is optional (None). """
        result = libxml2mod.xmlNewNodeEatName(self._o, name)
        if result is None:
            raise treeError('xmlNewNodeEatName() failed')
        return xmlNode(_obj=result)

    def newNsProp(self, node, name, value):
        """Create a new property tagged with this namespace and
           carried by @node. """
        node__o = node._o if node is not None else None
        result = libxml2mod.xmlNewNsProp(node__o, self._o, name, value)
        if result is None:
            raise treeError('xmlNewNsProp() failed')
        return xmlAttr(_obj=result)

    def newNsPropEatName(self, node, name, value):
        """Like newNsProp() but takes ownership of @name. """
        node__o = node._o if node is not None else None
        result = libxml2mod.xmlNewNsPropEatName(node__o, self._o, name, value)
        if result is None:
            raise treeError('xmlNewNsPropEatName() failed')
        return xmlAttr(_obj=result)

    def newTextChild(self, parent, name, content):
        """Create a new child element, added at the end of @parent's
           children list.  @ns and @content are optional (None);
           if @ns is None the new element inherits @parent's
           namespace.  If @content is non-None a child TEXT node
           is created containing @content.  NOTE: use
           xmlNewChild() if @content will contain entities that
           must be preserved; use this function to have reserved
           XML chars in @content (ampersand, angle brackets)
           automatically replaced by their escaped entity
           representations. """
        parent__o = parent._o if parent is not None else None
        result = libxml2mod.xmlNewTextChild(parent__o, self._o, name, content)
        if result is None:
            raise treeError('xmlNewTextChild() failed')
        return xmlNode(_obj=result)

    def setNs(self, node):
        """Associate this namespace to a node, a posteriori. """
        node__o = node._o if node is not None else None
        libxml2mod.xmlSetNs(node__o, self._o)

    def setNsProp(self, node, name, value):
        """Set (or reset) an attribute carried by a node.  The ns
           structure must be in scope — this is not checked. """
        node__o = node._o if node is not None else None
        result = libxml2mod.xmlSetNsProp(node__o, self._o, name, value)
        if result is None:
            raise treeError('xmlSetNsProp() failed')
        return xmlAttr(_obj=result)

    def unsetNsProp(self, node, name):
        """Remove an attribute carried by a node. """
        node__o = node._o if node is not None else None
        return libxml2mod.xmlUnsetNsProp(node__o, self._o, name)

    # xmlNs functions from module xpathInternals

    def xpathNodeSetFreeNs(self):
        """Namespace nodes in libxml don't match the XPath
           semantic: in a node set they are duplicated and the
           next pointer is set to the parent node.  Check whether
           such a node needs to be freed. """
        libxml2mod.xmlXPathNodeSetFreeNs(self._o)
class outputBuffer(ioWriteWrapper):
    """Wrapper for an xmlOutputBuffer C structure (a serialization
       sink)."""

    def __init__(self, _obj=None):
        self._o = _obj
        ioWriteWrapper.__init__(self, _obj=_obj)

    # outputBuffer functions from module HTMLtree

    def htmlDocContentDumpFormatOutput(self, cur, encoding, format):
        """Dump an HTML document, with optional formatting. """
        cur__o = cur._o if cur is not None else None
        libxml2mod.htmlDocContentDumpFormatOutput(self._o, cur__o, encoding, format)

    def htmlDocContentDumpOutput(self, cur, encoding):
        """Dump an HTML document; formatting returns/spaces are
           added. """
        cur__o = cur._o if cur is not None else None
        libxml2mod.htmlDocContentDumpOutput(self._o, cur__o, encoding)

    def htmlNodeDumpFormatOutput(self, doc, cur, encoding, format):
        """Dump an HTML node; recursive behaviour, children are
           printed too. """
        doc__o = doc._o if doc is not None else None
        cur__o = cur._o if cur is not None else None
        libxml2mod.htmlNodeDumpFormatOutput(self._o, doc__o, cur__o, encoding, format)

    def htmlNodeDumpOutput(self, doc, cur, encoding):
        """Dump an HTML node; recursive behaviour, children are
           printed too and formatting returns/spaces are added. """
        doc__o = doc._o if doc is not None else None
        cur__o = cur._o if cur is not None else None
        libxml2mod.htmlNodeDumpOutput(self._o, doc__o, cur__o, encoding)

    # outputBuffer functions from module tree

    def nodeDumpOutput(self, doc, cur, level, format, encoding):
        """Dump an XML node; recursive behaviour, children are
           printed too.  Note that @format = 1 provides node
           indenting only if xmlIndentTreeOutput = 1 or
           xmlKeepBlanksDefault(0) was called. """
        doc__o = doc._o if doc is not None else None
        cur__o = cur._o if cur is not None else None
        libxml2mod.xmlNodeDumpOutput(self._o, doc__o, cur__o, level, format, encoding)

    def saveFileTo(self, cur, encoding):
        """Dump an XML document to this I/O buffer.  Warning: this
           calls xmlOutputBufferClose() on the buffer, which is
           not available after this call. """
        cur__o = cur._o if cur is not None else None
        return libxml2mod.xmlSaveFileTo(self._o, cur__o, encoding)

    def saveFormatFileTo(self, cur, encoding, format):
        """Dump an XML document to this I/O buffer with optional
           formatting.  Warning: this calls
           xmlOutputBufferClose() on the buffer, which is not
           available after this call. """
        cur__o = cur._o if cur is not None else None
        return libxml2mod.xmlSaveFormatFileTo(self._o, cur__o, encoding, format)

    # outputBuffer functions from module xmlIO

    def getContent(self):
        """Give a pointer to the data currently held in the output
           buffer. """
        return libxml2mod.xmlOutputBufferGetContent(self._o)

    def write(self, len, buf):
        """Write the content of the array into the output I/O
           buffer, handling I18N transcoding from internal UTF-8.
           The buffer is lossless — it stores data in case of
           partial or delayed writes. """
        return libxml2mod.xmlOutputBufferWrite(self._o, len, buf)

    def writeString(self, str):
        """Write the content of the string into the output I/O
           buffer, handling I18N transcoding from internal UTF-8.
           The buffer is lossless — it stores data in case of
           partial or delayed writes. """
        return libxml2mod.xmlOutputBufferWriteString(self._o, str)
class inputBuffer(ioReadWrapper):
    """Wrapper for an xmlParserInputBuffer C structure; frees it on
       destruction."""

    def __init__(self, _obj=None):
        self._o = _obj
        ioReadWrapper.__init__(self, _obj=_obj)

    def __del__(self):
        if self._o is not None:
            libxml2mod.xmlFreeParserInputBuffer(self._o)
            self._o = None

    # inputBuffer functions from module xmlIO

    def grow(self, len):
        """Grow the content of the input buffer, preserving the old
           data, handling I18N transcoding to internal UTF-8.
           Used when operating the parser in normal (pull) mode.
           TODO: one should be able to remove one extra copy by
           copying directly onto in->buffer or in->raw. """
        return libxml2mod.xmlParserInputBufferGrow(self._o, len)

    def push(self, len, buf):
        """Push the content of the arry into the input buffer,
           handling I18N transcoding to internal UTF-8.  Used when
           operating the parser in progressive (push) mode. """
        return libxml2mod.xmlParserInputBufferPush(self._o, len, buf)

    def read(self, len):
        """Refresh the content of the input buffer; the old data
           are considered consumed.  Handles I18N transcoding to
           internal UTF-8. """
        return libxml2mod.xmlParserInputBufferRead(self._o, len)

    # inputBuffer functions from module xmlreader

    def Setup(self, reader, URL, encoding, options):
        """Set up an XML reader with new options. """
        reader__o = reader._o if reader is not None else None
        return libxml2mod.xmlTextReaderSetup(reader__o, self._o, URL, encoding, options)

    def newTextReader(self, URI):
        """Create an xmlTextReader structure fed with this input
           buffer. """
        result = libxml2mod.xmlNewTextReader(self._o, URI)
        if result is None:
            raise treeError('xmlNewTextReader() failed')
        reader = xmlTextReader(_obj=result)
        # Keep the buffer alive for as long as the reader exists.
        reader.input = self
        return reader
class xmlReg:
    """Wrapper for an xmlRegexp C structure (a compiled regular
       expression); frees it on destruction."""

    def __init__(self, _obj=None):
        # Hold the C-level wrapper object (None when unattached).
        self._o = _obj

    def __del__(self):
        if self._o is not None:
            libxml2mod.xmlRegFreeRegexp(self._o)
            self._o = None

    # xmlReg functions from module xmlregexp

    def regexpExec(self, content):
        """Check whether the regular expression generates the
           value. """
        return libxml2mod.xmlRegexpExec(self._o, content)

    def regexpIsDeterminist(self):
        """Check whether the regular expression is determinist. """
        return libxml2mod.xmlRegexpIsDeterminist(self._o)

    def regexpPrint(self, output):
        """Print the content of the compiled regular expression. """
        libxml2mod.xmlRegexpPrint(output, self._o)
class relaxNgParserCtxt:
    """Wrapper for an xmlRelaxNGParserCtxt C structure; frees it on
       destruction."""

    def __init__(self, _obj=None):
        # Hold the C-level wrapper object (None when unattached).
        self._o = _obj

    def __del__(self):
        if self._o is not None:
            libxml2mod.xmlRelaxNGFreeParserCtxt(self._o)
            self._o = None

    # relaxNgParserCtxt functions from module relaxng

    def relaxNGParse(self):
        """Parse a schema definition resource and build an internal
           XML Shema struture which can be used to validate
           instances. """
        result = libxml2mod.xmlRelaxNGParse(self._o)
        if result is None:
            raise parserError('xmlRelaxNGParse() failed')
        return relaxNgSchema(_obj=result)

    def relaxParserSetFlag(self, flags):
        """Semi-private function used to pass informations to a
           parser context, a combination of xmlRelaxNGParserFlag. """
        return libxml2mod.xmlRelaxParserSetFlag(self._o, flags)
class relaxNgSchema:
    """Wrapper for an xmlRelaxNG C structure (a parsed RelaxNG
       schema); frees it on destruction."""

    def __init__(self, _obj=None):
        # Hold the C-level wrapper object (None when unattached).
        self._o = _obj

    def __del__(self):
        if self._o is not None:
            libxml2mod.xmlRelaxNGFree(self._o)
            self._o = None

    # relaxNgSchema functions from module relaxng

    def relaxNGDump(self, output):
        """Dump a RelaxNG structure back. """
        libxml2mod.xmlRelaxNGDump(output, self._o)

    def relaxNGDumpTree(self, output):
        """Dump the transformed RelaxNG tree. """
        libxml2mod.xmlRelaxNGDumpTree(output, self._o)

    def relaxNGNewValidCtxt(self):
        """Create an XML RelaxNGs validation context based on this
           schema. """
        result = libxml2mod.xmlRelaxNGNewValidCtxt(self._o)
        if result is None:
            raise treeError('xmlRelaxNGNewValidCtxt() failed')
        ctxt = relaxNgValidCtxt(_obj=result)
        # Keep the schema alive for as long as the context exists.
        ctxt.schema = self
        return ctxt

    # relaxNgSchema functions from module xmlreader

    def RelaxNGSetSchema(self, reader):
        """Use RelaxNG to validate the document as it is processed.
           Activation is only possible before the first Read().
           If @schema is None, RelaxNG validation is desactivated.
           The @schema should not be freed until the reader is
           deallocated or its use has been deactivated. """
        reader__o = reader._o if reader is not None else None
        return libxml2mod.xmlTextReaderRelaxNGSetSchema(reader__o, self._o)
class relaxNgValidCtxt(relaxNgValidCtxtCore):
    """Wrapper for an xmlRelaxNGValidCtxt C structure; frees it on
       destruction."""

    def __init__(self, _obj=None):
        # The owning schema is attached by the creator to keep it
        # alive for the lifetime of this context.
        self.schema = None
        self._o = _obj
        relaxNgValidCtxtCore.__init__(self, _obj=_obj)

    def __del__(self):
        if self._o is not None:
            libxml2mod.xmlRelaxNGFreeValidCtxt(self._o)
            self._o = None

    # relaxNgValidCtxt functions from module relaxng

    def relaxNGValidateDoc(self, doc):
        """Validate a document tree in memory. """
        doc__o = doc._o if doc is not None else None
        return libxml2mod.xmlRelaxNGValidateDoc(self._o, doc__o)

    def relaxNGValidateFullElement(self, doc, elem):
        """Validate a full subtree when
           xmlRelaxNGValidatePushElement() returned 0 and the
           content of the node has been expanded. """
        doc__o = doc._o if doc is not None else None
        elem__o = elem._o if elem is not None else None
        return libxml2mod.xmlRelaxNGValidateFullElement(self._o, doc__o, elem__o)

    def relaxNGValidatePopElement(self, doc, elem):
        """Pop the element end from the RelaxNG validation stack. """
        doc__o = doc._o if doc is not None else None
        elem__o = elem._o if elem is not None else None
        return libxml2mod.xmlRelaxNGValidatePopElement(self._o, doc__o, elem__o)

    def relaxNGValidatePushCData(self, data, len):
        """Check the CData parsed for validation in the current
           stack. """
        return libxml2mod.xmlRelaxNGValidatePushCData(self._o, data, len)

    def relaxNGValidatePushElement(self, doc, elem):
        """Push a new element start on the RelaxNG validation
           stack. """
        doc__o = doc._o if doc is not None else None
        elem__o = elem._o if elem is not None else None
        return libxml2mod.xmlRelaxNGValidatePushElement(self._o, doc__o, elem__o)

    # relaxNgValidCtxt functions from module xmlreader

    def RelaxNGValidateCtxt(self, reader, options):
        """Use this RelaxNG schema context to validate the document
           as it is processed.  Activation is only possible before
           the first Read().  If @ctxt is None, RelaxNG schema
           validation is deactivated. """
        reader__o = reader._o if reader is not None else None
        return libxml2mod.xmlTextReaderRelaxNGValidateCtxt(reader__o, self._o, options)
class SchemaParserCtxt:
    """Wrapper for an xmlSchemaParserCtxt C structure; frees it on
       destruction."""

    def __init__(self, _obj=None):
        # Hold the C-level wrapper object (None when unattached).
        self._o = _obj

    def __del__(self):
        if self._o is not None:
            libxml2mod.xmlSchemaFreeParserCtxt(self._o)
            self._o = None

    # SchemaParserCtxt functions from module xmlschemas

    def schemaParse(self):
        """Parse a schema definition resource and build an internal
           XML Shema struture which can be used to validate
           instances. """
        result = libxml2mod.xmlSchemaParse(self._o)
        if result is None:
            raise parserError('xmlSchemaParse() failed')
        return Schema(_obj=result)
class Schema:
    """Wrapper for an xmlSchema C structure (a parsed XSD schema);
       frees it on destruction."""

    def __init__(self, _obj=None):
        # Hold the C-level wrapper object (None when unattached).
        self._o = _obj

    def __del__(self):
        if self._o is not None:
            libxml2mod.xmlSchemaFree(self._o)
            self._o = None

    # Schema functions from module xmlreader

    def SetSchema(self, reader):
        """Use XSD Schema to validate the document as it is
           processed.  Activation is only possible before the
           first Read().  If @schema is None, Schema validation is
           desactivated.  The @schema should not be freed until
           the reader is deallocated or its use has been
           deactivated. """
        reader__o = reader._o if reader is not None else None
        return libxml2mod.xmlTextReaderSetSchema(reader__o, self._o)

    # Schema functions from module xmlschemas

    def schemaDump(self, output):
        """Dump a Schema structure. """
        libxml2mod.xmlSchemaDump(output, self._o)

    def schemaNewValidCtxt(self):
        """Create an XML Schemas validation context based on this
           schema. """
        result = libxml2mod.xmlSchemaNewValidCtxt(self._o)
        if result is None:
            raise treeError('xmlSchemaNewValidCtxt() failed')
        ctxt = SchemaValidCtxt(_obj=result)
        # Keep the schema alive for as long as the context exists.
        ctxt.schema = self
        return ctxt
class SchemaValidCtxt(SchemaValidCtxtCore):
    """Wrapper for an xmlSchemaValidCtxt C structure; frees it on
       destruction."""

    def __init__(self, _obj=None):
        # The owning schema is attached by the creator to keep it
        # alive for the lifetime of this context.
        self.schema = None
        self._o = _obj
        SchemaValidCtxtCore.__init__(self, _obj=_obj)

    def __del__(self):
        if self._o is not None:
            libxml2mod.xmlSchemaFreeValidCtxt(self._o)
            self._o = None

    # SchemaValidCtxt functions from module xmlreader

    def SchemaValidateCtxt(self, reader, options):
        """Use this W3C XSD schema context to validate the document
           as it is processed.  Activation is only possible before
           the first Read().  If @ctxt is None, XML Schema
           validation is deactivated. """
        reader__o = reader._o if reader is not None else None
        return libxml2mod.xmlTextReaderSchemaValidateCtxt(reader__o, self._o, options)

    # SchemaValidCtxt functions from module xmlschemas

    def schemaIsValid(self):
        """Check whether any error was detected during validation. """
        return libxml2mod.xmlSchemaIsValid(self._o)

    def schemaSetValidOptions(self, options):
        """Set the options to be used during the validation. """
        return libxml2mod.xmlSchemaSetValidOptions(self._o, options)

    def schemaValidCtxtGetOptions(self):
        """Get the validation context options. """
        return libxml2mod.xmlSchemaValidCtxtGetOptions(self._o)

    def schemaValidCtxtGetParserCtxt(self):
        """Allow access to the parser context of the schema
           validation context. """
        result = libxml2mod.xmlSchemaValidCtxtGetParserCtxt(self._o)
        if result is None:
            raise parserError('xmlSchemaValidCtxtGetParserCtxt() failed')
        return parserCtxt(_obj=result)

    def schemaValidateDoc(self, doc):
        """Validate a document tree in memory. """
        doc__o = doc._o if doc is not None else None
        return libxml2mod.xmlSchemaValidateDoc(self._o, doc__o)

    def schemaValidateFile(self, filename, options):
        """Do a schemas validation of the given resource; uses the
           SAX streamable validation internally. """
        return libxml2mod.xmlSchemaValidateFile(self._o, filename, options)

    def schemaValidateOneElement(self, elem):
        """Validate a branch of a tree, starting with the given
           @elem. """
        elem__o = elem._o if elem is not None else None
        return libxml2mod.xmlSchemaValidateOneElement(self._o, elem__o)

    def schemaValidateSetFilename(self, filename):
        """Workaround to provide file error reporting information
           when this is not provided by current APIs. """
        libxml2mod.xmlSchemaValidateSetFilename(self._o, filename)
class xmlTextReaderLocator:
    """Wrapper for an xmlTextReaderLocator C structure (error
       location information for a reader)."""

    def __init__(self, _obj=None):
        # Hold the C-level wrapper object (None when unattached).
        self._o = _obj

    # xmlTextReaderLocator functions from module xmlreader

    def BaseURI(self):
        """Obtain the base URI for the given locator. """
        return libxml2mod.xmlTextReaderLocatorBaseURI(self._o)

    def LineNumber(self):
        """Obtain the line number for the given locator. """
        return libxml2mod.xmlTextReaderLocatorLineNumber(self._o)
class xmlTextReader(xmlTextReaderCore):
def __init__(self, _obj=None):
    self._o = _obj
    # The feeding inputBuffer is attached by the creator so it
    # outlives the reader.
    self.input = None
    xmlTextReaderCore.__init__(self, _obj=_obj)
def __del__(self):
    """Release the underlying xmlTextReader, if any."""
    if self._o is not None:
        libxml2mod.xmlFreeTextReader(self._o)
        self._o = None
#
# xmlTextReader functions from module xmlreader
#
def AttributeCount(self):
    """Provide the number of attributes of the current node. """
    return libxml2mod.xmlTextReaderAttributeCount(self._o)
def BaseUri(self):
    """The base URI of the node. """
    return libxml2mod.xmlTextReaderConstBaseUri(self._o)
def ByteConsumed(self):
    """Provide the current index of the parser used by the reader,
       relative to the start of the current entity.  This is just
       a wrapper around xmlBytesConsumed() for the parser context
       associated with the reader; see xmlBytesConsumed() for
       more information. """
    return libxml2mod.xmlTextReaderByteConsumed(self._o)
def Close(self):
    """Release any resources allocated by the current instance,
       change the state to Closed and close any underlying
       input. """
    return libxml2mod.xmlTextReaderClose(self._o)
def CurrentDoc(self):
    """Hacking interface giving the xmlDocPtr of the document
       currently being accessed by the xmlTextReader.  NOTE: as a
       result of this call the reader will not destroy the
       associated XML document — calling xmlFreeDoc() on the
       result is needed once the reader parsing has finished. """
    result = libxml2mod.xmlTextReaderCurrentDoc(self._o)
    if result is None:
        raise treeError('xmlTextReaderCurrentDoc() failed')
    return xmlDoc(_obj=result)
def CurrentNode(self):
    """Hacking interface giving the xmlNodePtr of the node
       currently being accessed by the xmlTextReader.  Dangerous:
       the underlying node may be destroyed on the next Reads. """
    result = libxml2mod.xmlTextReaderCurrentNode(self._o)
    if result is None:
        raise treeError('xmlTextReaderCurrentNode() failed')
    return xmlNode(_obj=result)
def Depth(self):
    """The depth of the node in the tree. """
    return libxml2mod.xmlTextReaderDepth(self._o)
def Encoding(self):
    """Determine the encoding of the document being read. """
    return libxml2mod.xmlTextReaderConstEncoding(self._o)
def Expand(self):
    """Read the contents of the current node and the full subtree,
       then make the subtree available until the next
       xmlTextReaderRead() call. """
    result = libxml2mod.xmlTextReaderExpand(self._o)
    if result is None:
        raise treeError('xmlTextReaderExpand() failed')
    return xmlNode(_obj=result)
def GetAttribute(self, name):
    """Provide the value of the attribute with the specified
       qualified name. """
    return libxml2mod.xmlTextReaderGetAttribute(self._o, name)
def GetAttributeNo(self, no):
    """Provide the value of the attribute with the specified index
       relative to the containing element. """
    return libxml2mod.xmlTextReaderGetAttributeNo(self._o, no)
def GetAttributeNs(self, localName, namespaceURI):
"""Provides the value of the specified attribute """
ret = libxml2mod.xmlTextReaderGetAttributeNs(self._o, localName, namespaceURI)
return ret
def GetParserColumnNumber(self):
"""Provide the column number of the current parsing point. """
ret = libxml2mod.xmlTextReaderGetParserColumnNumber(self._o)
return ret
def GetParserLineNumber(self):
"""Provide the line number of the current parsing point. """
ret = libxml2mod.xmlTextReaderGetParserLineNumber(self._o)
return ret
def GetParserProp(self, prop):
"""Read the parser internal property. """
ret = libxml2mod.xmlTextReaderGetParserProp(self._o, prop)
return ret
def GetRemainder(self):
"""Method to get the remainder of the buffered XML. this
method stops the parser, set its state to End Of File and
return the input stream with what is left that the parser
did not use. The implementation is not good, the parser
certainly procgressed past what's left in reader->input,
and there is an allocation problem. Best would be to
rewrite it differently. """
ret = libxml2mod.xmlTextReaderGetRemainder(self._o)
if ret is None:raise treeError('xmlTextReaderGetRemainder() failed')
__tmp = inputBuffer(_obj=ret)
return __tmp
def HasAttributes(self):
"""Whether the node has attributes. """
ret = libxml2mod.xmlTextReaderHasAttributes(self._o)
return ret
def HasValue(self):
"""Whether the node can have a text value. """
ret = libxml2mod.xmlTextReaderHasValue(self._o)
return ret
def IsDefault(self):
"""Whether an Attribute node was generated from the default
value defined in the DTD or schema. """
ret = libxml2mod.xmlTextReaderIsDefault(self._o)
return ret
def IsEmptyElement(self):
"""Check if the current node is empty """
ret = libxml2mod.xmlTextReaderIsEmptyElement(self._o)
return ret
def IsNamespaceDecl(self):
"""Determine whether the current node is a namespace
declaration rather than a regular attribute. """
ret = libxml2mod.xmlTextReaderIsNamespaceDecl(self._o)
return ret
def IsValid(self):
"""Retrieve the validity status from the parser context """
ret = libxml2mod.xmlTextReaderIsValid(self._o)
return ret
def LocalName(self):
"""The local name of the node. """
ret = libxml2mod.xmlTextReaderConstLocalName(self._o)
return ret
def LookupNamespace(self, prefix):
"""Resolves a namespace prefix in the scope of the current
element. """
ret = libxml2mod.xmlTextReaderLookupNamespace(self._o, prefix)
return ret
def MoveToAttribute(self, name):
"""Moves the position of the current instance to the attribute
with the specified qualified name. """
ret = libxml2mod.xmlTextReaderMoveToAttribute(self._o, name)
return ret
def MoveToAttributeNo(self, no):
"""Moves the position of the current instance to the attribute
with the specified index relative to the containing element. """
ret = libxml2mod.xmlTextReaderMoveToAttributeNo(self._o, no)
return ret
def MoveToAttributeNs(self, localName, namespaceURI):
"""Moves the position of the current instance to the attribute
with the specified local name and namespace URI. """
ret = libxml2mod.xmlTextReaderMoveToAttributeNs(self._o, localName, namespaceURI)
return ret
def MoveToElement(self):
"""Moves the position of the current instance to the node that
contains the current Attribute node. """
ret = libxml2mod.xmlTextReaderMoveToElement(self._o)
return ret
def MoveToFirstAttribute(self):
"""Moves the position of the current instance to the first
attribute associated with the current node. """
ret = libxml2mod.xmlTextReaderMoveToFirstAttribute(self._o)
return ret
def MoveToNextAttribute(self):
"""Moves the position of the current instance to the next
attribute associated with the current node. """
ret = libxml2mod.xmlTextReaderMoveToNextAttribute(self._o)
return ret
def Name(self):
"""The qualified name of the node, equal to Prefix :LocalName. """
ret = libxml2mod.xmlTextReaderConstName(self._o)
return ret
def NamespaceUri(self):
"""The URI defining the namespace associated with the node. """
ret = libxml2mod.xmlTextReaderConstNamespaceUri(self._o)
return ret
def NewDoc(self, cur, URL, encoding, options):
"""Setup an xmltextReader to parse an XML in-memory document.
The parsing flags @options are a combination of
xmlParserOption. This reuses the existing @reader
xmlTextReader. """
ret = libxml2mod.xmlReaderNewDoc(self._o, cur, URL, encoding, options)
return ret
def NewFd(self, fd, URL, encoding, options):
"""Setup an xmltextReader to parse an XML from a file
descriptor. NOTE that the file descriptor will not be
closed when the reader is closed or reset. The parsing
flags @options are a combination of xmlParserOption. This
reuses the existing @reader xmlTextReader. """
ret = libxml2mod.xmlReaderNewFd(self._o, fd, URL, encoding, options)
return ret
def NewFile(self, filename, encoding, options):
"""parse an XML file from the filesystem or the network. The
parsing flags @options are a combination of
xmlParserOption. This reuses the existing @reader
xmlTextReader. """
ret = libxml2mod.xmlReaderNewFile(self._o, filename, encoding, options)
return ret
def NewMemory(self, buffer, size, URL, encoding, options):
"""Setup an xmltextReader to parse an XML in-memory document.
The parsing flags @options are a combination of
xmlParserOption. This reuses the existing @reader
xmlTextReader. """
ret = libxml2mod.xmlReaderNewMemory(self._o, buffer, size, URL, encoding, options)
return ret
def NewWalker(self, doc):
"""Setup an xmltextReader to parse a preparsed XML document.
This reuses the existing @reader xmlTextReader. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlReaderNewWalker(self._o, doc__o)
return ret
def Next(self):
"""Skip to the node following the current one in document
order while avoiding the subtree if any. """
ret = libxml2mod.xmlTextReaderNext(self._o)
return ret
def NextSibling(self):
"""Skip to the node following the current one in document
order while avoiding the subtree if any. Currently
implemented only for Readers built on a document """
ret = libxml2mod.xmlTextReaderNextSibling(self._o)
return ret
def NodeType(self):
"""Get the node type of the current node Reference:
http://www.gnu.org/software/dotgnu/pnetlib-doc/System/Xml/Xm
lNodeType.html """
ret = libxml2mod.xmlTextReaderNodeType(self._o)
return ret
def Normalization(self):
"""The value indicating whether to normalize white space and
attribute values. Since attribute value and end of line
normalizations are a MUST in the XML specification only the
value true is accepted. The broken bahaviour of accepting
out of range character entities like � is of course not
supported either. """
ret = libxml2mod.xmlTextReaderNormalization(self._o)
return ret
def Prefix(self):
"""A shorthand reference to the namespace associated with the
node. """
ret = libxml2mod.xmlTextReaderConstPrefix(self._o)
return ret
def Preserve(self):
"""This tells the XML Reader to preserve the current node. The
caller must also use xmlTextReaderCurrentDoc() to keep an
handle on the resulting document once parsing has finished """
ret = libxml2mod.xmlTextReaderPreserve(self._o)
if ret is None:raise treeError('xmlTextReaderPreserve() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def QuoteChar(self):
"""The quotation mark character used to enclose the value of
an attribute. """
ret = libxml2mod.xmlTextReaderQuoteChar(self._o)
return ret
def Read(self):
"""Moves the position of the current instance to the next node
in the stream, exposing its properties. """
ret = libxml2mod.xmlTextReaderRead(self._o)
return ret
def ReadAttributeValue(self):
"""Parses an attribute value into one or more Text and
EntityReference nodes. """
ret = libxml2mod.xmlTextReaderReadAttributeValue(self._o)
return ret
def ReadInnerXml(self):
"""Reads the contents of the current node, including child
nodes and markup. """
ret = libxml2mod.xmlTextReaderReadInnerXml(self._o)
return ret
def ReadOuterXml(self):
"""Reads the contents of the current node, including child
nodes and markup. """
ret = libxml2mod.xmlTextReaderReadOuterXml(self._o)
return ret
def ReadState(self):
"""Gets the read state of the reader. """
ret = libxml2mod.xmlTextReaderReadState(self._o)
return ret
def ReadString(self):
"""Reads the contents of an element or a text node as a string. """
ret = libxml2mod.xmlTextReaderReadString(self._o)
return ret
def RelaxNGSetSchema(self, schema):
"""Use RelaxNG to validate the document as it is processed.
Activation is only possible before the first Read(). if
@schema is None, then RelaxNG validation is desactivated. @
The @schema should not be freed until the reader is
deallocated or its use has been deactivated. """
if schema is None: schema__o = None
else: schema__o = schema._o
ret = libxml2mod.xmlTextReaderRelaxNGSetSchema(self._o, schema__o)
return ret
def RelaxNGValidate(self, rng):
"""Use RelaxNG schema to validate the document as it is
processed. Activation is only possible before the first
Read(). If @rng is None, then RelaxNG schema validation is
deactivated. """
ret = libxml2mod.xmlTextReaderRelaxNGValidate(self._o, rng)
return ret
def RelaxNGValidateCtxt(self, ctxt, options):
"""Use RelaxNG schema context to validate the document as it
is processed. Activation is only possible before the first
Read(). If @ctxt is None, then RelaxNG schema validation is
deactivated. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlTextReaderRelaxNGValidateCtxt(self._o, ctxt__o, options)
return ret
def SchemaValidate(self, xsd):
"""Use W3C XSD schema to validate the document as it is
processed. Activation is only possible before the first
Read(). If @xsd is None, then XML Schema validation is
deactivated. """
ret = libxml2mod.xmlTextReaderSchemaValidate(self._o, xsd)
return ret
def SchemaValidateCtxt(self, ctxt, options):
"""Use W3C XSD schema context to validate the document as it
is processed. Activation is only possible before the first
Read(). If @ctxt is None, then XML Schema validation is
deactivated. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlTextReaderSchemaValidateCtxt(self._o, ctxt__o, options)
return ret
def SetParserProp(self, prop, value):
"""Change the parser processing behaviour by changing some of
its internal properties. Note that some properties can only
be changed before any read has been done. """
ret = libxml2mod.xmlTextReaderSetParserProp(self._o, prop, value)
return ret
def SetSchema(self, schema):
"""Use XSD Schema to validate the document as it is processed.
Activation is only possible before the first Read(). if
@schema is None, then Schema validation is desactivated. @
The @schema should not be freed until the reader is
deallocated or its use has been deactivated. """
if schema is None: schema__o = None
else: schema__o = schema._o
ret = libxml2mod.xmlTextReaderSetSchema(self._o, schema__o)
return ret
def Setup(self, input, URL, encoding, options):
"""Setup an XML reader with new options """
if input is None: input__o = None
else: input__o = input._o
ret = libxml2mod.xmlTextReaderSetup(self._o, input__o, URL, encoding, options)
return ret
def Standalone(self):
"""Determine the standalone status of the document being read. """
ret = libxml2mod.xmlTextReaderStandalone(self._o)
return ret
def String(self, str):
"""Get an interned string from the reader, allows for example
to speedup string name comparisons """
ret = libxml2mod.xmlTextReaderConstString(self._o, str)
return ret
def Value(self):
"""Provides the text value of the node if present """
ret = libxml2mod.xmlTextReaderConstValue(self._o)
return ret
def XmlLang(self):
"""The xml:lang scope within which the node resides. """
ret = libxml2mod.xmlTextReaderConstXmlLang(self._o)
return ret
def XmlVersion(self):
"""Determine the XML version of the document being read. """
ret = libxml2mod.xmlTextReaderConstXmlVersion(self._o)
return ret
class URI:
    """Python wrapper around a libxml2 xmlURI structure, freeing the
    underlying C object when garbage collected."""
    def __init__(self, _obj=None):
        # Adopt an existing xmlURIPtr when supplied, else start empty.
        self._o = _obj

    def __del__(self):
        # Release the C structure exactly once.
        if self._o is not None:
            libxml2mod.xmlFreeURI(self._o)
        self._o = None

    # accessors for URI
    def authority(self):
        """Get the authority part from an URI."""
        return libxml2mod.xmlURIGetAuthority(self._o)

    def fragment(self):
        """Get the fragment part from an URI."""
        return libxml2mod.xmlURIGetFragment(self._o)

    def opaque(self):
        """Get the opaque part from an URI."""
        return libxml2mod.xmlURIGetOpaque(self._o)

    def path(self):
        """Get the path part from an URI."""
        return libxml2mod.xmlURIGetPath(self._o)

    def port(self):
        """Get the port part from an URI."""
        return libxml2mod.xmlURIGetPort(self._o)

    def query(self):
        """Get the query part from an URI."""
        return libxml2mod.xmlURIGetQuery(self._o)

    def queryRaw(self):
        """Get the raw (unescaped) query part from an URI."""
        return libxml2mod.xmlURIGetQueryRaw(self._o)

    def scheme(self):
        """Get the scheme part from an URI."""
        return libxml2mod.xmlURIGetScheme(self._o)

    def server(self):
        """Get the server part from an URI."""
        return libxml2mod.xmlURIGetServer(self._o)

    def setAuthority(self, authority):
        """Set the authority part of an URI."""
        libxml2mod.xmlURISetAuthority(self._o, authority)

    def setFragment(self, fragment):
        """Set the fragment part of an URI."""
        libxml2mod.xmlURISetFragment(self._o, fragment)

    def setOpaque(self, opaque):
        """Set the opaque part of an URI."""
        libxml2mod.xmlURISetOpaque(self._o, opaque)

    def setPath(self, path):
        """Set the path part of an URI."""
        libxml2mod.xmlURISetPath(self._o, path)

    def setPort(self, port):
        """Set the port part of an URI."""
        libxml2mod.xmlURISetPort(self._o, port)

    def setQuery(self, query):
        """Set the query part of an URI."""
        libxml2mod.xmlURISetQuery(self._o, query)

    def setQueryRaw(self, query_raw):
        """Set the raw (unescaped) query part of an URI."""
        libxml2mod.xmlURISetQueryRaw(self._o, query_raw)

    def setScheme(self, scheme):
        """Set the scheme part of an URI."""
        libxml2mod.xmlURISetScheme(self._o, scheme)

    def setServer(self, server):
        """Set the server part of an URI."""
        libxml2mod.xmlURISetServer(self._o, server)

    def setUser(self, user):
        """Set the user part of an URI."""
        libxml2mod.xmlURISetUser(self._o, user)

    def user(self):
        """Get the user part from an URI."""
        return libxml2mod.xmlURIGetUser(self._o)

    #
    # URI functions from module uri
    #
    def parseURIReference(self, str):
        """Parse an URI reference string based on RFC 3986 and fill in
        the appropriate fields of the @uri structure.
        URI-reference = URI / relative-ref"""
        return libxml2mod.xmlParseURIReference(self._o, str)

    def printURI(self, stream):
        """Print the URI to the stream @stream."""
        libxml2mod.xmlPrintURI(stream, self._o)

    def saveUri(self):
        """Save the URI as an escaped string."""
        return libxml2mod.xmlSaveUri(self._o)
class ValidCtxt(ValidCtxtCore):
    """Python wrapper around an xmlValidCtxt validation context,
    freeing the underlying C object when garbage collected."""
    def __init__(self, _obj=None):
        self._o = _obj
        ValidCtxtCore.__init__(self, _obj=_obj)

    def __del__(self):
        # Release the C context exactly once.
        if self._o is not None:
            libxml2mod.xmlFreeValidCtxt(self._o)
        self._o = None

    #
    # ValidCtxt functions from module valid
    #
    def validCtxtNormalizeAttributeValue(self, doc, elem, name, value):
        """Perform the validation-related extra step of attribute-value
        normalization: if the declared value is not CDATA, discard
        leading and trailing space (#x20) characters and collapse
        runs of spaces to a single space.  Also checks VC: Standalone
        Document Declaration in P32 and updates ctxt->valid
        accordingly."""
        doc_ptr = doc._o if doc is not None else None
        elem_ptr = elem._o if elem is not None else None
        return libxml2mod.xmlValidCtxtNormalizeAttributeValue(self._o, doc_ptr, elem_ptr, name, value)

    def validateDocument(self, doc):
        """Try to validate the document instance: performs all the
        checks described by the XML Rec, i.e. validates the internal
        and external subset (if present) and the document tree."""
        doc_ptr = doc._o if doc is not None else None
        return libxml2mod.xmlValidateDocument(self._o, doc_ptr)

    def validateDocumentFinal(self, doc):
        """Perform the final step of document validation once all the
        incremental steps have completed: checks all IDREF/IDREFS
        attribute definitions for validity."""
        doc_ptr = doc._o if doc is not None else None
        return libxml2mod.xmlValidateDocumentFinal(self._o, doc_ptr)

    def validateDtd(self, doc, dtd):
        """Try to validate the document against the given DTD instance;
        checks all the definitions in the DTD.  Note that the
        internal subset (if present) is de-coupled (i.e. not used),
        which could cause problems if ID or IDREF is present."""
        doc_ptr = doc._o if doc is not None else None
        dtd_ptr = dtd._o if dtd is not None else None
        return libxml2mod.xmlValidateDtd(self._o, doc_ptr, dtd_ptr)

    def validateDtdFinal(self, doc):
        """Perform the final step of DTD validation once all subsets
        have been parsed: check that ENTITY/ENTITIES attribute
        defaults or possible values match a defined entity, and that
        NOTATION attribute defaults or possible values match a
        defined notation."""
        doc_ptr = doc._o if doc is not None else None
        return libxml2mod.xmlValidateDtdFinal(self._o, doc_ptr)

    def validateElement(self, doc, elem):
        """Try to validate the subtree under an element."""
        doc_ptr = doc._o if doc is not None else None
        elem_ptr = elem._o if elem is not None else None
        return libxml2mod.xmlValidateElement(self._o, doc_ptr, elem_ptr)

    def validateNotationUse(self, doc, notationName):
        """Validate that the given name matches a notation declaration.
        - [ VC: Notation Declared ]"""
        doc_ptr = doc._o if doc is not None else None
        return libxml2mod.xmlValidateNotationUse(self._o, doc_ptr, notationName)

    def validateOneAttribute(self, doc, elem, attr, value):
        """Try to validate a single attribute for an element, covering
        the XML-1.0 checks: Attribute Value Type, Fixed Attribute
        Default, Entity Name, Name Token, ID, IDREF, Notation
        Attributes.  ID/IDREF uniqueness and matching are done
        separately."""
        doc_ptr = doc._o if doc is not None else None
        elem_ptr = elem._o if elem is not None else None
        attr_ptr = attr._o if attr is not None else None
        return libxml2mod.xmlValidateOneAttribute(self._o, doc_ptr, elem_ptr, attr_ptr, value)

    def validateOneElement(self, doc, elem):
        """Try to validate a single element and its attributes,
        covering the XML-1.0 checks Element Valid and Required
        Attribute, then calling xmlValidateOneAttribute() for each
        attribute present.  ID/IDREF checking is done separately."""
        doc_ptr = doc._o if doc is not None else None
        elem_ptr = elem._o if elem is not None else None
        return libxml2mod.xmlValidateOneElement(self._o, doc_ptr, elem_ptr)

    def validateOneNamespace(self, doc, elem, prefix, ns, value):
        """Try to validate a single namespace declaration for an
        element, covering the XML-1.0 checks: Attribute Value Type,
        Fixed Attribute Default, Entity Name, Name Token, ID, IDREF,
        Notation Attributes.  ID/IDREF uniqueness and matching are
        done separately."""
        doc_ptr = doc._o if doc is not None else None
        elem_ptr = elem._o if elem is not None else None
        ns_ptr = ns._o if ns is not None else None
        return libxml2mod.xmlValidateOneNamespace(self._o, doc_ptr, elem_ptr, prefix, ns_ptr, value)

    def validatePopElement(self, doc, elem, qname):
        """Pop the element end from the validation stack."""
        doc_ptr = doc._o if doc is not None else None
        elem_ptr = elem._o if elem is not None else None
        return libxml2mod.xmlValidatePopElement(self._o, doc_ptr, elem_ptr, qname)

    def validatePushCData(self, data, len):
        """Check the parsed CData for validity in the current stack."""
        return libxml2mod.xmlValidatePushCData(self._o, data, len)

    def validatePushElement(self, doc, elem, qname):
        """Push a new element start on the validation stack."""
        doc_ptr = doc._o if doc is not None else None
        elem_ptr = elem._o if elem is not None else None
        return libxml2mod.xmlValidatePushElement(self._o, doc_ptr, elem_ptr, qname)

    def validateRoot(self, doc):
        """Try to validate the root element; performs only the XML-1.0
        check [ VC: Root Element Type ] without recursing or applying
        other checks to the element."""
        doc_ptr = doc._o if doc is not None else None
        return libxml2mod.xmlValidateRoot(self._o, doc_ptr)
class xpathContext:
    """Python wrapper around an xmlXPathContext; freed explicitly via
    xpathFreeContext(), not on garbage collection."""
    def __init__(self, _obj=None):
        # Adopt an existing xmlXPathContextPtr when supplied.
        self._o = _obj

    # accessors for xpathContext
    def contextDoc(self):
        """Get the doc from an xpathContext."""
        obj = libxml2mod.xmlXPathGetContextDoc(self._o)
        if obj is None:
            raise xpathError('xmlXPathGetContextDoc() failed')
        return xmlDoc(_obj=obj)

    def contextNode(self):
        """Get the current node from an xpathContext."""
        obj = libxml2mod.xmlXPathGetContextNode(self._o)
        if obj is None:
            raise xpathError('xmlXPathGetContextNode() failed')
        return xmlNode(_obj=obj)

    def contextPosition(self):
        """Get the current position from an xpathContext."""
        return libxml2mod.xmlXPathGetContextPosition(self._o)

    def contextSize(self):
        """Get the current context size from an xpathContext."""
        return libxml2mod.xmlXPathGetContextSize(self._o)

    def function(self):
        """Get the current function name from an xpathContext."""
        return libxml2mod.xmlXPathGetFunction(self._o)

    def functionURI(self):
        """Get the current function name URI from an xpathContext."""
        return libxml2mod.xmlXPathGetFunctionURI(self._o)

    def setContextDoc(self, doc):
        """Set the doc of an xpathContext."""
        doc_ptr = doc._o if doc is not None else None
        libxml2mod.xmlXPathSetContextDoc(self._o, doc_ptr)

    def setContextNode(self, node):
        """Set the current node of an xpathContext."""
        node_ptr = node._o if node is not None else None
        libxml2mod.xmlXPathSetContextNode(self._o, node_ptr)

    #
    # xpathContext functions from module python
    #
    def registerXPathFunction(self, name, ns_uri, f):
        """Register a Python-written function with the XPath
        interpreter."""
        return libxml2mod.xmlRegisterXPathFunction(self._o, name, ns_uri, f)

    def xpathRegisterVariable(self, name, ns_uri, value):
        """Register a variable with the XPath context."""
        return libxml2mod.xmlXPathRegisterVariable(self._o, name, ns_uri, value)

    #
    # xpathContext functions from module xpath
    #
    def xpathContextSetCache(self, active, value, options):
        """Create/free an object cache on the XPath context.  When
        activated, XPath objects (xmlXPathObject) are cached
        internally for reuse.  @options 0 sets XPath object caching;
        @value sets the maximum number of objects cached per slot
        (there are 5 slots: node-set, string, number, boolean, misc;
        use <0 for the default of 100).  Other @options values
        currently have no effect."""
        return libxml2mod.xmlXPathContextSetCache(self._o, active, value, options)

    def xpathEval(self, str):
        """Evaluate the XPath Location Path in the given context."""
        res = libxml2mod.xmlXPathEval(str, self._o)
        if res is None:
            raise xpathError('xmlXPathEval() failed')
        return xpathObjectRet(res)

    def xpathEvalExpression(self, str):
        """Evaluate the XPath expression in the given context."""
        res = libxml2mod.xmlXPathEvalExpression(str, self._o)
        if res is None:
            raise xpathError('xmlXPathEvalExpression() failed')
        return xpathObjectRet(res)

    def xpathFreeContext(self):
        """Free up an xmlXPathContext."""
        libxml2mod.xmlXPathFreeContext(self._o)

    #
    # xpathContext functions from module xpathInternals
    #
    def xpathNewParserContext(self, str):
        """Create a new xmlXPathParserContext."""
        obj = libxml2mod.xmlXPathNewParserContext(str, self._o)
        if obj is None:
            raise xpathError('xmlXPathNewParserContext() failed')
        return xpathParserContext(_obj=obj)

    def xpathNsLookup(self, prefix):
        """Search the namespace declaration array of the context for
        the namespace name associated with the given prefix."""
        return libxml2mod.xmlXPathNsLookup(self._o, prefix)

    def xpathRegisterAllFunctions(self):
        """Register all default XPath functions in this context."""
        libxml2mod.xmlXPathRegisterAllFunctions(self._o)

    def xpathRegisterNs(self, prefix, ns_uri):
        """Register a new namespace.  If @ns_uri is None, the namespace
        is unregistered."""
        return libxml2mod.xmlXPathRegisterNs(self._o, prefix, ns_uri)

    def xpathRegisteredFuncsCleanup(self):
        """Clean up the XPath context data associated with registered
        functions."""
        libxml2mod.xmlXPathRegisteredFuncsCleanup(self._o)

    def xpathRegisteredNsCleanup(self):
        """Clean up the XPath context data associated with registered
        namespaces."""
        libxml2mod.xmlXPathRegisteredNsCleanup(self._o)

    def xpathRegisteredVariablesCleanup(self):
        """Clean up the XPath context data associated with registered
        variables."""
        libxml2mod.xmlXPathRegisteredVariablesCleanup(self._o)

    def xpathVariableLookup(self, name):
        """Search the Variable array of the context for the given
        variable value."""
        res = libxml2mod.xmlXPathVariableLookup(self._o, name)
        if res is None:
            raise xpathError('xmlXPathVariableLookup() failed')
        return xpathObjectRet(res)

    def xpathVariableLookupNS(self, name, ns_uri):
        """Search the Variable array of the context for the given
        namespaced variable value."""
        res = libxml2mod.xmlXPathVariableLookupNS(self._o, name, ns_uri)
        if res is None:
            raise xpathError('xmlXPathVariableLookupNS() failed')
        return xpathObjectRet(res)

    #
    # xpathContext functions from module xpointer
    #
    def xpointerEval(self, str):
        """Evaluate the XPath Location Path in the given context."""
        res = libxml2mod.xmlXPtrEval(str, self._o)
        if res is None:
            raise treeError('xmlXPtrEval() failed')
        return xpathObjectRet(res)
class xpathParserContext:
def __init__(self, _obj=None):
if _obj != None:self._o = _obj;return
self._o = None
# accessors for xpathParserContext
def context(self):
"""Get the xpathContext from an xpathParserContext """
ret = libxml2mod.xmlXPathParserGetContext(self._o)
if ret is None:raise xpathError('xmlXPathParserGetContext() failed')
__tmp = xpathContext(_obj=ret)
return __tmp
#
# xpathParserContext functions from module xpathInternals
#
def xpathAddValues(self):
"""Implement the add operation on XPath objects: The numeric
operators convert their operands to numbers as if by
calling the number function. """
libxml2mod.xmlXPathAddValues(self._o)
def xpathBooleanFunction(self, nargs):
"""Implement the boolean() XPath function boolean
boolean(object) The boolean function converts its argument
to a boolean as follows: - a number is true if and only if
it is neither positive or negative zero nor NaN - a
node-set is true if and only if it is non-empty - a string
is true if and only if its length is non-zero """
libxml2mod.xmlXPathBooleanFunction(self._o, nargs)
def xpathCeilingFunction(self, nargs):
"""Implement the ceiling() XPath function number
ceiling(number) The ceiling function returns the smallest
(closest to negative infinity) number that is not less than
the argument and that is an integer. """
libxml2mod.xmlXPathCeilingFunction(self._o, nargs)
def xpathCompareValues(self, inf, strict):
"""Implement the compare operation on XPath objects: @arg1 <
@arg2 (1, 1, ... @arg1 <= @arg2 (1, 0, ... @arg1 >
@arg2 (0, 1, ... @arg1 >= @arg2 (0, 0, ... When
neither object to be compared is a node-set and the
operator is <=, <, >=, >, then the objects are compared by
converted both objects to numbers and comparing the numbers
according to IEEE 754. The < comparison will be true if and
only if the first number is less than the second number.
The <= comparison will be true if and only if the first
number is less than or equal to the second number. The >
comparison will be true if and only if the first number is
greater than the second number. The >= comparison will be
true if and only if the first number is greater than or
equal to the second number. """
ret = libxml2mod.xmlXPathCompareValues(self._o, inf, strict)
return ret
def xpathConcatFunction(self, nargs):
"""Implement the concat() XPath function string concat(string,
string, string*) The concat function returns the
concatenation of its arguments. """
libxml2mod.xmlXPathConcatFunction(self._o, nargs)
def xpathContainsFunction(self, nargs):
"""Implement the contains() XPath function boolean
contains(string, string) The contains function returns true
if the first argument string contains the second argument
string, and otherwise returns false. """
libxml2mod.xmlXPathContainsFunction(self._o, nargs)
def xpathCountFunction(self, nargs):
"""Implement the count() XPath function number count(node-set) """
libxml2mod.xmlXPathCountFunction(self._o, nargs)
def xpathDivValues(self):
"""Implement the div operation on XPath objects @arg1 / @arg2:
The numeric operators convert their operands to numbers as
if by calling the number function. """
libxml2mod.xmlXPathDivValues(self._o)
def xpathEqualValues(self):
"""Implement the equal operation on XPath objects content:
@arg1 == @arg2 """
ret = libxml2mod.xmlXPathEqualValues(self._o)
return ret
def xpathErr(self, error):
"""Handle an XPath error """
libxml2mod.xmlXPathErr(self._o, error)
def xpathEvalExpr(self):
"""Parse and evaluate an XPath expression in the given
context, then push the result on the context stack """
libxml2mod.xmlXPathEvalExpr(self._o)
def xpathFalseFunction(self, nargs):
"""Implement the false() XPath function boolean false() """
libxml2mod.xmlXPathFalseFunction(self._o, nargs)
def xpathFloorFunction(self, nargs):
"""Implement the floor() XPath function number floor(number)
The floor function returns the largest (closest to positive
infinity) number that is not greater than the argument and
that is an integer. """
libxml2mod.xmlXPathFloorFunction(self._o, nargs)
def xpathFreeParserContext(self):
"""Free up an xmlXPathParserContext """
libxml2mod.xmlXPathFreeParserContext(self._o)
def xpathIdFunction(self, nargs):
"""Implement the id() XPath function node-set id(object) The
id function selects elements by their unique ID (see [5.2.1
Unique IDs]). When the argument to id is of type node-set,
then the result is the union of the result of applying id
to the string value of each of the nodes in the argument
node-set. When the argument to id is of any other type, the
argument is converted to a string as if by a call to the
string function; the string is split into a
whitespace-separated list of tokens (whitespace is any
sequence of characters matching the production S); the
result is a node-set containing the elements in the same
document as the context node that have a unique ID equal to
any of the tokens in the list. """
libxml2mod.xmlXPathIdFunction(self._o, nargs)
def xpathLangFunction(self, nargs):
"""Implement the lang() XPath function boolean lang(string)
The lang function returns true or false depending on
whether the language of the context node as specified by
xml:lang attributes is the same as or is a sublanguage of
the language specified by the argument string. The language
of the context node is determined by the value of the
xml:lang attribute on the context node, or, if the context
node has no xml:lang attribute, by the value of the
xml:lang attribute on the nearest ancestor of the context
node that has an xml:lang attribute. If there is no such
attribute, then lang """
libxml2mod.xmlXPathLangFunction(self._o, nargs)
    def xpathLastFunction(self, nargs):
        """Implement the XPath last() function: number last().

        Returns the number of nodes in the context node list. """
        libxml2mod.xmlXPathLastFunction(self._o, nargs)
    def xpathLocalNameFunction(self, nargs):
        """Implement the XPath local-name() function: string
        local-name(node-set?).

        Returns the local part of the name of the first node (in
        document order) of the argument node-set; the empty string when
        the node-set is empty or the first node has no name.  Without
        an argument it defaults to the context node. """
        libxml2mod.xmlXPathLocalNameFunction(self._o, nargs)
    def xpathModValues(self):
        """Implement the mod operation on the two XPath objects on the
        stack: arg1 mod arg2.

        The numeric operators convert their operands to numbers as if
        by calling the number function. """
        libxml2mod.xmlXPathModValues(self._o)
    def xpathMultValues(self):
        """Implement the multiply operation on the XPath objects on the
        stack.

        The numeric operators convert their operands to numbers as if
        by calling the number function. """
        libxml2mod.xmlXPathMultValues(self._o)
    def xpathNamespaceURIFunction(self, nargs):
        """Implement the XPath namespace-uri() function: string
        namespace-uri(node-set?).

        Returns the namespace URI of the expanded name of the first
        node (in document order) of the argument node-set; the empty
        string when the node-set is empty, the first node has no name,
        or the expanded name has no namespace URI.  Without an argument
        it defaults to the context node. """
        libxml2mod.xmlXPathNamespaceURIFunction(self._o, nargs)
def xpathNextAncestor(self, cur):
"""Traversal function for the "ancestor" direction the
ancestor axis contains the ancestors of the context node;
the ancestors of the context node consist of the parent of
context node and the parent's parent and so on; the nodes
are ordered in reverse document order; thus the parent is
the first node on the axis, and the parent's parent is the
second node on the axis """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextAncestor(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextAncestor() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextAncestorOrSelf(self, cur):
"""Traversal function for the "ancestor-or-self" direction he
ancestor-or-self axis contains the context node and
ancestors of the context node in reverse document order;
thus the context node is the first node on the axis, and
the context node's parent the second; parent here is
defined the same as with the parent axis. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextAncestorOrSelf(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextAncestorOrSelf() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextAttribute(self, cur):
"""Traversal function for the "attribute" direction TODO:
support DTD inherited default attributes """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextAttribute(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextAttribute() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextChild(self, cur):
"""Traversal function for the "child" direction The child axis
contains the children of the context node in document order. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextChild(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextChild() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextDescendant(self, cur):
"""Traversal function for the "descendant" direction the
descendant axis contains the descendants of the context
node in document order; a descendant is a child or a child
of a child and so on. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextDescendant(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextDescendant() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextDescendantOrSelf(self, cur):
"""Traversal function for the "descendant-or-self" direction
the descendant-or-self axis contains the context node and
the descendants of the context node in document order; thus
the context node is the first node on the axis, and the
first child of the context node is the second node on the
axis """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextDescendantOrSelf(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextDescendantOrSelf() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextFollowing(self, cur):
"""Traversal function for the "following" direction The
following axis contains all nodes in the same document as
the context node that are after the context node in
document order, excluding any descendants and excluding
attribute nodes and namespace nodes; the nodes are ordered
in document order """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextFollowing(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextFollowing() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextFollowingSibling(self, cur):
"""Traversal function for the "following-sibling" direction
The following-sibling axis contains the following siblings
of the context node in document order. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextFollowingSibling(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextFollowingSibling() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextNamespace(self, cur):
"""Traversal function for the "namespace" direction the
namespace axis contains the namespace nodes of the context
node; the order of nodes on this axis is
implementation-defined; the axis will be empty unless the
context node is an element We keep the XML namespace node
at the end of the list. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextNamespace(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextNamespace() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextParent(self, cur):
"""Traversal function for the "parent" direction The parent
axis contains the parent of the context node, if there is
one. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextParent(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextParent() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextPreceding(self, cur):
"""Traversal function for the "preceding" direction the
preceding axis contains all nodes in the same document as
the context node that are before the context node in
document order, excluding any ancestors and excluding
attribute nodes and namespace nodes; the nodes are ordered
in reverse document order """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextPreceding(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextPreceding() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextPrecedingSibling(self, cur):
"""Traversal function for the "preceding-sibling" direction
The preceding-sibling axis contains the preceding siblings
of the context node in reverse document order; the first
preceding sibling is first on the axis; the sibling
preceding that node is the second on the axis and so on. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextPrecedingSibling(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextPrecedingSibling() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextSelf(self, cur):
"""Traversal function for the "self" direction The self axis
contains just the context node itself """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextSelf(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextSelf() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
    def xpathNormalizeFunction(self, nargs):
        """Implement the XPath normalize-space() function: string
        normalize-space(string?).

        Returns the argument with whitespace normalized: leading and
        trailing whitespace stripped, and internal runs of whitespace
        collapsed to a single space.  Whitespace characters are those
        allowed by the S production in XML.  Without an argument it
        defaults to the string-value of the context node. """
        libxml2mod.xmlXPathNormalizeFunction(self._o, nargs)
def xpathNotEqualValues(self):
"""Implement the equal operation on XPath objects content:
@arg1 == @arg2 """
ret = libxml2mod.xmlXPathNotEqualValues(self._o)
return ret
    def xpathNotFunction(self, nargs):
        """Implement the XPath not() function: boolean not(boolean).

        Returns true if the argument is false, and false otherwise. """
        libxml2mod.xmlXPathNotFunction(self._o, nargs)
    def xpathNumberFunction(self, nargs):
        """Implement the XPath number() function: number
        number(object?). """
        libxml2mod.xmlXPathNumberFunction(self._o, nargs)
def xpathParseNCName(self):
"""parse an XML namespace non qualified name. [NS 3] NCName
::= (Letter | '_') (NCNameChar)* [NS 4] NCNameChar ::=
Letter | Digit | '.' | '-' | '_' | CombiningChar | Extender """
ret = libxml2mod.xmlXPathParseNCName(self._o)
return ret
def xpathParseName(self):
"""parse an XML name [4] NameChar ::= Letter | Digit | '.' |
'-' | '_' | ':' | CombiningChar | Extender [5] Name ::=
(Letter | '_' | ':') (NameChar)* """
ret = libxml2mod.xmlXPathParseName(self._o)
return ret
def xpathPopBoolean(self):
"""Pops a boolean from the stack, handling conversion if
needed. Check error with #xmlXPathCheckError. """
ret = libxml2mod.xmlXPathPopBoolean(self._o)
return ret
def xpathPopNumber(self):
"""Pops a number from the stack, handling conversion if
needed. Check error with #xmlXPathCheckError. """
ret = libxml2mod.xmlXPathPopNumber(self._o)
return ret
def xpathPopString(self):
"""Pops a string from the stack, handling conversion if
needed. Check error with #xmlXPathCheckError. """
ret = libxml2mod.xmlXPathPopString(self._o)
return ret
    def xpathPositionFunction(self, nargs):
        """Implement the XPath position() function: number position().

        Returns the position of the context node in the context node
        list.  The first position is 1, so the last position equals
        last(). """
        libxml2mod.xmlXPathPositionFunction(self._o, nargs)
    def xpathRoot(self):
        """Initialize the evaluation context to the root of the
        document. """
        libxml2mod.xmlXPathRoot(self._o)
    def xpathRoundFunction(self, nargs):
        """Implement the XPath round() function: number round(number).

        Returns the integer closest to the argument; when two integers
        are equally close, the even one is returned. """
        libxml2mod.xmlXPathRoundFunction(self._o, nargs)
    def xpathStartsWithFunction(self, nargs):
        """Implement the XPath starts-with() function: boolean
        starts-with(string, string).

        Returns true if the first argument string starts with the
        second argument string, and false otherwise. """
        libxml2mod.xmlXPathStartsWithFunction(self._o, nargs)
    def xpathStringFunction(self, nargs):
        """Implement the XPath string() function: string
        string(object?).

        Converts an object to a string following the XPath 1.0 rules:
        a node-set is converted to the string-value of its first node
        in document order (the empty string when the set is empty); a
        number uses the XPath decimal representation (NaN -> "NaN",
        +/-0 -> "0", +/-infinity -> "Infinity"/"-Infinity", integers
        without a decimal point, other values with exactly as many
        fractional digits as needed to be unambiguous under IEEE 754);
        booleans become "true"/"false".  Without an argument it
        defaults to a node-set holding just the context node. """
        libxml2mod.xmlXPathStringFunction(self._o, nargs)
    def xpathStringLengthFunction(self, nargs):
        """Implement the XPath string-length() function: number
        string-length(string?).

        Returns the number of characters in the string (see [3.6
        Strings]).  Without an argument it defaults to the
        string-value of the context node. """
        libxml2mod.xmlXPathStringLengthFunction(self._o, nargs)
    def xpathSubValues(self):
        """Implement the subtraction operation on the XPath objects on
        the stack.

        The numeric operators convert their operands to numbers as if
        by calling the number function. """
        libxml2mod.xmlXPathSubValues(self._o)
    def xpathSubstringAfterFunction(self, nargs):
        """Implement the XPath substring-after() function: string
        substring-after(string, string).

        Returns the substring of the first argument that follows the
        first occurrence of the second argument, or the empty string
        when the first argument does not contain the second.  For
        example, substring-after("1999/04/01","/") returns "04/01"
        and substring-after("1999/04/01","19") returns "99/04/01". """
        libxml2mod.xmlXPathSubstringAfterFunction(self._o, nargs)
    def xpathSubstringBeforeFunction(self, nargs):
        """Implement the XPath substring-before() function: string
        substring-before(string, string).

        Returns the substring of the first argument that precedes the
        first occurrence of the second argument, or the empty string
        when the first argument does not contain the second.  For
        example, substring-before("1999/04/01","/") returns "1999". """
        libxml2mod.xmlXPathSubstringBeforeFunction(self._o, nargs)
    def xpathSubstringFunction(self, nargs):
        """Implement the XPath substring() function: string
        substring(string, number, number?).

        Returns the substring of the first argument starting at the
        position given by the second argument, with the length given
        by the third; without a third argument it runs to the end of
        the string.  Character positions are 1-based, and the
        position/length arithmetic follows IEEE 754 rules, e.g.:
        substring("12345",2,3) -> "234"; substring("12345",2) ->
        "2345"; substring("12345", 1.5, 2.6) -> "234";
        substring("12345", 0, 3) -> "12"; substring("12345", 0 div 0,
        3) -> ""; substring("12345", 1, 0 div 0) -> "";
        substring("12345", -42, 1 div 0) -> "12345";
        substring("12345", -1 div 0, 1 div 0) -> "". """
        libxml2mod.xmlXPathSubstringFunction(self._o, nargs)
    def xpathSumFunction(self, nargs):
        """Implement the XPath sum() function: number sum(node-set).

        Returns the sum of the numeric values of the nodes in the
        argument node-set. """
        libxml2mod.xmlXPathSumFunction(self._o, nargs)
    def xpathTranslateFunction(self, nargs):
        """Implement the XPath translate() function: string
        translate(string, string, string).

        Returns the first argument with each occurrence of a character
        from the second argument replaced by the character at the
        corresponding position of the third argument; e.g.
        translate("bar","abc","ABC") returns "BAr".  Characters of the
        second argument with no counterpart in the third (the second
        string being longer) are removed from the result, e.g.
        translate("--aaa--","abc-","ABC") returns "AAA". """
        libxml2mod.xmlXPathTranslateFunction(self._o, nargs)
    def xpathTrueFunction(self, nargs):
        """Implement the XPath true() function: boolean true(). """
        libxml2mod.xmlXPathTrueFunction(self._o, nargs)
    def xpathValueFlipSign(self):
        """Implement the unary minus operation on the XPath object on
        the stack.

        The numeric operators convert their operands to numbers as if
        by calling the number function. """
        libxml2mod.xmlXPathValueFlipSign(self._o)
    def xpatherror(self, file, line, no):
        """Report a formatted XPath error message.

        file and line identify where the error was detected; no is the
        error code passed through to the C layer. """
        libxml2mod.xmlXPatherror(self._o, file, line, no)
#
# xpathParserContext functions from module xpointer
#
    def xpointerEvalRangePredicate(self):
        """Evaluate an XPointer predicate against a Location Set.

        [8] Predicate ::= '[' PredicateExpr ']'
        [9] PredicateExpr ::= Expr

        Works like xmlXPathEvalPredicate() but operates on a Location
        Set instead of a node set. """
        libxml2mod.xmlXPtrEvalRangePredicate(self._o)
    def xpointerRangeToFunction(self, nargs):
        """Implement the XPointer range-to() function. """
        libxml2mod.xmlXPtrRangeToFunction(self._o, nargs)
# xlinkShow: values of the XLink 'show' behaviour
XLINK_SHOW_NONE = 0
XLINK_SHOW_NEW = 1
XLINK_SHOW_EMBED = 2
XLINK_SHOW_REPLACE = 3
# xmlRelaxNGParserFlag: flags for the Relax-NG schema parser
XML_RELAXNGP_NONE = 0
XML_RELAXNGP_FREE_DOC = 1
XML_RELAXNGP_CRNG = 2
# xmlBufferAllocationScheme: growth strategies for xmlBuffer
XML_BUFFER_ALLOC_DOUBLEIT = 1
XML_BUFFER_ALLOC_EXACT = 2
XML_BUFFER_ALLOC_IMMUTABLE = 3
XML_BUFFER_ALLOC_IO = 4
XML_BUFFER_ALLOC_HYBRID = 5
# xmlParserSeverities: severity levels reported by parser callbacks
XML_PARSER_SEVERITY_VALIDITY_WARNING = 1
XML_PARSER_SEVERITY_VALIDITY_ERROR = 2
XML_PARSER_SEVERITY_WARNING = 3
XML_PARSER_SEVERITY_ERROR = 4
# xmlAttributeDefault: DTD attribute default-value kinds
XML_ATTRIBUTE_NONE = 1
XML_ATTRIBUTE_REQUIRED = 2
XML_ATTRIBUTE_IMPLIED = 3
XML_ATTRIBUTE_FIXED = 4
# xmlSchemaValType: built-in XML Schema datatype identifiers
XML_SCHEMAS_UNKNOWN = 0
XML_SCHEMAS_STRING = 1
XML_SCHEMAS_NORMSTRING = 2
XML_SCHEMAS_DECIMAL = 3
XML_SCHEMAS_TIME = 4
XML_SCHEMAS_GDAY = 5
XML_SCHEMAS_GMONTH = 6
XML_SCHEMAS_GMONTHDAY = 7
XML_SCHEMAS_GYEAR = 8
XML_SCHEMAS_GYEARMONTH = 9
XML_SCHEMAS_DATE = 10
XML_SCHEMAS_DATETIME = 11
XML_SCHEMAS_DURATION = 12
XML_SCHEMAS_FLOAT = 13
XML_SCHEMAS_DOUBLE = 14
XML_SCHEMAS_BOOLEAN = 15
XML_SCHEMAS_TOKEN = 16
XML_SCHEMAS_LANGUAGE = 17
XML_SCHEMAS_NMTOKEN = 18
XML_SCHEMAS_NMTOKENS = 19
XML_SCHEMAS_NAME = 20
XML_SCHEMAS_QNAME = 21
XML_SCHEMAS_NCNAME = 22
XML_SCHEMAS_ID = 23
XML_SCHEMAS_IDREF = 24
XML_SCHEMAS_IDREFS = 25
XML_SCHEMAS_ENTITY = 26
XML_SCHEMAS_ENTITIES = 27
XML_SCHEMAS_NOTATION = 28
XML_SCHEMAS_ANYURI = 29
XML_SCHEMAS_INTEGER = 30
XML_SCHEMAS_NPINTEGER = 31
XML_SCHEMAS_NINTEGER = 32
XML_SCHEMAS_NNINTEGER = 33
XML_SCHEMAS_PINTEGER = 34
XML_SCHEMAS_INT = 35
XML_SCHEMAS_UINT = 36
XML_SCHEMAS_LONG = 37
XML_SCHEMAS_ULONG = 38
XML_SCHEMAS_SHORT = 39
XML_SCHEMAS_USHORT = 40
XML_SCHEMAS_BYTE = 41
XML_SCHEMAS_UBYTE = 42
XML_SCHEMAS_HEXBINARY = 43
XML_SCHEMAS_BASE64BINARY = 44
XML_SCHEMAS_ANYTYPE = 45
XML_SCHEMAS_ANYSIMPLETYPE = 46
# xmlParserInputState: internal states of the push/pull parser
XML_PARSER_EOF = -1
XML_PARSER_START = 0
XML_PARSER_MISC = 1
XML_PARSER_PI = 2
XML_PARSER_DTD = 3
XML_PARSER_PROLOG = 4
XML_PARSER_COMMENT = 5
XML_PARSER_START_TAG = 6
XML_PARSER_CONTENT = 7
XML_PARSER_CDATA_SECTION = 8
XML_PARSER_END_TAG = 9
XML_PARSER_ENTITY_DECL = 10
XML_PARSER_ENTITY_VALUE = 11
XML_PARSER_ATTRIBUTE_VALUE = 12
XML_PARSER_SYSTEM_LITERAL = 13
XML_PARSER_EPILOG = 14
XML_PARSER_IGNORE = 15
XML_PARSER_PUBLIC_LITERAL = 16
# xmlEntityType: kinds of XML entity declarations
XML_INTERNAL_GENERAL_ENTITY = 1
XML_EXTERNAL_GENERAL_PARSED_ENTITY = 2
XML_EXTERNAL_GENERAL_UNPARSED_ENTITY = 3
XML_INTERNAL_PARAMETER_ENTITY = 4
XML_EXTERNAL_PARAMETER_ENTITY = 5
XML_INTERNAL_PREDEFINED_ENTITY = 6
# xmlSaveOption: bit flags for the serialization (save) API
XML_SAVE_FORMAT = 1
XML_SAVE_NO_DECL = 2
XML_SAVE_NO_EMPTY = 4
XML_SAVE_NO_XHTML = 8
XML_SAVE_XHTML = 16
XML_SAVE_AS_XML = 32
XML_SAVE_AS_HTML = 64
XML_SAVE_WSNONSIG = 128
# xmlPatternFlags: bit flags for pattern compilation
XML_PATTERN_DEFAULT = 0
XML_PATTERN_XPATH = 1
XML_PATTERN_XSSEL = 2
XML_PATTERN_XSFIELD = 4
# xmlParserErrors: error/warning codes raised by libxml2; numeric ranges
# group subsystems (core parser 0+, namespaces 200+, DTD validation 500+,
# HTML 800+, Relax-NG 1000+, XPath/XPointer 1200+, tree 1300+, save 1400+,
# I/O 1500+, XInclude 1600+, catalogs 1650+, Schemas parsing 1700+).
XML_ERR_OK = 0
XML_ERR_INTERNAL_ERROR = 1
XML_ERR_NO_MEMORY = 2
XML_ERR_DOCUMENT_START = 3
XML_ERR_DOCUMENT_EMPTY = 4
XML_ERR_DOCUMENT_END = 5
XML_ERR_INVALID_HEX_CHARREF = 6
XML_ERR_INVALID_DEC_CHARREF = 7
XML_ERR_INVALID_CHARREF = 8
XML_ERR_INVALID_CHAR = 9
XML_ERR_CHARREF_AT_EOF = 10
XML_ERR_CHARREF_IN_PROLOG = 11
XML_ERR_CHARREF_IN_EPILOG = 12
XML_ERR_CHARREF_IN_DTD = 13
XML_ERR_ENTITYREF_AT_EOF = 14
XML_ERR_ENTITYREF_IN_PROLOG = 15
XML_ERR_ENTITYREF_IN_EPILOG = 16
XML_ERR_ENTITYREF_IN_DTD = 17
XML_ERR_PEREF_AT_EOF = 18
XML_ERR_PEREF_IN_PROLOG = 19
XML_ERR_PEREF_IN_EPILOG = 20
XML_ERR_PEREF_IN_INT_SUBSET = 21
XML_ERR_ENTITYREF_NO_NAME = 22
XML_ERR_ENTITYREF_SEMICOL_MISSING = 23
XML_ERR_PEREF_NO_NAME = 24
XML_ERR_PEREF_SEMICOL_MISSING = 25
XML_ERR_UNDECLARED_ENTITY = 26
XML_WAR_UNDECLARED_ENTITY = 27
XML_ERR_UNPARSED_ENTITY = 28
XML_ERR_ENTITY_IS_EXTERNAL = 29
XML_ERR_ENTITY_IS_PARAMETER = 30
XML_ERR_UNKNOWN_ENCODING = 31
XML_ERR_UNSUPPORTED_ENCODING = 32
XML_ERR_STRING_NOT_STARTED = 33
XML_ERR_STRING_NOT_CLOSED = 34
XML_ERR_NS_DECL_ERROR = 35
XML_ERR_ENTITY_NOT_STARTED = 36
XML_ERR_ENTITY_NOT_FINISHED = 37
XML_ERR_LT_IN_ATTRIBUTE = 38
XML_ERR_ATTRIBUTE_NOT_STARTED = 39
XML_ERR_ATTRIBUTE_NOT_FINISHED = 40
XML_ERR_ATTRIBUTE_WITHOUT_VALUE = 41
XML_ERR_ATTRIBUTE_REDEFINED = 42
XML_ERR_LITERAL_NOT_STARTED = 43
XML_ERR_LITERAL_NOT_FINISHED = 44
XML_ERR_COMMENT_NOT_FINISHED = 45
XML_ERR_PI_NOT_STARTED = 46
XML_ERR_PI_NOT_FINISHED = 47
XML_ERR_NOTATION_NOT_STARTED = 48
XML_ERR_NOTATION_NOT_FINISHED = 49
XML_ERR_ATTLIST_NOT_STARTED = 50
XML_ERR_ATTLIST_NOT_FINISHED = 51
XML_ERR_MIXED_NOT_STARTED = 52
XML_ERR_MIXED_NOT_FINISHED = 53
XML_ERR_ELEMCONTENT_NOT_STARTED = 54
XML_ERR_ELEMCONTENT_NOT_FINISHED = 55
XML_ERR_XMLDECL_NOT_STARTED = 56
XML_ERR_XMLDECL_NOT_FINISHED = 57
XML_ERR_CONDSEC_NOT_STARTED = 58
XML_ERR_CONDSEC_NOT_FINISHED = 59
XML_ERR_EXT_SUBSET_NOT_FINISHED = 60
XML_ERR_DOCTYPE_NOT_FINISHED = 61
XML_ERR_MISPLACED_CDATA_END = 62
XML_ERR_CDATA_NOT_FINISHED = 63
XML_ERR_RESERVED_XML_NAME = 64
XML_ERR_SPACE_REQUIRED = 65
XML_ERR_SEPARATOR_REQUIRED = 66
XML_ERR_NMTOKEN_REQUIRED = 67
XML_ERR_NAME_REQUIRED = 68
XML_ERR_PCDATA_REQUIRED = 69
XML_ERR_URI_REQUIRED = 70
XML_ERR_PUBID_REQUIRED = 71
XML_ERR_LT_REQUIRED = 72
XML_ERR_GT_REQUIRED = 73
XML_ERR_LTSLASH_REQUIRED = 74
XML_ERR_EQUAL_REQUIRED = 75
XML_ERR_TAG_NAME_MISMATCH = 76
XML_ERR_TAG_NOT_FINISHED = 77
XML_ERR_STANDALONE_VALUE = 78
XML_ERR_ENCODING_NAME = 79
XML_ERR_HYPHEN_IN_COMMENT = 80
XML_ERR_INVALID_ENCODING = 81
XML_ERR_EXT_ENTITY_STANDALONE = 82
XML_ERR_CONDSEC_INVALID = 83
XML_ERR_VALUE_REQUIRED = 84
XML_ERR_NOT_WELL_BALANCED = 85
XML_ERR_EXTRA_CONTENT = 86
XML_ERR_ENTITY_CHAR_ERROR = 87
XML_ERR_ENTITY_PE_INTERNAL = 88
XML_ERR_ENTITY_LOOP = 89
XML_ERR_ENTITY_BOUNDARY = 90
XML_ERR_INVALID_URI = 91
XML_ERR_URI_FRAGMENT = 92
XML_WAR_CATALOG_PI = 93
XML_ERR_NO_DTD = 94
XML_ERR_CONDSEC_INVALID_KEYWORD = 95
XML_ERR_VERSION_MISSING = 96
XML_WAR_UNKNOWN_VERSION = 97
XML_WAR_LANG_VALUE = 98
XML_WAR_NS_URI = 99
XML_WAR_NS_URI_RELATIVE = 100
XML_ERR_MISSING_ENCODING = 101
XML_WAR_SPACE_VALUE = 102
XML_ERR_NOT_STANDALONE = 103
XML_ERR_ENTITY_PROCESSING = 104
XML_ERR_NOTATION_PROCESSING = 105
XML_WAR_NS_COLUMN = 106
XML_WAR_ENTITY_REDEFINED = 107
XML_ERR_UNKNOWN_VERSION = 108
XML_ERR_VERSION_MISMATCH = 109
XML_ERR_NAME_TOO_LONG = 110
XML_ERR_USER_STOP = 111
# namespace errors
XML_NS_ERR_XML_NAMESPACE = 200
XML_NS_ERR_UNDEFINED_NAMESPACE = 201
XML_NS_ERR_QNAME = 202
XML_NS_ERR_ATTRIBUTE_REDEFINED = 203
XML_NS_ERR_EMPTY = 204
XML_NS_ERR_COLON = 205
# DTD validation errors
XML_DTD_ATTRIBUTE_DEFAULT = 500
XML_DTD_ATTRIBUTE_REDEFINED = 501
XML_DTD_ATTRIBUTE_VALUE = 502
XML_DTD_CONTENT_ERROR = 503
XML_DTD_CONTENT_MODEL = 504
XML_DTD_CONTENT_NOT_DETERMINIST = 505
XML_DTD_DIFFERENT_PREFIX = 506
XML_DTD_ELEM_DEFAULT_NAMESPACE = 507
XML_DTD_ELEM_NAMESPACE = 508
XML_DTD_ELEM_REDEFINED = 509
XML_DTD_EMPTY_NOTATION = 510
XML_DTD_ENTITY_TYPE = 511
XML_DTD_ID_FIXED = 512
XML_DTD_ID_REDEFINED = 513
XML_DTD_ID_SUBSET = 514
XML_DTD_INVALID_CHILD = 515
XML_DTD_INVALID_DEFAULT = 516
XML_DTD_LOAD_ERROR = 517
XML_DTD_MISSING_ATTRIBUTE = 518
XML_DTD_MIXED_CORRUPT = 519
XML_DTD_MULTIPLE_ID = 520
XML_DTD_NO_DOC = 521
XML_DTD_NO_DTD = 522
XML_DTD_NO_ELEM_NAME = 523
XML_DTD_NO_PREFIX = 524
XML_DTD_NO_ROOT = 525
XML_DTD_NOTATION_REDEFINED = 526
XML_DTD_NOTATION_VALUE = 527
XML_DTD_NOT_EMPTY = 528
XML_DTD_NOT_PCDATA = 529
XML_DTD_NOT_STANDALONE = 530
XML_DTD_ROOT_NAME = 531
XML_DTD_STANDALONE_WHITE_SPACE = 532
XML_DTD_UNKNOWN_ATTRIBUTE = 533
XML_DTD_UNKNOWN_ELEM = 534
XML_DTD_UNKNOWN_ENTITY = 535
XML_DTD_UNKNOWN_ID = 536
XML_DTD_UNKNOWN_NOTATION = 537
XML_DTD_STANDALONE_DEFAULTED = 538
XML_DTD_XMLID_VALUE = 539
XML_DTD_XMLID_TYPE = 540
XML_DTD_DUP_TOKEN = 541
# HTML parser errors (note: "STRUCURE" typo preserved from the C enum)
XML_HTML_STRUCURE_ERROR = 800
XML_HTML_UNKNOWN_TAG = 801
# Relax-NG parser errors
XML_RNGP_ANYNAME_ATTR_ANCESTOR = 1000
XML_RNGP_ATTR_CONFLICT = 1001
XML_RNGP_ATTRIBUTE_CHILDREN = 1002
XML_RNGP_ATTRIBUTE_CONTENT = 1003
XML_RNGP_ATTRIBUTE_EMPTY = 1004
XML_RNGP_ATTRIBUTE_NOOP = 1005
XML_RNGP_CHOICE_CONTENT = 1006
XML_RNGP_CHOICE_EMPTY = 1007
XML_RNGP_CREATE_FAILURE = 1008
XML_RNGP_DATA_CONTENT = 1009
XML_RNGP_DEF_CHOICE_AND_INTERLEAVE = 1010
XML_RNGP_DEFINE_CREATE_FAILED = 1011
XML_RNGP_DEFINE_EMPTY = 1012
XML_RNGP_DEFINE_MISSING = 1013
XML_RNGP_DEFINE_NAME_MISSING = 1014
XML_RNGP_ELEM_CONTENT_EMPTY = 1015
XML_RNGP_ELEM_CONTENT_ERROR = 1016
XML_RNGP_ELEMENT_EMPTY = 1017
XML_RNGP_ELEMENT_CONTENT = 1018
XML_RNGP_ELEMENT_NAME = 1019
XML_RNGP_ELEMENT_NO_CONTENT = 1020
XML_RNGP_ELEM_TEXT_CONFLICT = 1021
XML_RNGP_EMPTY = 1022
XML_RNGP_EMPTY_CONSTRUCT = 1023
XML_RNGP_EMPTY_CONTENT = 1024
XML_RNGP_EMPTY_NOT_EMPTY = 1025
XML_RNGP_ERROR_TYPE_LIB = 1026
XML_RNGP_EXCEPT_EMPTY = 1027
XML_RNGP_EXCEPT_MISSING = 1028
XML_RNGP_EXCEPT_MULTIPLE = 1029
XML_RNGP_EXCEPT_NO_CONTENT = 1030
XML_RNGP_EXTERNALREF_EMTPY = 1031
XML_RNGP_EXTERNAL_REF_FAILURE = 1032
XML_RNGP_EXTERNALREF_RECURSE = 1033
XML_RNGP_FORBIDDEN_ATTRIBUTE = 1034
XML_RNGP_FOREIGN_ELEMENT = 1035
XML_RNGP_GRAMMAR_CONTENT = 1036
XML_RNGP_GRAMMAR_EMPTY = 1037
XML_RNGP_GRAMMAR_MISSING = 1038
XML_RNGP_GRAMMAR_NO_START = 1039
XML_RNGP_GROUP_ATTR_CONFLICT = 1040
XML_RNGP_HREF_ERROR = 1041
XML_RNGP_INCLUDE_EMPTY = 1042
XML_RNGP_INCLUDE_FAILURE = 1043
XML_RNGP_INCLUDE_RECURSE = 1044
XML_RNGP_INTERLEAVE_ADD = 1045
XML_RNGP_INTERLEAVE_CREATE_FAILED = 1046
XML_RNGP_INTERLEAVE_EMPTY = 1047
XML_RNGP_INTERLEAVE_NO_CONTENT = 1048
XML_RNGP_INVALID_DEFINE_NAME = 1049
XML_RNGP_INVALID_URI = 1050
XML_RNGP_INVALID_VALUE = 1051
XML_RNGP_MISSING_HREF = 1052
XML_RNGP_NAME_MISSING = 1053
XML_RNGP_NEED_COMBINE = 1054
XML_RNGP_NOTALLOWED_NOT_EMPTY = 1055
XML_RNGP_NSNAME_ATTR_ANCESTOR = 1056
XML_RNGP_NSNAME_NO_NS = 1057
XML_RNGP_PARAM_FORBIDDEN = 1058
XML_RNGP_PARAM_NAME_MISSING = 1059
XML_RNGP_PARENTREF_CREATE_FAILED = 1060
XML_RNGP_PARENTREF_NAME_INVALID = 1061
XML_RNGP_PARENTREF_NO_NAME = 1062
XML_RNGP_PARENTREF_NO_PARENT = 1063
XML_RNGP_PARENTREF_NOT_EMPTY = 1064
XML_RNGP_PARSE_ERROR = 1065
XML_RNGP_PAT_ANYNAME_EXCEPT_ANYNAME = 1066
XML_RNGP_PAT_ATTR_ATTR = 1067
XML_RNGP_PAT_ATTR_ELEM = 1068
XML_RNGP_PAT_DATA_EXCEPT_ATTR = 1069
XML_RNGP_PAT_DATA_EXCEPT_ELEM = 1070
XML_RNGP_PAT_DATA_EXCEPT_EMPTY = 1071
XML_RNGP_PAT_DATA_EXCEPT_GROUP = 1072
XML_RNGP_PAT_DATA_EXCEPT_INTERLEAVE = 1073
XML_RNGP_PAT_DATA_EXCEPT_LIST = 1074
XML_RNGP_PAT_DATA_EXCEPT_ONEMORE = 1075
XML_RNGP_PAT_DATA_EXCEPT_REF = 1076
XML_RNGP_PAT_DATA_EXCEPT_TEXT = 1077
XML_RNGP_PAT_LIST_ATTR = 1078
XML_RNGP_PAT_LIST_ELEM = 1079
XML_RNGP_PAT_LIST_INTERLEAVE = 1080
XML_RNGP_PAT_LIST_LIST = 1081
XML_RNGP_PAT_LIST_REF = 1082
XML_RNGP_PAT_LIST_TEXT = 1083
XML_RNGP_PAT_NSNAME_EXCEPT_ANYNAME = 1084
XML_RNGP_PAT_NSNAME_EXCEPT_NSNAME = 1085
XML_RNGP_PAT_ONEMORE_GROUP_ATTR = 1086
XML_RNGP_PAT_ONEMORE_INTERLEAVE_ATTR = 1087
XML_RNGP_PAT_START_ATTR = 1088
XML_RNGP_PAT_START_DATA = 1089
XML_RNGP_PAT_START_EMPTY = 1090
XML_RNGP_PAT_START_GROUP = 1091
XML_RNGP_PAT_START_INTERLEAVE = 1092
XML_RNGP_PAT_START_LIST = 1093
XML_RNGP_PAT_START_ONEMORE = 1094
XML_RNGP_PAT_START_TEXT = 1095
XML_RNGP_PAT_START_VALUE = 1096
XML_RNGP_PREFIX_UNDEFINED = 1097
XML_RNGP_REF_CREATE_FAILED = 1098
XML_RNGP_REF_CYCLE = 1099
XML_RNGP_REF_NAME_INVALID = 1100
XML_RNGP_REF_NO_DEF = 1101
XML_RNGP_REF_NO_NAME = 1102
XML_RNGP_REF_NOT_EMPTY = 1103
XML_RNGP_START_CHOICE_AND_INTERLEAVE = 1104
XML_RNGP_START_CONTENT = 1105
XML_RNGP_START_EMPTY = 1106
XML_RNGP_START_MISSING = 1107
XML_RNGP_TEXT_EXPECTED = 1108
XML_RNGP_TEXT_HAS_CHILD = 1109
XML_RNGP_TYPE_MISSING = 1110
XML_RNGP_TYPE_NOT_FOUND = 1111
XML_RNGP_TYPE_VALUE = 1112
XML_RNGP_UNKNOWN_ATTRIBUTE = 1113
XML_RNGP_UNKNOWN_COMBINE = 1114
XML_RNGP_UNKNOWN_CONSTRUCT = 1115
XML_RNGP_UNKNOWN_TYPE_LIB = 1116
XML_RNGP_URI_FRAGMENT = 1117
XML_RNGP_URI_NOT_ABSOLUTE = 1118
XML_RNGP_VALUE_EMPTY = 1119
XML_RNGP_VALUE_NO_CONTENT = 1120
XML_RNGP_XMLNS_NAME = 1121
XML_RNGP_XML_NS = 1122
# XPath / XPointer errors
XML_XPATH_EXPRESSION_OK = 1200
XML_XPATH_NUMBER_ERROR = 1201
XML_XPATH_UNFINISHED_LITERAL_ERROR = 1202
XML_XPATH_START_LITERAL_ERROR = 1203
XML_XPATH_VARIABLE_REF_ERROR = 1204
XML_XPATH_UNDEF_VARIABLE_ERROR = 1205
XML_XPATH_INVALID_PREDICATE_ERROR = 1206
XML_XPATH_EXPR_ERROR = 1207
XML_XPATH_UNCLOSED_ERROR = 1208
XML_XPATH_UNKNOWN_FUNC_ERROR = 1209
XML_XPATH_INVALID_OPERAND = 1210
XML_XPATH_INVALID_TYPE = 1211
XML_XPATH_INVALID_ARITY = 1212
XML_XPATH_INVALID_CTXT_SIZE = 1213
XML_XPATH_INVALID_CTXT_POSITION = 1214
XML_XPATH_MEMORY_ERROR = 1215
XML_XPTR_SYNTAX_ERROR = 1216
XML_XPTR_RESOURCE_ERROR = 1217
XML_XPTR_SUB_RESOURCE_ERROR = 1218
XML_XPATH_UNDEF_PREFIX_ERROR = 1219
XML_XPATH_ENCODING_ERROR = 1220
XML_XPATH_INVALID_CHAR_ERROR = 1221
# tree-manipulation errors
XML_TREE_INVALID_HEX = 1300
XML_TREE_INVALID_DEC = 1301
XML_TREE_UNTERMINATED_ENTITY = 1302
XML_TREE_NOT_UTF8 = 1303
# serialization (save) errors
XML_SAVE_NOT_UTF8 = 1400
XML_SAVE_CHAR_INVALID = 1401
XML_SAVE_NO_DOCTYPE = 1402
XML_SAVE_UNKNOWN_ENCODING = 1403
XML_REGEXP_COMPILE_ERROR = 1450
# I/O errors (mostly mirroring errno values)
XML_IO_UNKNOWN = 1500
XML_IO_EACCES = 1501
XML_IO_EAGAIN = 1502
XML_IO_EBADF = 1503
XML_IO_EBADMSG = 1504
XML_IO_EBUSY = 1505
XML_IO_ECANCELED = 1506
XML_IO_ECHILD = 1507
XML_IO_EDEADLK = 1508
XML_IO_EDOM = 1509
XML_IO_EEXIST = 1510
XML_IO_EFAULT = 1511
XML_IO_EFBIG = 1512
XML_IO_EINPROGRESS = 1513
XML_IO_EINTR = 1514
XML_IO_EINVAL = 1515
XML_IO_EIO = 1516
XML_IO_EISDIR = 1517
XML_IO_EMFILE = 1518
XML_IO_EMLINK = 1519
XML_IO_EMSGSIZE = 1520
XML_IO_ENAMETOOLONG = 1521
XML_IO_ENFILE = 1522
XML_IO_ENODEV = 1523
XML_IO_ENOENT = 1524
XML_IO_ENOEXEC = 1525
XML_IO_ENOLCK = 1526
XML_IO_ENOMEM = 1527
XML_IO_ENOSPC = 1528
XML_IO_ENOSYS = 1529
XML_IO_ENOTDIR = 1530
XML_IO_ENOTEMPTY = 1531
XML_IO_ENOTSUP = 1532
XML_IO_ENOTTY = 1533
XML_IO_ENXIO = 1534
XML_IO_EPERM = 1535
XML_IO_EPIPE = 1536
XML_IO_ERANGE = 1537
XML_IO_EROFS = 1538
XML_IO_ESPIPE = 1539
XML_IO_ESRCH = 1540
XML_IO_ETIMEDOUT = 1541
XML_IO_EXDEV = 1542
XML_IO_NETWORK_ATTEMPT = 1543
XML_IO_ENCODER = 1544
XML_IO_FLUSH = 1545
XML_IO_WRITE = 1546
XML_IO_NO_INPUT = 1547
XML_IO_BUFFER_FULL = 1548
XML_IO_LOAD_ERROR = 1549
XML_IO_ENOTSOCK = 1550
XML_IO_EISCONN = 1551
XML_IO_ECONNREFUSED = 1552
XML_IO_ENETUNREACH = 1553
XML_IO_EADDRINUSE = 1554
XML_IO_EALREADY = 1555
XML_IO_EAFNOSUPPORT = 1556
# XInclude processing errors
XML_XINCLUDE_RECURSION = 1600
XML_XINCLUDE_PARSE_VALUE = 1601
XML_XINCLUDE_ENTITY_DEF_MISMATCH = 1602
XML_XINCLUDE_NO_HREF = 1603
XML_XINCLUDE_NO_FALLBACK = 1604
XML_XINCLUDE_HREF_URI = 1605
XML_XINCLUDE_TEXT_FRAGMENT = 1606
XML_XINCLUDE_TEXT_DOCUMENT = 1607
XML_XINCLUDE_INVALID_CHAR = 1608
XML_XINCLUDE_BUILD_FAILED = 1609
XML_XINCLUDE_UNKNOWN_ENCODING = 1610
XML_XINCLUDE_MULTIPLE_ROOT = 1611
XML_XINCLUDE_XPTR_FAILED = 1612
XML_XINCLUDE_XPTR_RESULT = 1613
XML_XINCLUDE_INCLUDE_IN_INCLUDE = 1614
XML_XINCLUDE_FALLBACKS_IN_INCLUDE = 1615
XML_XINCLUDE_FALLBACK_NOT_IN_INCLUDE = 1616
XML_XINCLUDE_DEPRECATED_NS = 1617
XML_XINCLUDE_FRAGMENT_ID = 1618
# XML catalog errors
XML_CATALOG_MISSING_ATTR = 1650
XML_CATALOG_ENTRY_BROKEN = 1651
XML_CATALOG_PREFER_VALUE = 1652
XML_CATALOG_NOT_CATALOG = 1653
XML_CATALOG_RECURSION = 1654
# XML Schemas parsing errors
XML_SCHEMAP_PREFIX_UNDEFINED = 1700
XML_SCHEMAP_ATTRFORMDEFAULT_VALUE = 1701
XML_SCHEMAP_ATTRGRP_NONAME_NOREF = 1702
XML_SCHEMAP_ATTR_NONAME_NOREF = 1703
XML_SCHEMAP_COMPLEXTYPE_NONAME_NOREF = 1704
XML_SCHEMAP_ELEMFORMDEFAULT_VALUE = 1705
XML_SCHEMAP_ELEM_NONAME_NOREF = 1706
XML_SCHEMAP_EXTENSION_NO_BASE = 1707
XML_SCHEMAP_FACET_NO_VALUE = 1708
XML_SCHEMAP_FAILED_BUILD_IMPORT = 1709
XML_SCHEMAP_GROUP_NONAME_NOREF = 1710
XML_SCHEMAP_IMPORT_NAMESPACE_NOT_URI = 1711
XML_SCHEMAP_IMPORT_REDEFINE_NSNAME = 1712
XML_SCHEMAP_IMPORT_SCHEMA_NOT_URI = 1713
XML_SCHEMAP_INVALID_BOOLEAN = 1714
XML_SCHEMAP_INVALID_ENUM = 1715
XML_SCHEMAP_INVALID_FACET = 1716
XML_SCHEMAP_INVALID_FACET_VALUE = 1717
XML_SCHEMAP_INVALID_MAXOCCURS = 1718
XML_SCHEMAP_INVALID_MINOCCURS = 1719
XML_SCHEMAP_INVALID_REF_AND_SUBTYPE = 1720
XML_SCHEMAP_INVALID_WHITE_SPACE = 1721
XML_SCHEMAP_NOATTR_NOREF = 1722
XML_SCHEMAP_NOTATION_NO_NAME = 1723
XML_SCHEMAP_NOTYPE_NOREF = 1724
XML_SCHEMAP_REF_AND_SUBTYPE = 1725
XML_SCHEMAP_RESTRICTION_NONAME_NOREF = 1726
XML_SCHEMAP_SIMPLETYPE_NONAME = 1727
XML_SCHEMAP_TYPE_AND_SUBTYPE = 1728
XML_SCHEMAP_UNKNOWN_ALL_CHILD = 1729
XML_SCHEMAP_UNKNOWN_ANYATTRIBUTE_CHILD = 1730
XML_SCHEMAP_UNKNOWN_ATTR_CHILD = 1731
XML_SCHEMAP_UNKNOWN_ATTRGRP_CHILD = 1732
XML_SCHEMAP_UNKNOWN_ATTRIBUTE_GROUP = 1733
XML_SCHEMAP_UNKNOWN_BASE_TYPE = 1734
XML_SCHEMAP_UNKNOWN_CHOICE_CHILD = 1735
XML_SCHEMAP_UNKNOWN_COMPLEXCONTENT_CHILD = 1736
XML_SCHEMAP_UNKNOWN_COMPLEXTYPE_CHILD = 1737
XML_SCHEMAP_UNKNOWN_ELEM_CHILD = 1738
XML_SCHEMAP_UNKNOWN_EXTENSION_CHILD = 1739
XML_SCHEMAP_UNKNOWN_FACET_CHILD = 1740
XML_SCHEMAP_UNKNOWN_FACET_TYPE = 1741
XML_SCHEMAP_UNKNOWN_GROUP_CHILD = 1742
XML_SCHEMAP_UNKNOWN_IMPORT_CHILD = 1743
XML_SCHEMAP_UNKNOWN_LIST_CHILD = 1744
XML_SCHEMAP_UNKNOWN_NOTATION_CHILD = 1745
XML_SCHEMAP_UNKNOWN_PROCESSCONTENT_CHILD = 1746
XML_SCHEMAP_UNKNOWN_REF = 1747
XML_SCHEMAP_UNKNOWN_RESTRICTION_CHILD = 1748
XML_SCHEMAP_UNKNOWN_SCHEMAS_CHILD = 1749
XML_SCHEMAP_UNKNOWN_SEQUENCE_CHILD = 1750
XML_SCHEMAP_UNKNOWN_SIMPLECONTENT_CHILD = 1751
XML_SCHEMAP_UNKNOWN_SIMPLETYPE_CHILD = 1752
XML_SCHEMAP_UNKNOWN_TYPE = 1753
XML_SCHEMAP_UNKNOWN_UNION_CHILD = 1754
XML_SCHEMAP_ELEM_DEFAULT_FIXED = 1755
XML_SCHEMAP_REGEXP_INVALID = 1756
XML_SCHEMAP_FAILED_LOAD = 1757
XML_SCHEMAP_NOTHING_TO_PARSE = 1758
XML_SCHEMAP_NOROOT = 1759
XML_SCHEMAP_REDEFINED_GROUP = 1760
XML_SCHEMAP_REDEFINED_TYPE = 1761
XML_SCHEMAP_REDEFINED_ELEMENT = 1762
XML_SCHEMAP_REDEFINED_ATTRGROUP = 1763
XML_SCHEMAP_REDEFINED_ATTR = 1764
XML_SCHEMAP_REDEFINED_NOTATION = 1765
XML_SCHEMAP_FAILED_PARSE = 1766
XML_SCHEMAP_UNKNOWN_PREFIX = 1767
XML_SCHEMAP_DEF_AND_PREFIX = 1768
XML_SCHEMAP_UNKNOWN_INCLUDE_CHILD = 1769
XML_SCHEMAP_INCLUDE_SCHEMA_NOT_URI = 1770
XML_SCHEMAP_INCLUDE_SCHEMA_NO_URI = 1771
XML_SCHEMAP_NOT_SCHEMA = 1772
XML_SCHEMAP_UNKNOWN_MEMBER_TYPE = 1773
XML_SCHEMAP_INVALID_ATTR_USE = 1774
XML_SCHEMAP_RECURSIVE = 1775
XML_SCHEMAP_SUPERNUMEROUS_LIST_ITEM_TYPE = 1776
XML_SCHEMAP_INVALID_ATTR_COMBINATION = 1777
XML_SCHEMAP_INVALID_ATTR_INLINE_COMBINATION = 1778
XML_SCHEMAP_MISSING_SIMPLETYPE_CHILD = 1779
XML_SCHEMAP_INVALID_ATTR_NAME = 1780
XML_SCHEMAP_REF_AND_CONTENT = 1781
XML_SCHEMAP_CT_PROPS_CORRECT_1 = 1782
XML_SCHEMAP_CT_PROPS_CORRECT_2 = 1783
XML_SCHEMAP_CT_PROPS_CORRECT_3 = 1784
XML_SCHEMAP_CT_PROPS_CORRECT_4 = 1785
XML_SCHEMAP_CT_PROPS_CORRECT_5 = 1786
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_1 = 1787
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_2_1_1 = 1788
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_2_1_2 = 1789
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_2_2 = 1790
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_3 = 1791
XML_SCHEMAP_WILDCARD_INVALID_NS_MEMBER = 1792
XML_SCHEMAP_INTERSECTION_NOT_EXPRESSIBLE = 1793
XML_SCHEMAP_UNION_NOT_EXPRESSIBLE = 1794
XML_SCHEMAP_SRC_IMPORT_3_1 = 1795
XML_SCHEMAP_SRC_IMPORT_3_2 = 1796
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_4_1 = 1797
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_4_2 = 1798
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_4_3 = 1799
XML_SCHEMAP_COS_CT_EXTENDS_1_3 = 1800
XML_SCHEMAV_NOROOT = 1801
XML_SCHEMAV_UNDECLAREDELEM = 1802
XML_SCHEMAV_NOTTOPLEVEL = 1803
XML_SCHEMAV_MISSING = 1804
XML_SCHEMAV_WRONGELEM = 1805
XML_SCHEMAV_NOTYPE = 1806
XML_SCHEMAV_NOROLLBACK = 1807
XML_SCHEMAV_ISABSTRACT = 1808
XML_SCHEMAV_NOTEMPTY = 1809
XML_SCHEMAV_ELEMCONT = 1810
XML_SCHEMAV_HAVEDEFAULT = 1811
XML_SCHEMAV_NOTNILLABLE = 1812
XML_SCHEMAV_EXTRACONTENT = 1813
XML_SCHEMAV_INVALIDATTR = 1814
XML_SCHEMAV_INVALIDELEM = 1815
XML_SCHEMAV_NOTDETERMINIST = 1816
XML_SCHEMAV_CONSTRUCT = 1817
XML_SCHEMAV_INTERNAL = 1818
XML_SCHEMAV_NOTSIMPLE = 1819
XML_SCHEMAV_ATTRUNKNOWN = 1820
XML_SCHEMAV_ATTRINVALID = 1821
XML_SCHEMAV_VALUE = 1822
XML_SCHEMAV_FACET = 1823
XML_SCHEMAV_CVC_DATATYPE_VALID_1_2_1 = 1824
XML_SCHEMAV_CVC_DATATYPE_VALID_1_2_2 = 1825
XML_SCHEMAV_CVC_DATATYPE_VALID_1_2_3 = 1826
XML_SCHEMAV_CVC_TYPE_3_1_1 = 1827
XML_SCHEMAV_CVC_TYPE_3_1_2 = 1828
XML_SCHEMAV_CVC_FACET_VALID = 1829
XML_SCHEMAV_CVC_LENGTH_VALID = 1830
XML_SCHEMAV_CVC_MINLENGTH_VALID = 1831
XML_SCHEMAV_CVC_MAXLENGTH_VALID = 1832
XML_SCHEMAV_CVC_MININCLUSIVE_VALID = 1833
XML_SCHEMAV_CVC_MAXINCLUSIVE_VALID = 1834
XML_SCHEMAV_CVC_MINEXCLUSIVE_VALID = 1835
XML_SCHEMAV_CVC_MAXEXCLUSIVE_VALID = 1836
XML_SCHEMAV_CVC_TOTALDIGITS_VALID = 1837
XML_SCHEMAV_CVC_FRACTIONDIGITS_VALID = 1838
XML_SCHEMAV_CVC_PATTERN_VALID = 1839
XML_SCHEMAV_CVC_ENUMERATION_VALID = 1840
XML_SCHEMAV_CVC_COMPLEX_TYPE_2_1 = 1841
XML_SCHEMAV_CVC_COMPLEX_TYPE_2_2 = 1842
XML_SCHEMAV_CVC_COMPLEX_TYPE_2_3 = 1843
XML_SCHEMAV_CVC_COMPLEX_TYPE_2_4 = 1844
XML_SCHEMAV_CVC_ELT_1 = 1845
XML_SCHEMAV_CVC_ELT_2 = 1846
XML_SCHEMAV_CVC_ELT_3_1 = 1847
XML_SCHEMAV_CVC_ELT_3_2_1 = 1848
XML_SCHEMAV_CVC_ELT_3_2_2 = 1849
XML_SCHEMAV_CVC_ELT_4_1 = 1850
XML_SCHEMAV_CVC_ELT_4_2 = 1851
XML_SCHEMAV_CVC_ELT_4_3 = 1852
XML_SCHEMAV_CVC_ELT_5_1_1 = 1853
XML_SCHEMAV_CVC_ELT_5_1_2 = 1854
XML_SCHEMAV_CVC_ELT_5_2_1 = 1855
XML_SCHEMAV_CVC_ELT_5_2_2_1 = 1856
XML_SCHEMAV_CVC_ELT_5_2_2_2_1 = 1857
XML_SCHEMAV_CVC_ELT_5_2_2_2_2 = 1858
XML_SCHEMAV_CVC_ELT_6 = 1859
XML_SCHEMAV_CVC_ELT_7 = 1860
XML_SCHEMAV_CVC_ATTRIBUTE_1 = 1861
XML_SCHEMAV_CVC_ATTRIBUTE_2 = 1862
XML_SCHEMAV_CVC_ATTRIBUTE_3 = 1863
XML_SCHEMAV_CVC_ATTRIBUTE_4 = 1864
XML_SCHEMAV_CVC_COMPLEX_TYPE_3_1 = 1865
XML_SCHEMAV_CVC_COMPLEX_TYPE_3_2_1 = 1866
XML_SCHEMAV_CVC_COMPLEX_TYPE_3_2_2 = 1867
XML_SCHEMAV_CVC_COMPLEX_TYPE_4 = 1868
XML_SCHEMAV_CVC_COMPLEX_TYPE_5_1 = 1869
XML_SCHEMAV_CVC_COMPLEX_TYPE_5_2 = 1870
XML_SCHEMAV_ELEMENT_CONTENT = 1871
XML_SCHEMAV_DOCUMENT_ELEMENT_MISSING = 1872
XML_SCHEMAV_CVC_COMPLEX_TYPE_1 = 1873
XML_SCHEMAV_CVC_AU = 1874
XML_SCHEMAV_CVC_TYPE_1 = 1875
XML_SCHEMAV_CVC_TYPE_2 = 1876
XML_SCHEMAV_CVC_IDC = 1877
XML_SCHEMAV_CVC_WILDCARD = 1878
XML_SCHEMAV_MISC = 1879
XML_XPTR_UNKNOWN_SCHEME = 1900
XML_XPTR_CHILDSEQ_START = 1901
XML_XPTR_EVAL_FAILED = 1902
XML_XPTR_EXTRA_OBJECTS = 1903
XML_C14N_CREATE_CTXT = 1950
XML_C14N_REQUIRES_UTF8 = 1951
XML_C14N_CREATE_STACK = 1952
XML_C14N_INVALID_NODE = 1953
XML_C14N_UNKNOW_NODE = 1954
XML_C14N_RELATIVE_NAMESPACE = 1955
XML_FTP_PASV_ANSWER = 2000
XML_FTP_EPSV_ANSWER = 2001
XML_FTP_ACCNT = 2002
XML_FTP_URL_SYNTAX = 2003
XML_HTTP_URL_SYNTAX = 2020
XML_HTTP_USE_IP = 2021
XML_HTTP_UNKNOWN_HOST = 2022
XML_SCHEMAP_SRC_SIMPLE_TYPE_1 = 3000
XML_SCHEMAP_SRC_SIMPLE_TYPE_2 = 3001
XML_SCHEMAP_SRC_SIMPLE_TYPE_3 = 3002
XML_SCHEMAP_SRC_SIMPLE_TYPE_4 = 3003
XML_SCHEMAP_SRC_RESOLVE = 3004
XML_SCHEMAP_SRC_RESTRICTION_BASE_OR_SIMPLETYPE = 3005
XML_SCHEMAP_SRC_LIST_ITEMTYPE_OR_SIMPLETYPE = 3006
XML_SCHEMAP_SRC_UNION_MEMBERTYPES_OR_SIMPLETYPES = 3007
XML_SCHEMAP_ST_PROPS_CORRECT_1 = 3008
XML_SCHEMAP_ST_PROPS_CORRECT_2 = 3009
XML_SCHEMAP_ST_PROPS_CORRECT_3 = 3010
XML_SCHEMAP_COS_ST_RESTRICTS_1_1 = 3011
XML_SCHEMAP_COS_ST_RESTRICTS_1_2 = 3012
XML_SCHEMAP_COS_ST_RESTRICTS_1_3_1 = 3013
XML_SCHEMAP_COS_ST_RESTRICTS_1_3_2 = 3014
XML_SCHEMAP_COS_ST_RESTRICTS_2_1 = 3015
XML_SCHEMAP_COS_ST_RESTRICTS_2_3_1_1 = 3016
XML_SCHEMAP_COS_ST_RESTRICTS_2_3_1_2 = 3017
XML_SCHEMAP_COS_ST_RESTRICTS_2_3_2_1 = 3018
XML_SCHEMAP_COS_ST_RESTRICTS_2_3_2_2 = 3019
XML_SCHEMAP_COS_ST_RESTRICTS_2_3_2_3 = 3020
XML_SCHEMAP_COS_ST_RESTRICTS_2_3_2_4 = 3021
XML_SCHEMAP_COS_ST_RESTRICTS_2_3_2_5 = 3022
XML_SCHEMAP_COS_ST_RESTRICTS_3_1 = 3023
XML_SCHEMAP_COS_ST_RESTRICTS_3_3_1 = 3024
XML_SCHEMAP_COS_ST_RESTRICTS_3_3_1_2 = 3025
XML_SCHEMAP_COS_ST_RESTRICTS_3_3_2_2 = 3026
XML_SCHEMAP_COS_ST_RESTRICTS_3_3_2_1 = 3027
XML_SCHEMAP_COS_ST_RESTRICTS_3_3_2_3 = 3028
XML_SCHEMAP_COS_ST_RESTRICTS_3_3_2_4 = 3029
XML_SCHEMAP_COS_ST_RESTRICTS_3_3_2_5 = 3030
XML_SCHEMAP_COS_ST_DERIVED_OK_2_1 = 3031
XML_SCHEMAP_COS_ST_DERIVED_OK_2_2 = 3032
XML_SCHEMAP_S4S_ELEM_NOT_ALLOWED = 3033
XML_SCHEMAP_S4S_ELEM_MISSING = 3034
XML_SCHEMAP_S4S_ATTR_NOT_ALLOWED = 3035
XML_SCHEMAP_S4S_ATTR_MISSING = 3036
XML_SCHEMAP_S4S_ATTR_INVALID_VALUE = 3037
XML_SCHEMAP_SRC_ELEMENT_1 = 3038
XML_SCHEMAP_SRC_ELEMENT_2_1 = 3039
XML_SCHEMAP_SRC_ELEMENT_2_2 = 3040
XML_SCHEMAP_SRC_ELEMENT_3 = 3041
XML_SCHEMAP_P_PROPS_CORRECT_1 = 3042
XML_SCHEMAP_P_PROPS_CORRECT_2_1 = 3043
XML_SCHEMAP_P_PROPS_CORRECT_2_2 = 3044
XML_SCHEMAP_E_PROPS_CORRECT_2 = 3045
XML_SCHEMAP_E_PROPS_CORRECT_3 = 3046
XML_SCHEMAP_E_PROPS_CORRECT_4 = 3047
XML_SCHEMAP_E_PROPS_CORRECT_5 = 3048
XML_SCHEMAP_E_PROPS_CORRECT_6 = 3049
XML_SCHEMAP_SRC_INCLUDE = 3050
XML_SCHEMAP_SRC_ATTRIBUTE_1 = 3051
XML_SCHEMAP_SRC_ATTRIBUTE_2 = 3052
XML_SCHEMAP_SRC_ATTRIBUTE_3_1 = 3053
XML_SCHEMAP_SRC_ATTRIBUTE_3_2 = 3054
XML_SCHEMAP_SRC_ATTRIBUTE_4 = 3055
XML_SCHEMAP_NO_XMLNS = 3056
XML_SCHEMAP_NO_XSI = 3057
XML_SCHEMAP_COS_VALID_DEFAULT_1 = 3058
XML_SCHEMAP_COS_VALID_DEFAULT_2_1 = 3059
XML_SCHEMAP_COS_VALID_DEFAULT_2_2_1 = 3060
XML_SCHEMAP_COS_VALID_DEFAULT_2_2_2 = 3061
XML_SCHEMAP_CVC_SIMPLE_TYPE = 3062
XML_SCHEMAP_COS_CT_EXTENDS_1_1 = 3063
XML_SCHEMAP_SRC_IMPORT_1_1 = 3064
XML_SCHEMAP_SRC_IMPORT_1_2 = 3065
XML_SCHEMAP_SRC_IMPORT_2 = 3066
XML_SCHEMAP_SRC_IMPORT_2_1 = 3067
XML_SCHEMAP_SRC_IMPORT_2_2 = 3068
XML_SCHEMAP_INTERNAL = 3069
XML_SCHEMAP_NOT_DETERMINISTIC = 3070
XML_SCHEMAP_SRC_ATTRIBUTE_GROUP_1 = 3071
XML_SCHEMAP_SRC_ATTRIBUTE_GROUP_2 = 3072
XML_SCHEMAP_SRC_ATTRIBUTE_GROUP_3 = 3073
XML_SCHEMAP_MG_PROPS_CORRECT_1 = 3074
XML_SCHEMAP_MG_PROPS_CORRECT_2 = 3075
XML_SCHEMAP_SRC_CT_1 = 3076
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_2_1_3 = 3077
XML_SCHEMAP_AU_PROPS_CORRECT_2 = 3078
XML_SCHEMAP_A_PROPS_CORRECT_2 = 3079
XML_SCHEMAP_C_PROPS_CORRECT = 3080
XML_SCHEMAP_SRC_REDEFINE = 3081
XML_SCHEMAP_SRC_IMPORT = 3082
XML_SCHEMAP_WARN_SKIP_SCHEMA = 3083
XML_SCHEMAP_WARN_UNLOCATED_SCHEMA = 3084
XML_SCHEMAP_WARN_ATTR_REDECL_PROH = 3085
XML_SCHEMAP_WARN_ATTR_POINTLESS_PROH = 3086
XML_SCHEMAP_AG_PROPS_CORRECT = 3087
XML_SCHEMAP_COS_CT_EXTENDS_1_2 = 3088
XML_SCHEMAP_AU_PROPS_CORRECT = 3089
XML_SCHEMAP_A_PROPS_CORRECT_3 = 3090
XML_SCHEMAP_COS_ALL_LIMITED = 3091
XML_SCHEMATRONV_ASSERT = 4000
XML_SCHEMATRONV_REPORT = 4001
XML_MODULE_OPEN = 4900
XML_MODULE_CLOSE = 4901
XML_CHECK_FOUND_ELEMENT = 5000
XML_CHECK_FOUND_ATTRIBUTE = 5001
XML_CHECK_FOUND_TEXT = 5002
XML_CHECK_FOUND_CDATA = 5003
XML_CHECK_FOUND_ENTITYREF = 5004
XML_CHECK_FOUND_ENTITY = 5005
XML_CHECK_FOUND_PI = 5006
XML_CHECK_FOUND_COMMENT = 5007
XML_CHECK_FOUND_DOCTYPE = 5008
XML_CHECK_FOUND_FRAGMENT = 5009
XML_CHECK_FOUND_NOTATION = 5010
XML_CHECK_UNKNOWN_NODE = 5011
XML_CHECK_ENTITY_TYPE = 5012
XML_CHECK_NO_PARENT = 5013
XML_CHECK_NO_DOC = 5014
XML_CHECK_NO_NAME = 5015
XML_CHECK_NO_ELEM = 5016
XML_CHECK_WRONG_DOC = 5017
XML_CHECK_NO_PREV = 5018
XML_CHECK_WRONG_PREV = 5019
XML_CHECK_NO_NEXT = 5020
XML_CHECK_WRONG_NEXT = 5021
XML_CHECK_NOT_DTD = 5022
XML_CHECK_NOT_ATTR = 5023
XML_CHECK_NOT_ATTR_DECL = 5024
XML_CHECK_NOT_ELEM_DECL = 5025
XML_CHECK_NOT_ENTITY_DECL = 5026
XML_CHECK_NOT_NS_DECL = 5027
XML_CHECK_NO_HREF = 5028
XML_CHECK_WRONG_PARENT = 5029
XML_CHECK_NS_SCOPE = 5030
XML_CHECK_NS_ANCESTOR = 5031
XML_CHECK_NOT_UTF8 = 5032
XML_CHECK_NO_DICT = 5033
XML_CHECK_NOT_NCNAME = 5034
XML_CHECK_OUTSIDE_DICT = 5035
XML_CHECK_WRONG_NAME = 5036
XML_CHECK_NAME_NOT_NULL = 5037
XML_I18N_NO_NAME = 6000
XML_I18N_NO_HANDLER = 6001
XML_I18N_EXCESS_HANDLER = 6002
XML_I18N_CONV_FAILED = 6003
XML_I18N_NO_OUTPUT = 6004
XML_BUF_OVERFLOW = 7000
# xmlExpNodeType
XML_EXP_EMPTY = 0
XML_EXP_FORBID = 1
XML_EXP_ATOM = 2
XML_EXP_SEQ = 3
XML_EXP_OR = 4
XML_EXP_COUNT = 5
# xmlElementContentType
XML_ELEMENT_CONTENT_PCDATA = 1
XML_ELEMENT_CONTENT_ELEMENT = 2
XML_ELEMENT_CONTENT_SEQ = 3
XML_ELEMENT_CONTENT_OR = 4
# xmlParserProperties
XML_PARSER_LOADDTD = 1
XML_PARSER_DEFAULTATTRS = 2
XML_PARSER_VALIDATE = 3
XML_PARSER_SUBST_ENTITIES = 4
# xmlReaderTypes
XML_READER_TYPE_NONE = 0
XML_READER_TYPE_ELEMENT = 1
XML_READER_TYPE_ATTRIBUTE = 2
XML_READER_TYPE_TEXT = 3
XML_READER_TYPE_CDATA = 4
XML_READER_TYPE_ENTITY_REFERENCE = 5
XML_READER_TYPE_ENTITY = 6
XML_READER_TYPE_PROCESSING_INSTRUCTION = 7
XML_READER_TYPE_COMMENT = 8
XML_READER_TYPE_DOCUMENT = 9
XML_READER_TYPE_DOCUMENT_TYPE = 10
XML_READER_TYPE_DOCUMENT_FRAGMENT = 11
XML_READER_TYPE_NOTATION = 12
XML_READER_TYPE_WHITESPACE = 13
XML_READER_TYPE_SIGNIFICANT_WHITESPACE = 14
XML_READER_TYPE_END_ELEMENT = 15
XML_READER_TYPE_END_ENTITY = 16
XML_READER_TYPE_XML_DECLARATION = 17
# xmlCatalogPrefer
XML_CATA_PREFER_NONE = 0
XML_CATA_PREFER_PUBLIC = 1
XML_CATA_PREFER_SYSTEM = 2
# xmlElementType
XML_ELEMENT_NODE = 1
XML_ATTRIBUTE_NODE = 2
XML_TEXT_NODE = 3
XML_CDATA_SECTION_NODE = 4
XML_ENTITY_REF_NODE = 5
XML_ENTITY_NODE = 6
XML_PI_NODE = 7
XML_COMMENT_NODE = 8
XML_DOCUMENT_NODE = 9
XML_DOCUMENT_TYPE_NODE = 10
XML_DOCUMENT_FRAG_NODE = 11
XML_NOTATION_NODE = 12
XML_HTML_DOCUMENT_NODE = 13
XML_DTD_NODE = 14
XML_ELEMENT_DECL = 15
XML_ATTRIBUTE_DECL = 16
XML_ENTITY_DECL = 17
XML_NAMESPACE_DECL = 18
XML_XINCLUDE_START = 19
XML_XINCLUDE_END = 20
XML_DOCB_DOCUMENT_NODE = 21
# xlinkActuate
XLINK_ACTUATE_NONE = 0
XLINK_ACTUATE_AUTO = 1
XLINK_ACTUATE_ONREQUEST = 2
# xmlFeature
XML_WITH_THREAD = 1
XML_WITH_TREE = 2
XML_WITH_OUTPUT = 3
XML_WITH_PUSH = 4
XML_WITH_READER = 5
XML_WITH_PATTERN = 6
XML_WITH_WRITER = 7
XML_WITH_SAX1 = 8
XML_WITH_FTP = 9
XML_WITH_HTTP = 10
XML_WITH_VALID = 11
XML_WITH_HTML = 12
XML_WITH_LEGACY = 13
XML_WITH_C14N = 14
XML_WITH_CATALOG = 15
XML_WITH_XPATH = 16
XML_WITH_XPTR = 17
XML_WITH_XINCLUDE = 18
XML_WITH_ICONV = 19
XML_WITH_ISO8859X = 20
XML_WITH_UNICODE = 21
XML_WITH_REGEXP = 22
XML_WITH_AUTOMATA = 23
XML_WITH_EXPR = 24
XML_WITH_SCHEMAS = 25
XML_WITH_SCHEMATRON = 26
XML_WITH_MODULES = 27
XML_WITH_DEBUG = 28
XML_WITH_DEBUG_MEM = 29
XML_WITH_DEBUG_RUN = 30
XML_WITH_ZLIB = 31
XML_WITH_ICU = 32
XML_WITH_LZMA = 33
XML_WITH_NONE = 99999
# xmlElementContentOccur
XML_ELEMENT_CONTENT_ONCE = 1
XML_ELEMENT_CONTENT_OPT = 2
XML_ELEMENT_CONTENT_MULT = 3
XML_ELEMENT_CONTENT_PLUS = 4
# xmlXPathError
XPATH_EXPRESSION_OK = 0
XPATH_NUMBER_ERROR = 1
XPATH_UNFINISHED_LITERAL_ERROR = 2
XPATH_START_LITERAL_ERROR = 3
XPATH_VARIABLE_REF_ERROR = 4
XPATH_UNDEF_VARIABLE_ERROR = 5
XPATH_INVALID_PREDICATE_ERROR = 6
XPATH_EXPR_ERROR = 7
XPATH_UNCLOSED_ERROR = 8
XPATH_UNKNOWN_FUNC_ERROR = 9
XPATH_INVALID_OPERAND = 10
XPATH_INVALID_TYPE = 11
XPATH_INVALID_ARITY = 12
XPATH_INVALID_CTXT_SIZE = 13
XPATH_INVALID_CTXT_POSITION = 14
XPATH_MEMORY_ERROR = 15
XPTR_SYNTAX_ERROR = 16
XPTR_RESOURCE_ERROR = 17
XPTR_SUB_RESOURCE_ERROR = 18
XPATH_UNDEF_PREFIX_ERROR = 19
XPATH_ENCODING_ERROR = 20
XPATH_INVALID_CHAR_ERROR = 21
XPATH_INVALID_CTXT = 22
XPATH_STACK_ERROR = 23
XPATH_FORBID_VARIABLE_ERROR = 24
# xmlTextReaderMode
XML_TEXTREADER_MODE_INITIAL = 0
XML_TEXTREADER_MODE_INTERACTIVE = 1
XML_TEXTREADER_MODE_ERROR = 2
XML_TEXTREADER_MODE_EOF = 3
XML_TEXTREADER_MODE_CLOSED = 4
XML_TEXTREADER_MODE_READING = 5
# xmlErrorLevel
XML_ERR_NONE = 0
XML_ERR_WARNING = 1
XML_ERR_ERROR = 2
XML_ERR_FATAL = 3
# xmlCharEncoding
XML_CHAR_ENCODING_ERROR = -1
XML_CHAR_ENCODING_NONE = 0
XML_CHAR_ENCODING_UTF8 = 1
XML_CHAR_ENCODING_UTF16LE = 2
XML_CHAR_ENCODING_UTF16BE = 3
XML_CHAR_ENCODING_UCS4LE = 4
XML_CHAR_ENCODING_UCS4BE = 5
XML_CHAR_ENCODING_EBCDIC = 6
XML_CHAR_ENCODING_UCS4_2143 = 7
XML_CHAR_ENCODING_UCS4_3412 = 8
XML_CHAR_ENCODING_UCS2 = 9
XML_CHAR_ENCODING_8859_1 = 10
XML_CHAR_ENCODING_8859_2 = 11
XML_CHAR_ENCODING_8859_3 = 12
XML_CHAR_ENCODING_8859_4 = 13
XML_CHAR_ENCODING_8859_5 = 14
XML_CHAR_ENCODING_8859_6 = 15
XML_CHAR_ENCODING_8859_7 = 16
XML_CHAR_ENCODING_8859_8 = 17
XML_CHAR_ENCODING_8859_9 = 18
XML_CHAR_ENCODING_2022_JP = 19
XML_CHAR_ENCODING_SHIFT_JIS = 20
XML_CHAR_ENCODING_EUC_JP = 21
XML_CHAR_ENCODING_ASCII = 22
# xmlErrorDomain
XML_FROM_NONE = 0
XML_FROM_PARSER = 1
XML_FROM_TREE = 2
XML_FROM_NAMESPACE = 3
XML_FROM_DTD = 4
XML_FROM_HTML = 5
XML_FROM_MEMORY = 6
XML_FROM_OUTPUT = 7
XML_FROM_IO = 8
XML_FROM_FTP = 9
XML_FROM_HTTP = 10
XML_FROM_XINCLUDE = 11
XML_FROM_XPATH = 12
XML_FROM_XPOINTER = 13
XML_FROM_REGEXP = 14
XML_FROM_DATATYPE = 15
XML_FROM_SCHEMASP = 16
XML_FROM_SCHEMASV = 17
XML_FROM_RELAXNGP = 18
XML_FROM_RELAXNGV = 19
XML_FROM_CATALOG = 20
XML_FROM_C14N = 21
XML_FROM_XSLT = 22
XML_FROM_VALID = 23
XML_FROM_CHECK = 24
XML_FROM_WRITER = 25
XML_FROM_MODULE = 26
XML_FROM_I18N = 27
XML_FROM_SCHEMATRONV = 28
XML_FROM_BUFFER = 29
XML_FROM_URI = 30
# htmlStatus
HTML_NA = 0
HTML_INVALID = 1
HTML_DEPRECATED = 2
HTML_VALID = 4
HTML_REQUIRED = 12
# xmlSchemaValidOption
XML_SCHEMA_VAL_VC_I_CREATE = 1
# xmlSchemaWhitespaceValueType
XML_SCHEMA_WHITESPACE_UNKNOWN = 0
XML_SCHEMA_WHITESPACE_PRESERVE = 1
XML_SCHEMA_WHITESPACE_REPLACE = 2
XML_SCHEMA_WHITESPACE_COLLAPSE = 3
# htmlParserOption
HTML_PARSE_RECOVER = 1
HTML_PARSE_NODEFDTD = 4
HTML_PARSE_NOERROR = 32
HTML_PARSE_NOWARNING = 64
HTML_PARSE_PEDANTIC = 128
HTML_PARSE_NOBLANKS = 256
HTML_PARSE_NONET = 2048
HTML_PARSE_NOIMPLIED = 8192
HTML_PARSE_COMPACT = 65536
HTML_PARSE_IGNORE_ENC = 2097152
# xmlRelaxNGValidErr
XML_RELAXNG_OK = 0
XML_RELAXNG_ERR_MEMORY = 1
XML_RELAXNG_ERR_TYPE = 2
XML_RELAXNG_ERR_TYPEVAL = 3
XML_RELAXNG_ERR_DUPID = 4
XML_RELAXNG_ERR_TYPECMP = 5
XML_RELAXNG_ERR_NOSTATE = 6
XML_RELAXNG_ERR_NODEFINE = 7
XML_RELAXNG_ERR_LISTEXTRA = 8
XML_RELAXNG_ERR_LISTEMPTY = 9
XML_RELAXNG_ERR_INTERNODATA = 10
XML_RELAXNG_ERR_INTERSEQ = 11
XML_RELAXNG_ERR_INTEREXTRA = 12
XML_RELAXNG_ERR_ELEMNAME = 13
XML_RELAXNG_ERR_ATTRNAME = 14
XML_RELAXNG_ERR_ELEMNONS = 15
XML_RELAXNG_ERR_ATTRNONS = 16
XML_RELAXNG_ERR_ELEMWRONGNS = 17
XML_RELAXNG_ERR_ATTRWRONGNS = 18
XML_RELAXNG_ERR_ELEMEXTRANS = 19
XML_RELAXNG_ERR_ATTREXTRANS = 20
XML_RELAXNG_ERR_ELEMNOTEMPTY = 21
XML_RELAXNG_ERR_NOELEM = 22
XML_RELAXNG_ERR_NOTELEM = 23
XML_RELAXNG_ERR_ATTRVALID = 24
XML_RELAXNG_ERR_CONTENTVALID = 25
XML_RELAXNG_ERR_EXTRACONTENT = 26
XML_RELAXNG_ERR_INVALIDATTR = 27
XML_RELAXNG_ERR_DATAELEM = 28
XML_RELAXNG_ERR_VALELEM = 29
XML_RELAXNG_ERR_LISTELEM = 30
XML_RELAXNG_ERR_DATATYPE = 31
XML_RELAXNG_ERR_VALUE = 32
XML_RELAXNG_ERR_LIST = 33
XML_RELAXNG_ERR_NOGRAMMAR = 34
XML_RELAXNG_ERR_EXTRADATA = 35
XML_RELAXNG_ERR_LACKDATA = 36
XML_RELAXNG_ERR_INTERNAL = 37
XML_RELAXNG_ERR_ELEMWRONG = 38
XML_RELAXNG_ERR_TEXTWRONG = 39
# xmlCatalogAllow
XML_CATA_ALLOW_NONE = 0
XML_CATA_ALLOW_GLOBAL = 1
XML_CATA_ALLOW_DOCUMENT = 2
XML_CATA_ALLOW_ALL = 3
# xmlAttributeType
XML_ATTRIBUTE_CDATA = 1
XML_ATTRIBUTE_ID = 2
XML_ATTRIBUTE_IDREF = 3
XML_ATTRIBUTE_IDREFS = 4
XML_ATTRIBUTE_ENTITY = 5
XML_ATTRIBUTE_ENTITIES = 6
XML_ATTRIBUTE_NMTOKEN = 7
XML_ATTRIBUTE_NMTOKENS = 8
XML_ATTRIBUTE_ENUMERATION = 9
XML_ATTRIBUTE_NOTATION = 10
# xmlSchematronValidOptions
XML_SCHEMATRON_OUT_QUIET = 1
XML_SCHEMATRON_OUT_TEXT = 2
XML_SCHEMATRON_OUT_XML = 4
XML_SCHEMATRON_OUT_ERROR = 8
XML_SCHEMATRON_OUT_FILE = 256
XML_SCHEMATRON_OUT_BUFFER = 512
XML_SCHEMATRON_OUT_IO = 1024
# xmlSchemaContentType
XML_SCHEMA_CONTENT_UNKNOWN = 0
XML_SCHEMA_CONTENT_EMPTY = 1
XML_SCHEMA_CONTENT_ELEMENTS = 2
XML_SCHEMA_CONTENT_MIXED = 3
XML_SCHEMA_CONTENT_SIMPLE = 4
XML_SCHEMA_CONTENT_MIXED_OR_ELEMENTS = 5
XML_SCHEMA_CONTENT_BASIC = 6
XML_SCHEMA_CONTENT_ANY = 7
# xmlSchemaTypeType
XML_SCHEMA_TYPE_BASIC = 1
XML_SCHEMA_TYPE_ANY = 2
XML_SCHEMA_TYPE_FACET = 3
XML_SCHEMA_TYPE_SIMPLE = 4
XML_SCHEMA_TYPE_COMPLEX = 5
XML_SCHEMA_TYPE_SEQUENCE = 6
XML_SCHEMA_TYPE_CHOICE = 7
XML_SCHEMA_TYPE_ALL = 8
XML_SCHEMA_TYPE_SIMPLE_CONTENT = 9
XML_SCHEMA_TYPE_COMPLEX_CONTENT = 10
XML_SCHEMA_TYPE_UR = 11
XML_SCHEMA_TYPE_RESTRICTION = 12
XML_SCHEMA_TYPE_EXTENSION = 13
XML_SCHEMA_TYPE_ELEMENT = 14
XML_SCHEMA_TYPE_ATTRIBUTE = 15
XML_SCHEMA_TYPE_ATTRIBUTEGROUP = 16
XML_SCHEMA_TYPE_GROUP = 17
XML_SCHEMA_TYPE_NOTATION = 18
XML_SCHEMA_TYPE_LIST = 19
XML_SCHEMA_TYPE_UNION = 20
XML_SCHEMA_TYPE_ANY_ATTRIBUTE = 21
XML_SCHEMA_TYPE_IDC_UNIQUE = 22
XML_SCHEMA_TYPE_IDC_KEY = 23
XML_SCHEMA_TYPE_IDC_KEYREF = 24
XML_SCHEMA_TYPE_PARTICLE = 25
XML_SCHEMA_TYPE_ATTRIBUTE_USE = 26
XML_SCHEMA_FACET_MININCLUSIVE = 1000
XML_SCHEMA_FACET_MINEXCLUSIVE = 1001
XML_SCHEMA_FACET_MAXINCLUSIVE = 1002
XML_SCHEMA_FACET_MAXEXCLUSIVE = 1003
XML_SCHEMA_FACET_TOTALDIGITS = 1004
XML_SCHEMA_FACET_FRACTIONDIGITS = 1005
XML_SCHEMA_FACET_PATTERN = 1006
XML_SCHEMA_FACET_ENUMERATION = 1007
XML_SCHEMA_FACET_WHITESPACE = 1008
XML_SCHEMA_FACET_LENGTH = 1009
XML_SCHEMA_FACET_MAXLENGTH = 1010
XML_SCHEMA_FACET_MINLENGTH = 1011
XML_SCHEMA_EXTRA_QNAMEREF = 2000
XML_SCHEMA_EXTRA_ATTR_USE_PROHIB = 2001
# xmlModuleOption
XML_MODULE_LAZY = 1
XML_MODULE_LOCAL = 2
# xmlParserMode
XML_PARSE_UNKNOWN = 0
XML_PARSE_DOM = 1
XML_PARSE_SAX = 2
XML_PARSE_PUSH_DOM = 3
XML_PARSE_PUSH_SAX = 4
XML_PARSE_READER = 5
# xmlC14NMode
XML_C14N_1_0 = 0
XML_C14N_EXCLUSIVE_1_0 = 1
XML_C14N_1_1 = 2
# xmlParserOption
XML_PARSE_RECOVER = 1
XML_PARSE_NOENT = 2
XML_PARSE_DTDLOAD = 4
XML_PARSE_DTDATTR = 8
XML_PARSE_DTDVALID = 16
XML_PARSE_NOERROR = 32
XML_PARSE_NOWARNING = 64
XML_PARSE_PEDANTIC = 128
XML_PARSE_NOBLANKS = 256
XML_PARSE_SAX1 = 512
XML_PARSE_XINCLUDE = 1024
XML_PARSE_NONET = 2048
XML_PARSE_NODICT = 4096
XML_PARSE_NSCLEAN = 8192
XML_PARSE_NOCDATA = 16384
XML_PARSE_NOXINCNODE = 32768
XML_PARSE_COMPACT = 65536
XML_PARSE_OLD10 = 131072
XML_PARSE_NOBASEFIX = 262144
XML_PARSE_HUGE = 524288
XML_PARSE_OLDSAX = 1048576
XML_PARSE_IGNORE_ENC = 2097152
XML_PARSE_BIG_LINES = 4194304
# xmlElementTypeVal
XML_ELEMENT_TYPE_UNDEFINED = 0
XML_ELEMENT_TYPE_EMPTY = 1
XML_ELEMENT_TYPE_ANY = 2
XML_ELEMENT_TYPE_MIXED = 3
XML_ELEMENT_TYPE_ELEMENT = 4
# xmlDocProperties
XML_DOC_WELLFORMED = 1
XML_DOC_NSVALID = 2
XML_DOC_OLD10 = 4
XML_DOC_DTDVALID = 8
XML_DOC_XINCLUDE = 16
XML_DOC_USERBUILT = 32
XML_DOC_INTERNAL = 64
XML_DOC_HTML = 128
# xlinkType
XLINK_TYPE_NONE = 0
XLINK_TYPE_SIMPLE = 1
XLINK_TYPE_EXTENDED = 2
XLINK_TYPE_EXTENDED_SET = 3
# xmlXPathObjectType
XPATH_UNDEFINED = 0
XPATH_NODESET = 1
XPATH_BOOLEAN = 2
XPATH_NUMBER = 3
XPATH_STRING = 4
XPATH_POINT = 5
XPATH_RANGE = 6
XPATH_LOCATIONSET = 7
XPATH_USERS = 8
XPATH_XSLT_TREE = 9
# xmlSchemaValidError
XML_SCHEMAS_ERR_OK = 0
XML_SCHEMAS_ERR_NOROOT = 1
XML_SCHEMAS_ERR_UNDECLAREDELEM = 2
XML_SCHEMAS_ERR_NOTTOPLEVEL = 3
XML_SCHEMAS_ERR_MISSING = 4
XML_SCHEMAS_ERR_WRONGELEM = 5
XML_SCHEMAS_ERR_NOTYPE = 6
XML_SCHEMAS_ERR_NOROLLBACK = 7
XML_SCHEMAS_ERR_ISABSTRACT = 8
XML_SCHEMAS_ERR_NOTEMPTY = 9
XML_SCHEMAS_ERR_ELEMCONT = 10
XML_SCHEMAS_ERR_HAVEDEFAULT = 11
XML_SCHEMAS_ERR_NOTNILLABLE = 12
XML_SCHEMAS_ERR_EXTRACONTENT = 13
XML_SCHEMAS_ERR_INVALIDATTR = 14
XML_SCHEMAS_ERR_INVALIDELEM = 15
XML_SCHEMAS_ERR_NOTDETERMINIST = 16
XML_SCHEMAS_ERR_CONSTRUCT = 17
XML_SCHEMAS_ERR_INTERNAL = 18
XML_SCHEMAS_ERR_NOTSIMPLE = 19
XML_SCHEMAS_ERR_ATTRUNKNOWN = 20
XML_SCHEMAS_ERR_ATTRINVALID = 21
XML_SCHEMAS_ERR_VALUE = 22
XML_SCHEMAS_ERR_FACET = 23
XML_SCHEMAS_ERR_ = 24
XML_SCHEMAS_ERR_XXX = 25
| mit |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/django/contrib/gis/maps/google/zoom.py | 527 | 6676 | from __future__ import unicode_literals
from math import atan, exp, log, pi, sin
from django.contrib.gis.geos import GEOSGeometry, LinearRing, Point, Polygon
from django.contrib.gis.maps.google.gmap import GoogleMapException
from django.utils.six.moves import range
# Conversion factors between degrees and radians, and vice-versa.
DTOR = pi / 180.  # degrees -> radians multiplier
RTOD = 180. / pi  # radians -> degrees multiplier
class GoogleZoom(object):
    """
    Utility for computations tied to the zoom levels used by Google Maps.

    On construction it precomputes, per zoom level, the degrees-per-pixel and
    radians-per-pixel scale factors of the projection Google Maps uses, and it
    offers conversions between lon/lat coordinates and pixel coordinates plus
    helpers for sizing a map region around a geometry.

    Inspired by the OpenStreetMap Mapnik tile generation routine
    `generate_tiles.py`, and the article "How Big Is the World" (Hack #16) in
    "Google Maps Hacks" by Rich Gibson and Schuyler Erle.

    `generate_tiles.py` may be found at:
    http://trac.openstreetmap.org/browser/applications/rendering/mapnik/generate_tiles.py

    "Google Maps Hacks" may be found at http://safari.oreilly.com/0596101619
    """

    def __init__(self, num_zoom=19, tilesize=256):
        "Initializes the Google Zoom object."
        # Google's tiles are square; 256x256 is the service default.
        self._tilesize = tilesize
        self._nzoom = num_zoom
        # The world, in pixels, doubles in size with each zoom level.
        world_px = [tilesize * 2 ** level for level in range(num_zoom)]
        # Per-level scale factors, indexed by zoom level:
        self._degpp = [px / 360. for px in world_px]      # degrees per pixel
        self._radpp = [px / (2 * pi) for px in world_px]  # radians per pixel
        self._npix = [px / 2 for px in world_px]          # pixels to tile center

    def __len__(self):
        "Returns the number of zoom levels."
        return self._nzoom

    def get_lon_lat(self, lonlat):
        "Unpacks longitude, latitude from GEOS Points and 2-tuples."
        lon, lat = lonlat.coords if isinstance(lonlat, Point) else lonlat
        return lon, lat

    def lonlat_to_pixel(self, lonlat, zoom):
        "Converts a longitude, latitude coordinate pair for the given zoom level."
        lon, lat = self.get_lon_lat(lonlat)
        center_px = self._npix[zoom]
        # The x coordinate is linear in longitude.
        px_x = round(center_px + (lon * self._degpp[zoom]))
        # Clamp sin(lat) away from +/-1: at exactly 1 or -1 the logarithm
        # below would be log(0) (undefined) or divide by zero.
        fac = min(max(sin(DTOR * lat), -0.9999), 0.9999)
        # The y coordinate uses the (Mercator-style) log transform of sin(lat).
        px_y = round(center_px + (0.5 * log((1 + fac) / (1 - fac)) * (-1.0 * self._radpp[zoom])))
        return (px_x, px_y)

    def pixel_to_lonlat(self, px, zoom):
        "Converts a pixel to a longitude, latitude pair at the given zoom level."
        if len(px) != 2:
            raise TypeError('Pixel should be a sequence of two elements.')
        center_px = self._npix[zoom]
        # Longitude is linear in the x pixel offset from center.
        lon = (px[0] - center_px) / self._degpp[zoom]
        # Latitude inverts the log transform applied in lonlat_to_pixel.
        lat = RTOD * (2 * atan(exp((px[1] - center_px) / (-1.0 * self._radpp[zoom]))) - 0.5 * pi)
        return (lon, lat)

    def tile(self, lonlat, zoom):
        """
        Returns a Polygon corresponding to the region represented by a fictional
        Google Tile for the given longitude/latitude pair and zoom level. This
        tile is used to determine the size of a tile at the given point.
        """
        # The given lonlat is the tile center; extend half a tile each way.
        half = self._tilesize / 2
        center_x, center_y = self.lonlat_to_pixel(lonlat, zoom)
        # Lower-left and upper-right corners of the tile's bounding box.
        ll = self.pixel_to_lonlat((center_x - half, center_y - half), zoom)
        ur = self.pixel_to_lonlat((center_x + half, center_y + half), zoom)
        return Polygon(LinearRing(ll, (ll[0], ur[1]), ur, (ur[0], ll[1]), ll), srid=4326)

    def get_zoom(self, geom):
        "Returns the optimal Zoom level for the given geometry."
        # Only WGS84 (SRID 4326) geometries make sense here.
        if not isinstance(geom, GEOSGeometry) or geom.srid != 4326:
            raise TypeError('get_zoom() expects a GEOS Geometry with an SRID of 4326.')
        # Measure the geometry's envelope and find its center point.
        env = geom.envelope
        env_w, env_h = self.get_width_height(env.extent)
        center = env.centroid
        for level in range(self._nzoom):
            tile_w, tile_h = self.get_width_height(self.tile(center, level).extent)
            # Once the envelope spans more than one tile, the previous level
            # was an approximately good fit.
            if env_w > tile_w or env_h > tile_h:
                if level == 0:
                    raise GoogleMapException('Geometry width and height should not exceed that of the Earth.')
                return level - 1
        # The geometry fits even at the deepest zoom level.
        return self._nzoom - 1

    def get_width_height(self, extent):
        """
        Returns the width and height for the given extent.
        """
        # Corner points of the extent: lower-left, upper-left, upper-right.
        lower_left = Point(extent[:2])
        upper_left = Point(extent[0], extent[3])
        upper_right = Point(extent[2:])
        # Width along the top edge, height along the left edge.
        return upper_left.distance(upper_right), lower_left.distance(upper_left)
| mit |
KohlsTechnology/ansible | lib/ansible/modules/network/f5/bigip_facts.py | 13 | 63421 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# Copyright (c) 2013 Matt Hite <mhite@hotmail.com>
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by ansible-doc and the plugin loader: this module
# is community-supported and still in "preview" status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_facts
short_description: Collect facts from F5 BIG-IP devices
description:
- Collect facts from F5 BIG-IP devices via iControl SOAP API
version_added: "1.6"
author:
- Matt Hite (@mhite)
- Tim Rupp (@caphrim007)
notes:
- Requires BIG-IP software version >= 11.4
- F5 developed module 'bigsuds' required (see http://devcentral.f5.com)
- Best run as a local_action in your playbook
- Tested with manager and above account privilege level
- C(provision) facts were added in 2.2
requirements:
- bigsuds
options:
session:
description:
- BIG-IP session support; may be useful to avoid concurrency
issues in certain circumstances.
type: bool
default: 'yes'
include:
description:
- Fact category or list of categories to collect
required: true
choices:
- address_class
- certificate
- client_ssl_profile
- device
- device_group
- interface
- key
- node
- pool
- provision
- rule
- self_ip
- software
- system_info
- traffic_group
- trunk
- virtual_address
- virtual_server
- vlan
filter:
description:
- Shell-style glob matching string used to filter fact keys. Not
applicable for software, provision, and system_info fact categories.
extends_documentation_fragment: f5
'''
EXAMPLES = r'''
- name: Collect BIG-IP facts
bigip_facts:
server: lb.mydomain.com
user: admin
password: secret
include: interface,vlan
delegate_to: localhost
'''
import fnmatch
import re
import traceback
try:
from suds import MethodNotFound, WebFault
except ImportError:
pass # Handle via f5_utils.bigsuds_found
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.f5_utils import bigip_api, bigsuds_found
from ansible.module_utils.six.moves import map, zip
try:
from library.module_utils.network.f5.common import f5_argument_spec
except ImportError:
from ansible.module_utils.network.f5.common import f5_argument_spec
class F5(object):
    """F5 iControl class.

    Thin wrapper around a bigsuds iControl connection that also exposes a
    few System.Session conveniences (session ids, recursive queries and
    active-folder selection).

    Attributes:
        api: iControl API instance.
    """

    def __init__(self, host, user, password, session=False, validate_certs=True, port=443):
        self.api = bigip_api(host, user, password, validate_certs, port)
        if session:
            self.start_session()

    def start_session(self):
        # Re-bind the proxy to a session-scoped variant of itself so that
        # subsequent calls share one server-side session.
        self.api = self.api.with_session_id()

    def get_api(self):
        return self.api

    def _session(self):
        # Shortcut to the System.Session iControl namespace.
        return self.api.System.Session

    def set_recursive_query_state(self, state):
        self._session().set_recursive_query_state(state)

    def get_recursive_query_state(self):
        return self._session().get_recursive_query_state()

    def enable_recursive_query_state(self):
        self.set_recursive_query_state('STATE_ENABLED')

    def disable_recursive_query_state(self):
        self.set_recursive_query_state('STATE_DISABLED')

    def set_active_folder(self, folder):
        self._session().set_active_folder(folder=folder)

    def get_active_folder(self):
        return self._session().get_active_folder()
class Interfaces(object):
    """Interfaces fact collector.

    Gathers facts about BIG-IP network interfaces via Networking.Interfaces.

    Attributes:
        api: iControl API instance.
        interfaces: A list of BIG-IP interface names, optionally narrowed
            by the *regex* constructor argument.
    """

    def __init__(self, api, regex=None):
        self.api = api
        self.interfaces = api.Networking.Interfaces.get_list()
        if regex:
            pattern = re.compile(regex)
            self.interfaces = filter(pattern.search, self.interfaces)

    def _query(self, method):
        # Dispatch the named iControl getter over the collected interface names.
        return getattr(self.api.Networking.Interfaces, method)(self.interfaces)

    def get_list(self):
        return self.interfaces

    def get_active_media(self):
        return self._query('get_active_media')

    def get_actual_flow_control(self):
        return self._query('get_actual_flow_control')

    def get_bundle_state(self):
        return self._query('get_bundle_state')

    def get_description(self):
        return self._query('get_description')

    def get_dual_media_state(self):
        return self._query('get_dual_media_state')

    def get_enabled_state(self):
        return self._query('get_enabled_state')

    def get_if_index(self):
        return self._query('get_if_index')

    def get_learning_mode(self):
        return self._query('get_learning_mode')

    def get_lldp_admin_status(self):
        return self._query('get_lldp_admin_status')

    def get_lldp_tlvmap(self):
        return self._query('get_lldp_tlvmap')

    def get_mac_address(self):
        return self._query('get_mac_address')

    def get_media(self):
        return self._query('get_media')

    def get_media_option(self):
        return self._query('get_media_option')

    def get_media_option_sfp(self):
        return self._query('get_media_option_sfp')

    def get_media_sfp(self):
        return self._query('get_media_sfp')

    def get_media_speed(self):
        return self._query('get_media_speed')

    def get_media_status(self):
        return self._query('get_media_status')

    def get_mtu(self):
        return self._query('get_mtu')

    def get_phy_master_slave_mode(self):
        return self._query('get_phy_master_slave_mode')

    def get_prefer_sfp_state(self):
        return self._query('get_prefer_sfp_state')

    def get_flow_control(self):
        # The iControl getter is named differently from this fact method.
        return self._query('get_requested_flow_control')

    def get_sflow_poll_interval(self):
        return self._query('get_sflow_poll_interval')

    def get_sflow_poll_interval_global(self):
        return self._query('get_sflow_poll_interval_global')

    def get_sfp_media_state(self):
        return self._query('get_sfp_media_state')

    def get_stp_active_edge_port_state(self):
        return self._query('get_stp_active_edge_port_state')

    def get_stp_enabled_state(self):
        return self._query('get_stp_enabled_state')

    def get_stp_link_type(self):
        return self._query('get_stp_link_type')

    def get_stp_protocol_detection_reset_state(self):
        return self._query('get_stp_protocol_detection_reset_state')
class SelfIPs(object):
    """Self IPs fact collector.

    Gathers facts about BIG-IP self IPs via Networking.SelfIPV2.

    Attributes:
        api: iControl API instance.
        self_ips: List of self IP names, optionally narrowed by *regex*.
    """

    def __init__(self, api, regex=None):
        self.api = api
        self.self_ips = api.Networking.SelfIPV2.get_list()
        if regex:
            pattern = re.compile(regex)
            self.self_ips = filter(pattern.search, self.self_ips)

    def _query(self, method):
        # Dispatch the named iControl getter over the collected self IP names.
        return getattr(self.api.Networking.SelfIPV2, method)(self.self_ips)

    def get_list(self):
        return self.self_ips

    def get_address(self):
        return self._query('get_address')

    def get_allow_access_list(self):
        return self._query('get_allow_access_list')

    def get_description(self):
        return self._query('get_description')

    def get_enforced_firewall_policy(self):
        return self._query('get_enforced_firewall_policy')

    def get_floating_state(self):
        return self._query('get_floating_state')

    def get_fw_rule(self):
        return self._query('get_fw_rule')

    def get_netmask(self):
        return self._query('get_netmask')

    def get_staged_firewall_policy(self):
        return self._query('get_staged_firewall_policy')

    def get_traffic_group(self):
        return self._query('get_traffic_group')

    def get_vlan(self):
        return self._query('get_vlan')

    def get_is_traffic_group_inherited(self):
        # The iControl method lacks the leading "get_" prefix.
        return self._query('is_traffic_group_inherited')
class Trunks(object):
    """Trunks fact collector.

    Gathers facts about BIG-IP trunks via Networking.Trunk.

    Attributes:
        api: iControl API instance.
        trunks: List of trunk names, optionally narrowed by *regex*.
    """

    def __init__(self, api, regex=None):
        self.api = api
        self.trunks = api.Networking.Trunk.get_list()
        if regex:
            pattern = re.compile(regex)
            self.trunks = filter(pattern.search, self.trunks)

    def _query(self, method):
        # Dispatch the named iControl getter over the collected trunk names.
        return getattr(self.api.Networking.Trunk, method)(self.trunks)

    def get_list(self):
        return self.trunks

    def get_active_lacp_state(self):
        return self._query('get_active_lacp_state')

    def get_configured_member_count(self):
        return self._query('get_configured_member_count')

    def get_description(self):
        return self._query('get_description')

    def get_distribution_hash_option(self):
        return self._query('get_distribution_hash_option')

    def get_interface(self):
        return self._query('get_interface')

    def get_lacp_enabled_state(self):
        return self._query('get_lacp_enabled_state')

    def get_lacp_timeout_option(self):
        return self._query('get_lacp_timeout_option')

    def get_link_selection_policy(self):
        return self._query('get_link_selection_policy')

    def get_media_speed(self):
        return self._query('get_media_speed')

    def get_media_status(self):
        return self._query('get_media_status')

    def get_operational_member_count(self):
        return self._query('get_operational_member_count')

    def get_stp_enabled_state(self):
        return self._query('get_stp_enabled_state')

    def get_stp_protocol_detection_reset_state(self):
        return self._query('get_stp_protocol_detection_reset_state')
class Vlans(object):
    """Vlans fact collector.

    Gathers facts about BIG-IP VLANs via Networking.VLAN.

    Attributes:
        api: iControl API instance.
        vlans: List of VLAN names, optionally narrowed by *regex*.
    """

    def __init__(self, api, regex=None):
        self.api = api
        self.vlans = api.Networking.VLAN.get_list()
        if regex:
            pattern = re.compile(regex)
            self.vlans = filter(pattern.search, self.vlans)

    def _query(self, method):
        # Dispatch the named iControl getter over the collected VLAN names.
        return getattr(self.api.Networking.VLAN, method)(self.vlans)

    def get_list(self):
        return self.vlans

    def get_auto_lasthop(self):
        return self._query('get_auto_lasthop')

    def get_cmp_hash_algorithm(self):
        return self._query('get_cmp_hash_algorithm')

    def get_description(self):
        return self._query('get_description')

    def get_dynamic_forwarding(self):
        return self._query('get_dynamic_forwarding')

    def get_failsafe_action(self):
        return self._query('get_failsafe_action')

    def get_failsafe_state(self):
        return self._query('get_failsafe_state')

    def get_failsafe_timeout(self):
        return self._query('get_failsafe_timeout')

    def get_if_index(self):
        return self._query('get_if_index')

    def get_learning_mode(self):
        return self._query('get_learning_mode')

    def get_mac_masquerade_address(self):
        return self._query('get_mac_masquerade_address')

    def get_member(self):
        return self._query('get_member')

    def get_mtu(self):
        return self._query('get_mtu')

    def get_sflow_poll_interval(self):
        return self._query('get_sflow_poll_interval')

    def get_sflow_poll_interval_global(self):
        return self._query('get_sflow_poll_interval_global')

    def get_sflow_sampling_rate(self):
        return self._query('get_sflow_sampling_rate')

    def get_sflow_sampling_rate_global(self):
        return self._query('get_sflow_sampling_rate_global')

    def get_source_check_state(self):
        return self._query('get_source_check_state')

    def get_true_mac_address(self):
        return self._query('get_true_mac_address')

    def get_vlan_id(self):
        return self._query('get_vlan_id')
class Software(object):
    """Software fact collector.

    Gathers software/hotfix installation status via
    System.SoftwareManagement.

    Attributes:
        api: iControl API instance.
    """

    def __init__(self, api):
        self.api = api

    def get_all_software_status(self):
        # Returns one status struct per installed boot location.
        mgmt = self.api.System.SoftwareManagement
        return mgmt.get_all_software_status()
class VirtualServers(object):
    """Virtual servers fact collector.

    Gathers facts about BIG-IP virtual servers via LocalLB.VirtualServer.

    Attributes:
        api: iControl API instance.
        virtual_servers: List of virtual server names, optionally narrowed
            by *regex*.
    """

    def __init__(self, api, regex=None):
        self.api = api
        self.virtual_servers = api.LocalLB.VirtualServer.get_list()
        if regex:
            pattern = re.compile(regex)
            self.virtual_servers = filter(pattern.search, self.virtual_servers)

    def _query(self, method):
        # Dispatch the named iControl getter over the collected names.
        return getattr(self.api.LocalLB.VirtualServer, method)(self.virtual_servers)

    def get_list(self):
        return self.virtual_servers

    def get_name(self):
        # Strip the folder prefix (e.g. "/Common/") from each full path.
        return [full.rpartition('/')[2] for full in self.virtual_servers]

    def get_actual_hardware_acceleration(self):
        return self._query('get_actual_hardware_acceleration')

    def get_authentication_profile(self):
        return self._query('get_authentication_profile')

    def get_auto_lasthop(self):
        return self._query('get_auto_lasthop')

    def get_bw_controller_policy(self):
        return self._query('get_bw_controller_policy')

    def get_clone_pool(self):
        return self._query('get_clone_pool')

    def get_cmp_enable_mode(self):
        return self._query('get_cmp_enable_mode')

    def get_connection_limit(self):
        return self._query('get_connection_limit')

    def get_connection_mirror_state(self):
        return self._query('get_connection_mirror_state')

    def get_default_pool_name(self):
        return self._query('get_default_pool_name')

    def get_description(self):
        return self._query('get_description')

    def get_destination(self):
        # v2 variant returns the destination as address/port structs.
        return self._query('get_destination_v2')

    def get_enabled_state(self):
        return self._query('get_enabled_state')

    def get_enforced_firewall_policy(self):
        return self._query('get_enforced_firewall_policy')

    def get_fallback_persistence_profile(self):
        return self._query('get_fallback_persistence_profile')

    def get_fw_rule(self):
        return self._query('get_fw_rule')

    def get_gtm_score(self):
        return self._query('get_gtm_score')

    def get_last_hop_pool(self):
        return self._query('get_last_hop_pool')

    def get_nat64_state(self):
        return self._query('get_nat64_state')

    def get_object_status(self):
        return self._query('get_object_status')

    def get_persistence_profile(self):
        return self._query('get_persistence_profile')

    def get_profile(self):
        return self._query('get_profile')

    def get_protocol(self):
        return self._query('get_protocol')

    def get_rate_class(self):
        return self._query('get_rate_class')

    def get_rate_limit(self):
        return self._query('get_rate_limit')

    def get_rate_limit_destination_mask(self):
        return self._query('get_rate_limit_destination_mask')

    def get_rate_limit_mode(self):
        return self._query('get_rate_limit_mode')

    def get_rate_limit_source_mask(self):
        return self._query('get_rate_limit_source_mask')

    def get_related_rule(self):
        return self._query('get_related_rule')

    def get_rule(self):
        return self._query('get_rule')

    def get_security_log_profile(self):
        return self._query('get_security_log_profile')

    def get_snat_pool(self):
        return self._query('get_snat_pool')

    def get_snat_type(self):
        return self._query('get_snat_type')

    def get_source_address(self):
        return self._query('get_source_address')

    def get_source_address_translation_lsn_pool(self):
        return self._query('get_source_address_translation_lsn_pool')

    def get_source_address_translation_snat_pool(self):
        return self._query('get_source_address_translation_snat_pool')

    def get_source_address_translation_type(self):
        return self._query('get_source_address_translation_type')

    def get_source_port_behavior(self):
        return self._query('get_source_port_behavior')

    def get_staged_firewall_policy(self):
        return self._query('get_staged_firewall_policy')

    def get_translate_address_state(self):
        return self._query('get_translate_address_state')

    def get_translate_port_state(self):
        return self._query('get_translate_port_state')

    def get_type(self):
        return self._query('get_type')

    def get_vlan(self):
        return self._query('get_vlan')

    def get_wildmask(self):
        return self._query('get_wildmask')
class Pools(object):
    """Pools fact collector.

    Gathers facts about BIG-IP load-balancing pools via LocalLB.Pool.

    Attributes:
        api: iControl API instance.
        pool_names: List of pool names, optionally narrowed by *regex*.
    """

    # iControl LB_METHOD_* constant -> tmsh-style method name.
    _LB_METHOD_MAP = {
        'LB_METHOD_DYNAMIC_RATIO_MEMBER': 'dynamic-ratio-member',
        'LB_METHOD_DYNAMIC_RATIO': 'dynamic-ratio-node',
        'LB_METHOD_FASTEST_APP_RESPONSE': 'fastest-app-response',
        'LB_METHOD_FASTEST_NODE_ADDRESS': 'fastest-node',
        'LB_METHOD_LEAST_CONNECTION_MEMBER': 'least-connections-member',
        'LB_METHOD_LEAST_CONNECTION_NODE_ADDRESS': 'least-connections-node',
        'LB_METHOD_LEAST_SESSIONS': 'least-sessions',
        'LB_METHOD_OBSERVED_MEMBER': 'observed-member',
        'LB_METHOD_OBSERVED_NODE_ADDRESS': 'observed-node',
        'LB_METHOD_PREDICTIVE_MEMBER': 'predictive-member',
        'LB_METHOD_PREDICTIVE_NODE_ADDRESS': 'predictive-node',
        'LB_METHOD_RATIO_LEAST_CONNECTION_MEMBER': 'ratio-least-connections-member',
        'LB_METHOD_RATIO_LEAST_CONNECTION_NODE_ADDRESS': 'ratio-least-connections-node',
        'LB_METHOD_RATIO_MEMBER': 'ratio-member',
        'LB_METHOD_RATIO_NODE_ADDRESS': 'ratio-node',
        'LB_METHOD_RATIO_SESSION': 'ratio-session',
        'LB_METHOD_ROUND_ROBIN': 'round-robin',
        'LB_METHOD_WEIGHTED_LEAST_CONNECTION_MEMBER': 'weighted-least-connections-member',
        'LB_METHOD_WEIGHTED_LEAST_CONNECTION_NODE_ADDRESS': 'weighted-least-connections-node',
    }

    def __init__(self, api, regex=None):
        self.api = api
        self.pool_names = api.LocalLB.Pool.get_list()
        if regex:
            pattern = re.compile(regex)
            self.pool_names = filter(pattern.search, self.pool_names)

    def _query(self, method):
        # Dispatch the named iControl getter over the collected pool names.
        return getattr(self.api.LocalLB.Pool, method)(self.pool_names)

    def get_list(self):
        return self.pool_names

    def get_name(self):
        # Strip the folder prefix (e.g. "/Common/") from each full path.
        return [full.rpartition('/')[2] for full in self.pool_names]

    def get_action_on_service_down(self):
        return self._query('get_action_on_service_down')

    def get_active_member_count(self):
        return self._query('get_active_member_count')

    def get_aggregate_dynamic_ratio(self):
        return self._query('get_aggregate_dynamic_ratio')

    def get_allow_nat_state(self):
        return self._query('get_allow_nat_state')

    def get_allow_snat_state(self):
        return self._query('get_allow_snat_state')

    def get_client_ip_tos(self):
        return self._query('get_client_ip_tos')

    def get_client_link_qos(self):
        return self._query('get_client_link_qos')

    def get_description(self):
        return self._query('get_description')

    def get_gateway_failsafe_device(self):
        return self._query('get_gateway_failsafe_device')

    def get_ignore_persisted_weight_state(self):
        return self._query('get_ignore_persisted_weight_state')

    def get_lb_method(self):
        # Translate iControl constants into tmsh-style names; constants
        # without a known mapping pass through unchanged.
        methods = self._query('get_lb_method')
        return [self._LB_METHOD_MAP.get(m, m) for m in methods]

    def get_member(self):
        # v2 variant returns members as address/port structs.
        return self._query('get_member_v2')

    def get_minimum_active_member(self):
        return self._query('get_minimum_active_member')

    def get_minimum_up_member(self):
        return self._query('get_minimum_up_member')

    def get_minimum_up_member_action(self):
        return self._query('get_minimum_up_member_action')

    def get_minimum_up_member_enabled_state(self):
        return self._query('get_minimum_up_member_enabled_state')

    def get_monitor_association(self):
        return self._query('get_monitor_association')

    def get_monitor_instance(self):
        return self._query('get_monitor_instance')

    def get_object_status(self):
        return self._query('get_object_status')

    def get_profile(self):
        return self._query('get_profile')

    def get_queue_depth_limit(self):
        return self._query('get_queue_depth_limit')

    def get_queue_on_connection_limit_state(self):
        return self._query('get_queue_on_connection_limit_state')

    def get_queue_time_limit(self):
        return self._query('get_queue_time_limit')

    def get_reselect_tries(self):
        return self._query('get_reselect_tries')

    def get_server_ip_tos(self):
        return self._query('get_server_ip_tos')

    def get_server_link_qos(self):
        return self._query('get_server_link_qos')

    def get_simple_timeout(self):
        return self._query('get_simple_timeout')

    def get_slow_ramp_time(self):
        return self._query('get_slow_ramp_time')
class Devices(object):
    """Devices fact collector.

    Gathers facts about BIG-IP devices via Management.Device.

    Attributes:
        api: iControl API instance.
        devices: List of device names, optionally narrowed by *regex*.
    """

    def __init__(self, api, regex=None):
        self.api = api
        self.devices = api.Management.Device.get_list()
        if regex:
            pattern = re.compile(regex)
            self.devices = filter(pattern.search, self.devices)

    def _query(self, method):
        # Dispatch the named iControl getter over the collected device names.
        return getattr(self.api.Management.Device, method)(self.devices)

    def get_list(self):
        return self.devices

    def get_active_modules(self):
        return self._query('get_active_modules')

    def get_base_mac_address(self):
        return self._query('get_base_mac_address')

    def get_blade_addresses(self):
        return self._query('get_blade_addresses')

    def get_build(self):
        return self._query('get_build')

    def get_chassis_id(self):
        return self._query('get_chassis_id')

    def get_chassis_type(self):
        return self._query('get_chassis_type')

    def get_comment(self):
        return self._query('get_comment')

    def get_configsync_address(self):
        return self._query('get_configsync_address')

    def get_contact(self):
        return self._query('get_contact')

    def get_description(self):
        return self._query('get_description')

    def get_edition(self):
        return self._query('get_edition')

    def get_failover_state(self):
        return self._query('get_failover_state')

    def get_local_device(self):
        # Identifies this device itself; takes no device-name arguments.
        return self.api.Management.Device.get_local_device()

    def get_hostname(self):
        return self._query('get_hostname')

    def get_inactive_modules(self):
        return self._query('get_inactive_modules')

    def get_location(self):
        return self._query('get_location')

    def get_management_address(self):
        return self._query('get_management_address')

    def get_marketing_name(self):
        return self._query('get_marketing_name')

    def get_multicast_address(self):
        return self._query('get_multicast_address')

    def get_optional_modules(self):
        return self._query('get_optional_modules')

    def get_platform_id(self):
        return self._query('get_platform_id')

    def get_primary_mirror_address(self):
        return self._query('get_primary_mirror_address')

    def get_product(self):
        return self._query('get_product')

    def get_secondary_mirror_address(self):
        return self._query('get_secondary_mirror_address')

    def get_software_version(self):
        return self._query('get_software_version')

    def get_timelimited_modules(self):
        return self._query('get_timelimited_modules')

    def get_timezone(self):
        return self._query('get_timezone')

    def get_unicast_addresses(self):
        return self._query('get_unicast_addresses')
class DeviceGroups(object):
    """Device groups fact collector.

    Gathers facts about BIG-IP device groups via Management.DeviceGroup.

    Attributes:
        api: iControl API instance.
        device_groups: List of device group names, optionally narrowed by *regex*.
    """

    def __init__(self, api, regex=None):
        self.api = api
        self.device_groups = api.Management.DeviceGroup.get_list()
        if regex:
            pattern = re.compile(regex)
            self.device_groups = filter(pattern.search, self.device_groups)

    def _query(self, method):
        # Dispatch the named iControl getter over the collected group names.
        return getattr(self.api.Management.DeviceGroup, method)(self.device_groups)

    def get_list(self):
        return self.device_groups

    def get_all_preferred_active(self):
        return self._query('get_all_preferred_active')

    def get_autosync_enabled_state(self):
        return self._query('get_autosync_enabled_state')

    def get_description(self):
        return self._query('get_description')

    def get_device(self):
        return self._query('get_device')

    def get_full_load_on_sync_state(self):
        return self._query('get_full_load_on_sync_state')

    def get_incremental_config_sync_size_maximum(self):
        return self._query('get_incremental_config_sync_size_maximum')

    def get_network_failover_enabled_state(self):
        return self._query('get_network_failover_enabled_state')

    def get_sync_status(self):
        return self._query('get_sync_status')

    def get_type(self):
        return self._query('get_type')
class TrafficGroups(object):
    """Traffic groups fact collector.

    Gathers facts about BIG-IP traffic groups via Management.TrafficGroup.

    Attributes:
        api: iControl API instance.
        traffic_groups: List of traffic group names, optionally narrowed by *regex*.
    """

    def __init__(self, api, regex=None):
        self.api = api
        self.traffic_groups = api.Management.TrafficGroup.get_list()
        if regex:
            pattern = re.compile(regex)
            self.traffic_groups = filter(pattern.search, self.traffic_groups)

    def _query(self, method):
        # Dispatch the named iControl getter over the collected group names.
        return getattr(self.api.Management.TrafficGroup, method)(self.traffic_groups)

    def get_list(self):
        return self.traffic_groups

    def get_auto_failback_enabled_state(self):
        return self._query('get_auto_failback_enabled_state')

    def get_auto_failback_time(self):
        return self._query('get_auto_failback_time')

    def get_default_device(self):
        return self._query('get_default_device')

    def get_description(self):
        return self._query('get_description')

    def get_ha_load_factor(self):
        return self._query('get_ha_load_factor')

    def get_ha_order(self):
        return self._query('get_ha_order')

    def get_is_floating(self):
        return self._query('get_is_floating')

    def get_mac_masquerade_address(self):
        return self._query('get_mac_masquerade_address')

    def get_unit_id(self):
        return self._query('get_unit_id')
class Rules(object):
    """Rules fact collector.

    Gathers facts about BIG-IP iRules via LocalLB.Rule.

    Attributes:
        api: iControl API instance.
        rules: List of iRule names, optionally narrowed by *regex*.
    """

    def __init__(self, api, regex=None):
        self.api = api
        self.rules = api.LocalLB.Rule.get_list()
        if regex:
            re_filter = re.compile(regex)
            # Bug fix: the filtered result was previously assigned to
            # self.traffic_groups (copy/paste error), so the regex filter
            # silently never applied to iRule facts.  Materialized as a
            # list so the result can be iterated repeatedly on Python 3.
            self.rules = list(filter(re_filter.search, self.rules))

    def get_list(self):
        return self.rules

    def get_description(self):
        return self.api.LocalLB.Rule.get_description(rule_names=self.rules)

    def get_ignore_vertification(self):
        # NOTE: "vertification" is the actual (misspelled) iControl method name.
        return self.api.LocalLB.Rule.get_ignore_vertification(rule_names=self.rules)

    def get_verification_status(self):
        return self.api.LocalLB.Rule.get_verification_status_v2(rule_names=self.rules)

    def get_definition(self):
        # query_rule returns structs; keep only the TCL rule text of each.
        return [x['rule_definition'] for x in self.api.LocalLB.Rule.query_rule(rule_names=self.rules)]
class Nodes(object):
    """Nodes fact collector.

    Gathers facts about BIG-IP nodes via LocalLB.NodeAddressV2.

    Attributes:
        api: iControl API instance.
        nodes: List of node names, optionally narrowed by *regex*.
    """

    def __init__(self, api, regex=None):
        self.api = api
        self.nodes = api.LocalLB.NodeAddressV2.get_list()
        if regex:
            pattern = re.compile(regex)
            self.nodes = filter(pattern.search, self.nodes)

    def _query(self, method):
        # Dispatch the named iControl getter, passing the node names by
        # keyword as the original calls did.
        return getattr(self.api.LocalLB.NodeAddressV2, method)(nodes=self.nodes)

    def get_list(self):
        return self.nodes

    def get_address(self):
        return self._query('get_address')

    def get_name(self):
        # Strip the folder prefix (e.g. "/Common/") from each full path.
        return [full.rpartition('/')[2] for full in self.nodes]

    def get_connection_limit(self):
        return self._query('get_connection_limit')

    def get_description(self):
        return self._query('get_description')

    def get_dynamic_ratio(self):
        # v2 variant of the dynamic ratio getter.
        return self._query('get_dynamic_ratio_v2')

    def get_monitor_instance(self):
        return self._query('get_monitor_instance')

    def get_monitor_rule(self):
        return self._query('get_monitor_rule')

    def get_monitor_status(self):
        return self._query('get_monitor_status')

    def get_object_status(self):
        return self._query('get_object_status')

    def get_rate_limit(self):
        return self._query('get_rate_limit')

    def get_ratio(self):
        return self._query('get_ratio')

    def get_session_status(self):
        return self._query('get_session_status')
class VirtualAddresses(object):
    """Virtual addresses fact collector.

    Gathers facts about BIG-IP virtual addresses via LocalLB.VirtualAddressV2.

    Attributes:
        api: iControl API instance.
        virtual_addresses: List of virtual address names, optionally
            narrowed by *regex*.
    """

    def __init__(self, api, regex=None):
        self.api = api
        self.virtual_addresses = api.LocalLB.VirtualAddressV2.get_list()
        if regex:
            pattern = re.compile(regex)
            self.virtual_addresses = filter(pattern.search, self.virtual_addresses)

    def _query(self, method):
        # Dispatch the named iControl getter over the collected names.
        return getattr(self.api.LocalLB.VirtualAddressV2, method)(self.virtual_addresses)

    def get_list(self):
        return self.virtual_addresses

    def get_address(self):
        return self._query('get_address')

    def get_arp_state(self):
        return self._query('get_arp_state')

    def get_auto_delete_state(self):
        return self._query('get_auto_delete_state')

    def get_connection_limit(self):
        return self._query('get_connection_limit')

    def get_description(self):
        return self._query('get_description')

    def get_enabled_state(self):
        return self._query('get_enabled_state')

    def get_icmp_echo_state(self):
        return self._query('get_icmp_echo_state')

    def get_is_floating_state(self):
        return self._query('get_is_floating_state')

    def get_netmask(self):
        return self._query('get_netmask')

    def get_object_status(self):
        return self._query('get_object_status')

    def get_route_advertisement_state(self):
        return self._query('get_route_advertisement_state')

    def get_traffic_group(self):
        return self._query('get_traffic_group')
class AddressClasses(object):
    """Address group/class fact collector.

    Gathers facts about BIG-IP address data groups via LocalLB.Class.

    Attributes:
        api: iControl API instance.
        address_classes: List of address class names, optionally narrowed
            by *regex*.
    """

    def __init__(self, api, regex=None):
        self.api = api
        self.address_classes = api.LocalLB.Class.get_address_class_list()
        if regex:
            pattern = re.compile(regex)
            self.address_classes = filter(pattern.search, self.address_classes)

    def get_list(self):
        return self.address_classes

    def get_address_class(self):
        # Pair each class's member entries with the matching data values,
        # yielding one (member, value) zip per address class.
        keys = self.api.LocalLB.Class.get_address_class(self.address_classes)
        values = self.api.LocalLB.Class.get_address_class_member_data_value(keys)
        return [zip(k['members'], v) for k, v in zip(keys, values)]

    def get_description(self):
        return self.api.LocalLB.Class.get_description(self.address_classes)
class Certificates(object):
    """Certificates fact collector.

    Gathers facts about BIG-IP SSL certificates via Management.KeyCertificate.

    Attributes:
        api: iControl API instance.
        certificates: List of certificate identifiers.
        certificate_list: List of certificate information structures.
    """

    def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"):
        self.api = api
        self.certificate_list = api.Management.KeyCertificate.get_certificate_list(mode=mode)
        self.certificates = [x['certificate']['cert_info']['id'] for x in self.certificate_list]
        if regex:
            re_filter = re.compile(regex)
            # Bug fix: under Python 3 a bare filter() iterator would be
            # consumed by the membership tests below, corrupting the filter
            # result and leaving get_list() exhausted.  Materialize a list.
            self.certificates = [c for c in self.certificates if re_filter.search(c)]
            wanted = set(self.certificates)
            self.certificate_list = [x for x in self.certificate_list
                                     if x['certificate']['cert_info']['id'] in wanted]

    def get_list(self):
        return self.certificates

    def get_certificate_list(self):
        return self.certificate_list
class Keys(object):
    """Keys fact collector.

    Gathers facts about BIG-IP SSL keys via Management.KeyCertificate.

    Attributes:
        api: iControl API instance.
        keys: List of key identifiers.
        key_list: List of key information structures.
    """

    def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"):
        self.api = api
        self.key_list = api.Management.KeyCertificate.get_key_list(mode=mode)
        self.keys = [x['key_info']['id'] for x in self.key_list]
        if regex:
            re_filter = re.compile(regex)
            # Bug fix: under Python 3 a bare filter() iterator would be
            # consumed by the membership tests below, corrupting the filter
            # result and leaving get_list() exhausted.  Materialize a list.
            self.keys = [k for k in self.keys if re_filter.search(k)]
            wanted = set(self.keys)
            self.key_list = [x for x in self.key_list if x['key_info']['id'] in wanted]

    def get_list(self):
        return self.keys

    def get_key_list(self):
        return self.key_list
class ProfileClientSSL(object):
    """Client SSL profiles class.

    Wraps the F5 BIG-IP ``LocalLB.ProfileClientSSL`` endpoint.

    Attributes:
        api: iControl API instance.
        profiles: List of client SSL profile names (optionally filtered).
    """
    def __init__(self, api, regex=None):
        """Fetch the profile list, keeping only names matching *regex*."""
        self.api = api
        self.profiles = api.LocalLB.ProfileClientSSL.get_list()
        if regex:
            re_filter = re.compile(regex)
            # Materialize with list(): on Python 3, filter() is a one-shot
            # iterator, so every getter below after the first call would
            # otherwise pass an already-exhausted (empty) sequence to the API.
            self.profiles = list(filter(re_filter.search, self.profiles))
    # Each getter below delegates to the iControl call of the same name and
    # returns one entry per profile in self.profiles.
    def get_list(self):
        return self.profiles
    def get_alert_timeout(self):
        return self.api.LocalLB.ProfileClientSSL.get_alert_timeout(self.profiles)
    def get_allow_nonssl_state(self):
        return self.api.LocalLB.ProfileClientSSL.get_allow_nonssl_state(self.profiles)
    def get_authenticate_depth(self):
        return self.api.LocalLB.ProfileClientSSL.get_authenticate_depth(self.profiles)
    def get_authenticate_once_state(self):
        return self.api.LocalLB.ProfileClientSSL.get_authenticate_once_state(self.profiles)
    def get_ca_file(self):
        return self.api.LocalLB.ProfileClientSSL.get_ca_file_v2(self.profiles)
    def get_cache_size(self):
        return self.api.LocalLB.ProfileClientSSL.get_cache_size(self.profiles)
    def get_cache_timeout(self):
        return self.api.LocalLB.ProfileClientSSL.get_cache_timeout(self.profiles)
    def get_certificate_file(self):
        return self.api.LocalLB.ProfileClientSSL.get_certificate_file_v2(self.profiles)
    def get_chain_file(self):
        return self.api.LocalLB.ProfileClientSSL.get_chain_file_v2(self.profiles)
    def get_cipher_list(self):
        return self.api.LocalLB.ProfileClientSSL.get_cipher_list(self.profiles)
    def get_client_certificate_ca_file(self):
        return self.api.LocalLB.ProfileClientSSL.get_client_certificate_ca_file_v2(self.profiles)
    def get_crl_file(self):
        return self.api.LocalLB.ProfileClientSSL.get_crl_file_v2(self.profiles)
    def get_default_profile(self):
        return self.api.LocalLB.ProfileClientSSL.get_default_profile(self.profiles)
    def get_description(self):
        return self.api.LocalLB.ProfileClientSSL.get_description(self.profiles)
    def get_forward_proxy_ca_certificate_file(self):
        return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_certificate_file(self.profiles)
    def get_forward_proxy_ca_key_file(self):
        return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_key_file(self.profiles)
    def get_forward_proxy_ca_passphrase(self):
        return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_passphrase(self.profiles)
    def get_forward_proxy_certificate_extension_include(self):
        return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_extension_include(self.profiles)
    def get_forward_proxy_certificate_lifespan(self):
        return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_lifespan(self.profiles)
    def get_forward_proxy_enabled_state(self):
        return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_enabled_state(self.profiles)
    def get_forward_proxy_lookup_by_ipaddr_port_state(self):
        return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_lookup_by_ipaddr_port_state(self.profiles)
    def get_handshake_timeout(self):
        return self.api.LocalLB.ProfileClientSSL.get_handshake_timeout(self.profiles)
    def get_key_file(self):
        return self.api.LocalLB.ProfileClientSSL.get_key_file_v2(self.profiles)
    def get_modssl_emulation_state(self):
        return self.api.LocalLB.ProfileClientSSL.get_modssl_emulation_state(self.profiles)
    def get_passphrase(self):
        return self.api.LocalLB.ProfileClientSSL.get_passphrase(self.profiles)
    def get_peer_certification_mode(self):
        return self.api.LocalLB.ProfileClientSSL.get_peer_certification_mode(self.profiles)
    def get_profile_mode(self):
        return self.api.LocalLB.ProfileClientSSL.get_profile_mode(self.profiles)
    def get_renegotiation_maximum_record_delay(self):
        return self.api.LocalLB.ProfileClientSSL.get_renegotiation_maximum_record_delay(self.profiles)
    def get_renegotiation_period(self):
        return self.api.LocalLB.ProfileClientSSL.get_renegotiation_period(self.profiles)
    def get_renegotiation_state(self):
        return self.api.LocalLB.ProfileClientSSL.get_renegotiation_state(self.profiles)
    def get_renegotiation_throughput(self):
        return self.api.LocalLB.ProfileClientSSL.get_renegotiation_throughput(self.profiles)
    def get_retain_certificate_state(self):
        return self.api.LocalLB.ProfileClientSSL.get_retain_certificate_state(self.profiles)
    def get_secure_renegotiation_mode(self):
        return self.api.LocalLB.ProfileClientSSL.get_secure_renegotiation_mode(self.profiles)
    def get_server_name(self):
        return self.api.LocalLB.ProfileClientSSL.get_server_name(self.profiles)
    def get_session_ticket_state(self):
        return self.api.LocalLB.ProfileClientSSL.get_session_ticket_state(self.profiles)
    def get_sni_default_state(self):
        return self.api.LocalLB.ProfileClientSSL.get_sni_default_state(self.profiles)
    def get_sni_require_state(self):
        return self.api.LocalLB.ProfileClientSSL.get_sni_require_state(self.profiles)
    def get_ssl_option(self):
        return self.api.LocalLB.ProfileClientSSL.get_ssl_option(self.profiles)
    def get_strict_resume_state(self):
        return self.api.LocalLB.ProfileClientSSL.get_strict_resume_state(self.profiles)
    def get_unclean_shutdown_state(self):
        return self.api.LocalLB.ProfileClientSSL.get_unclean_shutdown_state(self.profiles)
    def get_is_base_profile(self):
        return self.api.LocalLB.ProfileClientSSL.is_base_profile(self.profiles)
    def get_is_system_profile(self):
        return self.api.LocalLB.ProfileClientSSL.is_system_profile(self.profiles)
class SystemInfo(object):
    """System information class.

    Thin read-only wrapper around the F5 BIG-IP ``System.SystemInfo``
    iControl endpoint.

    Attributes:
        api: iControl API instance.
    """
    def __init__(self, api):
        self.api = api
        # Bind the endpoint once; every getter delegates to it.
        self._endpoint = api.System.SystemInfo
    def get_base_mac_address(self):
        return self._endpoint.get_base_mac_address()
    def get_blade_temperature(self):
        return self._endpoint.get_blade_temperature()
    def get_chassis_slot_information(self):
        return self._endpoint.get_chassis_slot_information()
    def get_globally_unique_identifier(self):
        return self._endpoint.get_globally_unique_identifier()
    def get_group_id(self):
        return self._endpoint.get_group_id()
    def get_hardware_information(self):
        return self._endpoint.get_hardware_information()
    def get_marketing_name(self):
        return self._endpoint.get_marketing_name()
    def get_product_information(self):
        return self._endpoint.get_product_information()
    def get_pva_version(self):
        return self._endpoint.get_pva_version()
    def get_system_id(self):
        return self._endpoint.get_system_id()
    def get_system_information(self):
        return self._endpoint.get_system_information()
    def get_time(self):
        return self._endpoint.get_time()
    def get_time_zone(self):
        return self._endpoint.get_time_zone()
    def get_uptime(self):
        return self._endpoint.get_uptime()
class ProvisionInfo(object):
    """Provision information class.

    Wraps the F5 BIG-IP ``Management.Provision`` endpoint.

    Attributes:
        api: iControl API instance.
    """
    def __init__(self, api):
        self.api = api
    @staticmethod
    def _short_name(module_name):
        # The API reports names like 'TMOS_MODULE_LTM'; facts use 'ltm'.
        return module_name.lower().replace('tmos_module_', '')
    def get_list(self):
        """Return the short names of all provisionable modules."""
        # Comprehension replaces the original append loop, which also
        # shadowed the builtin ``list`` with a local variable.
        return [self._short_name(item)
                for item in self.api.Management.Provision.get_list()]
    def get_provisioned_list(self):
        """Return the short names of currently provisioned modules."""
        return [self._short_name(item)
                for item in self.api.Management.Provision.get_provisioned_list()]
def generate_dict(api_obj, fields):
    """Build ``{item_name: {field: value}}`` facts from an API helper object.

    For each field, ``api_obj.get_<field>()`` is expected to return a list
    parallel to ``api_obj.get_list()``; fields whose getter is missing on
    the device (MethodNotFound/WebFault) are silently skipped.
    """
    names = api_obj.get_list()
    if not names:
        return {}
    columns = []
    for field in fields:
        try:
            values = getattr(api_obj, "get_" + field)()
        except (MethodNotFound, WebFault):
            # Not every field is available on every device/version.
            continue
        columns.append((field, values))
    return {name: {field: values[i] for field, values in columns}
            for i, name in enumerate(names)}
def generate_simple_dict(api_obj, fields):
    """Build ``{field: value}`` facts by calling ``api_obj.get_<field>()``.

    Fields whose getter is missing on the device (MethodNotFound/WebFault)
    are silently skipped.
    """
    result = {}
    for field in fields:
        try:
            value = getattr(api_obj, "get_" + field)()
        except (MethodNotFound, WebFault):
            # Not every field is available on every device/version.
            continue
        result[field] = value
    return result
def generate_interface_dict(f5, regex):
    """Build per-interface facts (name -> field dict) for interfaces
    matching *regex* (None selects all)."""
    interfaces = Interfaces(f5.get_api(), regex)
    # Field names map 1:1 onto Interfaces.get_<field>() accessors.
    fields = ['active_media', 'actual_flow_control', 'bundle_state',
              'description', 'dual_media_state', 'enabled_state', 'if_index',
              'learning_mode', 'lldp_admin_status', 'lldp_tlvmap',
              'mac_address', 'media', 'media_option', 'media_option_sfp',
              'media_sfp', 'media_speed', 'media_status', 'mtu',
              'phy_master_slave_mode', 'prefer_sfp_state', 'flow_control',
              'sflow_poll_interval', 'sflow_poll_interval_global',
              'sfp_media_state', 'stp_active_edge_port_state',
              'stp_enabled_state', 'stp_link_type',
              'stp_protocol_detection_reset_state']
    return generate_dict(interfaces, fields)
def generate_self_ip_dict(f5, regex):
    """Build per-self-IP facts (name -> field dict) for self IPs
    matching *regex* (None selects all)."""
    self_ips = SelfIPs(f5.get_api(), regex)
    # Field names map 1:1 onto SelfIPs.get_<field>() accessors.
    fields = ['address', 'allow_access_list', 'description',
              'enforced_firewall_policy', 'floating_state', 'fw_rule',
              'netmask', 'staged_firewall_policy', 'traffic_group',
              'vlan', 'is_traffic_group_inherited']
    return generate_dict(self_ips, fields)
def generate_trunk_dict(f5, regex):
    """Build per-trunk facts (name -> field dict) for trunks matching
    *regex* (None selects all)."""
    trunks = Trunks(f5.get_api(), regex)
    # Field names map 1:1 onto Trunks.get_<field>() accessors.
    fields = ['active_lacp_state', 'configured_member_count', 'description',
              'distribution_hash_option', 'interface', 'lacp_enabled_state',
              'lacp_timeout_option', 'link_selection_policy', 'media_speed',
              'media_status', 'operational_member_count', 'stp_enabled_state',
              'stp_protocol_detection_reset_state']
    return generate_dict(trunks, fields)
def generate_vlan_dict(f5, regex):
    """Build per-VLAN facts (name -> field dict) for VLANs matching
    *regex* (None selects all)."""
    vlans = Vlans(f5.get_api(), regex)
    # Field names map 1:1 onto Vlans.get_<field>() accessors.
    fields = ['auto_lasthop', 'cmp_hash_algorithm', 'description',
              'dynamic_forwarding', 'failsafe_action', 'failsafe_state',
              'failsafe_timeout', 'if_index', 'learning_mode',
              'mac_masquerade_address', 'member', 'mtu',
              'sflow_poll_interval', 'sflow_poll_interval_global',
              'sflow_sampling_rate', 'sflow_sampling_rate_global',
              'source_check_state', 'true_mac_address', 'vlan_id']
    return generate_dict(vlans, fields)
def generate_vs_dict(f5, regex):
    """Build per-virtual-server facts (name -> field dict) for virtual
    servers matching *regex* (None selects all)."""
    virtual_servers = VirtualServers(f5.get_api(), regex)
    # Field names map 1:1 onto VirtualServers.get_<field>() accessors.
    fields = ['actual_hardware_acceleration', 'authentication_profile',
              'auto_lasthop', 'bw_controller_policy', 'clone_pool',
              'cmp_enable_mode', 'connection_limit', 'connection_mirror_state',
              'default_pool_name', 'description', 'destination',
              'enabled_state', 'enforced_firewall_policy',
              'fallback_persistence_profile', 'fw_rule', 'gtm_score',
              'last_hop_pool', 'nat64_state', 'object_status',
              'persistence_profile', 'profile', 'protocol',
              'rate_class', 'rate_limit', 'rate_limit_destination_mask',
              'rate_limit_mode', 'rate_limit_source_mask', 'related_rule',
              'rule', 'security_log_profile', 'snat_pool', 'snat_type',
              'source_address', 'source_address_translation_lsn_pool',
              'source_address_translation_snat_pool',
              'source_address_translation_type', 'source_port_behavior',
              'staged_firewall_policy', 'translate_address_state',
              'translate_port_state', 'type', 'vlan', 'wildmask',
              'name']
    return generate_dict(virtual_servers, fields)
def generate_pool_dict(f5, regex):
    """Build per-pool facts (name -> field dict) for pools matching
    *regex* (None selects all)."""
    pools = Pools(f5.get_api(), regex)
    # Field names map 1:1 onto Pools.get_<field>() accessors.
    fields = ['action_on_service_down', 'active_member_count',
              'aggregate_dynamic_ratio', 'allow_nat_state',
              'allow_snat_state', 'client_ip_tos', 'client_link_qos',
              'description', 'gateway_failsafe_device',
              'ignore_persisted_weight_state', 'lb_method', 'member',
              'minimum_active_member', 'minimum_up_member',
              'minimum_up_member_action', 'minimum_up_member_enabled_state',
              'monitor_association', 'monitor_instance', 'object_status',
              'profile', 'queue_depth_limit',
              'queue_on_connection_limit_state', 'queue_time_limit',
              'reselect_tries', 'server_ip_tos', 'server_link_qos',
              'simple_timeout', 'slow_ramp_time', 'name']
    return generate_dict(pools, fields)
def generate_device_dict(f5, regex):
    """Build per-device facts (name -> field dict) for devices matching
    *regex* (None selects all)."""
    devices = Devices(f5.get_api(), regex)
    # Field names map 1:1 onto Devices.get_<field>() accessors.
    fields = ['active_modules', 'base_mac_address', 'blade_addresses',
              'build', 'chassis_id', 'chassis_type', 'comment',
              'configsync_address', 'contact', 'description', 'edition',
              'failover_state', 'hostname', 'inactive_modules', 'location',
              'management_address', 'marketing_name', 'multicast_address',
              'optional_modules', 'platform_id', 'primary_mirror_address',
              'product', 'secondary_mirror_address', 'software_version',
              'timelimited_modules', 'timezone', 'unicast_addresses']
    return generate_dict(devices, fields)
def generate_device_group_dict(f5, regex):
    """Build per-device-group facts (name -> field dict) for device groups
    matching *regex* (None selects all)."""
    device_groups = DeviceGroups(f5.get_api(), regex)
    # Field names map 1:1 onto DeviceGroups.get_<field>() accessors.
    fields = ['all_preferred_active', 'autosync_enabled_state', 'description',
              'device', 'full_load_on_sync_state',
              'incremental_config_sync_size_maximum',
              'network_failover_enabled_state', 'sync_status', 'type']
    return generate_dict(device_groups, fields)
def generate_traffic_group_dict(f5, regex):
    """Build per-traffic-group facts (name -> field dict) for traffic groups
    matching *regex* (None selects all)."""
    traffic_groups = TrafficGroups(f5.get_api(), regex)
    # Field names map 1:1 onto TrafficGroups.get_<field>() accessors.
    fields = ['auto_failback_enabled_state', 'auto_failback_time',
              'default_device', 'description', 'ha_load_factor',
              'ha_order', 'is_floating', 'mac_masquerade_address',
              'unit_id']
    return generate_dict(traffic_groups, fields)
def generate_rule_dict(f5, regex):
    """Build per-iRule facts (name -> field dict) for rules matching
    *regex* (None selects all)."""
    # Field names map 1:1 onto Rules.get_<field>() accessors
    # ('ignore_vertification' is the API's own spelling).
    return generate_dict(
        Rules(f5.get_api(), regex),
        ['definition', 'description', 'ignore_vertification',
         'verification_status'])
def generate_node_dict(f5, regex):
    """Build per-node facts (name -> field dict) for nodes matching
    *regex* (None selects all)."""
    nodes = Nodes(f5.get_api(), regex)
    # Field names map 1:1 onto Nodes.get_<field>() accessors.
    fields = ['name', 'address', 'connection_limit', 'description', 'dynamic_ratio',
              'monitor_instance', 'monitor_rule', 'monitor_status',
              'object_status', 'rate_limit', 'ratio', 'session_status']
    return generate_dict(nodes, fields)
def generate_virtual_address_dict(f5, regex):
    """Build per-virtual-address facts (name -> field dict) for virtual
    addresses matching *regex* (None selects all)."""
    virtual_addresses = VirtualAddresses(f5.get_api(), regex)
    # Field names map 1:1 onto VirtualAddresses.get_<field>() accessors.
    fields = ['address', 'arp_state', 'auto_delete_state', 'connection_limit',
              'description', 'enabled_state', 'icmp_echo_state',
              'is_floating_state', 'netmask', 'object_status',
              'route_advertisement_state', 'traffic_group']
    return generate_dict(virtual_addresses, fields)
def generate_address_class_dict(f5, regex):
    """Build per-address-class facts (name -> field dict) for address
    classes matching *regex* (None selects all)."""
    return generate_dict(AddressClasses(f5.get_api(), regex),
                         ['address_class', 'description'])
def generate_certificate_dict(f5, regex):
    """Map certificate ids to their certificate information structures."""
    certs = Certificates(f5.get_api(), regex)
    return {name: info
            for name, info in zip(certs.get_list(), certs.get_certificate_list())}
def generate_key_dict(f5, regex):
    """Map key ids to their key information structures."""
    keys = Keys(f5.get_api(), regex)
    return {name: info
            for name, info in zip(keys.get_list(), keys.get_key_list())}
def generate_client_ssl_profile_dict(f5, regex):
    """Build per-client-SSL-profile facts (name -> field dict) for profiles
    matching *regex* (None selects all)."""
    profiles = ProfileClientSSL(f5.get_api(), regex)
    # Field names map 1:1 onto ProfileClientSSL.get_<field>() accessors.
    fields = ['alert_timeout', 'allow_nonssl_state', 'authenticate_depth',
              'authenticate_once_state', 'ca_file', 'cache_size',
              'cache_timeout', 'certificate_file', 'chain_file',
              'cipher_list', 'client_certificate_ca_file', 'crl_file',
              'default_profile', 'description',
              'forward_proxy_ca_certificate_file', 'forward_proxy_ca_key_file',
              'forward_proxy_ca_passphrase',
              'forward_proxy_certificate_extension_include',
              'forward_proxy_certificate_lifespan',
              'forward_proxy_enabled_state',
              'forward_proxy_lookup_by_ipaddr_port_state', 'handshake_timeout',
              'key_file', 'modssl_emulation_state', 'passphrase',
              'peer_certification_mode', 'profile_mode',
              'renegotiation_maximum_record_delay', 'renegotiation_period',
              'renegotiation_state', 'renegotiation_throughput',
              'retain_certificate_state', 'secure_renegotiation_mode',
              'server_name', 'session_ticket_state', 'sni_default_state',
              'sni_require_state', 'ssl_option', 'strict_resume_state',
              'unclean_shutdown_state', 'is_base_profile', 'is_system_profile']
    return generate_dict(profiles, fields)
def generate_system_info_dict(f5):
    """Build a flat dict of system-level facts (no per-item keying)."""
    system_info = SystemInfo(f5.get_api())
    # Field names map 1:1 onto SystemInfo.get_<field>() accessors.
    fields = ['base_mac_address',
              'blade_temperature', 'chassis_slot_information',
              'globally_unique_identifier', 'group_id',
              'hardware_information',
              'marketing_name',
              'product_information', 'pva_version', 'system_id',
              'system_information', 'time',
              'time_zone', 'uptime']
    return generate_simple_dict(system_info, fields)
def generate_software_list(f5):
    """Return the list of software image/hotfix status structures."""
    return Software(f5.get_api()).get_all_software_status()
def generate_provision_dict(f5):
    """Return provisionable and provisioned module name lists."""
    return generate_simple_dict(ProvisionInfo(f5.get_api()),
                                ['list', 'provisioned_list'])
def main():
    """Module entry point: connect to the BIG-IP and gather requested facts.

    Fails the task when bigsuds is missing, when certificate validation is
    requested on an unsupported Python, or on any API error.
    """
    # Copy the shared spec: update() on f5_argument_spec itself would
    # permanently mutate the module-level dict imported from module_utils.
    argument_spec = dict(f5_argument_spec)
    meta_args = dict(
        session=dict(type='bool', default=False),
        include=dict(type='list', required=True),
        filter=dict(type='str', required=False),
    )
    argument_spec.update(meta_args)
    module = AnsibleModule(
        argument_spec=argument_spec
    )
    if not bigsuds_found:
        module.fail_json(msg="the python suds and bigsuds modules are required")
    server = module.params['server']
    server_port = module.params['server_port']
    user = module.params['user']
    password = module.params['password']
    validate_certs = module.params['validate_certs']
    session = module.params['session']
    fact_filter = module.params['filter']
    if validate_certs:
        import ssl
        if not hasattr(ssl, 'SSLContext'):
            module.fail_json(
                msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task'
            )
    # The shell-style filter is translated to a regex consumed by the
    # helper classes above; None selects everything.
    if fact_filter:
        regex = fnmatch.translate(fact_filter)
    else:
        regex = None
    include = [x.lower() for x in module.params['include']]
    valid_includes = ('address_class', 'certificate', 'client_ssl_profile',
                      'device', 'device_group', 'interface', 'key', 'node',
                      'pool', 'provision', 'rule', 'self_ip', 'software',
                      'system_info', 'traffic_group', 'trunk',
                      'virtual_address', 'virtual_server', 'vlan')
    if not all(x in valid_includes for x in include):
        module.fail_json(msg="value of include must be one or more of: %s, got: %s" % (",".join(valid_includes), ",".join(include)))
    try:
        facts = {}
        if include:
            f5 = F5(server, user, password, session, validate_certs, server_port)
            # Gather from the root folder with recursive queries so facts
            # cover all partitions; save the state so it can be restored.
            saved_active_folder = f5.get_active_folder()
            saved_recursive_query_state = f5.get_recursive_query_state()
            if saved_active_folder != "/":
                f5.set_active_folder("/")
            if saved_recursive_query_state != "STATE_ENABLED":
                f5.enable_recursive_query_state()
            if 'interface' in include:
                facts['interface'] = generate_interface_dict(f5, regex)
            if 'self_ip' in include:
                facts['self_ip'] = generate_self_ip_dict(f5, regex)
            if 'trunk' in include:
                facts['trunk'] = generate_trunk_dict(f5, regex)
            if 'vlan' in include:
                facts['vlan'] = generate_vlan_dict(f5, regex)
            if 'virtual_server' in include:
                facts['virtual_server'] = generate_vs_dict(f5, regex)
            if 'pool' in include:
                facts['pool'] = generate_pool_dict(f5, regex)
            if 'provision' in include:
                facts['provision'] = generate_provision_dict(f5)
            if 'device' in include:
                facts['device'] = generate_device_dict(f5, regex)
            if 'device_group' in include:
                facts['device_group'] = generate_device_group_dict(f5, regex)
            if 'traffic_group' in include:
                facts['traffic_group'] = generate_traffic_group_dict(f5, regex)
            if 'rule' in include:
                facts['rule'] = generate_rule_dict(f5, regex)
            if 'node' in include:
                facts['node'] = generate_node_dict(f5, regex)
            if 'virtual_address' in include:
                facts['virtual_address'] = generate_virtual_address_dict(f5, regex)
            if 'address_class' in include:
                facts['address_class'] = generate_address_class_dict(f5, regex)
            if 'software' in include:
                facts['software'] = generate_software_list(f5)
            if 'certificate' in include:
                facts['certificate'] = generate_certificate_dict(f5, regex)
            if 'key' in include:
                facts['key'] = generate_key_dict(f5, regex)
            if 'client_ssl_profile' in include:
                facts['client_ssl_profile'] = generate_client_ssl_profile_dict(f5, regex)
            if 'system_info' in include:
                facts['system_info'] = generate_system_info_dict(f5)
            # restore saved state
            if saved_active_folder and saved_active_folder != "/":
                f5.set_active_folder(saved_active_folder)
            if saved_recursive_query_state and \
               saved_recursive_query_state != "STATE_ENABLED":
                f5.set_recursive_query_state(saved_recursive_query_state)
        result = dict(
            ansible_facts=facts,
        )
        result.update(**facts)
    except Exception as e:
        # Boundary handler: surface any API/connection failure to Ansible.
        module.fail_json(msg="received exception: %s\ntraceback: %s" % (e, traceback.format_exc()))
    module.exit_json(**result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
SCSSG/Odoo-SCS | addons/account/wizard/account_validate_account_move.py | 381 | 3203 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class validate_account_move(osv.osv_memory):
    """Wizard: post all draft journal entries in chosen journals/periods."""
    _name = "validate.account.move"
    _description = "Validate Account Move"
    _columns = {
        'journal_ids': fields.many2many('account.journal', 'wizard_validate_account_move_journal', 'wizard_id', 'journal_id', 'Journal', required=True),
        'period_ids': fields.many2many('account.period', 'wizard_validate_account_move_period', 'wizard_id', 'period_id', 'Period', required=True, domain=[('state','<>','done')]),
    }
    def validate_move(self, cr, uid, ids, context=None):
        """Post every draft account.move in the wizard's journals/periods."""
        if context is None:
            context = {}
        move_obj = self.pool.get('account.move')
        wizard = self.read(cr, uid, ids[0], context=context)
        domain = [('state', '=', 'draft'),
                  ('journal_id', 'in', tuple(wizard['journal_ids'])),
                  ('period_id', 'in', tuple(wizard['period_ids']))]
        draft_move_ids = move_obj.search(cr, uid, domain, order='date')
        if not draft_move_ids:
            raise osv.except_osv(_('Warning!'), _('Specified journals do not have any account move entries in draft state for the specified periods.'))
        move_obj.button_validate(cr, uid, draft_move_ids, context=context)
        return {'type': 'ir.actions.act_window_close'}
class validate_account_move_lines(osv.osv_memory):
    """Wizard: post the draft moves that own the selected move lines."""
    _name = "validate.account.move.lines"
    _description = "Validate Account Move Lines"
    def validate_move_lines(self, cr, uid, ids, context=None):
        """Post each distinct draft account.move behind the active lines."""
        if context is None:
            context = {}
        line_obj = self.pool.get('account.move.line')
        move_obj = self.pool.get('account.move')
        selected_lines = line_obj.browse(cr, uid, context['active_ids'], context)
        # Deduplicate parent moves, keeping only those still in draft.
        draft_move_ids = list({line.move_id.id for line in selected_lines
                               if line.move_id.state == 'draft'})
        if not draft_move_ids:
            raise osv.except_osv(_('Warning!'), _('Selected Entry Lines does not have any account move entries in draft state.'))
        move_obj.button_validate(cr, uid, draft_move_ids, context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
AlexOugh/horizon | horizon/messages.py | 85 | 3425 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Drop-in replacement for django.contrib.messages which handles Horizon's
messaging needs (e.g. AJAX communication, etc.).
"""
from django.contrib import messages as _messages
from django.contrib.messages import constants
from django.utils.encoding import force_text
from django.utils.safestring import SafeData # noqa
def horizon_message_already_queued(request, message):
    """Return True if an identical message is already queued for *request*."""
    text = force_text(message)
    if request.is_ajax():
        # AJAX messages live on the request's horizon dict as
        # [tag, message, extra_tags] triples.
        return any(msg == text
                   for _tag, msg, _extra in request.horizon['async_messages'])
    return any(queued.message == text
               for queued in _messages.get_messages(request)._queued_messages)
def add_message(request, level, message, extra_tags='', fail_silently=False):
    """Attempts to add a message to the request using the 'messages' app.

    Duplicate messages are dropped. AJAX requests queue the message on
    ``request.horizon['async_messages']``; others delegate to
    ``django.contrib.messages``.
    """
    if horizon_message_already_queued(request, message):
        return None
    if not request.is_ajax():
        return _messages.add_message(request, level, message,
                                     extra_tags, fail_silently)
    tag = constants.DEFAULT_TAGS[level]
    # if message is marked as safe, pass "safe" tag as extra_tags so
    # that client can skip HTML escape for the message when rendering
    if isinstance(message, SafeData):
        extra_tags += ' safe'
    request.horizon['async_messages'].append([tag,
                                              force_text(message),
                                              extra_tags])
def debug(request, message, extra_tags='', fail_silently=False):
    """Adds a message with the ``DEBUG`` level.

    Thin wrapper around :func:`add_message`; see it for parameter details.
    """
    add_message(request, constants.DEBUG, message, extra_tags=extra_tags,
                fail_silently=fail_silently)
def info(request, message, extra_tags='', fail_silently=False):
    """Adds a message with the ``INFO`` level.

    Thin wrapper around :func:`add_message`; see it for parameter details.
    """
    add_message(request, constants.INFO, message, extra_tags=extra_tags,
                fail_silently=fail_silently)
def success(request, message, extra_tags='', fail_silently=False):
    """Adds a message with the ``SUCCESS`` level.

    Thin wrapper around :func:`add_message`; see it for parameter details.
    """
    add_message(request, constants.SUCCESS, message, extra_tags=extra_tags,
                fail_silently=fail_silently)
def warning(request, message, extra_tags='', fail_silently=False):
    """Adds a message with the ``WARNING`` level.

    Thin wrapper around :func:`add_message`; see it for parameter details.
    """
    add_message(request, constants.WARNING, message, extra_tags=extra_tags,
                fail_silently=fail_silently)
def error(request, message, extra_tags='', fail_silently=False):
    """Adds a message with the ``ERROR`` level.

    Thin wrapper around :func:`add_message`; see it for parameter details.
    """
    add_message(request, constants.ERROR, message, extra_tags=extra_tags,
                fail_silently=fail_silently)
| apache-2.0 |
meganbkratz/acq4 | acq4/devices/Laser/taskTemplate.py | 3 | 5816 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './acq4/devices/Laser/taskTemplate.ui'
#
# Created: Tue Dec 24 01:49:07 2013
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# PyQt4 API-version compatibility shims: QString.fromUtf8 and
# QApplication.UnicodeUTF8 exist only under API v1; under API v2 (or when
# absent) fall back to plain-string equivalents.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # API v2: str is already unicode; pass through unchanged.
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # Older/newer PyQt4 without the encoding argument.
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """Auto-generated (pyuic4) UI for the laser task control panel.

    Do not hand-edit the widget-construction logic: this file is
    regenerated from taskTemplate.ui and manual changes will be lost.
    """
    def setupUi(self, Form):
        # Builds the widget tree; widget names mirror the .ui file.
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(218, 190)
        self.gridLayout_2 = QtGui.QGridLayout(Form)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        # Row 0: output power readout + manual check button.
        self.label_2 = QtGui.QLabel(Form)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout_2.addWidget(self.label_2, 0, 0, 1, 1)
        self.outputPowerLabel = QtGui.QLabel(Form)
        self.outputPowerLabel.setObjectName(_fromUtf8("outputPowerLabel"))
        self.gridLayout_2.addWidget(self.outputPowerLabel, 0, 1, 1, 1)
        self.checkPowerBtn = QtGui.QPushButton(Form)
        self.checkPowerBtn.setObjectName(_fromUtf8("checkPowerBtn"))
        self.gridLayout_2.addWidget(self.checkPowerBtn, 0, 2, 1, 1)
        # Row 1: power-at-sample readout (bold value).
        self.label_3 = QtGui.QLabel(Form)
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.label_3.setFont(font)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.gridLayout_2.addWidget(self.label_3, 1, 0, 1, 1)
        self.samplePowerLabel = QtGui.QLabel(Form)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.samplePowerLabel.setFont(font)
        self.samplePowerLabel.setObjectName(_fromUtf8("samplePowerLabel"))
        self.gridLayout_2.addWidget(self.samplePowerLabel, 1, 1, 1, 1)
        # Wavelength selector: checkbox enables the nm spinbox.
        self.wavelengthWidget = QtGui.QWidget(Form)
        self.wavelengthWidget.setObjectName(_fromUtf8("wavelengthWidget"))
        self.horizontalLayout = QtGui.QHBoxLayout(self.wavelengthWidget)
        self.horizontalLayout.setSpacing(0)
        self.horizontalLayout.setMargin(0)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.setWavelengthCheck = QtGui.QCheckBox(self.wavelengthWidget)
        self.setWavelengthCheck.setObjectName(_fromUtf8("setWavelengthCheck"))
        self.horizontalLayout.addWidget(self.setWavelengthCheck)
        self.wavelengthSpin = QtGui.QSpinBox(self.wavelengthWidget)
        self.wavelengthSpin.setMaximum(4000)
        self.wavelengthSpin.setSingleStep(10)
        self.wavelengthSpin.setProperty("value", 1080)
        self.wavelengthSpin.setObjectName(_fromUtf8("wavelengthSpin"))
        self.horizontalLayout.addWidget(self.wavelengthSpin)
        self.gridLayout_2.addWidget(self.wavelengthWidget, 4, 0, 1, 3)
        # Control-mode radio group: power waveform vs. switch waveform.
        self.groupBox = QtGui.QGroupBox(Form)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.gridLayout = QtGui.QGridLayout(self.groupBox)
        self.gridLayout.setSpacing(0)
        self.gridLayout.setContentsMargins(3, 0, 3, 3)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.powerWaveRadio = QtGui.QRadioButton(self.groupBox)
        self.powerWaveRadio.setChecked(True)
        self.powerWaveRadio.setObjectName(_fromUtf8("powerWaveRadio"))
        self.gridLayout.addWidget(self.powerWaveRadio, 0, 0, 1, 1)
        self.switchWaveRadio = QtGui.QRadioButton(self.groupBox)
        self.switchWaveRadio.setObjectName(_fromUtf8("switchWaveRadio"))
        self.gridLayout.addWidget(self.switchWaveRadio, 1, 0, 1, 1)
        self.gridLayout_2.addWidget(self.groupBox, 5, 0, 1, 3)
        # Behavior options.
        self.adjustLengthCheck = QtGui.QCheckBox(Form)
        self.adjustLengthCheck.setChecked(True)
        self.adjustLengthCheck.setTristate(False)
        self.adjustLengthCheck.setObjectName(_fromUtf8("adjustLengthCheck"))
        self.gridLayout_2.addWidget(self.adjustLengthCheck, 3, 0, 1, 3)
        self.checkPowerCheck = QtGui.QCheckBox(Form)
        self.checkPowerCheck.setChecked(True)
        self.checkPowerCheck.setObjectName(_fromUtf8("checkPowerCheck"))
        self.gridLayout_2.addWidget(self.checkPowerCheck, 2, 0, 1, 3)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        # Applies all user-visible strings (kept separate for translation).
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.label_2.setText(_translate("Form", "Output Power:", None))
        self.outputPowerLabel.setText(_translate("Form", "0mW", None))
        self.checkPowerBtn.setText(_translate("Form", "Check Power", None))
        self.label_3.setText(_translate("Form", "Power at Sample:", None))
        self.samplePowerLabel.setText(_translate("Form", "0mW", None))
        self.setWavelengthCheck.setText(_translate("Form", "Set wavelength", None))
        self.wavelengthSpin.setSuffix(_translate("Form", " nm", None))
        self.groupBox.setTitle(_translate("Form", "Control Mode:", None))
        self.powerWaveRadio.setText(_translate("Form", "Power waveform (W)", None))
        self.switchWaveRadio.setText(_translate("Form", "Switch waveform (%)", None))
        self.adjustLengthCheck.setToolTip(_translate("Form", "If the output power of the laser changes, adjust the length of laser pulses to maintain constant pulse energy.", None))
        self.adjustLengthCheck.setText(_translate("Form", "Adjust pulse length if power changes", None))
        self.checkPowerCheck.setText(_translate("Form", "Check power before task start", None))
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.