| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, may be null) |
|---|---|---|---|---|
| kursitet/edx-platform | refs/heads/master | lms/djangoapps/lti_provider/migrations/__init__.py | 12133432 | |
| yanheven/neutron | refs/heads/master | neutron/tests/unit/agent/linux/test_external_process.py | 10 |
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os.path
from neutron.agent.linux import external_process as ep
from neutron.common import utils as common_utils
from neutron.tests import base
from neutron.tests import tools
TEST_UUID = 'test-uuid'
TEST_SERVICE = 'testsvc'
TEST_PID = 1234
class BaseTestProcessMonitor(base.BaseTestCase):
def setUp(self):
super(BaseTestProcessMonitor, self).setUp()
self.log_patch = mock.patch("neutron.agent.linux.external_process."
"LOG.error")
self.error_log = self.log_patch.start()
self.spawn_patch = mock.patch("eventlet.spawn")
self.eventlet_spawn = self.spawn_patch.start()
# create a default process monitor
self.create_child_process_monitor('respawn')
def create_child_process_monitor(self, action):
conf = mock.Mock()
conf.AGENT.check_child_processes_action = action
conf.AGENT.check_child_processes = True
self.pmonitor = ep.ProcessMonitor(
config=conf,
resource_type='test')
def get_monitored_process(self, uuid, service=None):
monitored_process = mock.Mock()
self.pmonitor.register(uuid=uuid,
service_name=service,
monitored_process=monitored_process)
return monitored_process
class TestProcessMonitor(BaseTestProcessMonitor):
def test_error_logged(self):
pm = self.get_monitored_process(TEST_UUID)
pm.active = False
self.pmonitor._check_child_processes()
self.assertTrue(self.error_log.called)
def test_exit_handler(self):
self.create_child_process_monitor('exit')
pm = self.get_monitored_process(TEST_UUID)
pm.active = False
with mock.patch.object(ep.ProcessMonitor,
'_exit_handler') as exit_handler:
self.pmonitor._check_child_processes()
exit_handler.assert_called_once_with(TEST_UUID, None)
def test_register(self):
pm = self.get_monitored_process(TEST_UUID)
self.assertEqual(len(self.pmonitor._monitored_processes), 1)
self.assertIn(pm, self.pmonitor._monitored_processes.values())
def test_register_same_service_twice(self):
self.get_monitored_process(TEST_UUID)
self.get_monitored_process(TEST_UUID)
self.assertEqual(len(self.pmonitor._monitored_processes), 1)
def test_register_different_service_types(self):
self.get_monitored_process(TEST_UUID)
self.get_monitored_process(TEST_UUID, TEST_SERVICE)
self.assertEqual(len(self.pmonitor._monitored_processes), 2)
def test_unregister(self):
self.get_monitored_process(TEST_UUID)
self.pmonitor.unregister(TEST_UUID, None)
self.assertEqual(len(self.pmonitor._monitored_processes), 0)
def test_unregister_unknown_process(self):
self.pmonitor.unregister(TEST_UUID, None)
self.assertEqual(len(self.pmonitor._monitored_processes), 0)
class TestProcessManager(base.BaseTestCase):
def setUp(self):
super(TestProcessManager, self).setUp()
self.execute_p = mock.patch('neutron.agent.common.utils.execute')
self.execute = self.execute_p.start()
self.delete_if_exists = mock.patch(
'oslo_utils.fileutils.delete_if_exists').start()
self.ensure_dir = mock.patch.object(
common_utils, 'ensure_dir').start()
self.conf = mock.Mock()
self.conf.external_pids = '/var/path'
def test_processmanager_ensures_pid_dir(self):
pid_file = os.path.join(self.conf.external_pids, 'pid')
ep.ProcessManager(self.conf, 'uuid', pid_file=pid_file)
self.ensure_dir.assert_called_once_with(self.conf.external_pids)
def test_enable_no_namespace(self):
callback = mock.Mock()
callback.return_value = ['the', 'cmd']
with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name:
name.return_value = 'pidfile'
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=False)
manager = ep.ProcessManager(self.conf, 'uuid')
manager.enable(callback)
callback.assert_called_once_with('pidfile')
self.execute.assert_called_once_with(['the', 'cmd'],
check_exit_code=True,
extra_ok_codes=None,
run_as_root=False,
log_fail_as_error=True)
def test_enable_with_namespace(self):
callback = mock.Mock()
callback.return_value = ['the', 'cmd']
with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name:
name.return_value = 'pidfile'
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=False)
manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns')
with mock.patch.object(ep, 'ip_lib') as ip_lib:
manager.enable(callback)
callback.assert_called_once_with('pidfile')
ip_lib.assert_has_calls([
mock.call.IPWrapper(namespace='ns'),
mock.call.IPWrapper().netns.execute(
['the', 'cmd'], addl_env=None, run_as_root=False)])
def test_enable_with_namespace_process_active(self):
callback = mock.Mock()
callback.return_value = ['the', 'cmd']
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=True)
manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns')
with mock.patch.object(ep, 'ip_lib'):
manager.enable(callback)
self.assertFalse(callback.called)
def test_disable_no_namespace(self):
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=4)
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=True)
manager = ep.ProcessManager(self.conf, 'uuid')
with mock.patch.object(ep, 'utils') as utils:
manager.disable()
utils.assert_has_calls([
mock.call.execute(['kill', '-9', 4],
run_as_root=True)])
def test_disable_namespace(self):
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=4)
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=True)
manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns')
with mock.patch.object(ep, 'utils') as utils:
manager.disable()
utils.assert_has_calls([
mock.call.execute(['kill', '-9', 4],
run_as_root=True)])
def test_disable_not_active(self):
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=4)
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=False)
with mock.patch.object(ep.LOG, 'debug') as debug:
manager = ep.ProcessManager(self.conf, 'uuid')
manager.disable()
debug.assert_called_once_with(mock.ANY, mock.ANY)
def test_disable_no_pid(self):
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=None)
with mock.patch.object(ep.ProcessManager, 'active') as active:
active.__get__ = mock.Mock(return_value=False)
with mock.patch.object(ep.LOG, 'debug') as debug:
manager = ep.ProcessManager(self.conf, 'uuid')
manager.disable()
debug.assert_called_once_with(mock.ANY, mock.ANY)
def test_get_pid_file_name_default(self):
manager = ep.ProcessManager(self.conf, 'uuid')
retval = manager.get_pid_file_name()
self.assertEqual(retval, '/var/path/uuid.pid')
def test_pid(self):
self.useFixture(tools.OpenFixture('/var/path/uuid.pid', '5'))
manager = ep.ProcessManager(self.conf, 'uuid')
self.assertEqual(manager.pid, 5)
def test_pid_not_an_int(self):
self.useFixture(tools.OpenFixture('/var/path/uuid.pid', 'foo'))
manager = ep.ProcessManager(self.conf, 'uuid')
self.assertIsNone(manager.pid)
def test_pid_invalid_file(self):
with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name:
name.return_value = '.doesnotexist/pid'
manager = ep.ProcessManager(self.conf, 'uuid')
self.assertIsNone(manager.pid)
def test_active(self):
mock_open = self.useFixture(
tools.OpenFixture('/proc/4/cmdline', 'python foo --router_id=uuid')
).mock_open
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=4)
manager = ep.ProcessManager(self.conf, 'uuid')
self.assertTrue(manager.active)
mock_open.assert_called_once_with('/proc/4/cmdline', 'r')
def test_active_none(self):
dummy_cmd_line = 'python foo --router_id=uuid'
self.execute.return_value = dummy_cmd_line
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=None)
manager = ep.ProcessManager(self.conf, 'uuid')
self.assertFalse(manager.active)
def test_active_cmd_mismatch(self):
mock_open = self.useFixture(
tools.OpenFixture('/proc/4/cmdline',
'python foo --router_id=anotherid')
).mock_open
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=4)
manager = ep.ProcessManager(self.conf, 'uuid')
self.assertFalse(manager.active)
mock_open.assert_called_once_with('/proc/4/cmdline', 'r')
| dariemp/odoo | refs/heads/8.0 | addons/payment_buckaroo/models/buckaroo.py | 209 |
# -*- coding: utf-'8' "-*-"
from hashlib import sha1
import logging
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_buckaroo.controllers.main import BuckarooController
from openerp.osv import osv, fields
from openerp.tools.float_utils import float_compare
_logger = logging.getLogger(__name__)
class AcquirerBuckaroo(osv.Model):
_inherit = 'payment.acquirer'
def _get_buckaroo_urls(self, cr, uid, environment, context=None):
""" Buckaroo URLs
"""
if environment == 'prod':
return {
'buckaroo_form_url': 'https://checkout.buckaroo.nl/html/',
}
else:
return {
'buckaroo_form_url': 'https://testcheckout.buckaroo.nl/html/',
}
def _get_providers(self, cr, uid, context=None):
providers = super(AcquirerBuckaroo, self)._get_providers(cr, uid, context=context)
providers.append(['buckaroo', 'Buckaroo'])
return providers
_columns = {
'brq_websitekey': fields.char('WebsiteKey', required_if_provider='buckaroo'),
'brq_secretkey': fields.char('SecretKey', required_if_provider='buckaroo'),
}
def _buckaroo_generate_digital_sign(self, acquirer, inout, values):
""" Generate the shasign for incoming or outgoing communications.
:param browse acquirer: the payment.acquirer browse record. It should
have its Buckaroo secret key (brq_secretkey) configured.
:param string inout: 'in' (openerp contacting buckaroo) or 'out' (buckaroo
contacting openerp).
:param dict values: transaction values
:return string: shasign
"""
assert inout in ('in', 'out')
assert acquirer.provider == 'buckaroo'
keys = "add_returndata Brq_amount Brq_culture Brq_currency Brq_invoicenumber Brq_return Brq_returncancel Brq_returnerror Brq_returnreject brq_test Brq_websitekey".split()
def get_value(key):
if values.get(key):
return values[key]
return ''
values = dict(values or {})
if inout == 'out':
if 'BRQ_SIGNATURE' in values:
del values['BRQ_SIGNATURE']
items = sorted((k.upper(), v) for k, v in values.items())
sign = ''.join('%s=%s' % (k, v) for k, v in items)
else:
sign = ''.join('%s=%s' % (k,get_value(k)) for k in keys)
#Add the pre-shared secret key at the end of the signature
sign = sign + acquirer.brq_secretkey
if isinstance(sign, str):
sign = urlparse.parse_qsl(sign)
shasign = sha1(sign).hexdigest()
return shasign
def buckaroo_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
acquirer = self.browse(cr, uid, id, context=context)
buckaroo_tx_values = dict(tx_values)
buckaroo_tx_values.update({
'Brq_websitekey': acquirer.brq_websitekey,
'Brq_amount': tx_values['amount'],
'Brq_currency': tx_values['currency'] and tx_values['currency'].name or '',
'Brq_invoicenumber': tx_values['reference'],
'brq_test': False if acquirer.environment == 'prod' else True,
'Brq_return': '%s' % urlparse.urljoin(base_url, BuckarooController._return_url),
'Brq_returncancel': '%s' % urlparse.urljoin(base_url, BuckarooController._cancel_url),
'Brq_returnerror': '%s' % urlparse.urljoin(base_url, BuckarooController._exception_url),
'Brq_returnreject': '%s' % urlparse.urljoin(base_url, BuckarooController._reject_url),
'Brq_culture': (partner_values.get('lang') or 'en_US').replace('_', '-'),
})
if buckaroo_tx_values.get('return_url'):
buckaroo_tx_values['add_returndata'] = {'return_url': '%s' % buckaroo_tx_values.pop('return_url')}
else:
buckaroo_tx_values['add_returndata'] = ''
buckaroo_tx_values['Brq_signature'] = self._buckaroo_generate_digital_sign(acquirer, 'in', buckaroo_tx_values)
return partner_values, buckaroo_tx_values
def buckaroo_get_form_action_url(self, cr, uid, id, context=None):
acquirer = self.browse(cr, uid, id, context=context)
return self._get_buckaroo_urls(cr, uid, acquirer.environment, context=context)['buckaroo_form_url']
class TxBuckaroo(osv.Model):
_inherit = 'payment.transaction'
# Buckaroo BRQ_STATUSCODE values grouped by transaction outcome (used in _buckaroo_form_validate below)
_buckaroo_valid_tx_status = [190]
_buckaroo_pending_tx_status = [790, 791, 792, 793]
_buckaroo_cancel_tx_status = [890, 891]
_buckaroo_error_tx_status = [490, 491, 492]
_buckaroo_reject_tx_status = [690]
_columns = {
'buckaroo_txnid': fields.char('Transaction ID'),
}
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
def _buckaroo_form_get_tx_from_data(self, cr, uid, data, context=None):
""" Given a data dict coming from buckaroo, verify it and find the related
transaction record. """
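# Verification below is two-step: BRQ_INVOICENUMBER must match exactly one stored
# transaction reference, and BRQ_SIGNATURE must match the locally recomputed shasign.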
reference, pay_id, shasign = data.get('BRQ_INVOICENUMBER'), data.get('BRQ_PAYMENT'), data.get('BRQ_SIGNATURE')
if not reference or not pay_id or not shasign:
error_msg = 'Buckaroo: received data with missing reference (%s) or pay_id (%s) or shasign (%s)' % (reference, pay_id, shasign)
_logger.error(error_msg)
raise ValidationError(error_msg)
tx_ids = self.search(cr, uid, [('reference', '=', reference)], context=context)
if not tx_ids or len(tx_ids) > 1:
error_msg = 'Buckaroo: received data for reference %s' % (reference)
if not tx_ids:
error_msg += '; no order found'
else:
error_msg += '; multiple orders found'
_logger.error(error_msg)
raise ValidationError(error_msg)
tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)
#verify shasign
shasign_check = self.pool['payment.acquirer']._buckaroo_generate_digital_sign(tx.acquirer_id, 'out' ,data)
if shasign_check.upper() != shasign.upper():
error_msg = 'Buckaroo: invalid shasign, received %s, computed %s, for data %s' % (shasign, shasign_check, data)
_logger.error(error_msg)
raise ValidationError(error_msg)
return tx
def _buckaroo_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
invalid_parameters = []
if tx.acquirer_reference and data.get('BRQ_TRANSACTIONS') != tx.acquirer_reference:
invalid_parameters.append(('Transaction Id', data.get('BRQ_TRANSACTIONS'), tx.acquirer_reference))
# check what was bought
if float_compare(float(data.get('BRQ_AMOUNT', '0.0')), tx.amount, 2) != 0:
invalid_parameters.append(('Amount', data.get('BRQ_AMOUNT'), '%.2f' % tx.amount))
if data.get('BRQ_CURRENCY') != tx.currency_id.name:
invalid_parameters.append(('Currency', data.get('BRQ_CURRENCY'), tx.currency_id.name))
return invalid_parameters
def _buckaroo_form_validate(self, cr, uid, tx, data, context=None):
status_code = int(data.get('BRQ_STATUSCODE','0'))
if status_code in self._buckaroo_valid_tx_status:
tx.write({
'state': 'done',
'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
})
return True
elif status_code in self._buckaroo_pending_tx_status:
tx.write({
'state': 'pending',
'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
})
return True
elif status_code in self._buckaroo_cancel_tx_status:
tx.write({
'state': 'cancel',
'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
})
return True
else:
error = 'Buckaroo: feedback error'
_logger.info(error)
tx.write({
'state': 'error',
'state_message': error,
'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
})
return False
| aptana/Pydev | refs/heads/development | bundles/org.python.pydev/pysrc/tests_python/_debugger_case3.py | 104 |
import time

if __name__ == '__main__':
    for i in range(15):
        print('here')
        time.sleep(.2)
    print('TEST SUCEEDED')
| drawks/ansible | refs/heads/devel | lib/ansible/modules/database/postgresql/postgresql_idx.py | 7 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Andrey Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_idx
short_description: Create or drop indexes in a PostgreSQL database
description:
- Create or drop indexes in a PostgreSQL database.
- For more information see U(https://www.postgresql.org/docs/current/sql-createindex.html),
U(https://www.postgresql.org/docs/current/sql-dropindex.html).
version_added: '2.8'
options:
idxname:
description:
- Name of the index to create or drop.
type: str
required: true
aliases:
- name
db:
description:
- Name of database to connect to and where the index will be created/dropped.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
schema:
description:
- Name of a database schema where the index will be created.
type: str
state:
description:
- Index state.
- I(state=present) implies the index will be created if it does not exist.
- I(state=absent) implies the index will be dropped if it exists.
type: str
default: present
choices: [ absent, present ]
table:
description:
- Table to create the index on.
- Mutually exclusive with I(state=absent).
type: str
required: true
columns:
description:
- List of columns to be covered by the index.
- Mutually exclusive with I(state=absent).
type: list
aliases:
- column
cond:
description:
- Index conditions.
- Mutually exclusive with I(state=absent).
type: str
idxtype:
description:
- Index type (like btree, gist, gin, etc.).
- Mutually exclusive with I(state=absent).
type: str
aliases:
- type
concurrent:
description:
- Enable or disable concurrent mode (CREATE / DROP INDEX CONCURRENTLY).
- Note that if I(concurrent=no), the table will be locked (ACCESS EXCLUSIVE) while the index is built.
For more information about the lock levels see U(https://www.postgresql.org/docs/current/explicit-locking.html).
- If the build is interrupted for any reason when I(concurrent=yes), the index becomes invalid.
In this case it should be dropped and created again.
- Mutually exclusive with I(cascade=yes).
type: bool
default: yes
tablespace:
description:
- Set a tablespace for the index.
- Mutually exclusive with I(state=absent).
required: false
type: str
storage_params:
description:
- Storage parameters like fillfactor, vacuum_cleanup_index_scale_factor, etc.
- Mutually exclusive with I(state=absent).
type: list
cascade:
description:
- Automatically drop objects that depend on the index,
and in turn all objects that depend on those objects (see U(https://www.postgresql.org/docs/current/sql-dropindex.html)).
- Used only with I(state=absent).
- Mutually exclusive with I(concurrent=yes).
type: bool
default: no
notes:
- The index building process can affect database performance.
- To avoid table locks on production databases, use I(concurrent=yes) (default behavior).
- The default authentication assumes that you are either logging in as or
sudo'ing to the postgres account on the host.
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module.
- If the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host.
- For Ubuntu-based systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
requirements:
- psycopg2
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
- name: Create btree index if not exists test_idx concurrently covering columns id and name of table products
postgresql_idx:
db: acme
table: products
columns: id,name
name: test_idx
- name: Create btree index test_idx concurrently with tablespace called ssd and storage parameter
postgresql_idx:
db: acme
table: products
columns:
- id
- name
idxname: test_idx
tablespace: ssd
storage_params:
- fillfactor=90
- name: Create gist index test_gist_idx concurrently on column geo_data of table map
postgresql_idx:
db: somedb
table: map
idxtype: gist
columns: geo_data
idxname: test_gist_idx
# Note: for the example below pg_trgm extension must be installed for gin_trgm_ops
- name: Create gin index gin0_idx not concurrently on column comment of table test
postgresql_idx:
idxname: gin0_idx
table: test
columns: comment gin_trgm_ops
concurrent: no
idxtype: gin
- name: Drop btree test_idx concurrently
postgresql_idx:
db: mydb
idxname: test_idx
state: absent
- name: Drop test_idx cascade
postgresql_idx:
db: mydb
idxname: test_idx
state: absent
cascade: yes
concurrent: no
- name: Create btree index test_idx concurrently on columns id,comment where column id > 1
postgresql_idx:
db: mydb
table: test
columns: id,comment
idxname: test_idx
cond: id > 1
'''
RETURN = r'''
name:
description: Index name.
returned: always
type: str
sample: 'foo_idx'
state:
description: Index state.
returned: always
type: str
sample: 'present'
schema:
description: Schema where index exists.
returned: always
type: str
sample: 'public'
tablespace:
description: Tablespace where index exists.
returned: always
type: str
sample: 'ssd'
query:
description: Query that the module attempted to execute.
returned: always
type: str
sample: 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)'
storage_params:
description: Index storage parameters.
returned: always
type: list
sample: [ "fillfactor=90" ]
valid:
description: Index validity.
returned: always
type: bool
sample: true
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import SQLParseError
from ansible.module_utils.postgres import connect_to_db, postgres_common_argument_spec
from ansible.module_utils._text import to_native
VALID_IDX_TYPES = ('BTREE', 'HASH', 'GIST', 'SPGIST', 'GIN', 'BRIN')
# ===========================================
# PostgreSQL module specific support methods.
#
class Index(object):
def __init__(self, module, cursor, schema, name):
self.name = name
if schema:
self.schema = schema
else:
self.schema = 'public'
self.module = module
self.cursor = cursor
self.info = {
'name': self.name,
'state': 'absent',
'schema': '',
'tblname': '',
'tblspace': '',
'valid': True,
'storage_params': [],
}
self.exists = False
self.__exists_in_db()
self.executed_query = ''
def get_info(self):
"""
Getter to refresh and return table info
"""
self.__exists_in_db()
return self.info
def __exists_in_db(self):
"""
Check index and collect info
"""
query = ("SELECT i.schemaname, i.tablename, i.tablespace, "
"pi.indisvalid, c.reloptions "
"FROM pg_catalog.pg_indexes AS i "
"JOIN pg_catalog.pg_class AS c "
"ON i.indexname = c.relname "
"JOIN pg_catalog.pg_index AS pi "
"ON c.oid = pi.indexrelid "
"WHERE i.indexname = '%s'" % self.name)
res = self.__exec_sql(query)
if res:
self.exists = True
self.info = dict(
name=self.name,
state='present',
schema=res[0][0],
tblname=res[0][1],
tblspace=res[0][2] if res[0][2] else '',
valid=res[0][3],
storage_params=res[0][4] if res[0][4] else [],
)
return True
else:
self.exists = False
return False
def create(self, tblname, idxtype, columns, cond, tblspace, storage_params, concurrent=True):
"""
Create PostgreSQL index.
"""
# To change existing index we should write
# 'postgresql_alter_table' standalone module.
if self.exists:
return False
changed = False
if idxtype is None:
idxtype = "BTREE"
query = 'CREATE INDEX'
if concurrent:
query += ' CONCURRENTLY'
query += ' %s' % self.name
if self.schema:
query += ' ON %s.%s ' % (self.schema, tblname)
else:
query += 'public.%s ' % tblname
query += 'USING %s (%s)' % (idxtype, columns)
if storage_params:
query += ' WITH (%s)' % storage_params
if tblspace:
query += ' TABLESPACE %s' % tblspace
if cond:
query += ' WHERE %s' % cond
self.executed_query = query
if self.__exec_sql(query, ddl=True):
return True
return False
def drop(self, schema, cascade=False, concurrent=True):
"""
Drop PostgreSQL index.
"""
changed = False
if not self.exists:
return False
query = 'DROP INDEX'
if concurrent:
query += ' CONCURRENTLY'
if not schema:
query += ' public.%s' % self.name
else:
query += ' %s.%s' % (schema, self.name)
if cascade:
query += ' CASCADE'
self.executed_query = query
if self.__exec_sql(query, ddl=True):
return True
return False
def __exec_sql(self, query, ddl=False):
try:
self.cursor.execute(query)
if not ddl:
res = self.cursor.fetchall()
return res
return True
except SQLParseError as e:
self.module.fail_json(msg=to_native(e))
except Exception as e:
self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
return False
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
idxname=dict(type='str', required=True, aliases=['name']),
db=dict(type='str', aliases=['login_db']),
state=dict(type='str', default='present', choices=['absent', 'present']),
concurrent=dict(type='bool', default=True),
table=dict(type='str'),
idxtype=dict(type='str', aliases=['type']),
columns=dict(type='list', aliases=['column']),
cond=dict(type='str'),
session_role=dict(type='str'),
tablespace=dict(type='str'),
storage_params=dict(type='list'),
cascade=dict(type='bool', default=False),
schema=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
idxname = module.params["idxname"]
state = module.params["state"]
concurrent = module.params["concurrent"]
table = module.params["table"]
idxtype = module.params["idxtype"]
columns = module.params["columns"]
cond = module.params["cond"]
tablespace = module.params["tablespace"]
storage_params = module.params["storage_params"]
cascade = module.params["cascade"]
schema = module.params["schema"]
if concurrent and cascade:
module.fail_json(msg="Cuncurrent mode and cascade parameters are mutually exclusive")
if state == 'present':
if not table:
module.fail_json(msg="Table must be specified")
if not columns:
module.fail_json(msg="At least one column must be specified")
else:
if table or columns or cond or idxtype or tablespace:
module.fail_json(msg="Index %s is going to be removed, so it does not "
"make sense to pass a table name, columns, conditions, "
"index type, or tablespace" % idxname)
if cascade and state != 'absent':
module.fail_json(msg="cascade parameter used only with state=absent")
db_connection = connect_to_db(module, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Set defaults:
changed = False
# Do job:
index = Index(module, cursor, schema, idxname)
kw = index.get_info()
kw['query'] = ''
#
# check_mode start
if module.check_mode:
if state == 'present' and index.exists:
kw['changed'] = False
module.exit_json(**kw)
elif state == 'present' and not index.exists:
kw['changed'] = True
module.exit_json(**kw)
elif state == 'absent' and not index.exists:
kw['changed'] = False
module.exit_json(**kw)
elif state == 'absent' and index.exists:
kw['changed'] = True
module.exit_json(**kw)
# check_mode end
#
if state == "present":
if idxtype and idxtype.upper() not in VALID_IDX_TYPES:
module.fail_json(msg="Index type '%s' of %s is not in valid types" % (idxtype, idxname))
columns = ','.join(columns)
if storage_params:
storage_params = ','.join(storage_params)
changed = index.create(table, idxtype, columns, cond, tablespace, storage_params, concurrent)
if changed:
kw = index.get_info()
kw['state'] = 'present'
kw['query'] = index.executed_query
else:
changed = index.drop(schema, cascade, concurrent)
if changed:
kw['state'] = 'absent'
kw['query'] = index.executed_query
if not kw['valid']:
db_connection.rollback()
module.warn("Index %s is invalid! ROLLBACK" % idxname)
if not concurrent:
db_connection.commit()
kw['changed'] = changed
db_connection.close()
module.exit_json(**kw)
if __name__ == '__main__':
main()
| veger/ansible | refs/heads/devel | lib/ansible/modules/monitoring/zabbix/zabbix_proxy.py | 7 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Alen Komic
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zabbix_proxy
short_description: Create, delete, get, or update Zabbix proxies
description:
- This module allows you to create, modify, get and delete Zabbix proxy entries.
version_added: "2.5"
author:
- "Alen Komic (@akomic)"
requirements:
- "python >= 2.6"
- "zabbix-api >= 0.5.3"
options:
proxy_name:
description:
- Name of the proxy in Zabbix.
required: true
description:
description:
- Description of the proxy.
required: false
status:
description:
- Type of proxy. (5 - active, 6 - passive)
required: false
choices: ['active', 'passive']
default: "active"
tls_connect:
description:
- Connections to proxy.
required: false
choices: ['no_encryption','PSK','certificate']
default: 'no_encryption'
tls_accept:
description:
- Connections from proxy.
required: false
choices: ['no_encryption','PSK','certificate']
default: 'no_encryption'
tls_issuer:
description:
- Certificate issuer.
required: false
tls_subject:
description:
- Certificate subject.
required: false
tls_psk_identity:
description:
- PSK identity. Required if either I(tls_connect) or I(tls_accept) has PSK enabled.
required: false
tls_psk:
description:
- The preshared key, at least 32 hex digits. Required if either I(tls_connect) or I(tls_accept) has PSK enabled.
required: false
state:
description:
- State of the proxy.
- On C(present), it will create the proxy if it does not exist, or update it if the associated data is different.
- On C(absent), it will remove the proxy if it exists.
required: false
choices: ['present', 'absent']
default: "present"
interface:
description:
- Dictionary with params for the interface when proxy is in passive mode
- 'Available values are: dns, ip, main, port, type and useip.'
- Please review the interface documentation for more information on the supported properties
- U(https://www.zabbix.com/documentation/3.2/manual/api/reference/proxy/object#proxy_interface)
required: false
default: {}
extends_documentation_fragment:
- zabbix
'''
EXAMPLES = '''
- name: Create a new proxy or update an existing proxy's info
local_action:
module: zabbix_proxy
server_url: http://monitor.example.com
login_user: username
login_password: password
proxy_name: ExampleProxy
description: ExampleProxy
status: active
state: present
interface:
type: 0
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 10050
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
try:
from zabbix_api import ZabbixAPI
HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False
class Proxy(object):
def __init__(self, module, zbx):
self._module = module
self._zapi = zbx
self.existing_data = None
def proxy_exists(self, proxy_name):
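# Returns the proxyid of an existing proxy with this host name (caching its data in
# self.existing_data); otherwise returns the empty API result.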
result = self._zapi.proxy.get({
'output': 'extend', 'selectInterface': 'extend',
'filter': {'host': proxy_name}})
if len(result) > 0 and 'proxyid' in result[0]:
self.existing_data = result[0]
return result[0]['proxyid']
else:
return result
def add_proxy(self, data):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
parameters = {}
for item in data:
if data[item]:
parameters[item] = data[item]
proxy_ids_list = self._zapi.proxy.create(parameters)
self._module.exit_json(changed=True,
result="Successfully added proxy %s (%s)" %
(data['host'], data['status']))
if len(proxy_ids_list) >= 1:
return proxy_ids_list['proxyids'][0]
except Exception as e:
self._module.fail_json(msg="Failed to create proxy %s: %s" %
(data['host'], e))
def delete_proxy(self, proxy_id, proxy_name):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.proxy.delete([proxy_id])
self._module.exit_json(changed=True,
result="Successfully deleted" +
" proxy %s" % proxy_name)
except Exception as e:
self._module.fail_json(msg="Failed to delete proxy %s: %s" %
(proxy_name, str(e)))
def compile_interface_params(self, new_interface):
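# Merges the interface stored in Zabbix with the requested one and returns the merged
# dict only when it differs from the stored interface; an empty dict means no update is needed.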
old_interface = {}
if 'interface' in self.existing_data and \
len(self.existing_data['interface']) > 0:
old_interface = self.existing_data['interface']
final_interface = old_interface.copy()
final_interface.update(new_interface)
final_interface = dict((k, str(v)) for k, v in final_interface.items())
if final_interface != old_interface:
return final_interface
else:
return {}
def update_proxy(self, proxy_id, data):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
parameters = {'proxyid': proxy_id}
for item in data:
if data[item] and item in self.existing_data and \
self.existing_data[item] != data[item]:
parameters[item] = data[item]
if 'interface' in parameters:
parameters.pop('interface')
if 'interface' in data and data['status'] == '6':
new_interface = self.compile_interface_params(data['interface'])
if len(new_interface) > 0:
parameters['interface'] = new_interface
if len(parameters) > 1:
self._zapi.proxy.update(parameters)
self._module.exit_json(
changed=True,
result="Successfully updated proxy %s (%s)" %
(data['host'], proxy_id)
)
else:
self._module.exit_json(changed=False)
except Exception as e:
self._module.fail_json(msg="Failed to update proxy %s: %s" %
(data['host'], e))
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(type='str', required=True, aliases=['url']),
login_user=dict(type='str', required=True),
login_password=dict(type='str', required=True, no_log=True),
proxy_name=dict(type='str', required=True),
http_login_user=dict(type='str', required=False, default=None),
http_login_password=dict(type='str', required=False,
default=None, no_log=True),
validate_certs=dict(type='bool', required=False, default=True),
status=dict(default="active", choices=['active', 'passive']),
state=dict(default="present", choices=['present', 'absent']),
description=dict(type='str', required=False),
tls_connect=dict(default='no_encryption',
choices=['no_encryption', 'PSK', 'certificate']),
tls_accept=dict(default='no_encryption',
choices=['no_encryption', 'PSK', 'certificate']),
tls_issuer=dict(type='str', required=False, default=None),
tls_subject=dict(type='str', required=False, default=None),
tls_psk_identity=dict(type='str', required=False, default=None),
tls_psk=dict(type='str', required=False, default=None),
timeout=dict(type='int', default=10),
interface=dict(type='dict', required=False, default={})
),
supports_check_mode=True
)
if not HAS_ZABBIX_API:
module.fail_json(msg="Missing required zabbix-api module" +
" (check docs or install with:" +
" pip install zabbix-api)")
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
http_login_user = module.params['http_login_user']
http_login_password = module.params['http_login_password']
validate_certs = module.params['validate_certs']
proxy_name = module.params['proxy_name']
description = module.params['description']
status = module.params['status']
tls_connect = module.params['tls_connect']
tls_accept = module.params['tls_accept']
tls_issuer = module.params['tls_issuer']
tls_subject = module.params['tls_subject']
tls_psk_identity = module.params['tls_psk_identity']
tls_psk = module.params['tls_psk']
state = module.params['state']
timeout = module.params['timeout']
interface = module.params['interface']
# convert the status to the numeric values Zabbix expects: active = 5, passive = 6
status = 6 if status == "passive" else 5
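# Map the TLS options to the numeric values the Zabbix API expects:
# 1 = no_encryption, 2 = PSK, 4 = certificate (applied in the two blocks below).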
if tls_connect == 'certificate':
tls_connect = 4
elif tls_connect == 'PSK':
tls_connect = 2
else:
tls_connect = 1
if tls_accept == 'certificate':
tls_accept = 4
elif tls_accept == 'PSK':
tls_accept = 2
else:
tls_accept = 1
zbx = None
# login to zabbix
try:
zbx = ZabbixAPI(server_url, timeout=timeout,
user=http_login_user,
passwd=http_login_password,
validate_certs=validate_certs)
zbx.login(login_user, login_password)
except Exception as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
proxy = Proxy(module, zbx)
# check if proxy already exists
proxy_id = proxy.proxy_exists(proxy_name)
if proxy_id:
if state == "absent":
# remove proxy
proxy.delete_proxy(proxy_id, proxy_name)
else:
proxy.update_proxy(proxy_id, {
'host': proxy_name,
'description': description,
'status': str(status),
'tls_connect': str(tls_connect),
'tls_accept': str(tls_accept),
'tls_issuer': tls_issuer,
'tls_subject': tls_subject,
'tls_psk_identity': tls_psk_identity,
'tls_psk': tls_psk,
'interface': interface
})
else:
if state == "absent":
# the proxy is already deleted.
module.exit_json(changed=False)
proxy_id = proxy.add_proxy(data={
'host': proxy_name,
'description': description,
'status': str(status),
'tls_connect': str(tls_connect),
'tls_accept': str(tls_accept),
'tls_issuer': tls_issuer,
'tls_subject': tls_subject,
'tls_psk_identity': tls_psk_identity,
'tls_psk': tls_psk,
'interface': interface
})
if __name__ == '__main__':
main()
| makiftasova/rpi-auxdisplay | refs/heads/master | client/datetimedaemon.py | 1 |
import datetime
import json
import logging
import utils
class DateTimeDaemon(utils.LoopTask):
__DATA_TYPE = 'datetime'
def __init__(self, display, interval=0.25):
super(DateTimeDaemon, self).__init__(interval=interval, name="datetimedaemon-thread")
self.display = display
self.logger = logging.getLogger(__name__)
def loop(self):
dt_now = datetime.datetime.now()
datetime_info = {'date': dt_now.strftime("%Y %m %d"), 'time': dt_now.strftime("%H:%M")}
json_str = json.dumps({'type': self.__DATA_TYPE, 'data': datetime_info})
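# Example of the JSON pushed to the display (values illustrative):
# {"type": "datetime", "data": {"date": "2024 01 15", "time": "09:30"}}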
self.display.send_json(json_str)
self.logger.info("datetime pushed to display")
| casanovainformationservices/LazyLibrarian | refs/heads/master | cherrypy/tutorial/tut01_helloworld.py | 7 |
"""
Tutorial - Hello World
The most basic (working) CherryPy application possible.
"""
# Import CherryPy global namespace
import cherrypy
class HelloWorld:
""" Sample request handler class. """
# Expose the index method through the web. CherryPy will never
# publish methods that don't have the exposed attribute set to True.
@cherrypy.expose
def index(self):
# CherryPy will call this method for the root URI ("/") and send
# its return value to the client. Because this is tutorial
# lesson number 01, we'll just send something really simple.
# How about...
return "Hello world!"
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(HelloWorld(), config=tutconf)
| attakei/readthedocs-oauth | refs/heads/master | readthedocs/__init__.py | 12133432 | |
| faun/django_test | refs/heads/master | tests/regressiontests/staticfiles_tests/apps/__init__.py | 12133432 | |
| Pajinek/spacewalk | refs/heads/master | backend/server/test/unit-test/rhnSQL/test_rhnChannel.py | 12 |
#!/usr/bin/python
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
#
#
import sys
import time
import unittest
from spacewalk.server import rhnSQL, rhnChannel
import misc_functions
DB_SETTINGS = misc_functions.db_settings("oracle")
class Tests(unittest.TestCase):
def setUp(self):
rhnSQL.initDB(
backend="oracle",
username=DB_SETTINGS["user"],
password=DB_SETTINGS["password"],
database=DB_SETTINGS["database"]
)
rhnSQL.clear_log_id()
def tearDown(self):
# Roll back any unsaved data
rhnSQL.rollback()
def test_new_channel_1(self):
"""Tests new channel creation"""
cf = rhnChannel.ChannelFamily()
cf.load_from_dict(self._new_channel_family_dict())
cf.save()
label = cf.get_label()
vdict = self._new_channel_dict(label=label, channel_family=label)
c = rhnChannel.Channel()
for k, v in vdict.items():
method = getattr(c, "set_" + k)
method(v)
c.save()
channel_id = c.get_id()
c = rhnChannel.Channel()
c.load_by_label(label)
for k, v in vdict.items():
method = getattr(c, "get_" + k)
dbv = method()
self.assertEqual(v, dbv)
rhnSQL.commit()
return c
def test_new_channel_2(self):
"""Tests new channel creation from dictionary"""
cf = rhnChannel.ChannelFamily()
cf.load_from_dict(self._new_channel_family_dict())
cf.save()
label = cf.get_label()
vdict = self._new_channel_dict(label=label, channel_family=label)
c = rhnChannel.Channel()
c.load_from_dict(vdict)
c.save()
channel_id = c.get_id()
c = rhnChannel.Channel()
c.load_by_label(label)
for k, v in vdict.items():
method = getattr(c, "get_" + k)
dbv = method()
self.assertEqual(v, dbv)
rhnSQL.commit()
return c
def test_new_channel_family_1(self):
"""Tests new channel family creation"""
vdict = self._new_channel_family_dict()
label = vdict['label']
c = rhnChannel.ChannelFamily()
for k, v in vdict.items():
method = getattr(c, "set_" + k)
method(v)
c.save()
channel_id = c.get_id()
c = rhnChannel.ChannelFamily()
c.load_by_label(label)
for k, v in vdict.items():
method = getattr(c, "get_" + k)
dbv = method()
self.assertEqual(v, dbv)
rhnSQL.commit()
return c
def test_new_channel_family_2(self):
"""Tests new channel family creation from a dict"""
vdict = self._new_channel_family_dict()
label = vdict['label']
c = rhnChannel.ChannelFamily()
c.load_from_dict(vdict)
c.save()
channel_id = c.get_id()
c = rhnChannel.ChannelFamily()
c.load_by_label(label)
for k, v in vdict.items():
method = getattr(c, "get_" + k)
dbv = method()
self.assertEqual(v, dbv)
rhnSQL.commit()
return c
def test_list_channel_families_1(self):
"""Tests rhnChannel.list_channel_families"""
channel_families = rhnChannel.list_channel_families()
self.failUnless(len(channel_families) > 0)
def test_list_channels_1(self):
"""Tests rhnChannel.list_channels"""
# create some channel
cf = rhnChannel.ChannelFamily()
cf.load_from_dict(self._new_channel_family_dict())
cf.save()
label = cf.get_label()
vdict = self._new_channel_dict(label=label, channel_family=label)
c = rhnChannel.Channel()
for k, v in vdict.items():
method = getattr(c, "set_" + k)
method(v)
c.save()
channels = rhnChannel.list_channels(pattern="rhn-unittest-%")
self.failUnless(len(channels) > 0)
def _new_channel_dict(self, **kwargs):
if not hasattr(self, '_counter'):
self._counter = 0
label = kwargs.get('label')
if label is None:
label = 'rhn-unittest-%.3f-%s' % (time.time(), self._counter)
self._counter = self._counter + 1
release = kwargs.get('release') or 'release-' + label
os = kwargs.get('os') or 'Unittest Distro'
if kwargs.has_key('org_id'):
org_id = kwargs['org_id']
else:
org_id = misc_functions.create_new_org()
vdict = {
'label': label,
'name': kwargs.get('name') or label,
'summary': kwargs.get('summary') or label,
'description': kwargs.get('description') or label,
'basedir': kwargs.get('basedir') or '/',
'channel_arch': kwargs.get('channel_arch') or 'channel-x86_64',
'channel_families': [kwargs.get('channel_family') or label],
'org_id': org_id,
'gpg_key_url': kwargs.get('gpg_key_url'),
'gpg_key_id': kwargs.get('gpg_key_id'),
'gpg_key_fp': kwargs.get('gpg_key_fp'),
'end_of_life': kwargs.get('end_of_life'),
'dists': [{
'release': release,
'os': os,
}],
}
return vdict
def _new_channel_family_dict(self, **kwargs):
if not hasattr(self, '_counter'):
self._counter = 0
label = kwargs.get('label')
if label is None:
label = 'rhn-unittest-%.3f-%s' % (time.time(), self._counter)
self._counter = self._counter + 1
product_url = kwargs.get('product_url') or 'http://rhn.redhat.com'
vdict = {
'label': label,
'name': kwargs.get('name') or label,
'product_url': product_url,
}
return vdict
if __name__ == '__main__':
sys.exit(unittest.main() or 0)
| lsaffre/lino | refs/heads/master | lino/modlib/smtpd/signals.py | 1 |
# -*- coding: UTF-8 -*-
# Copyright 2014-2017 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from django.dispatch import Signal
mail_received = Signal(['msg'])
"""Sent for every incoming mail.
sender:
the Site instance
peer, mailfrom, rcpttos, data:
are those passed to the standard Python
`smtpd.SMTPServer.process_message` method:
peer is the remote host’s address, mailfrom is the envelope
originator, rcpttos are the envelope recipients and data is a
string containing the contents of the e-mail (which should be in
:rfc:`2822` format).
"""
| trueblue2704/AskMeAnything | refs/heads/master | lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/lxmletree.py | 124 |
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from lxml import etree
from ..treebuilders.etree import tag_regexp
from gettext import gettext
_ = gettext
from . import _base
from .. import ihatexml
def ensure_str(s):
if s is None:
return None
elif isinstance(s, text_type):
return s
else:
return s.decode("utf-8", "strict")
class Root(object):
def __init__(self, et):
self.elementtree = et
self.children = []
if et.docinfo.internalDTD:
self.children.append(Doctype(self,
ensure_str(et.docinfo.root_name),
ensure_str(et.docinfo.public_id),
ensure_str(et.docinfo.system_url)))
root = et.getroot()
node = root
while node.getprevious() is not None:
node = node.getprevious()
while node is not None:
self.children.append(node)
node = node.getnext()
self.text = None
self.tail = None
def __getitem__(self, key):
return self.children[key]
def getnext(self):
return None
def __len__(self):
return 1
class Doctype(object):
def __init__(self, root_node, name, public_id, system_id):
self.root_node = root_node
self.name = name
self.public_id = public_id
self.system_id = system_id
self.text = None
self.tail = None
def getnext(self):
return self.root_node.children[1]
class FragmentRoot(Root):
def __init__(self, children):
self.children = [FragmentWrapper(self, child) for child in children]
self.text = self.tail = None
def getnext(self):
return None
class FragmentWrapper(object):
def __init__(self, fragment_root, obj):
self.root_node = fragment_root
self.obj = obj
if hasattr(self.obj, 'text'):
self.text = ensure_str(self.obj.text)
else:
self.text = None
if hasattr(self.obj, 'tail'):
self.tail = ensure_str(self.obj.tail)
else:
self.tail = None
def __getattr__(self, name):
return getattr(self.obj, name)
def getnext(self):
siblings = self.root_node.children
idx = siblings.index(self)
if idx < len(siblings) - 1:
return siblings[idx + 1]
else:
return None
def __getitem__(self, key):
return self.obj[key]
def __bool__(self):
return bool(self.obj)
def getparent(self):
return None
def __str__(self):
return str(self.obj)
def __unicode__(self):
return str(self.obj)
def __len__(self):
return len(self.obj)
class TreeWalker(_base.NonRecursiveTreeWalker):
def __init__(self, tree):
if hasattr(tree, "getroot"):
tree = Root(tree)
elif isinstance(tree, list):
tree = FragmentRoot(tree)
_base.NonRecursiveTreeWalker.__init__(self, tree)
self.filter = ihatexml.InfosetFilter()
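# lxml has no standalone text nodes, so this walker represents text positions as
# (element, "text") / (element, "tail") tuples; see getNodeDetails and getFirstChild below.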
def getNodeDetails(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
return _base.TEXT, ensure_str(getattr(node, key))
elif isinstance(node, Root):
return (_base.DOCUMENT,)
elif isinstance(node, Doctype):
return _base.DOCTYPE, node.name, node.public_id, node.system_id
elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"):
return _base.TEXT, node.obj
elif node.tag == etree.Comment:
return _base.COMMENT, ensure_str(node.text)
elif node.tag == etree.Entity:
return _base.ENTITY, ensure_str(node.text)[1:-1] # strip &;
else:
# This is assumed to be an ordinary element
match = tag_regexp.match(ensure_str(node.tag))
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = ensure_str(node.tag)
attrs = {}
for name, value in list(node.attrib.items()):
name = ensure_str(name)
value = ensure_str(value)
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag),
attrs, len(node) > 0 or node.text)
def getFirstChild(self, node):
assert not isinstance(node, tuple), _("Text nodes have no children")
assert len(node) or node.text, "Node has no children"
if node.text:
return (node, "text")
else:
return node[0]
def getNextSibling(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
if key == "text":
# XXX: we cannot use a "bool(node) and node[0] or None" construct here
# because node[0] might evaluate to False if it has no child element
if len(node):
return node[0]
else:
return None
else: # tail
return node.getnext()
return (node, "tail") if node.tail else node.getnext()
def getParentNode(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
if key == "text":
return node
# else: fallback to "normal" processing
return node.getparent()
| photoninger/ansible | refs/heads/devel | lib/ansible/modules/network/system/net_ping.py | 9 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: net_ping
version_added: "2.4"
author: "Jacob McGill (@jmcgill298)"
short_description: Tests reachability using ping from a network device
description:
- Tests reachability using ping from network device to a remote destination.
- For Windows targets, use the M(win_ping) module instead.
- For targets running Python, use the M(ping) module instead.
options:
count:
description:
- Number of packets to send.
required: false
default: 5
dest:
description:
- The IP Address or hostname (resolvable by switch) of the remote node.
required: true
source:
description:
- The source IP Address.
required: false
default: null
state:
description:
- Determines if the expected result is success or fail.
choices: [ absent, present ]
default: present
vrf:
description:
- The VRF to use for forwarding.
required: false
default: default
notes:
- For Windows targets, use the M(win_ping) module instead.
- For targets running Python, use the M(ping) module instead.
'''
EXAMPLES = r'''
- provider:
host: "{{ ansible_host }}"
username: "{{ username }}"
password: "{{ password }}"
network_os: "{{ network_os }}"
- name: Test reachability to 10.10.10.10 using default vrf
net_ping:
provider: "{{ provider }}"
dest: 10.10.10.10
- name: Test reachability to 10.20.20.20 using prod vrf
net_ping:
provider: "{{ provider }}"
dest: 10.20.20.20
vrf: prod
- name: Test unreachability to 10.30.30.30 using default vrf
net_ping:
provider: "{{ provider }}"
dest: 10.30.30.30
state: absent
- name: Test reachability to 10.40.40.40 using prod vrf and setting count and source
net_ping:
provider: "{{ provider }}"
dest: 10.40.40.40
source: loopback0
vrf: prod
count: 20
'''
RETURN = r'''
commands:
description: Show the command sent.
returned: always
type: list
sample: ["ping vrf prod 10.40.40.40 count 20 source loopback0"]
packet_loss:
description: Percentage of packets lost.
returned: always
type: str
sample: "0%"
packets_rx:
description: Packets successfully received.
returned: always
type: int
sample: 20
packets_tx:
description: Packets successfully transmitted.
returned: always
type: int
sample: 20
rtt:
description: Show RTT stats.
returned: always
type: dict
sample: {"avg": 2, "max": 8, "min": 1}
'''
| xyzz/vcmi-build | refs/heads/master | project/jni/python/src/Demo/tkinter/guido/svkill.py | 47 |
#! /usr/bin/env python
# Tkinter interface to SYSV `ps' and `kill' commands.
from Tkinter import *
if TkVersion < 4.0:
raise ImportError, "This version of svkill requires Tk 4.0 or later"
from string import splitfields
from string import split
import commands
import os
user = os.environ['LOGNAME']
class BarButton(Menubutton):
def __init__(self, master=None, **cnf):
apply(Menubutton.__init__, (self, master), cnf)
self.pack(side=LEFT)
self.menu = Menu(self, name='menu')
self['menu'] = self.menu
class Kill(Frame):
# view_list entries are (name, option); format_list entries are (name, option, pid_column)
view_list = [
('Default', ''),
('Every (-e)', '-e'),
('Non process group leaders (-d)', '-d'),
('Non leaders with tty (-a)', '-a'),
('For this user (-u %s)' % user, '-u %s' % user),
]
format_list = [
('Default', '', 0),
('Long (-l)', '-l', 3),
('Full (-f)', '-f', 1),
('Full Long (-f -l)', '-l -f', 3),
('Session and group ID (-j)', '-j', 0),
('Scheduler properties (-c)', '-c', 0),
]
def kill(self, selected):
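# The pid column position in the ps output depends on the selected format;
# format_list stores that column index as the third element of each entry.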
c = self.format_list[self.format.get()][2]
pid = split(selected)[c]
os.system('kill -9 ' + pid)
self.do_update()
def do_update(self):
format = self.format_list[self.format.get()][1]
view = self.view_list[self.view.get()][1]
s = commands.getoutput('ps %s %s' % (view, format))
list = splitfields(s, '\n')
self.header.set(list[0] + ' ')
del list[0]
self.frame.list.delete(0, AtEnd())
for line in list:
self.frame.list.insert(0, line)
def do_motion(self, e):
e.widget.select_clear('0', 'end')
e.widget.select_set(e.widget.nearest(e.y))
def do_leave(self, e):
e.widget.select_clear('0', 'end')
def do_1(self, e):
self.kill(e.widget.get(e.widget.nearest(e.y)))
def __init__(self, master=None, **cnf):
apply(Frame.__init__, (self, master), cnf)
self.pack(expand=1, fill=BOTH)
self.bar = Frame(self, name='bar', relief=RAISED,
borderwidth=2)
self.bar.pack(fill=X)
self.bar.file = BarButton(self.bar, text='File')
self.bar.file.menu.add_command(
label='Quit', command=self.quit)
self.bar.view = BarButton(self.bar, text='View')
self.bar.format = BarButton(self.bar, text='Format')
self.view = IntVar(self)
self.view.set(0)
self.format = IntVar(self)
self.format.set(0)
for num in range(len(self.view_list)):
label, option = self.view_list[num]
self.bar.view.menu.add_radiobutton(
label=label,
command=self.do_update,
variable=self.view,
value=num)
for num in range(len(self.format_list)):
label, option, col = self.format_list[num]
self.bar.format.menu.add_radiobutton(
label=label,
command=self.do_update,
variable=self.format,
value=num)
self.bar.tk_menuBar(self.bar.file,
self.bar.view,
self.bar.format)
self.frame = Frame(self, relief=RAISED, borderwidth=2)
self.frame.pack(expand=1, fill=BOTH)
self.header = StringVar(self)
self.frame.label = Label(
self.frame, relief=FLAT, anchor=NW, borderwidth=0,
font='*-Courier-Bold-R-Normal-*-120-*',
textvariable=self.header)
self.frame.label.pack(fill=Y, anchor=W)
self.frame.vscroll = Scrollbar(self.frame, orient=VERTICAL)
self.frame.list = Listbox(
self.frame,
relief=SUNKEN,
font='*-Courier-Medium-R-Normal-*-120-*',
width=40, height=10,
selectbackground='#eed5b7',
selectborderwidth=0,
selectmode=BROWSE,
yscroll=self.frame.vscroll.set)
self.frame.vscroll['command'] = self.frame.list.yview
self.frame.vscroll.pack(side=RIGHT, fill=Y)
self.frame.list.pack(expand=1, fill=BOTH)
self.update = Button(self, text='Update',
command=self.do_update)
self.update.pack(fill=X)
self.frame.list.bind('<Motion>', self.do_motion)
self.frame.list.bind('<Leave>', self.do_leave)
self.frame.list.bind('<1>', self.do_1)
self.do_update()
if __name__ == '__main__':
kill = Kill(None, borderwidth=5)
kill.winfo_toplevel().title('Tkinter Process Killer (SYSV)')
kill.winfo_toplevel().minsize(1, 1)
kill.mainloop()
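# Note: this demo targets Python 2 and the old Tkinter API (the `Tkinter` and
# `commands` modules, the apply() builtin); on Python 3 the rough equivalents
# are tkinter, subprocess, and direct function calls.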
|
lmprice/ansible
|
refs/heads/devel
|
test/units/cli/test_console.py
|
194
|
# (c) 2016, Thilo Uttendorfer <tlo@sengaya.de>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible.cli.console import ConsoleCLI
class TestConsoleCLI(unittest.TestCase):
def test_parse(self):
cli = ConsoleCLI([])
cli.parse()
self.assertTrue(cli.parser is not None)
def test_module_args(self):
cli = ConsoleCLI([])
cli.parse()
res = cli.module_args('copy')
self.assertTrue(cli.parser is not None)
self.assertIn('src', res)
self.assertIn('backup', res)
self.assertIsInstance(res, list)
@patch('ansible.utils.display.Display.display')
def test_helpdefault(self, mock_display):
cli = ConsoleCLI([])
cli.parse()
cli.modules = set(['copy'])
cli.helpdefault('copy')
self.assertTrue(cli.parser is not None)
self.assertTrue(len(mock_display.call_args_list) > 0,
"display.display should have been called but was not")
|
elba7r/frameworking
|
refs/heads/master
|
frappe/handler.py
|
2
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import frappe.utils
import frappe.async
import frappe.sessions
import frappe.utils.file_manager
import frappe.desk.form.run_method
from frappe.utils.response import build_response
def handle():
"""handle request"""
cmd = frappe.local.form_dict.cmd
if cmd!='login':
execute_cmd(cmd)
return build_response("json")
def execute_cmd(cmd, from_async=False):
"""execute a request as python module"""
for hook in frappe.get_hooks("override_whitelisted_methods", {}).get(cmd, []):
# override using the first hook
cmd = hook
break
try:
method = get_attr(cmd)
except:
frappe.throw('Invalid method', frappe.NotFound)
if from_async:
method = method.queue
is_whitelisted(method)
ret = frappe.call(method, **frappe.form_dict)
# returns with a message
if ret:
frappe.response['message'] = ret
def is_whitelisted(method):
# check if whitelisted
if frappe.session['user'] == 'Guest':
if (method not in frappe.guest_methods):
frappe.msgprint(_("Not permitted"))
raise frappe.PermissionError('Not Allowed, {0}'.format(method))
if method not in frappe.xss_safe_methods:
# strictly sanitize form_dict
# escapes html characters like <> except for predefined tags like a, b, ul etc.
for key, value in frappe.form_dict.items():
if isinstance(value, basestring):
frappe.form_dict[key] = frappe.utils.sanitize_html(value)
else:
if not method in frappe.whitelisted:
frappe.msgprint(_("Not permitted"))
raise frappe.PermissionError('Not Allowed, {0}'.format(method))
@frappe.whitelist(allow_guest=True)
def version():
return frappe.__version__
@frappe.whitelist()
def runserverobj(method, docs=None, dt=None, dn=None, arg=None, args=None):
frappe.desk.form.run_method.runserverobj(method, docs=docs, dt=dt, dn=dn, arg=arg, args=args)
@frappe.whitelist(allow_guest=True)
def logout():
frappe.local.login_manager.logout()
frappe.db.commit()
@frappe.whitelist(allow_guest=True)
def web_logout():
frappe.local.login_manager.logout()
frappe.db.commit()
frappe.respond_as_web_page("Logged Out", """<p><a href="/index" class="text-muted">Back to Home</a></p>""")
@frappe.whitelist(allow_guest=True)
def run_custom_method(doctype, name, custom_method):
"""cmd=run_custom_method&doctype={doctype}&name={name}&custom_method={custom_method}"""
doc = frappe.get_doc(doctype, name)
if getattr(doc, custom_method, frappe._dict()).is_whitelisted:
frappe.call(getattr(doc, custom_method), **frappe.local.form_dict)
else:
frappe.throw(_("Not permitted"), frappe.PermissionError)
@frappe.whitelist()
def uploadfile():
try:
if frappe.form_dict.get('from_form'):
try:
ret = frappe.utils.file_manager.upload()
except frappe.DuplicateEntryError:
				# duplicate entry: ignore it, return no result and roll back
ret = None
frappe.db.rollback()
else:
if frappe.form_dict.get('method'):
method = frappe.get_attr(frappe.form_dict.method)
is_whitelisted(method)
ret = method()
except Exception:
frappe.errprint(frappe.utils.get_traceback())
ret = None
return ret
def get_attr(cmd):
"""get method object from cmd"""
if '.' in cmd:
method = frappe.get_attr(cmd)
else:
method = globals()[cmd]
frappe.log("method:" + cmd)
return method
@frappe.whitelist()
def ping():
return "pong"
|
marcoantoniooliveira/labweb
|
refs/heads/master
|
oscar/lib/python2.7/site-packages/pyflakes/test/test_other.py
|
6
|
"""
Tests for various Pyflakes behavior.
"""
from sys import version_info
from pyflakes import messages as m
from pyflakes.test.harness import TestCase, skip, skipIf
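# Each test below feeds source code to self.flakes(); the positional arguments
# after the source are the message classes the checker is expected to report.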
class Test(TestCase):
def test_duplicateArgs(self):
self.flakes('def fu(bar, bar): pass', m.DuplicateArgument)
def test_localReferencedBeforeAssignment(self):
self.flakes('''
a = 1
def f():
a; a=1
f()
''', m.UndefinedLocal, m.UnusedVariable)
def test_redefinedInListComp(self):
"""
Test that shadowing a variable in a list comprehension raises
a warning.
"""
self.flakes('''
a = 1
[1 for a, b in [(1, 2)]]
''', m.RedefinedInListComp)
self.flakes('''
class A:
a = 1
[1 for a, b in [(1, 2)]]
''', m.RedefinedInListComp)
self.flakes('''
def f():
a = 1
[1 for a, b in [(1, 2)]]
''', m.RedefinedInListComp)
self.flakes('''
[1 for a, b in [(1, 2)]]
[1 for a, b in [(1, 2)]]
''')
self.flakes('''
for a, b in [(1, 2)]:
pass
[1 for a, b in [(1, 2)]]
''')
def test_redefinedInGenerator(self):
"""
Test that reusing a variable in a generator does not raise
a warning.
"""
self.flakes('''
a = 1
(1 for a, b in [(1, 2)])
''')
self.flakes('''
class A:
a = 1
list(1 for a, b in [(1, 2)])
''')
self.flakes('''
def f():
a = 1
(1 for a, b in [(1, 2)])
''', m.UnusedVariable)
self.flakes('''
(1 for a, b in [(1, 2)])
(1 for a, b in [(1, 2)])
''')
self.flakes('''
for a, b in [(1, 2)]:
pass
(1 for a, b in [(1, 2)])
''')
@skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_redefinedInSetComprehension(self):
"""
Test that reusing a variable in a set comprehension does not raise
a warning.
"""
self.flakes('''
a = 1
{1 for a, b in [(1, 2)]}
''')
self.flakes('''
class A:
a = 1
{1 for a, b in [(1, 2)]}
''')
self.flakes('''
def f():
a = 1
{1 for a, b in [(1, 2)]}
''', m.UnusedVariable)
self.flakes('''
{1 for a, b in [(1, 2)]}
{1 for a, b in [(1, 2)]}
''')
self.flakes('''
for a, b in [(1, 2)]:
pass
{1 for a, b in [(1, 2)]}
''')
@skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_redefinedInDictComprehension(self):
"""
Test that reusing a variable in a dict comprehension does not raise
a warning.
"""
self.flakes('''
a = 1
{1: 42 for a, b in [(1, 2)]}
''')
self.flakes('''
class A:
a = 1
{1: 42 for a, b in [(1, 2)]}
''')
self.flakes('''
def f():
a = 1
{1: 42 for a, b in [(1, 2)]}
''', m.UnusedVariable)
self.flakes('''
{1: 42 for a, b in [(1, 2)]}
{1: 42 for a, b in [(1, 2)]}
''')
self.flakes('''
for a, b in [(1, 2)]:
pass
{1: 42 for a, b in [(1, 2)]}
''')
def test_redefinedFunction(self):
"""
Test that shadowing a function definition with another one raises a
warning.
"""
self.flakes('''
def a(): pass
def a(): pass
''', m.RedefinedWhileUnused)
def test_redefinedClassFunction(self):
"""
Test that shadowing a function definition in a class suite with another
one raises a warning.
"""
self.flakes('''
class A:
def a(): pass
def a(): pass
''', m.RedefinedWhileUnused)
def test_redefinedIfElseFunction(self):
"""
Test that shadowing a function definition twice in an if
and else block does not raise a warning.
"""
self.flakes('''
if True:
def a(): pass
else:
def a(): pass
''')
def test_redefinedIfFunction(self):
"""
Test that shadowing a function definition within an if block
raises a warning.
"""
self.flakes('''
if True:
def a(): pass
def a(): pass
''', m.RedefinedWhileUnused)
def test_redefinedTryExceptFunction(self):
"""
Test that shadowing a function definition twice in try
and except block does not raise a warning.
"""
self.flakes('''
try:
def a(): pass
except:
def a(): pass
''')
def test_redefinedTryFunction(self):
"""
Test that shadowing a function definition within a try block
raises a warning.
"""
self.flakes('''
try:
def a(): pass
def a(): pass
except:
pass
''', m.RedefinedWhileUnused)
def test_redefinedIfElseInListComp(self):
"""
Test that shadowing a variable in a list comprehension in
an if and else block does not raise a warning.
"""
self.flakes('''
if False:
a = 1
else:
[a for a in '12']
''')
def test_redefinedElseInListComp(self):
"""
Test that shadowing a variable in a list comprehension in
an else (or if) block raises a warning.
"""
self.flakes('''
if False:
pass
else:
a = 1
[a for a in '12']
''', m.RedefinedInListComp)
def test_functionDecorator(self):
"""
Test that shadowing a function definition with a decorated version of
that function does not raise a warning.
"""
self.flakes('''
from somewhere import somedecorator
def a(): pass
a = somedecorator(a)
''')
def test_classFunctionDecorator(self):
"""
Test that shadowing a function definition in a class suite with a
decorated version of that function does not raise a warning.
"""
self.flakes('''
class A:
def a(): pass
a = classmethod(a)
''')
@skipIf(version_info < (2, 6), "Python >= 2.6 only")
def test_modernProperty(self):
self.flakes("""
class A:
@property
def t(self):
pass
@t.setter
def t(self, value):
pass
@t.deleter
def t(self):
pass
""")
def test_unaryPlus(self):
"""Don't die on unary +."""
self.flakes('+1')
def test_undefinedBaseClass(self):
"""
If a name in the base list of a class definition is undefined, a
warning is emitted.
"""
self.flakes('''
class foo(foo):
pass
''', m.UndefinedName)
def test_classNameUndefinedInClassBody(self):
"""
If a class name is used in the body of that class's definition and
the name is not already defined, a warning is emitted.
"""
self.flakes('''
class foo:
foo
''', m.UndefinedName)
def test_classNameDefinedPreviously(self):
"""
If a class name is used in the body of that class's definition and
the name was previously defined in some other way, no warning is
emitted.
"""
self.flakes('''
foo = None
class foo:
foo
''')
def test_classRedefinition(self):
"""
If a class is defined twice in the same module, a warning is emitted.
"""
self.flakes('''
class Foo:
pass
class Foo:
pass
''', m.RedefinedWhileUnused)
def test_functionRedefinedAsClass(self):
"""
If a function is redefined as a class, a warning is emitted.
"""
self.flakes('''
def Foo():
pass
class Foo:
pass
''', m.RedefinedWhileUnused)
def test_classRedefinedAsFunction(self):
"""
If a class is redefined as a function, a warning is emitted.
"""
self.flakes('''
class Foo:
pass
def Foo():
pass
''', m.RedefinedWhileUnused)
@skip("todo: Too hard to make this warn but other cases stay silent")
def test_doubleAssignment(self):
"""
If a variable is re-assigned to without being used, no warning is
emitted.
"""
self.flakes('''
x = 10
x = 20
''', m.RedefinedWhileUnused)
def test_doubleAssignmentConditionally(self):
"""
If a variable is re-assigned within a conditional, no warning is
emitted.
"""
self.flakes('''
x = 10
if True:
x = 20
''')
def test_doubleAssignmentWithUse(self):
"""
If a variable is re-assigned to after being used, no warning is
emitted.
"""
self.flakes('''
x = 10
y = x * 2
x = 20
''')
def test_comparison(self):
"""
If a defined name is used on either side of any of the six comparison
operators, no warning is emitted.
"""
self.flakes('''
x = 10
y = 20
x < y
x <= y
x == y
x != y
x >= y
x > y
''')
def test_identity(self):
"""
If a defined name is used on either side of an identity test, no
warning is emitted.
"""
self.flakes('''
x = 10
y = 20
x is y
x is not y
''')
def test_containment(self):
"""
If a defined name is used on either side of a containment test, no
warning is emitted.
"""
self.flakes('''
x = 10
y = 20
x in y
x not in y
''')
def test_loopControl(self):
"""
break and continue statements are supported.
"""
self.flakes('''
for x in [1, 2]:
break
''')
self.flakes('''
for x in [1, 2]:
continue
''')
def test_ellipsis(self):
"""
Ellipsis in a slice is supported.
"""
self.flakes('''
[1, 2][...]
''')
def test_extendedSlice(self):
"""
Extended slices are supported.
"""
self.flakes('''
x = 3
[1, 2][x,:]
''')
def test_varAugmentedAssignment(self):
"""
Augmented assignment of a variable is supported.
We don't care about var refs.
"""
self.flakes('''
foo = 0
foo += 1
''')
def test_attrAugmentedAssignment(self):
"""
Augmented assignment of attributes is supported.
We don't care about attr refs.
"""
self.flakes('''
foo = None
foo.bar += foo.baz
''')
class TestUnusedAssignment(TestCase):
"""
Tests for warning about unused assignments.
"""
def test_unusedVariable(self):
"""
Warn when a variable in a function is assigned a value that's never
used.
"""
self.flakes('''
def a():
b = 1
''', m.UnusedVariable)
def test_unusedVariableAsLocals(self):
"""
        Using locals(), it is perfectly valid to have unused variables.
"""
self.flakes('''
def a():
b = 1
return locals()
''')
def test_unusedVariableNoLocals(self):
"""
        Using locals() in the wrong scope should not matter.
"""
self.flakes('''
def a():
locals()
def a():
b = 1
return
''', m.UnusedVariable)
def test_assignToGlobal(self):
"""
Assigning to a global and then not using that global is perfectly
acceptable. Do not mistake it for an unused local variable.
"""
self.flakes('''
b = 0
def a():
global b
b = 1
''')
@skipIf(version_info < (3,), 'new in Python 3')
def test_assignToNonlocal(self):
"""
Assigning to a nonlocal and then not using that binding is perfectly
acceptable. Do not mistake it for an unused local variable.
"""
self.flakes('''
b = b'0'
def a():
nonlocal b
b = b'1'
''')
def test_assignToMember(self):
"""
Assigning to a member of another object and then not using that member
variable is perfectly acceptable. Do not mistake it for an unused
local variable.
"""
# XXX: Adding this test didn't generate a failure. Maybe not
# necessary?
self.flakes('''
class b:
pass
def a():
b.foo = 1
''')
def test_assignInForLoop(self):
"""
Don't warn when a variable in a for loop is assigned to but not used.
"""
self.flakes('''
def f():
for i in range(10):
pass
''')
def test_assignInListComprehension(self):
"""
Don't warn when a variable in a list comprehension is
assigned to but not used.
"""
self.flakes('''
def f():
[None for i in range(10)]
''')
def test_generatorExpression(self):
"""
Don't warn when a variable in a generator expression is
assigned to but not used.
"""
self.flakes('''
def f():
(None for i in range(10))
''')
def test_assignmentInsideLoop(self):
"""
Don't warn when a variable assignment occurs lexically after its use.
"""
self.flakes('''
def f():
x = None
for i in range(10):
if i > 2:
return x
x = i * 2
''')
def test_tupleUnpacking(self):
"""
Don't warn when a variable included in tuple unpacking is unused. It's
very common for variables in a tuple unpacking assignment to be unused
        in good Python code, so a warning will only create false positives.
"""
self.flakes('''
def f(tup):
(x, y) = tup
''')
self.flakes('''
def f():
(x, y) = 1, 2
''', m.UnusedVariable, m.UnusedVariable)
self.flakes('''
def f():
(x, y) = coords = 1, 2
if x > 1:
print(coords)
''')
self.flakes('''
def f():
(x, y) = coords = 1, 2
''', m.UnusedVariable)
self.flakes('''
def f():
coords = (x, y) = 1, 2
''', m.UnusedVariable)
def test_listUnpacking(self):
"""
Don't warn when a variable included in list unpacking is unused.
"""
self.flakes('''
def f(tup):
[x, y] = tup
''')
self.flakes('''
def f():
[x, y] = [1, 2]
''', m.UnusedVariable, m.UnusedVariable)
def test_closedOver(self):
"""
Don't warn when the assignment is used in an inner function.
"""
self.flakes('''
def barMaker():
foo = 5
def bar():
return foo
return bar
''')
def test_doubleClosedOver(self):
"""
Don't warn when the assignment is used in an inner function, even if
that inner function itself is in an inner function.
"""
self.flakes('''
def barMaker():
foo = 5
def bar():
def baz():
return foo
return bar
''')
def test_tracebackhideSpecialVariable(self):
"""
Do not warn about unused local variable __tracebackhide__, which is
a special variable for py.test.
"""
self.flakes("""
def helper():
__tracebackhide__ = True
""")
def test_ifexp(self):
"""
Test C{foo if bar else baz} statements.
"""
self.flakes("a = 'moo' if True else 'oink'")
self.flakes("a = foo if True else 'oink'", m.UndefinedName)
self.flakes("a = 'moo' if True else bar", m.UndefinedName)
def test_withStatementNoNames(self):
"""
        No warnings are emitted for using a name defined beforehand inside or
        after a nameless C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
bar = None
with open("foo"):
bar
bar
''')
def test_withStatementSingleName(self):
"""
No warnings are emitted for using a name defined by a C{with} statement
within the suite or afterwards.
"""
self.flakes('''
from __future__ import with_statement
with open('foo') as bar:
bar
bar
''')
def test_withStatementAttributeName(self):
"""
No warnings are emitted for using an attribute as the target of a
C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
import foo
with open('foo') as foo.bar:
pass
''')
def test_withStatementSubscript(self):
"""
No warnings are emitted for using a subscript as the target of a
C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
import foo
with open('foo') as foo[0]:
pass
''')
def test_withStatementSubscriptUndefined(self):
"""
An undefined name warning is emitted if the subscript used as the
target of a C{with} statement is not defined.
"""
self.flakes('''
from __future__ import with_statement
import foo
with open('foo') as foo[bar]:
pass
''', m.UndefinedName)
def test_withStatementTupleNames(self):
"""
No warnings are emitted for using any of the tuple of names defined by
a C{with} statement within the suite or afterwards.
"""
self.flakes('''
from __future__ import with_statement
with open('foo') as (bar, baz):
bar, baz
bar, baz
''')
def test_withStatementListNames(self):
"""
No warnings are emitted for using any of the list of names defined by a
C{with} statement within the suite or afterwards.
"""
self.flakes('''
from __future__ import with_statement
with open('foo') as [bar, baz]:
bar, baz
bar, baz
''')
def test_withStatementComplicatedTarget(self):
"""
If the target of a C{with} statement uses any or all of the valid forms
for that part of the grammar (See
U{http://docs.python.org/reference/compound_stmts.html#the-with-statement}),
the names involved are checked both for definedness and any bindings
created are respected in the suite of the statement and afterwards.
"""
self.flakes('''
from __future__ import with_statement
c = d = e = g = h = i = None
with open('foo') as [(a, b), c[d], e.f, g[h:i]]:
a, b, c, d, e, g, h, i
a, b, c, d, e, g, h, i
''')
def test_withStatementSingleNameUndefined(self):
"""
An undefined name warning is emitted if the name first defined by a
C{with} statement is used before the C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
bar
with open('foo') as bar:
pass
''', m.UndefinedName)
def test_withStatementTupleNamesUndefined(self):
"""
        An undefined name warning is emitted if a name first defined by the
tuple-unpacking form of the C{with} statement is used before the
C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
baz
with open('foo') as (bar, baz):
pass
''', m.UndefinedName)
def test_withStatementSingleNameRedefined(self):
"""
A redefined name warning is emitted if a name bound by an import is
rebound by the name defined by a C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
import bar
with open('foo') as bar:
pass
''', m.RedefinedWhileUnused)
def test_withStatementTupleNamesRedefined(self):
"""
A redefined name warning is emitted if a name bound by an import is
rebound by one of the names defined by the tuple-unpacking form of a
C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
import bar
with open('foo') as (bar, baz):
pass
''', m.RedefinedWhileUnused)
def test_withStatementUndefinedInside(self):
"""
An undefined name warning is emitted if a name is used inside the
body of a C{with} statement without first being bound.
"""
self.flakes('''
from __future__ import with_statement
with open('foo') as bar:
baz
''', m.UndefinedName)
def test_withStatementNameDefinedInBody(self):
"""
A name defined in the body of a C{with} statement can be used after
the body ends without warning.
"""
self.flakes('''
from __future__ import with_statement
with open('foo') as bar:
baz = 10
baz
''')
def test_withStatementUndefinedInExpression(self):
"""
An undefined name warning is emitted if a name in the I{test}
expression of a C{with} statement is undefined.
"""
self.flakes('''
from __future__ import with_statement
with bar as baz:
pass
''', m.UndefinedName)
self.flakes('''
from __future__ import with_statement
with bar as bar:
pass
''', m.UndefinedName)
@skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_dictComprehension(self):
"""
Dict comprehensions are properly handled.
"""
self.flakes('''
a = {1: x for x in range(10)}
''')
@skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_setComprehensionAndLiteral(self):
"""
Set comprehensions are properly handled.
"""
self.flakes('''
a = {1, 2, 3}
b = {x for x in range(10)}
''')
def test_exceptionUsedInExcept(self):
as_exc = ', ' if version_info < (2, 6) else ' as '
self.flakes('''
try: pass
except Exception%se: e
''' % as_exc)
self.flakes('''
def download_review():
try: pass
except Exception%se: e
''' % as_exc)
def test_exceptWithoutNameInFunction(self):
"""
        Don't issue a false warning when an unnamed exception is used.
        Previously, there would be a false warning, but only when the
        try..except was in a function.
"""
self.flakes('''
import tokenize
def foo():
try: pass
except tokenize.TokenError: pass
''')
def test_exceptWithoutNameInFunctionTuple(self):
"""
        Don't issue a false warning when an unnamed exception is used.
This example catches a tuple of exception types.
"""
self.flakes('''
import tokenize
def foo():
try: pass
except (tokenize.TokenError, IndentationError): pass
''')
def test_augmentedAssignmentImportedFunctionCall(self):
"""
Consider a function that is called on the right part of an
augassign operation to be used.
"""
self.flakes('''
from foo import bar
baz = 0
baz += bar()
''')
@skipIf(version_info < (3, 3), 'new in Python 3.3')
def test_yieldFromUndefined(self):
"""
Test C{yield from} statement
"""
self.flakes('''
def bar():
yield from foo()
''', m.UndefinedName)
|
davgibbs/django
|
refs/heads/master
|
tests/migrations/test_operations.py
|
105
|
from __future__ import unicode_literals
import unittest
from django.db import connection, migrations, models, transaction
from django.db.migrations.migration import Migration
from django.db.migrations.state import ProjectState
from django.db.models.fields import NOT_PROVIDED
from django.db.transaction import atomic
from django.db.utils import IntegrityError
from django.test import override_settings, skipUnlessDBFeature
from django.utils import six
from .models import FoodManager, FoodQuerySet
from .test_base import MigrationTestBase
try:
import sqlparse
except ImportError:
sqlparse = None
class OperationTestBase(MigrationTestBase):
"""
Common functions to help test operations.
"""
def apply_operations(self, app_label, project_state, operations):
migration = Migration('name', app_label)
migration.operations = operations
with connection.schema_editor() as editor:
return migration.apply(project_state, editor)
def unapply_operations(self, app_label, project_state, operations):
migration = Migration('name', app_label)
migration.operations = operations
with connection.schema_editor() as editor:
return migration.unapply(project_state, editor)
def make_test_state(self, app_label, operation, **kwargs):
"""
Makes a test state using set_up_test_model and returns the
original state and the state after the migration is applied.
"""
project_state = self.set_up_test_model(app_label, **kwargs)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
return project_state, new_state
def set_up_test_model(self, app_label, second_model=False, third_model=False,
related_model=False, mti_model=False, proxy_model=False, manager_model=False,
unique_together=False, options=False, db_table=None, index_together=False):
"""
Creates a test model state and database table.
"""
# Delete the tables if they already exist
table_names = [
# Start with ManyToMany tables
'_pony_stables', '_pony_vans',
# Then standard model tables
'_pony', '_stable', '_van',
]
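        # Prefix each suffix with the app label, e.g. "test_crmo" + "_pony".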
tables = [(app_label + table_name) for table_name in table_names]
with connection.cursor() as cursor:
table_names = connection.introspection.table_names(cursor)
connection.disable_constraint_checking()
sql_delete_table = connection.schema_editor().sql_delete_table
with transaction.atomic():
for table in tables:
if table in table_names:
cursor.execute(sql_delete_table % {
"table": connection.ops.quote_name(table),
})
connection.enable_constraint_checking()
# Make the "current" state
model_options = {
"swappable": "TEST_SWAP_MODEL",
"index_together": [["weight", "pink"]] if index_together else [],
"unique_together": [["pink", "weight"]] if unique_together else [],
}
if options:
model_options["permissions"] = [("can_groom", "Can groom")]
if db_table:
model_options["db_table"] = db_table
operations = [migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=3)),
("weight", models.FloatField()),
],
options=model_options,
)]
if second_model:
operations.append(migrations.CreateModel(
"Stable",
[
("id", models.AutoField(primary_key=True)),
]
))
if third_model:
operations.append(migrations.CreateModel(
"Van",
[
("id", models.AutoField(primary_key=True)),
]
))
if related_model:
operations.append(migrations.CreateModel(
"Rider",
[
("id", models.AutoField(primary_key=True)),
("pony", models.ForeignKey("Pony", models.CASCADE)),
("friend", models.ForeignKey("self", models.CASCADE))
],
))
if mti_model:
operations.append(migrations.CreateModel(
"ShetlandPony",
fields=[
('pony_ptr', models.OneToOneField(
'Pony',
models.CASCADE,
auto_created=True,
primary_key=True,
to_field='id',
serialize=False,
)),
("cuteness", models.IntegerField(default=1)),
],
bases=['%s.Pony' % app_label],
))
if proxy_model:
operations.append(migrations.CreateModel(
"ProxyPony",
fields=[],
options={"proxy": True},
bases=['%s.Pony' % app_label],
))
if manager_model:
operations.append(migrations.CreateModel(
"Food",
fields=[
("id", models.AutoField(primary_key=True)),
],
managers=[
("food_qs", FoodQuerySet.as_manager()),
("food_mgr", FoodManager("a", "b")),
("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
]
))
return self.apply_operations(app_label, ProjectState(), operations)
class OperationTests(OperationTestBase):
"""
    Tests running the operations and making sure they do what they say they do.
    Each test checks the operation's state change and then its database
    alteration, both forwards and backwards.
"""
def test_create_model(self):
"""
Tests the CreateModel operation.
Most other tests use this operation as part of setup, so check failures here first.
"""
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
)
self.assertEqual(operation.describe(), "Create model Pony")
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(new_state.models["test_crmo", "pony"].name, "Pony")
self.assertEqual(len(new_state.models["test_crmo", "pony"].fields), 2)
# Test the database alteration
self.assertTableNotExists("test_crmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2].keys()), ["fields", "name"])
        # A plain default manager should not appear in the deconstructed kwargs
operation = migrations.CreateModel("Foo", fields=[], managers=[("objects", models.Manager())])
definition = operation.deconstruct()
self.assertNotIn('managers', definition[2])
def test_create_model_with_unique_after(self):
"""
Tests the CreateModel operation directly followed by an
AlterUniqueTogether (bug #22844 - sqlite remake issues)
"""
operation1 = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
)
operation2 = migrations.CreateModel(
"Rider",
[
("id", models.AutoField(primary_key=True)),
("number", models.IntegerField(default=1)),
("pony", models.ForeignKey("test_crmoua.Pony", models.CASCADE)),
],
)
operation3 = migrations.AlterUniqueTogether(
"Rider",
[
("number", "pony"),
],
)
# Test the database alteration
project_state = ProjectState()
self.assertTableNotExists("test_crmoua_pony")
self.assertTableNotExists("test_crmoua_rider")
with connection.schema_editor() as editor:
new_state = project_state.clone()
operation1.state_forwards("test_crmoua", new_state)
operation1.database_forwards("test_crmoua", editor, project_state, new_state)
project_state, new_state = new_state, new_state.clone()
operation2.state_forwards("test_crmoua", new_state)
operation2.database_forwards("test_crmoua", editor, project_state, new_state)
project_state, new_state = new_state, new_state.clone()
operation3.state_forwards("test_crmoua", new_state)
operation3.database_forwards("test_crmoua", editor, project_state, new_state)
self.assertTableExists("test_crmoua_pony")
self.assertTableExists("test_crmoua_rider")
def test_create_model_m2m(self):
"""
Test the creation of a model with a ManyToMany field and the
auto-created "through" model.
"""
project_state = self.set_up_test_model("test_crmomm")
operation = migrations.CreateModel(
"Stable",
[
("id", models.AutoField(primary_key=True)),
("ponies", models.ManyToManyField("Pony", related_name="stables"))
]
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_crmomm", new_state)
# Test the database alteration
self.assertTableNotExists("test_crmomm_stable_ponies")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmomm", editor, project_state, new_state)
self.assertTableExists("test_crmomm_stable")
self.assertTableExists("test_crmomm_stable_ponies")
self.assertColumnNotExists("test_crmomm_stable", "ponies")
# Make sure the M2M field actually works
with atomic():
Pony = new_state.apps.get_model("test_crmomm", "Pony")
Stable = new_state.apps.get_model("test_crmomm", "Stable")
stable = Stable.objects.create()
p1 = Pony.objects.create(pink=False, weight=4.55)
p2 = Pony.objects.create(pink=True, weight=5.43)
stable.ponies.add(p1, p2)
self.assertEqual(stable.ponies.count(), 2)
stable.ponies.all().delete()
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmomm", editor, new_state, project_state)
self.assertTableNotExists("test_crmomm_stable")
self.assertTableNotExists("test_crmomm_stable_ponies")
def test_create_model_inheritance(self):
"""
Tests the CreateModel operation on a multi-table inheritance setup.
"""
project_state = self.set_up_test_model("test_crmoih")
# Test the state alteration
operation = migrations.CreateModel(
"ShetlandPony",
[
('pony_ptr', models.OneToOneField(
'test_crmoih.Pony',
models.CASCADE,
auto_created=True,
primary_key=True,
to_field='id',
serialize=False,
)),
("cuteness", models.IntegerField(default=1)),
],
)
new_state = project_state.clone()
operation.state_forwards("test_crmoih", new_state)
self.assertIn(("test_crmoih", "shetlandpony"), new_state.models)
# Test the database alteration
self.assertTableNotExists("test_crmoih_shetlandpony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmoih", editor, project_state, new_state)
self.assertTableExists("test_crmoih_shetlandpony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmoih", editor, new_state, project_state)
self.assertTableNotExists("test_crmoih_shetlandpony")
def test_create_proxy_model(self):
"""
Tests that CreateModel ignores proxy models.
"""
project_state = self.set_up_test_model("test_crprmo")
# Test the state alteration
operation = migrations.CreateModel(
"ProxyPony",
[],
options={"proxy": True},
bases=("test_crprmo.Pony", ),
)
self.assertEqual(operation.describe(), "Create proxy model ProxyPony")
new_state = project_state.clone()
operation.state_forwards("test_crprmo", new_state)
self.assertIn(("test_crprmo", "proxypony"), new_state.models)
# Test the database alteration
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crprmo", editor, project_state, new_state)
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crprmo", editor, new_state, project_state)
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2].keys()), ["bases", "fields", "name", "options"])
def test_create_unmanaged_model(self):
"""
Tests that CreateModel ignores unmanaged models.
"""
project_state = self.set_up_test_model("test_crummo")
# Test the state alteration
operation = migrations.CreateModel(
"UnmanagedPony",
[],
options={"proxy": True},
bases=("test_crummo.Pony", ),
)
self.assertEqual(operation.describe(), "Create proxy model UnmanagedPony")
new_state = project_state.clone()
operation.state_forwards("test_crummo", new_state)
self.assertIn(("test_crummo", "unmanagedpony"), new_state.models)
# Test the database alteration
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crummo", editor, project_state, new_state)
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crummo", editor, new_state, project_state)
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
def test_create_model_managers(self):
"""
Tests that the managers on a model are set.
"""
project_state = self.set_up_test_model("test_cmoma")
# Test the state alteration
operation = migrations.CreateModel(
"Food",
fields=[
("id", models.AutoField(primary_key=True)),
],
managers=[
("food_qs", FoodQuerySet.as_manager()),
("food_mgr", FoodManager("a", "b")),
("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
]
)
self.assertEqual(operation.describe(), "Create model Food")
new_state = project_state.clone()
operation.state_forwards("test_cmoma", new_state)
self.assertIn(("test_cmoma", "food"), new_state.models)
managers = new_state.models["test_cmoma", "food"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
def test_delete_model(self):
"""
Tests the DeleteModel operation.
"""
project_state = self.set_up_test_model("test_dlmo")
# Test the state alteration
operation = migrations.DeleteModel("Pony")
self.assertEqual(operation.describe(), "Delete model Pony")
new_state = project_state.clone()
operation.state_forwards("test_dlmo", new_state)
self.assertNotIn(("test_dlmo", "pony"), new_state.models)
# Test the database alteration
self.assertTableExists("test_dlmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dlmo", editor, project_state, new_state)
self.assertTableNotExists("test_dlmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_dlmo", editor, new_state, project_state)
self.assertTableExists("test_dlmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "DeleteModel")
self.assertEqual(definition[1], [])
self.assertEqual(list(definition[2]), ["name"])
def test_delete_proxy_model(self):
"""
Tests the DeleteModel operation ignores proxy models.
"""
project_state = self.set_up_test_model("test_dlprmo", proxy_model=True)
# Test the state alteration
operation = migrations.DeleteModel("ProxyPony")
new_state = project_state.clone()
operation.state_forwards("test_dlprmo", new_state)
self.assertIn(("test_dlprmo", "proxypony"), project_state.models)
self.assertNotIn(("test_dlprmo", "proxypony"), new_state.models)
# Test the database alteration
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dlprmo", editor, project_state, new_state)
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_dlprmo", editor, new_state, project_state)
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")
def test_rename_model(self):
"""
Tests the RenameModel operation.
"""
project_state = self.set_up_test_model("test_rnmo", related_model=True)
# Test the state alteration
operation = migrations.RenameModel("Pony", "Horse")
self.assertEqual(operation.describe(), "Rename model Pony to Horse")
# Test initial state and database
self.assertIn(("test_rnmo", "pony"), project_state.models)
self.assertNotIn(("test_rnmo", "horse"), project_state.models)
self.assertTableExists("test_rnmo_pony")
self.assertTableNotExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id"))
self.assertFKNotExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id"))
# Migrate forwards
new_state = project_state.clone()
new_state = self.apply_operations("test_rnmo", new_state, [operation])
# Test new state and database
self.assertNotIn(("test_rnmo", "pony"), new_state.models)
self.assertIn(("test_rnmo", "horse"), new_state.models)
# RenameModel also repoints all incoming FKs and M2Ms
self.assertEqual("test_rnmo.Horse", new_state.models["test_rnmo", "rider"].fields[1][1].remote_field.model)
self.assertTableNotExists("test_rnmo_pony")
self.assertTableExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKNotExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id"))
self.assertFKExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id"))
# Migrate backwards
original_state = self.unapply_operations("test_rnmo", project_state, [operation])
# Test original state and database
self.assertIn(("test_rnmo", "pony"), original_state.models)
self.assertNotIn(("test_rnmo", "horse"), original_state.models)
self.assertEqual("Pony", original_state.models["test_rnmo", "rider"].fields[1][1].remote_field.model)
self.assertTableExists("test_rnmo_pony")
self.assertTableNotExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id"))
self.assertFKNotExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id"))
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameModel")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'old_name': "Pony", 'new_name': "Horse"})
def test_rename_model_with_self_referential_fk(self):
"""
Tests the RenameModel operation on model with self referential FK.
"""
project_state = self.set_up_test_model("test_rmwsrf", related_model=True)
# Test the state alteration
operation = migrations.RenameModel("Rider", "HorseRider")
self.assertEqual(operation.describe(), "Rename model Rider to HorseRider")
new_state = project_state.clone()
operation.state_forwards("test_rmwsrf", new_state)
self.assertNotIn(("test_rmwsrf", "rider"), new_state.models)
self.assertIn(("test_rmwsrf", "horserider"), new_state.models)
# Remember, RenameModel also repoints all incoming FKs and M2Ms
self.assertEqual(
"test_rmwsrf.HorseRider",
new_state.models["test_rmwsrf", "horserider"].fields[2][1].remote_field.model
)
# Test the database alteration
self.assertTableExists("test_rmwsrf_rider")
self.assertTableNotExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id"))
self.assertFKNotExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id"))
with connection.schema_editor() as editor:
operation.database_forwards("test_rmwsrf", editor, project_state, new_state)
self.assertTableNotExists("test_rmwsrf_rider")
self.assertTableExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKNotExists("test_rmwsrf_horserider", ["friend_id"], ("test_rmwsrf_rider", "id"))
self.assertFKExists("test_rmwsrf_horserider", ["friend_id"], ("test_rmwsrf_horserider", "id"))
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_rmwsrf", editor, new_state, project_state)
self.assertTableExists("test_rmwsrf_rider")
self.assertTableNotExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id"))
self.assertFKNotExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id"))
def test_rename_model_with_superclass_fk(self):
"""
Tests the RenameModel operation on a model which has a superclass that
has a foreign key.
"""
project_state = self.set_up_test_model("test_rmwsc", related_model=True, mti_model=True)
# Test the state alteration
operation = migrations.RenameModel("ShetlandPony", "LittleHorse")
self.assertEqual(operation.describe(), "Rename model ShetlandPony to LittleHorse")
new_state = project_state.clone()
operation.state_forwards("test_rmwsc", new_state)
self.assertNotIn(("test_rmwsc", "shetlandpony"), new_state.models)
self.assertIn(("test_rmwsc", "littlehorse"), new_state.models)
# RenameModel shouldn't repoint the superclass's relations, only local ones
self.assertEqual(
project_state.models["test_rmwsc", "rider"].fields[1][1].remote_field.model,
new_state.models["test_rmwsc", "rider"].fields[1][1].remote_field.model
)
# Before running the migration we have a table for Shetland Pony, not Little Horse
self.assertTableExists("test_rmwsc_shetlandpony")
self.assertTableNotExists("test_rmwsc_littlehorse")
if connection.features.supports_foreign_keys:
# and the foreign key on rider points to pony, not shetland pony
self.assertFKExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id"))
self.assertFKNotExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_shetlandpony", "id"))
with connection.schema_editor() as editor:
operation.database_forwards("test_rmwsc", editor, project_state, new_state)
# Now we have a little horse table, not shetland pony
self.assertTableNotExists("test_rmwsc_shetlandpony")
self.assertTableExists("test_rmwsc_littlehorse")
if connection.features.supports_foreign_keys:
# but the Foreign keys still point at pony, not little horse
self.assertFKExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id"))
self.assertFKNotExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_littlehorse", "id"))
def test_rename_model_with_self_referential_m2m(self):
app_label = "test_rename_model_with_self_referential_m2m"
project_state = self.apply_operations(app_label, ProjectState(), operations=[
migrations.CreateModel("ReflexivePony", fields=[
("ponies", models.ManyToManyField("self")),
]),
])
project_state = self.apply_operations(app_label, project_state, operations=[
migrations.RenameModel("ReflexivePony", "ReflexivePony2"),
])
Pony = project_state.apps.get_model(app_label, "ReflexivePony2")
pony = Pony.objects.create()
pony.ponies.add(pony)
def test_rename_model_with_m2m(self):
app_label = "test_rename_model_with_m2m"
project_state = self.apply_operations(app_label, ProjectState(), operations=[
migrations.CreateModel("Rider", fields=[]),
migrations.CreateModel("Pony", fields=[
("riders", models.ManyToManyField("Rider")),
]),
])
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
project_state = self.apply_operations(app_label, project_state, operations=[
migrations.RenameModel("Pony", "Pony2"),
])
Pony = project_state.apps.get_model(app_label, "Pony2")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
self.assertEqual(Pony.objects.count(), 2)
self.assertEqual(Rider.objects.count(), 2)
self.assertEqual(Pony._meta.get_field('riders').remote_field.through.objects.count(), 2)
def test_rename_m2m_target_model(self):
app_label = "test_rename_m2m_target_model"
project_state = self.apply_operations(app_label, ProjectState(), operations=[
migrations.CreateModel("Rider", fields=[]),
migrations.CreateModel("Pony", fields=[
("riders", models.ManyToManyField("Rider")),
]),
])
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
project_state = self.apply_operations(app_label, project_state, operations=[
migrations.RenameModel("Rider", "Rider2"),
])
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider2")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
self.assertEqual(Pony.objects.count(), 2)
self.assertEqual(Rider.objects.count(), 2)
self.assertEqual(Pony._meta.get_field('riders').remote_field.through.objects.count(), 2)
def test_add_field(self):
"""
Tests the AddField operation.
"""
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=5),
)
self.assertEqual(operation.describe(), "Add field height to Pony")
project_state, new_state = self.make_test_state("test_adfl", operation)
self.assertEqual(len(new_state.models["test_adfl", "pony"].fields), 4)
field = [
f for n, f in new_state.models["test_adfl", "pony"].fields
if n == "height"
][0]
self.assertEqual(field.default, 5)
# Test the database alteration
self.assertColumnNotExists("test_adfl_pony", "height")
with connection.schema_editor() as editor:
operation.database_forwards("test_adfl", editor, project_state, new_state)
self.assertColumnExists("test_adfl_pony", "height")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_adfl", editor, new_state, project_state)
self.assertColumnNotExists("test_adfl_pony", "height")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddField")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])
def test_add_charfield(self):
"""
        Tests the AddField operation on CharField.
"""
project_state = self.set_up_test_model("test_adchfl")
Pony = project_state.apps.get_model("test_adchfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations("test_adchfl", project_state, [
migrations.AddField(
"Pony",
"text",
models.CharField(max_length=10, default="some text"),
),
migrations.AddField(
"Pony",
"empty",
models.CharField(max_length=10, default=""),
),
            # If not properly quoted, digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.CharField(max_length=10, default="42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.CharField(max_length=10, default='"\'"'),
),
])
Pony = new_state.apps.get_model("test_adchfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
self.assertEqual(pony.text, "some text")
self.assertEqual(pony.empty, "")
self.assertEqual(pony.digits, "42")
self.assertEqual(pony.quotes, '"\'"')
def test_add_textfield(self):
"""
Tests the AddField operation on TextField.
"""
project_state = self.set_up_test_model("test_adtxtfl")
Pony = project_state.apps.get_model("test_adtxtfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations("test_adtxtfl", project_state, [
migrations.AddField(
"Pony",
"text",
models.TextField(default="some text"),
),
migrations.AddField(
"Pony",
"empty",
models.TextField(default=""),
),
            # If not properly quoted, digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.TextField(default="42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.TextField(default='"\'"'),
),
])
Pony = new_state.apps.get_model("test_adtxtfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
self.assertEqual(pony.text, "some text")
self.assertEqual(pony.empty, "")
self.assertEqual(pony.digits, "42")
self.assertEqual(pony.quotes, '"\'"')
def test_add_binaryfield(self):
"""
        Tests the AddField operation on BinaryField.
"""
project_state = self.set_up_test_model("test_adbinfl")
Pony = project_state.apps.get_model("test_adbinfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations("test_adbinfl", project_state, [
migrations.AddField(
"Pony",
"blob",
models.BinaryField(default=b"some text"),
),
migrations.AddField(
"Pony",
"empty",
models.BinaryField(default=b""),
),
            # If not properly quoted, digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.BinaryField(default=b"42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.BinaryField(default=b'"\'"'),
),
])
Pony = new_state.apps.get_model("test_adbinfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
# SQLite returns buffer/memoryview, cast to bytes for checking.
self.assertEqual(bytes(pony.blob), b"some text")
self.assertEqual(bytes(pony.empty), b"")
self.assertEqual(bytes(pony.digits), b"42")
self.assertEqual(bytes(pony.quotes), b'"\'"')
def test_column_name_quoting(self):
"""
Column names that are SQL keywords shouldn't cause problems when used
in migrations (#22168).
"""
project_state = self.set_up_test_model("test_regr22168")
operation = migrations.AddField(
"Pony",
"order",
models.IntegerField(default=0),
)
new_state = project_state.clone()
operation.state_forwards("test_regr22168", new_state)
with connection.schema_editor() as editor:
operation.database_forwards("test_regr22168", editor, project_state, new_state)
self.assertColumnExists("test_regr22168_pony", "order")
def test_add_field_preserve_default(self):
"""
Tests the AddField operation's state alteration
when preserve_default = False.
"""
project_state = self.set_up_test_model("test_adflpd")
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=4),
preserve_default=False,
)
new_state = project_state.clone()
operation.state_forwards("test_adflpd", new_state)
self.assertEqual(len(new_state.models["test_adflpd", "pony"].fields), 4)
field = [
f for n, f in new_state.models["test_adflpd", "pony"].fields
if n == "height"
][0]
self.assertEqual(field.default, NOT_PROVIDED)
# Test the database alteration
project_state.apps.get_model("test_adflpd", "pony").objects.create(
weight=4,
)
self.assertColumnNotExists("test_adflpd_pony", "height")
with connection.schema_editor() as editor:
operation.database_forwards("test_adflpd", editor, project_state, new_state)
self.assertColumnExists("test_adflpd_pony", "height")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddField")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["field", "model_name", "name", "preserve_default"])
def test_add_field_m2m(self):
"""
Tests the AddField operation with a ManyToManyField.
"""
project_state = self.set_up_test_model("test_adflmm", second_model=True)
# Test the state alteration
operation = migrations.AddField("Pony", "stables", models.ManyToManyField("Stable", related_name="ponies"))
new_state = project_state.clone()
operation.state_forwards("test_adflmm", new_state)
self.assertEqual(len(new_state.models["test_adflmm", "pony"].fields), 4)
# Test the database alteration
self.assertTableNotExists("test_adflmm_pony_stables")
with connection.schema_editor() as editor:
operation.database_forwards("test_adflmm", editor, project_state, new_state)
self.assertTableExists("test_adflmm_pony_stables")
self.assertColumnNotExists("test_adflmm_pony", "stables")
# Make sure the M2M field actually works
with atomic():
Pony = new_state.apps.get_model("test_adflmm", "Pony")
p = Pony.objects.create(pink=False, weight=4.55)
p.stables.create()
self.assertEqual(p.stables.count(), 1)
p.stables.all().delete()
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_adflmm", editor, new_state, project_state)
self.assertTableNotExists("test_adflmm_pony_stables")
def test_alter_field_m2m(self):
project_state = self.set_up_test_model("test_alflmm", second_model=True)
project_state = self.apply_operations("test_alflmm", project_state, operations=[
migrations.AddField("Pony", "stables", models.ManyToManyField("Stable", related_name="ponies"))
])
Pony = project_state.apps.get_model("test_alflmm", "Pony")
self.assertFalse(Pony._meta.get_field('stables').blank)
project_state = self.apply_operations("test_alflmm", project_state, operations=[
migrations.AlterField(
"Pony", "stables", models.ManyToManyField(to="Stable", related_name="ponies", blank=True)
)
])
Pony = project_state.apps.get_model("test_alflmm", "Pony")
self.assertTrue(Pony._meta.get_field('stables').blank)
def test_repoint_field_m2m(self):
project_state = self.set_up_test_model("test_alflmm", second_model=True, third_model=True)
project_state = self.apply_operations("test_alflmm", project_state, operations=[
migrations.AddField("Pony", "places", models.ManyToManyField("Stable", related_name="ponies"))
])
Pony = project_state.apps.get_model("test_alflmm", "Pony")
project_state = self.apply_operations("test_alflmm", project_state, operations=[
migrations.AlterField("Pony", "places", models.ManyToManyField(to="Van", related_name="ponies"))
])
# Ensure the new field actually works
Pony = project_state.apps.get_model("test_alflmm", "Pony")
p = Pony.objects.create(pink=False, weight=4.55)
p.places.create()
self.assertEqual(p.places.count(), 1)
p.places.all().delete()
def test_remove_field_m2m(self):
project_state = self.set_up_test_model("test_rmflmm", second_model=True)
project_state = self.apply_operations("test_rmflmm", project_state, operations=[
migrations.AddField("Pony", "stables", models.ManyToManyField("Stable", related_name="ponies"))
])
self.assertTableExists("test_rmflmm_pony_stables")
with_field_state = project_state.clone()
operations = [migrations.RemoveField("Pony", "stables")]
project_state = self.apply_operations("test_rmflmm", project_state, operations=operations)
self.assertTableNotExists("test_rmflmm_pony_stables")
# And test reversal
self.unapply_operations("test_rmflmm", with_field_state, operations=operations)
self.assertTableExists("test_rmflmm_pony_stables")
def test_remove_field_m2m_with_through(self):
project_state = self.set_up_test_model("test_rmflmmwt", second_model=True)
self.assertTableNotExists("test_rmflmmwt_ponystables")
project_state = self.apply_operations("test_rmflmmwt", project_state, operations=[
migrations.CreateModel("PonyStables", fields=[
("pony", models.ForeignKey('test_rmflmmwt.Pony', models.CASCADE)),
("stable", models.ForeignKey('test_rmflmmwt.Stable', models.CASCADE)),
]),
migrations.AddField(
"Pony", "stables",
models.ManyToManyField("Stable", related_name="ponies", through='test_rmflmmwt.PonyStables')
)
])
self.assertTableExists("test_rmflmmwt_ponystables")
operations = [migrations.RemoveField("Pony", "stables")]
self.apply_operations("test_rmflmmwt", project_state, operations=operations)
def test_remove_field(self):
"""
Tests the RemoveField operation.
"""
project_state = self.set_up_test_model("test_rmfl")
# Test the state alteration
operation = migrations.RemoveField("Pony", "pink")
self.assertEqual(operation.describe(), "Remove field pink from Pony")
new_state = project_state.clone()
operation.state_forwards("test_rmfl", new_state)
self.assertEqual(len(new_state.models["test_rmfl", "pony"].fields), 2)
# Test the database alteration
self.assertColumnExists("test_rmfl_pony", "pink")
with connection.schema_editor() as editor:
operation.database_forwards("test_rmfl", editor, project_state, new_state)
self.assertColumnNotExists("test_rmfl_pony", "pink")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_rmfl", editor, new_state, project_state)
self.assertColumnExists("test_rmfl_pony", "pink")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveField")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'model_name': "Pony", 'name': 'pink'})
def test_remove_fk(self):
"""
Tests the RemoveField operation on a foreign key.
"""
project_state = self.set_up_test_model("test_rfk", related_model=True)
self.assertColumnExists("test_rfk_rider", "pony_id")
operation = migrations.RemoveField("Rider", "pony")
new_state = project_state.clone()
operation.state_forwards("test_rfk", new_state)
with connection.schema_editor() as editor:
operation.database_forwards("test_rfk", editor, project_state, new_state)
self.assertColumnNotExists("test_rfk_rider", "pony_id")
with connection.schema_editor() as editor:
operation.database_backwards("test_rfk", editor, new_state, project_state)
self.assertColumnExists("test_rfk_rider", "pony_id")
def test_alter_model_table(self):
"""
Tests the AlterModelTable operation.
"""
project_state = self.set_up_test_model("test_almota")
# Test the state alteration
operation = migrations.AlterModelTable("Pony", "test_almota_pony_2")
self.assertEqual(operation.describe(), "Rename table for Pony to test_almota_pony_2")
new_state = project_state.clone()
operation.state_forwards("test_almota", new_state)
self.assertEqual(new_state.models["test_almota", "pony"].options["db_table"], "test_almota_pony_2")
# Test the database alteration
self.assertTableExists("test_almota_pony")
self.assertTableNotExists("test_almota_pony_2")
with connection.schema_editor() as editor:
operation.database_forwards("test_almota", editor, project_state, new_state)
self.assertTableNotExists("test_almota_pony")
self.assertTableExists("test_almota_pony_2")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_almota", editor, new_state, project_state)
self.assertTableExists("test_almota_pony")
self.assertTableNotExists("test_almota_pony_2")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelTable")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Pony", 'table': "test_almota_pony_2"})
def test_alter_model_table_noop(self):
"""
Tests the AlterModelTable operation if the table name is not changed.
"""
project_state = self.set_up_test_model("test_almota")
# Test the state alteration
operation = migrations.AlterModelTable("Pony", "test_almota_pony")
new_state = project_state.clone()
operation.state_forwards("test_almota", new_state)
self.assertEqual(new_state.models["test_almota", "pony"].options["db_table"], "test_almota_pony")
# Test the database alteration
self.assertTableExists("test_almota_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_almota", editor, project_state, new_state)
self.assertTableExists("test_almota_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_almota", editor, new_state, project_state)
self.assertTableExists("test_almota_pony")
def test_alter_model_table_m2m(self):
"""
AlterModelTable should rename auto-generated M2M tables.
"""
app_label = "test_talflmltlm2m"
pony_db_table = 'pony_foo'
project_state = self.set_up_test_model(app_label, second_model=True, db_table=pony_db_table)
# Add the M2M field
first_state = project_state.clone()
operation = migrations.AddField("Pony", "stables", models.ManyToManyField("Stable"))
operation.state_forwards(app_label, first_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, first_state)
original_m2m_table = "%s_%s" % (pony_db_table, "stables")
new_m2m_table = "%s_%s" % (app_label, "pony_stables")
self.assertTableExists(original_m2m_table)
self.assertTableNotExists(new_m2m_table)
# Rename the Pony db_table which should also rename the m2m table.
second_state = first_state.clone()
operation = migrations.AlterModelTable(name='pony', table=None)
operation.state_forwards(app_label, second_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, first_state, second_state)
self.assertTableExists(new_m2m_table)
self.assertTableNotExists(original_m2m_table)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, second_state, first_state)
self.assertTableExists(original_m2m_table)
self.assertTableNotExists(new_m2m_table)
def test_alter_field(self):
"""
Tests the AlterField operation.
"""
project_state = self.set_up_test_model("test_alfl")
# Test the state alteration
operation = migrations.AlterField("Pony", "pink", models.IntegerField(null=True))
self.assertEqual(operation.describe(), "Alter field pink on Pony")
new_state = project_state.clone()
operation.state_forwards("test_alfl", new_state)
self.assertEqual(project_state.models["test_alfl", "pony"].get_field_by_name("pink").null, False)
self.assertEqual(new_state.models["test_alfl", "pony"].get_field_by_name("pink").null, True)
# Test the database alteration
self.assertColumnNotNull("test_alfl_pony", "pink")
with connection.schema_editor() as editor:
operation.database_forwards("test_alfl", editor, project_state, new_state)
self.assertColumnNull("test_alfl_pony", "pink")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alfl", editor, new_state, project_state)
self.assertColumnNotNull("test_alfl_pony", "pink")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterField")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])
def test_alter_field_pk(self):
"""
Tests the AlterField operation on primary keys (for things like PostgreSQL's SERIAL weirdness)
"""
project_state = self.set_up_test_model("test_alflpk")
# Test the state alteration
operation = migrations.AlterField("Pony", "id", models.IntegerField(primary_key=True))
new_state = project_state.clone()
operation.state_forwards("test_alflpk", new_state)
self.assertIsInstance(project_state.models["test_alflpk", "pony"].get_field_by_name("id"), models.AutoField)
self.assertIsInstance(new_state.models["test_alflpk", "pony"].get_field_by_name("id"), models.IntegerField)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alflpk", editor, project_state, new_state)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alflpk", editor, new_state, project_state)
@skipUnlessDBFeature('supports_foreign_keys')
def test_alter_field_pk_fk(self):
"""
        Tests that the AlterField operation on primary keys changes any FKs pointing to it.
"""
project_state = self.set_up_test_model("test_alflpkfk", related_model=True)
# Test the state alteration
operation = migrations.AlterField("Pony", "id", models.FloatField(primary_key=True))
new_state = project_state.clone()
operation.state_forwards("test_alflpkfk", new_state)
self.assertIsInstance(project_state.models["test_alflpkfk", "pony"].get_field_by_name("id"), models.AutoField)
self.assertIsInstance(new_state.models["test_alflpkfk", "pony"].get_field_by_name("id"), models.FloatField)
def assertIdTypeEqualsFkType():
with connection.cursor() as cursor:
id_type, id_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(cursor, "test_alflpkfk_pony")
if c.name == "id"
][0]
fk_type, fk_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(cursor, "test_alflpkfk_rider")
if c.name == "pony_id"
][0]
self.assertEqual(id_type, fk_type)
self.assertEqual(id_null, fk_null)
assertIdTypeEqualsFkType()
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alflpkfk", editor, project_state, new_state)
assertIdTypeEqualsFkType()
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alflpkfk", editor, new_state, project_state)
assertIdTypeEqualsFkType()
def test_rename_field(self):
"""
Tests the RenameField operation.
"""
project_state = self.set_up_test_model("test_rnfl", unique_together=True, index_together=True)
# Test the state alteration
operation = migrations.RenameField("Pony", "pink", "blue")
self.assertEqual(operation.describe(), "Rename field pink on Pony to blue")
new_state = project_state.clone()
operation.state_forwards("test_rnfl", new_state)
self.assertIn("blue", [n for n, f in new_state.models["test_rnfl", "pony"].fields])
self.assertNotIn("pink", [n for n, f in new_state.models["test_rnfl", "pony"].fields])
# Make sure the unique_together has the renamed column too
self.assertIn("blue", new_state.models["test_rnfl", "pony"].options['unique_together'][0])
self.assertNotIn("pink", new_state.models["test_rnfl", "pony"].options['unique_together'][0])
# Make sure the index_together has the renamed column too
self.assertIn("blue", new_state.models["test_rnfl", "pony"].options['index_together'][0])
self.assertNotIn("pink", new_state.models["test_rnfl", "pony"].options['index_together'][0])
# Test the database alteration
self.assertColumnExists("test_rnfl_pony", "pink")
self.assertColumnNotExists("test_rnfl_pony", "blue")
with connection.schema_editor() as editor:
operation.database_forwards("test_rnfl", editor, project_state, new_state)
self.assertColumnExists("test_rnfl_pony", "blue")
self.assertColumnNotExists("test_rnfl_pony", "pink")
# Ensure the unique constraint has been ported over
with connection.cursor() as cursor:
cursor.execute("INSERT INTO test_rnfl_pony (blue, weight) VALUES (1, 1)")
with self.assertRaises(IntegrityError):
with atomic():
cursor.execute("INSERT INTO test_rnfl_pony (blue, weight) VALUES (1, 1)")
cursor.execute("DELETE FROM test_rnfl_pony")
# Ensure the index constraint has been ported over
self.assertIndexExists("test_rnfl_pony", ["weight", "blue"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_rnfl", editor, new_state, project_state)
self.assertColumnExists("test_rnfl_pony", "pink")
self.assertColumnNotExists("test_rnfl_pony", "blue")
# Ensure the index constraint has been reset
self.assertIndexExists("test_rnfl_pony", ["weight", "pink"])
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameField")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'model_name': "Pony", 'old_name': "pink", 'new_name': "blue"})
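        # A hedged illustration (not part of the upstream suite): inside a
        # project's migration, the operation exercised above would normally
        # appear as:
        #
        #     migrations.RenameField(
        #         model_name='pony',
        #         old_name='pink',
        #         new_name='blue',
        #     ),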
def test_alter_unique_together(self):
"""
Tests the AlterUniqueTogether operation.
"""
project_state = self.set_up_test_model("test_alunto")
# Test the state alteration
operation = migrations.AlterUniqueTogether("Pony", [("pink", "weight")])
self.assertEqual(operation.describe(), "Alter unique_together for Pony (1 constraint(s))")
new_state = project_state.clone()
operation.state_forwards("test_alunto", new_state)
self.assertEqual(len(project_state.models["test_alunto", "pony"].options.get("unique_together", set())), 0)
self.assertEqual(len(new_state.models["test_alunto", "pony"].options.get("unique_together", set())), 1)
# Make sure we can insert duplicate rows
with connection.cursor() as cursor:
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("DELETE FROM test_alunto_pony")
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alunto", editor, project_state, new_state)
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
with self.assertRaises(IntegrityError):
with atomic():
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("DELETE FROM test_alunto_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alunto", editor, new_state, project_state)
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("DELETE FROM test_alunto_pony")
# Test flat unique_together
operation = migrations.AlterUniqueTogether("Pony", ("pink", "weight"))
operation.state_forwards("test_alunto", new_state)
self.assertEqual(len(new_state.models["test_alunto", "pony"].options.get("unique_together", set())), 1)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterUniqueTogether")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Pony", 'unique_together': {("pink", "weight")}})
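        # A hedged illustration (not part of the upstream suite): the same
        # change in a migration file would read:
        #
        #     migrations.AlterUniqueTogether(
        #         name='pony',
        #         unique_together={('pink', 'weight')},
        #     ),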
def test_alter_unique_together_remove(self):
operation = migrations.AlterUniqueTogether("Pony", None)
self.assertEqual(operation.describe(), "Alter unique_together for Pony (0 constraint(s))")
def test_alter_index_together(self):
"""
Tests the AlterIndexTogether operation.
"""
project_state = self.set_up_test_model("test_alinto")
# Test the state alteration
operation = migrations.AlterIndexTogether("Pony", [("pink", "weight")])
self.assertEqual(operation.describe(), "Alter index_together for Pony (1 constraint(s))")
new_state = project_state.clone()
operation.state_forwards("test_alinto", new_state)
self.assertEqual(len(project_state.models["test_alinto", "pony"].options.get("index_together", set())), 0)
self.assertEqual(len(new_state.models["test_alinto", "pony"].options.get("index_together", set())), 1)
# Make sure there's no matching index
self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"])
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alinto", editor, project_state, new_state)
self.assertIndexExists("test_alinto_pony", ["pink", "weight"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alinto", editor, new_state, project_state)
self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"])
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterIndexTogether")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Pony", 'index_together': {("pink", "weight")}})
def test_alter_index_together_remove(self):
operation = migrations.AlterIndexTogether("Pony", None)
self.assertEqual(operation.describe(), "Alter index_together for Pony (0 constraint(s))")
def test_alter_model_options(self):
"""
Tests the AlterModelOptions operation.
"""
project_state = self.set_up_test_model("test_almoop")
# Test the state alteration (no DB alteration to test)
operation = migrations.AlterModelOptions("Pony", {"permissions": [("can_groom", "Can groom")]})
self.assertEqual(operation.describe(), "Change Meta options on Pony")
new_state = project_state.clone()
operation.state_forwards("test_almoop", new_state)
self.assertEqual(len(project_state.models["test_almoop", "pony"].options.get("permissions", [])), 0)
self.assertEqual(len(new_state.models["test_almoop", "pony"].options.get("permissions", [])), 1)
self.assertEqual(new_state.models["test_almoop", "pony"].options["permissions"][0][0], "can_groom")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelOptions")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Pony", 'options': {"permissions": [("can_groom", "Can groom")]}})
def test_alter_model_options_emptying(self):
"""
Tests that the AlterModelOptions operation removes keys from the dict (#23121)
"""
project_state = self.set_up_test_model("test_almoop", options=True)
# Test the state alteration (no DB alteration to test)
operation = migrations.AlterModelOptions("Pony", {})
self.assertEqual(operation.describe(), "Change Meta options on Pony")
new_state = project_state.clone()
operation.state_forwards("test_almoop", new_state)
self.assertEqual(len(project_state.models["test_almoop", "pony"].options.get("permissions", [])), 1)
self.assertEqual(len(new_state.models["test_almoop", "pony"].options.get("permissions", [])), 0)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelOptions")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Pony", 'options': {}})
def test_alter_order_with_respect_to(self):
"""
Tests the AlterOrderWithRespectTo operation.
"""
project_state = self.set_up_test_model("test_alorwrtto", related_model=True)
# Test the state alteration
operation = migrations.AlterOrderWithRespectTo("Rider", "pony")
self.assertEqual(operation.describe(), "Set order_with_respect_to on Rider to pony")
new_state = project_state.clone()
operation.state_forwards("test_alorwrtto", new_state)
self.assertIsNone(
project_state.models["test_alorwrtto", "rider"].options.get("order_with_respect_to", None)
)
self.assertEqual(
new_state.models["test_alorwrtto", "rider"].options.get("order_with_respect_to", None),
"pony"
)
# Make sure there's no matching index
self.assertColumnNotExists("test_alorwrtto_rider", "_order")
# Create some rows before alteration
rendered_state = project_state.apps
pony = rendered_state.get_model("test_alorwrtto", "Pony").objects.create(weight=50)
rendered_state.get_model("test_alorwrtto", "Rider").objects.create(pony=pony, friend_id=1)
rendered_state.get_model("test_alorwrtto", "Rider").objects.create(pony=pony, friend_id=2)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alorwrtto", editor, project_state, new_state)
self.assertColumnExists("test_alorwrtto_rider", "_order")
# Check for correct value in rows
updated_riders = new_state.apps.get_model("test_alorwrtto", "Rider").objects.all()
self.assertEqual(updated_riders[0]._order, 0)
self.assertEqual(updated_riders[1]._order, 0)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alorwrtto", editor, new_state, project_state)
self.assertColumnNotExists("test_alorwrtto_rider", "_order")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterOrderWithRespectTo")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Rider", 'order_with_respect_to': "pony"})
def test_alter_model_managers(self):
"""
Tests that the managers on a model are set.
"""
project_state = self.set_up_test_model("test_almoma")
# Test the state alteration
operation = migrations.AlterModelManagers(
"Pony",
managers=[
("food_qs", FoodQuerySet.as_manager()),
("food_mgr", FoodManager("a", "b")),
("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
]
)
self.assertEqual(operation.describe(), "Change managers on Pony")
managers = project_state.models["test_almoma", "pony"].managers
self.assertEqual(managers, [])
new_state = project_state.clone()
operation.state_forwards("test_almoma", new_state)
self.assertIn(("test_almoma", "pony"), new_state.models)
managers = new_state.models["test_almoma", "pony"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
def test_alter_model_managers_emptying(self):
"""
        Tests that the managers on a model can be emptied.
"""
project_state = self.set_up_test_model("test_almomae", manager_model=True)
# Test the state alteration
operation = migrations.AlterModelManagers("Food", managers=[])
self.assertEqual(operation.describe(), "Change managers on Food")
self.assertIn(("test_almomae", "food"), project_state.models)
managers = project_state.models["test_almomae", "food"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
new_state = project_state.clone()
operation.state_forwards("test_almomae", new_state)
managers = new_state.models["test_almomae", "food"].managers
self.assertEqual(managers, [])
def test_alter_fk(self):
"""
Tests that creating and then altering an FK works correctly
and deals with the pending SQL (#23091)
"""
project_state = self.set_up_test_model("test_alfk")
# Test adding and then altering the FK in one go
create_operation = migrations.CreateModel(
name="Rider",
fields=[
("id", models.AutoField(primary_key=True)),
("pony", models.ForeignKey("Pony", models.CASCADE)),
],
)
create_state = project_state.clone()
create_operation.state_forwards("test_alfk", create_state)
alter_operation = migrations.AlterField(
model_name='Rider',
name='pony',
field=models.ForeignKey("Pony", models.CASCADE, editable=False),
)
alter_state = create_state.clone()
alter_operation.state_forwards("test_alfk", alter_state)
with connection.schema_editor() as editor:
create_operation.database_forwards("test_alfk", editor, project_state, create_state)
alter_operation.database_forwards("test_alfk", editor, create_state, alter_state)
def test_alter_fk_non_fk(self):
"""
Tests that altering an FK to a non-FK works (#23244)
"""
# Test the state alteration
operation = migrations.AlterField(
model_name="Rider",
name="pony",
field=models.FloatField(),
)
project_state, new_state = self.make_test_state("test_afknfk", operation, related_model=True)
# Test the database alteration
self.assertColumnExists("test_afknfk_rider", "pony_id")
self.assertColumnNotExists("test_afknfk_rider", "pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_afknfk", editor, project_state, new_state)
self.assertColumnExists("test_afknfk_rider", "pony")
self.assertColumnNotExists("test_afknfk_rider", "pony_id")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_afknfk", editor, new_state, project_state)
self.assertColumnExists("test_afknfk_rider", "pony_id")
self.assertColumnNotExists("test_afknfk_rider", "pony")
@unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
def test_run_sql(self):
"""
Tests the RunSQL operation.
"""
project_state = self.set_up_test_model("test_runsql")
# Create the operation
operation = migrations.RunSQL(
# Use a multi-line string with a comment to test splitting on SQLite and MySQL respectively
"CREATE TABLE i_love_ponies (id int, special_thing varchar(15));\n"
"INSERT INTO i_love_ponies (id, special_thing) VALUES (1, 'i love ponies'); -- this is magic!\n"
"INSERT INTO i_love_ponies (id, special_thing) VALUES (2, 'i love django');\n"
"UPDATE i_love_ponies SET special_thing = 'Ponies' WHERE special_thing LIKE '%%ponies';"
"UPDATE i_love_ponies SET special_thing = 'Django' WHERE special_thing LIKE '%django';",
# Run delete queries to test for parameter substitution failure
# reported in #23426
"DELETE FROM i_love_ponies WHERE special_thing LIKE '%Django%';"
"DELETE FROM i_love_ponies WHERE special_thing LIKE '%%Ponies%%';"
"DROP TABLE i_love_ponies",
state_operations=[migrations.CreateModel("SomethingElse", [("id", models.AutoField(primary_key=True))])],
)
self.assertEqual(operation.describe(), "Raw SQL operation")
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_runsql", new_state)
self.assertEqual(len(new_state.models["test_runsql", "somethingelse"].fields), 1)
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
# Test SQL collection
with connection.schema_editor(collect_sql=True) as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
self.assertIn("LIKE '%%ponies';", "\n".join(editor.collected_sql))
operation.database_backwards("test_runsql", editor, project_state, new_state)
self.assertIn("LIKE '%%Ponies%%';", "\n".join(editor.collected_sql))
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
self.assertTableExists("i_love_ponies")
# Make sure all the SQL was processed
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 2)
cursor.execute("SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Django'")
self.assertEqual(cursor.fetchall()[0][0], 1)
cursor.execute("SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Ponies'")
self.assertEqual(cursor.fetchall()[0][0], 1)
# And test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards("test_runsql", editor, new_state, project_state)
self.assertTableNotExists("i_love_ponies")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunSQL")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["reverse_sql", "sql", "state_operations"])
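        # A hedged illustration (not part of the upstream suite): a RunSQL
        # operation in a real migration usually pairs forward SQL with
        # reverse_sql and, optionally, state_operations describing the model
        # state the raw SQL produces:
        #
        #     migrations.RunSQL(
        #         sql="CREATE TABLE i_love_ponies (id int, special_thing varchar(15));",
        #         reverse_sql="DROP TABLE i_love_ponies;",
        #         state_operations=[
        #             migrations.CreateModel(
        #                 "SomethingElse",
        #                 [("id", models.AutoField(primary_key=True))],
        #             ),
        #         ],
        #     ),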
def test_run_sql_params(self):
"""
#23426 - RunSQL should accept parameters.
"""
project_state = self.set_up_test_model("test_runsql")
# Create the operation
operation = migrations.RunSQL(
["CREATE TABLE i_love_ponies (id int, special_thing varchar(15));"],
["DROP TABLE i_love_ponies"],
)
param_operation = migrations.RunSQL(
# forwards
(
"INSERT INTO i_love_ponies (id, special_thing) VALUES (1, 'Django');",
["INSERT INTO i_love_ponies (id, special_thing) VALUES (2, %s);", ['Ponies']],
("INSERT INTO i_love_ponies (id, special_thing) VALUES (%s, %s);", (3, 'Python',)),
),
# backwards
[
"DELETE FROM i_love_ponies WHERE special_thing = 'Django';",
["DELETE FROM i_love_ponies WHERE special_thing = 'Ponies';", None],
("DELETE FROM i_love_ponies WHERE id = %s OR special_thing = %s;", [3, 'Python']),
]
)
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
new_state = project_state.clone()
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
# Test parameter passing
with connection.schema_editor() as editor:
param_operation.database_forwards("test_runsql", editor, project_state, new_state)
# Make sure all the SQL was processed
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 3)
with connection.schema_editor() as editor:
param_operation.database_backwards("test_runsql", editor, new_state, project_state)
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 0)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_runsql", editor, new_state, project_state)
self.assertTableNotExists("i_love_ponies")
def test_run_sql_params_invalid(self):
"""
#23426 - RunSQL should fail when a list of statements with an incorrect
number of tuples is given.
"""
project_state = self.set_up_test_model("test_runsql")
new_state = project_state.clone()
operation = migrations.RunSQL(
# forwards
[
["INSERT INTO foo (bar) VALUES ('buz');"]
],
# backwards
(
("DELETE FROM foo WHERE bar = 'buz';", 'invalid', 'parameter count'),
),
)
with connection.schema_editor() as editor:
six.assertRaisesRegex(self, ValueError,
"Expected a 2-tuple but got 1",
operation.database_forwards,
"test_runsql", editor, project_state, new_state)
with connection.schema_editor() as editor:
six.assertRaisesRegex(self, ValueError,
"Expected a 2-tuple but got 3",
operation.database_backwards,
"test_runsql", editor, new_state, project_state)
def test_run_sql_noop(self):
"""
#24098 - Tests no-op RunSQL operations.
"""
operation = migrations.RunSQL(migrations.RunSQL.noop, migrations.RunSQL.noop)
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, None, None)
operation.database_backwards("test_runsql", editor, None, None)
def test_run_python(self):
"""
Tests the RunPython operation
"""
project_state = self.set_up_test_model("test_runpython", mti_model=True)
# Create the operation
def inner_method(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
Pony.objects.create(pink=1, weight=3.55)
Pony.objects.create(weight=5)
def inner_method_reverse(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
Pony.objects.filter(pink=1, weight=3.55).delete()
Pony.objects.filter(weight=5).delete()
operation = migrations.RunPython(inner_method, reverse_code=inner_method_reverse)
self.assertEqual(operation.describe(), "Raw Python operation")
# Test the state alteration does nothing
new_state = project_state.clone()
operation.state_forwards("test_runpython", new_state)
self.assertEqual(new_state, project_state)
# Test the database alteration
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0)
with connection.schema_editor() as editor:
operation.database_forwards("test_runpython", editor, project_state, new_state)
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2)
# Now test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards("test_runpython", editor, project_state, new_state)
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0)
# Now test we can't use a string
with self.assertRaises(ValueError):
migrations.RunPython("print 'ahahaha'")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["code", "reverse_code"])
# Also test reversal fails, with an operation identical to above but without reverse_code set
no_reverse_operation = migrations.RunPython(inner_method)
self.assertFalse(no_reverse_operation.reversible)
with connection.schema_editor() as editor:
no_reverse_operation.database_forwards("test_runpython", editor, project_state, new_state)
with self.assertRaises(NotImplementedError):
no_reverse_operation.database_backwards("test_runpython", editor, new_state, project_state)
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2)
def create_ponies(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
pony1 = Pony.objects.create(pink=1, weight=3.55)
self.assertIsNot(pony1.pk, None)
pony2 = Pony.objects.create(weight=5)
self.assertIsNot(pony2.pk, None)
self.assertNotEqual(pony1.pk, pony2.pk)
operation = migrations.RunPython(create_ponies)
with connection.schema_editor() as editor:
operation.database_forwards("test_runpython", editor, project_state, new_state)
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 4)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["code"])
def create_shetlandponies(models, schema_editor):
ShetlandPony = models.get_model("test_runpython", "ShetlandPony")
pony1 = ShetlandPony.objects.create(weight=4.0)
self.assertIsNot(pony1.pk, None)
pony2 = ShetlandPony.objects.create(weight=5.0)
self.assertIsNot(pony2.pk, None)
self.assertNotEqual(pony1.pk, pony2.pk)
operation = migrations.RunPython(create_shetlandponies)
with connection.schema_editor() as editor:
operation.database_forwards("test_runpython", editor, project_state, new_state)
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 6)
self.assertEqual(project_state.apps.get_model("test_runpython", "ShetlandPony").objects.count(), 2)
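        # A hedged illustration (not part of the upstream suite): a data
        # migration normally hands RunPython a forwards callable and a
        # reverse_code callable, each taking (apps, schema_editor):
        #
        #     def forwards(apps, schema_editor):
        #         Pony = apps.get_model("test_runpython", "Pony")
        #         Pony.objects.create(pink=1, weight=3.55)
        #
        #     migrations.RunPython(forwards, reverse_code=migrations.RunPython.noop),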
def test_run_python_atomic(self):
"""
Tests the RunPython operation correctly handles the "atomic" keyword
"""
project_state = self.set_up_test_model("test_runpythonatomic", mti_model=True)
def inner_method(models, schema_editor):
Pony = models.get_model("test_runpythonatomic", "Pony")
Pony.objects.create(pink=1, weight=3.55)
raise ValueError("Adrian hates ponies.")
atomic_migration = Migration("test", "test_runpythonatomic")
atomic_migration.operations = [migrations.RunPython(inner_method)]
non_atomic_migration = Migration("test", "test_runpythonatomic")
non_atomic_migration.operations = [migrations.RunPython(inner_method, atomic=False)]
# If we're a fully-transactional database, both versions should rollback
if connection.features.can_rollback_ddl:
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.apply(project_state, editor)
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.apply(project_state, editor)
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
# Otherwise, the non-atomic operation should leave a row there
else:
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.apply(project_state, editor)
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.apply(project_state, editor)
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 1)
# And deconstruction
definition = non_atomic_migration.operations[0].deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["atomic", "code"])
def test_run_python_related_assignment(self):
"""
#24282 - Tests that model changes to a FK reverse side update the model
on the FK side as well.
"""
def inner_method(models, schema_editor):
Author = models.get_model("test_authors", "Author")
Book = models.get_model("test_books", "Book")
author = Author.objects.create(name="Hemingway")
Book.objects.create(title="Old Man and The Sea", author=author)
create_author = migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
],
options={},
)
create_book = migrations.CreateModel(
"Book",
[
("id", models.AutoField(primary_key=True)),
("title", models.CharField(max_length=100)),
("author", models.ForeignKey("test_authors.Author", models.CASCADE))
],
options={},
)
add_hometown = migrations.AddField(
"Author",
"hometown",
models.CharField(max_length=100),
)
create_old_man = migrations.RunPython(inner_method, inner_method)
project_state = ProjectState()
new_state = project_state.clone()
with connection.schema_editor() as editor:
create_author.state_forwards("test_authors", new_state)
create_author.database_forwards("test_authors", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_book.state_forwards("test_books", new_state)
create_book.database_forwards("test_books", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
add_hometown.state_forwards("test_authors", new_state)
add_hometown.database_forwards("test_authors", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_old_man.state_forwards("test_books", new_state)
create_old_man.database_forwards("test_books", editor, project_state, new_state)
def test_run_python_noop(self):
"""
#24098 - Tests no-op RunPython operations.
"""
project_state = ProjectState()
new_state = project_state.clone()
operation = migrations.RunPython(migrations.RunPython.noop, migrations.RunPython.noop)
with connection.schema_editor() as editor:
operation.database_forwards("test_runpython", editor, project_state, new_state)
operation.database_backwards("test_runpython", editor, new_state, project_state)
@unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
def test_separate_database_and_state(self):
"""
Tests the SeparateDatabaseAndState operation.
"""
project_state = self.set_up_test_model("test_separatedatabaseandstate")
# Create the operation
database_operation = migrations.RunSQL(
"CREATE TABLE i_love_ponies (id int, special_thing int);",
"DROP TABLE i_love_ponies;"
)
state_operation = migrations.CreateModel("SomethingElse", [("id", models.AutoField(primary_key=True))])
operation = migrations.SeparateDatabaseAndState(
state_operations=[state_operation],
database_operations=[database_operation]
)
self.assertEqual(operation.describe(), "Custom state/database change combination")
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_separatedatabaseandstate", new_state)
self.assertEqual(len(new_state.models["test_separatedatabaseandstate", "somethingelse"].fields), 1)
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_separatedatabaseandstate", editor, project_state, new_state)
self.assertTableExists("i_love_ponies")
# And test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards("test_separatedatabaseandstate", editor, new_state, project_state)
self.assertTableNotExists("i_love_ponies")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "SeparateDatabaseAndState")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["database_operations", "state_operations"])
class SwappableOperationTests(OperationTestBase):
"""
Tests that key operations ignore swappable models
(we don't want to replicate all of them here, as the functionality
is in a common base class anyway)
"""
available_apps = [
"migrations",
"django.contrib.auth",
"django.contrib.contenttypes",
]
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_create_ignore_swapped(self):
"""
Tests that the CreateTable operation ignores swapped models.
"""
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
options={
"swappable": "TEST_SWAP_MODEL",
},
)
# Test the state alteration (it should still be there!)
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crigsw", new_state)
self.assertEqual(new_state.models["test_crigsw", "pony"].name, "Pony")
self.assertEqual(len(new_state.models["test_crigsw", "pony"].fields), 2)
# Test the database alteration
self.assertTableNotExists("test_crigsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crigsw", editor, project_state, new_state)
self.assertTableNotExists("test_crigsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crigsw", editor, new_state, project_state)
self.assertTableNotExists("test_crigsw_pony")
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_delete_ignore_swapped(self):
"""
Tests the DeleteModel operation ignores swapped models.
"""
operation = migrations.DeleteModel("Pony")
project_state, new_state = self.make_test_state("test_dligsw", operation)
# Test the database alteration
self.assertTableNotExists("test_dligsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dligsw", editor, project_state, new_state)
self.assertTableNotExists("test_dligsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_dligsw", editor, new_state, project_state)
self.assertTableNotExists("test_dligsw_pony")
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_add_field_ignore_swapped(self):
"""
Tests the AddField operation.
"""
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=5),
)
project_state, new_state = self.make_test_state("test_adfligsw", operation)
# Test the database alteration
self.assertTableNotExists("test_adfligsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_adfligsw", editor, project_state, new_state)
self.assertTableNotExists("test_adfligsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_adfligsw", editor, new_state, project_state)
self.assertTableNotExists("test_adfligsw_pony")
|
LifeDJIK/S.H.I.V.A.
|
refs/heads/master
|
containers/shiva/hazelcast/protocol/codec/map_add_partition_lost_listener_codec.py
|
2
|
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.custom_codec import *
from hazelcast.util import ImmutableLazyDataList
from hazelcast.protocol.codec.map_message_type import *
from hazelcast.protocol.event_response_const import *
REQUEST_TYPE = MAP_ADDPARTITIONLOSTLISTENER
RESPONSE_TYPE = 104
RETRYABLE = False
def calculate_size(name, local_only):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += BOOLEAN_SIZE_IN_BYTES
return data_size
def encode_request(name, local_only):
""" Encode request into client_message"""
client_message = ClientMessage(payload_size=calculate_size(name, local_only))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(name)
client_message.append_bool(local_only)
client_message.update_frame_length()
return client_message
def decode_response(client_message, to_object=None):
""" Decode response from client message"""
parameters = dict(response=None)
parameters['response'] = client_message.read_str()
return parameters
def handle(client_message, handle_event_mappartitionlost=None, to_object=None):
""" Event handler """
message_type = client_message.get_message_type()
if message_type == EVENT_MAPPARTITIONLOST and handle_event_mappartitionlost is not None:
partition_id = client_message.read_int()
uuid = client_message.read_str()
handle_event_mappartitionlost(partition_id=partition_id, uuid=uuid)
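# A hedged usage sketch (not part of the generated codec): a client that
# already has a connection/invocation layer would typically build the request
# with encode_request(), send it, read the listener registration id from
# decode_response(), and later dispatch partition-lost events via handle():
#
#     request = encode_request(name="my-map", local_only=False)
#     # ... invoke on the cluster; on the response ClientMessage:
#     # registration_id = decode_response(response_message)["response"]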
|
FabianKnapp/nexmon
|
refs/heads/master
|
buildtools/gcc-arm-none-eabi-5_4-2016q2-osx/arm-none-eabi/share/gdb/python/gdb/command/unwinders.py
|
50
|
# Unwinder commands.
# Copyright 2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gdb
import re
def validate_regexp(exp, idstring):
try:
return re.compile(exp)
except SyntaxError:
raise SyntaxError("Invalid %s regexp: %s." % (idstring, exp))
def parse_unwinder_command_args(arg):
"""Internal utility to parse unwinder command argv.
Arguments:
arg: The arguments to the command. The format is:
[locus-regexp [name-regexp]]
Returns:
A 2-tuple of compiled regular expressions.
Raises:
SyntaxError: an error processing ARG
"""
argv = gdb.string_to_argv(arg)
argc = len(argv)
if argc > 2:
raise SyntaxError("Too many arguments.")
locus_regexp = ""
name_regexp = ""
if argc >= 1:
locus_regexp = argv[0]
if argc >= 2:
name_regexp = argv[1]
return (validate_regexp(locus_regexp, "locus"),
validate_regexp(name_regexp, "unwinder"))
class InfoUnwinder(gdb.Command):
"""GDB command to list unwinders.
Usage: info unwinder [locus-regexp [name-regexp]]
LOCUS-REGEXP is a regular expression matching the location of the
unwinder. If it is omitted, all registered unwinders from all
loci are listed. A locus can be 'global', 'progspace' to list
the unwinders from the current progspace, or a regular expression
matching filenames of objfiles.
NAME-REGEXP is a regular expression to filter unwinder names. If
    this is omitted for a specified locus, then all registered unwinders
in the locus are listed.
"""
def __init__(self):
super(InfoUnwinder, self).__init__("info unwinder",
gdb.COMMAND_STACK)
def list_unwinders(self, title, unwinders, name_re):
"""Lists the unwinders whose name matches regexp.
Arguments:
title: The line to print before the list.
unwinders: The list of the unwinders.
name_re: unwinder name filter.
"""
if not unwinders:
return
print(title)
for unwinder in unwinders:
if name_re.match(unwinder.name):
print(" %s%s" % (unwinder.name,
"" if unwinder.enabled else " [disabled]"))
def invoke(self, arg, from_tty):
locus_re, name_re = parse_unwinder_command_args(arg)
if locus_re.match("global"):
self.list_unwinders("Global:", gdb.frame_unwinders,
name_re)
if locus_re.match("progspace"):
cp = gdb.current_progspace()
self.list_unwinders("Progspace %s:" % cp.filename,
cp.frame_unwinders, name_re)
for objfile in gdb.objfiles():
if locus_re.match(objfile.filename):
self.list_unwinders("Objfile %s:" % objfile.filename,
objfile.frame_unwinders, name_re)
def do_enable_unwinder1(unwinders, name_re, flag):
"""Enable/disable unwinders whose names match given regex.
Arguments:
unwinders: The list of unwinders.
name_re: Unwinder name filter.
flag: Enable/disable.
Returns:
The number of unwinders affected.
"""
total = 0
for unwinder in unwinders:
if name_re.match(unwinder.name):
unwinder.enabled = flag
total += 1
return total
def do_enable_unwinder(arg, flag):
"""Enable/disable unwinder(s)."""
(locus_re, name_re) = parse_unwinder_command_args(arg)
total = 0
if locus_re.match("global"):
total += do_enable_unwinder1(gdb.frame_unwinders, name_re, flag)
if locus_re.match("progspace"):
total += do_enable_unwinder1(gdb.current_progspace().frame_unwinders,
name_re, flag)
for objfile in gdb.objfiles():
if locus_re.match(objfile.filename):
total += do_enable_unwinder1(objfile.frame_unwinders, name_re,
flag)
print("%d unwinder%s %s" % (total, "" if total == 1 else "s",
"enabled" if flag else "disabled"))
class EnableUnwinder(gdb.Command):
"""GDB command to enable unwinders.
Usage: enable unwinder [locus-regexp [name-regexp]]
LOCUS-REGEXP is a regular expression specifying the unwinders to
    enable. It can be 'global', 'progspace', or the name of an objfile
within that progspace.
NAME_REGEXP is a regular expression to filter unwinder names. If
    this is omitted for a specified locus, then all registered unwinders
in the locus are affected.
"""
def __init__(self):
super(EnableUnwinder, self).__init__("enable unwinder",
gdb.COMMAND_STACK)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
do_enable_unwinder(arg, True)
class DisableUnwinder(gdb.Command):
"""GDB command to disable the specified unwinder.
Usage: disable unwinder [locus-regexp [name-regexp]]
LOCUS-REGEXP is a regular expression specifying the unwinders to
    disable. It can be 'global', 'progspace', or the name of an objfile
within that progspace.
NAME_REGEXP is a regular expression to filter unwinder names. If
    this is omitted for a specified locus, then all registered unwinders
in the locus are affected.
"""
def __init__(self):
super(DisableUnwinder, self).__init__("disable unwinder",
gdb.COMMAND_STACK)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
do_enable_unwinder(arg, False)
def register_unwinder_commands():
"""Installs the unwinder commands."""
InfoUnwinder()
EnableUnwinder()
DisableUnwinder()
register_unwinder_commands()
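# A hedged usage sketch (assuming a GDB session with this module loaded): the
# commands registered above are driven from the GDB prompt, e.g.
#
#   (gdb) info unwinder
#   (gdb) enable unwinder progspace
#   (gdb) disable unwinder global my_unwinder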
|
suyashphadtare/gd-erp
|
refs/heads/develop
|
erpnext/accounts/doctype/journal_voucher/journal_voucher.py
|
4
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, flt, fmt_money, formatdate, getdate
from frappe import msgprint, _, scrub
from erpnext.setup.utils import get_company_currency
from erpnext.controllers.accounts_controller import AccountsController
class JournalVoucher(AccountsController):
def __init__(self, arg1, arg2=None):
super(JournalVoucher, self).__init__(arg1, arg2)
def validate(self):
if not self.is_opening:
self.is_opening='No'
self.clearance_date = None
super(JournalVoucher, self).validate_date_with_fiscal_year()
self.validate_cheque_info()
self.validate_entries_for_advance()
self.validate_debit_and_credit()
self.validate_against_jv()
self.validate_against_sales_invoice()
self.validate_against_purchase_invoice()
self.set_against_account()
self.create_remarks()
self.set_aging_date()
self.set_print_format_fields()
self.validate_against_sales_order()
self.validate_against_purchase_order()
def on_submit(self):
if self.voucher_type in ['Bank Voucher', 'Contra Voucher', 'Journal Entry']:
self.check_reference_date()
self.make_gl_entries()
self.check_credit_limit()
self.update_advance_paid()
def update_advance_paid(self):
advance_paid = frappe._dict()
for d in self.get("entries"):
if d.is_advance:
if d.against_sales_order:
advance_paid.setdefault("Sales Order", []).append(d.against_sales_order)
elif d.against_purchase_order:
advance_paid.setdefault("Purchase Order", []).append(d.against_purchase_order)
for voucher_type, order_list in advance_paid.items():
for voucher_no in list(set(order_list)):
frappe.get_doc(voucher_type, voucher_no).set_total_advance_paid()
def on_cancel(self):
from erpnext.accounts.utils import remove_against_link_from_jv
remove_against_link_from_jv(self.doctype, self.name, "against_jv")
self.make_gl_entries(1)
self.update_advance_paid()
def validate_cheque_info(self):
if self.voucher_type in ['Bank Voucher']:
if not self.cheque_no or not self.cheque_date:
msgprint(_("Reference No & Reference Date is required for {0}").format(self.voucher_type),
raise_exception=1)
if self.cheque_date and not self.cheque_no:
msgprint(_("Reference No is mandatory if you entered Reference Date"), raise_exception=1)
def validate_entries_for_advance(self):
for d in self.get('entries'):
			if not (d.against_voucher or d.against_invoice or d.against_jv):
master_type = frappe.db.get_value("Account", d.account, "master_type")
if (master_type == 'Customer' and flt(d.credit) > 0) or \
(master_type == 'Supplier' and flt(d.debit) > 0):
if not d.is_advance:
msgprint(_("Row {0}: Please check 'Is Advance' against Account {1} if this is an advance entry.").format(d.idx, d.account))
elif (d.against_sales_order or d.against_purchase_order) and d.is_advance != "Yes":
frappe.throw(_("Row {0}: Payment against Sales/Purchase Order should always be marked as advance").format(d.idx))
def validate_against_jv(self):
for d in self.get('entries'):
if d.against_jv:
account_root_type = frappe.db.get_value("Account", d.account, "root_type")
if account_root_type == "Asset" and flt(d.debit) > 0:
frappe.throw(_("For {0}, only credit entries can be linked against another debit entry")
.format(d.account))
elif account_root_type == "Liability" and flt(d.credit) > 0:
frappe.throw(_("For {0}, only debit entries can be linked against another credit entry")
.format(d.account))
if d.against_jv == self.name:
frappe.throw(_("You can not enter current voucher in 'Against Journal Voucher' column"))
against_entries = frappe.db.sql("""select * from `tabJournal Voucher Detail`
where account = %s and docstatus = 1 and parent = %s
and ifnull(against_jv, '') = '' and ifnull(against_invoice, '') = ''
and ifnull(against_voucher, '') = ''""", (d.account, d.against_jv), as_dict=True)
if not against_entries:
frappe.throw(_("Journal Voucher {0} does not have account {1} or already matched against other voucher")
.format(d.against_jv, d.account))
else:
dr_or_cr = "debit" if d.credit > 0 else "credit"
valid = False
for jvd in against_entries:
if flt(jvd[dr_or_cr]) > 0:
valid = True
if not valid:
frappe.throw(_("Against Journal Voucher {0} does not have any unmatched {1} entry")
.format(d.against_jv, dr_or_cr))
def validate_against_sales_invoice(self):
payment_against_voucher = self.validate_account_in_against_voucher("against_invoice", "Sales Invoice")
self.validate_against_invoice_fields("Sales Invoice", payment_against_voucher)
def validate_against_purchase_invoice(self):
payment_against_voucher = self.validate_account_in_against_voucher("against_voucher", "Purchase Invoice")
self.validate_against_invoice_fields("Purchase Invoice", payment_against_voucher)
def validate_against_sales_order(self):
payment_against_voucher = self.validate_account_in_against_voucher("against_sales_order", "Sales Order")
self.validate_against_order_fields("Sales Order", payment_against_voucher)
def validate_against_purchase_order(self):
payment_against_voucher = self.validate_account_in_against_voucher("against_purchase_order", "Purchase Order")
self.validate_against_order_fields("Purchase Order", payment_against_voucher)
def validate_account_in_against_voucher(self, against_field, doctype):
payment_against_voucher = frappe._dict()
field_dict = {'Sales Invoice': "Debit To",
'Purchase Invoice': "Credit To",
'Sales Order': "Customer",
'Purchase Order': "Supplier"
}
for d in self.get("entries"):
if d.get(against_field):
dr_or_cr = "credit" if against_field in ["against_invoice", "against_sales_order"] \
else "debit"
if against_field in ["against_invoice", "against_sales_order"] \
and flt(d.debit) > 0:
frappe.throw(_("Row {0}: Debit entry can not be linked with a {1}").format(d.idx, doctype))
if against_field in ["against_voucher", "against_purchase_order"] \
and flt(d.credit) > 0:
frappe.throw(_("Row {0}: Credit entry can not be linked with a {1}").format(d.idx, doctype))
voucher_account = frappe.db.get_value(doctype, d.get(against_field), \
scrub(field_dict.get(doctype)))
account_master_name = frappe.db.get_value("Account", d.account, "master_name")
if against_field in ["against_invoice", "against_voucher"] \
and voucher_account != d.account:
frappe.throw(_("Row {0}: Account {1} does not match with {2} {3} account") \
.format(d.idx, d.account, doctype, field_dict.get(doctype)))
if against_field in ["against_sales_order", "against_purchase_order"]:
if voucher_account != account_master_name:
frappe.throw(_("Row {0}: Account {1} does not match with {2} {3} Name") \
.format(d.idx, d.account, doctype, field_dict.get(doctype)))
elif d.is_advance == "Yes":
payment_against_voucher.setdefault(d.get(against_field), []).append(flt(d.get(dr_or_cr)))
return payment_against_voucher
def validate_against_invoice_fields(self, doctype, payment_against_voucher):
for voucher_no, payment_list in payment_against_voucher.items():
voucher_properties = frappe.db.get_value(doctype, voucher_no,
["docstatus", "outstanding_amount"])
if voucher_properties[0] != 1:
frappe.throw(_("{0} {1} is not submitted").format(doctype, voucher_no))
if flt(voucher_properties[1]) < flt(sum(payment_list)):
frappe.throw(_("Payment against {0} {1} cannot be greater \
than Outstanding Amount {2}").format(doctype, voucher_no, voucher_properties[1]))
def validate_against_order_fields(self, doctype, payment_against_voucher):
for voucher_no, payment_list in payment_against_voucher.items():
voucher_properties = frappe.db.get_value(doctype, voucher_no,
["docstatus", "per_billed", "status", "advance_paid", "grand_total"])
if voucher_properties[0] != 1:
frappe.throw(_("{0} {1} is not submitted").format(doctype, voucher_no))
if flt(voucher_properties[1]) >= 100:
frappe.throw(_("{0} {1} is fully billed").format(doctype, voucher_no))
if cstr(voucher_properties[2]) == "Stopped":
frappe.throw(_("{0} {1} is stopped").format(doctype, voucher_no))
if flt(voucher_properties[4]) < flt(voucher_properties[3]) + flt(sum(payment_list)):
frappe.throw(_("Advance paid against {0} {1} cannot be greater \
than Grand Total {2}").format(doctype, voucher_no, voucher_properties[3]))
def set_against_account(self):
accounts_debited, accounts_credited = [], []
for d in self.get("entries"):
			if flt(d.debit) > 0: accounts_debited.append(d.account)
			if flt(d.credit) > 0: accounts_credited.append(d.account)
		for d in self.get("entries"):
			if flt(d.debit) > 0: d.against_account = ", ".join(list(set(accounts_credited)))
			if flt(d.credit) > 0: d.against_account = ", ".join(list(set(accounts_debited)))
def validate_debit_and_credit(self):
self.total_debit, self.total_credit, self.difference = 0, 0, 0
for d in self.get("entries"):
if d.debit and d.credit:
frappe.throw(_("You cannot credit and debit same account at the same time"))
self.total_debit = flt(self.total_debit) + flt(d.debit, self.precision("debit", "entries"))
self.total_credit = flt(self.total_credit) + flt(d.credit, self.precision("credit", "entries"))
self.difference = flt(self.total_debit, self.precision("total_debit")) - \
flt(self.total_credit, self.precision("total_credit"))
if self.difference:
frappe.throw(_("Total Debit must be equal to Total Credit. The difference is {0}")
.format(self.difference))
def create_remarks(self):
r = []
if self.cheque_no:
if self.cheque_date:
r.append(_('Reference #{0} dated {1}').format(self.cheque_no, formatdate(self.cheque_date)))
else:
msgprint(_("Please enter Reference date"), raise_exception=frappe.MandatoryError)
for d in self.get('entries'):
if d.against_invoice and d.credit:
currency = frappe.db.get_value("Sales Invoice", d.against_invoice, "currency")
r.append(_("{0} against Sales Invoice {1}").format(fmt_money(flt(d.credit), currency = currency), \
d.against_invoice))
if d.against_sales_order and d.credit:
currency = frappe.db.get_value("Sales Order", d.against_sales_order, "currency")
r.append(_("{0} against Sales Order {1}").format(fmt_money(flt(d.credit), currency = currency), \
d.against_sales_order))
if d.against_voucher and d.debit:
bill_no = frappe.db.sql("""select bill_no, bill_date, currency
from `tabPurchase Invoice` where name=%s""", d.against_voucher)
if bill_no and bill_no[0][0] and bill_no[0][0].lower().strip() \
not in ['na', 'not applicable', 'none']:
r.append(_('{0} {1} against Bill {2} dated {3}').format(bill_no[0][2],
fmt_money(flt(d.debit)), bill_no[0][0],
bill_no[0][1] and formatdate(bill_no[0][1].strftime('%Y-%m-%d'))))
if d.against_purchase_order and d.debit:
currency = frappe.db.get_value("Purchase Order", d.against_purchase_order, "currency")
r.append(_("{0} against Purchase Order {1}").format(fmt_money(flt(d.credit), currency = currency), \
d.against_purchase_order))
if self.user_remark:
r.append(_("Note: {0}").format(self.user_remark))
if r:
self.remark = ("\n").join(r) #User Remarks is not mandatory
def set_aging_date(self):
if self.is_opening != 'Yes':
self.aging_date = self.posting_date
else:
# check account type whether supplier or customer
exists = False
for d in self.get('entries'):
account_type = frappe.db.get_value("Account", d.account, "account_type")
if account_type in ["Supplier", "Customer"]:
exists = True
break
# If customer/supplier account, aging date is mandatory
if exists and not self.aging_date:
msgprint(_("Aging Date is mandatory for opening entry"), raise_exception=1)
else:
self.aging_date = self.posting_date
def set_print_format_fields(self):
for d in self.get('entries'):
acc = frappe.db.get_value("Account", d.account, ["account_type", "master_type"], as_dict=1)
if not acc: continue
if acc.master_type in ['Supplier', 'Customer']:
if not self.pay_to_recd_from:
self.pay_to_recd_from = frappe.db.get_value(acc.master_type, ' - '.join(d.account.split(' - ')[:-1]),
acc.master_type == 'Customer' and 'customer_name' or 'supplier_name')
if self.voucher_type in ["Credit Note", "Debit Note"]:
self.set_total_amount(d.debit or d.credit)
if acc.account_type in ['Bank', 'Cash']:
self.set_total_amount(d.debit or d.credit)
def set_total_amount(self, amt):
company_currency = get_company_currency(self.company)
self.total_amount = fmt_money(amt, currency=company_currency)
from frappe.utils import money_in_words
self.total_amount_in_words = money_in_words(amt, company_currency)
def check_reference_date(self):
if self.cheque_date:
for d in self.get("entries"):
due_date = None
if d.against_invoice and flt(d.credit) > 0:
due_date = frappe.db.get_value("Sales Invoice", d.against_invoice, "due_date")
elif d.against_voucher and flt(d.debit) > 0:
due_date = frappe.db.get_value("Purchase Invoice", d.against_voucher, "due_date")
if due_date and getdate(self.cheque_date) > getdate(due_date):
msgprint(_("Note: Reference Date {0} is after invoice due date {1}")
.format(formatdate(self.cheque_date), formatdate(due_date)))
def make_gl_entries(self, cancel=0, adv_adj=0):
from erpnext.accounts.general_ledger import make_gl_entries
gl_map = []
for d in self.get("entries"):
if d.debit or d.credit:
gl_map.append(
self.get_gl_dict({
"account": d.account,
"against": d.against_account,
"debit": flt(d.debit, self.precision("debit", "entries")),
"credit": flt(d.credit, self.precision("credit", "entries")),
"against_voucher_type": (("Purchase Invoice" if d.against_voucher else None)
or ("Sales Invoice" if d.against_invoice else None)
or ("Journal Voucher" if d.against_jv else None)
or ("Sales Order" if d.against_sales_order else None)
or ("Purchase Order" if d.against_purchase_order else None)),
"against_voucher": d.against_voucher or d.against_invoice or d.against_jv
or d.against_sales_order or d.against_purchase_order,
"remarks": self.remark,
"cost_center": d.cost_center
})
)
if gl_map:
make_gl_entries(gl_map, cancel=cancel, adv_adj=adv_adj)
def check_credit_limit(self):
for d in self.get("entries"):
master_type, master_name = frappe.db.get_value("Account", d.account,
["master_type", "master_name"])
if master_type == "Customer" and master_name and flt(d.debit) > 0:
super(JournalVoucher, self).check_credit_limit(d.account)
def get_balance(self):
if not self.get('entries'):
msgprint(_("'Entries' cannot be empty"), raise_exception=True)
else:
flag, self.total_debit, self.total_credit = 0, 0, 0
diff = flt(self.difference, self.precision("difference"))
# If any row without amount, set the diff on that row
for d in self.get('entries'):
if not d.credit and not d.debit and diff != 0:
if diff>0:
d.credit = diff
elif diff<0:
						d.debit = abs(diff)
flag = 1
# Set the diff in a new row
if flag == 0 and diff != 0:
jd = self.append('entries', {})
if diff>0:
jd.credit = abs(diff)
elif diff<0:
jd.debit = abs(diff)
self.validate_debit_and_credit()
def get_outstanding_invoices(self):
self.set('entries', [])
total = 0
for d in self.get_values():
total += flt(d.outstanding_amount, self.precision("credit", "entries"))
jd1 = self.append('entries', {})
jd1.account = d.account
if self.write_off_based_on == 'Accounts Receivable':
jd1.credit = flt(d.outstanding_amount, self.precision("credit", "entries"))
jd1.against_invoice = cstr(d.name)
elif self.write_off_based_on == 'Accounts Payable':
jd1.debit = flt(d.outstanding_amount, self.precision("debit", "entries"))
jd1.against_voucher = cstr(d.name)
jd2 = self.append('entries', {})
if self.write_off_based_on == 'Accounts Receivable':
jd2.debit = total
elif self.write_off_based_on == 'Accounts Payable':
jd2.credit = total
self.validate_debit_and_credit()
def get_values(self):
cond = " and outstanding_amount <= {0}".format(self.write_off_amount) \
if flt(self.write_off_amount) > 0 else ""
if self.write_off_based_on == 'Accounts Receivable':
return frappe.db.sql("""select name, debit_to as account, outstanding_amount
from `tabSales Invoice` where docstatus = 1 and company = %s
and outstanding_amount > 0 %s""" % ('%s', cond), self.company, as_dict=True)
elif self.write_off_based_on == 'Accounts Payable':
return frappe.db.sql("""select name, credit_to as account, outstanding_amount
from `tabPurchase Invoice` where docstatus = 1 and company = %s
and outstanding_amount > 0 %s""" % ('%s', cond), self.company, as_dict=True)
@frappe.whitelist()
def get_default_bank_cash_account(company, voucher_type):
from erpnext.accounts.utils import get_balance_on
account = frappe.db.get_value("Company", company,
voucher_type=="Bank Voucher" and "default_bank_account" or "default_cash_account")
if account:
return {
"account": account,
"balance": get_balance_on(account)
}
@frappe.whitelist()
def get_payment_entry_from_sales_invoice(sales_invoice):
from erpnext.accounts.utils import get_balance_on
si = frappe.get_doc("Sales Invoice", sales_invoice)
jv = get_payment_entry(si)
jv.remark = 'Payment received against Sales Invoice {0}. {1}'.format(si.name, si.remarks)
# credit customer
jv.get("entries")[0].account = si.debit_to
jv.get("entries")[0].balance = get_balance_on(si.debit_to)
jv.get("entries")[0].credit = si.outstanding_amount
jv.get("entries")[0].against_invoice = si.name
# debit bank
jv.get("entries")[1].debit = si.outstanding_amount
return jv.as_dict()
@frappe.whitelist()
def get_payment_entry_from_purchase_invoice(purchase_invoice):
from erpnext.accounts.utils import get_balance_on
pi = frappe.get_doc("Purchase Invoice", purchase_invoice)
jv = get_payment_entry(pi)
jv.remark = 'Payment against Purchase Invoice {0}. {1}'.format(pi.name, pi.remarks)
# credit supplier
jv.get("entries")[0].account = pi.credit_to
jv.get("entries")[0].balance = get_balance_on(pi.credit_to)
jv.get("entries")[0].debit = pi.outstanding_amount
jv.get("entries")[0].against_voucher = pi.name
# credit bank
jv.get("entries")[1].credit = pi.outstanding_amount
return jv.as_dict()
def get_payment_entry(doc):
bank_account = get_default_bank_cash_account(doc.company, "Bank Voucher")
jv = frappe.new_doc('Journal Voucher')
jv.voucher_type = 'Bank Voucher'
jv.company = doc.company
jv.fiscal_year = doc.fiscal_year
jv.append("entries")
d2 = jv.append("entries")
if bank_account:
d2.account = bank_account["account"]
d2.balance = bank_account["balance"]
return jv
@frappe.whitelist()
def get_opening_accounts(company):
"""get all balance sheet accounts for opening entry"""
from erpnext.accounts.utils import get_balance_on
accounts = frappe.db.sql_list("""select name from tabAccount
where group_or_ledger='Ledger' and report_type='Balance Sheet' and company=%s""", company)
return [{"account": a, "balance": get_balance_on(a)} for a in accounts]
def get_against_purchase_invoice(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select name, credit_to, outstanding_amount, bill_no, bill_date
from `tabPurchase Invoice` where credit_to = %s and docstatus = 1
and outstanding_amount > 0 and %s like %s order by name desc limit %s, %s""" %
("%s", searchfield, "%s", "%s", "%s"),
(filters["account"], "%%%s%%" % txt, start, page_len))
def get_against_sales_invoice(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select name, debit_to, outstanding_amount
from `tabSales Invoice` where debit_to = %s and docstatus = 1
and outstanding_amount > 0 and `%s` like %s order by name desc limit %s, %s""" %
("%s", searchfield, "%s", "%s", "%s"),
(filters["account"], "%%%s%%" % txt, start, page_len))
def get_against_jv(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select distinct jv.name, jv.posting_date, jv.user_remark
from `tabJournal Voucher` jv, `tabJournal Voucher Detail` jvd
where jvd.parent = jv.name and jvd.account = %s and jv.docstatus = 1
and (ifnull(jvd.against_invoice, '') = '' and ifnull(jvd.against_voucher, '') = '' and ifnull(jvd.against_jv, '') = '' )
and jv.%s like %s order by jv.name desc limit %s, %s""" %
("%s", searchfield, "%s", "%s", "%s"),
(filters["account"], "%%%s%%" % txt, start, page_len))
@frappe.whitelist()
def get_outstanding(args):
args = eval(args)
if args.get("doctype") == "Journal Voucher" and args.get("account"):
against_jv_amount = frappe.db.sql("""
select sum(ifnull(debit, 0)) - sum(ifnull(credit, 0))
from `tabJournal Voucher Detail` where parent=%s and account=%s
and ifnull(against_invoice, '')='' and ifnull(against_voucher, '')=''
and ifnull(against_jv, '')=''""", (args['docname'], args['account']))
against_jv_amount = flt(against_jv_amount[0][0]) if against_jv_amount else 0
if against_jv_amount > 0:
return {"credit": against_jv_amount}
else:
return {"debit": -1* against_jv_amount}
elif args.get("doctype") == "Sales Invoice":
return {
"credit": flt(frappe.db.get_value("Sales Invoice", args["docname"],
"outstanding_amount"))
}
elif args.get("doctype") == "Purchase Invoice":
return {
"debit": flt(frappe.db.get_value("Purchase Invoice", args["docname"],
"outstanding_amount"))
}
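# Illustrative note (not part of the original module): the whitelisted
# get_outstanding endpoint above returns a one-key dict that the client
# script copies into the selected row. A hypothetical call might look like:
#
#   get_outstanding("{'doctype': 'Sales Invoice', 'docname': 'SINV-00001'}")
#   # -> {"credit": 1500.0}   (outstanding amount of the invoice)
#
#   get_outstanding("{'doctype': 'Purchase Invoice', 'docname': 'PINV-00001'}")
#   # -> {"debit": 2300.0}
#
# The document names and amounts here are made up for illustration only.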
|
alexlo03/ansible
|
refs/heads/devel
|
lib/ansible/inventory/data.py
|
18
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.inventory.group import Group
from ansible.inventory.host import Host
from ansible.module_utils.six import iteritems
from ansible.utils.vars import combine_vars
from ansible.utils.path import basedir
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class InventoryData(object):
"""
Holds inventory data (host and group objects).
    Using its methods should guarantee expected relationships and data.
"""
def __init__(self):
# the inventory object holds a list of groups
self.groups = {}
self.hosts = {}
# provides 'groups' magic var, host object has group_names
self._groups_dict_cache = {}
# current localhost, implicit or explicit
self.localhost = None
self.current_source = None
# Always create the 'all' and 'ungrouped' groups,
for group in ('all', 'ungrouped'):
self.add_group(group)
self.add_child('all', 'ungrouped')
def serialize(self):
self._groups_dict_cache = None
data = {
'groups': self.groups,
'hosts': self.hosts,
'local': self.localhost,
'source': self.current_source,
}
return data
def deserialize(self, data):
self._groups_dict_cache = {}
self.hosts = data.get('hosts')
self.groups = data.get('groups')
self.localhost = data.get('local')
self.current_source = data.get('source')
def _create_implicit_localhost(self, pattern):
if self.localhost:
new_host = self.localhost
else:
new_host = Host(pattern)
new_host.address = "127.0.0.1"
new_host.implicit = True
# set localhost defaults
py_interp = sys.executable
if not py_interp:
# sys.executable is not set in some cornercases. see issue #13585
py_interp = '/usr/bin/python'
display.warning('Unable to determine python interpreter from sys.executable. Using /usr/bin/python default. '
'You can correct this by setting ansible_python_interpreter for localhost')
new_host.set_variable("ansible_python_interpreter", py_interp)
new_host.set_variable("ansible_connection", 'local')
self.localhost = new_host
return new_host
def reconcile_inventory(self):
''' Ensure inventory basic rules, run after updates '''
display.debug('Reconcile groups and hosts in inventory.')
self.current_source = None
group_names = set()
# set group vars from group_vars/ files and vars plugins
for g in self.groups:
group = self.groups[g]
group_names.add(group.name)
# ensure all groups inherit from 'all'
if group.name != 'all' and not group.get_ancestors():
self.add_child('all', group.name)
host_names = set()
# get host vars from host_vars/ files and vars plugins
for host in self.hosts.values():
host_names.add(host.name)
mygroups = host.get_groups()
if self.groups['ungrouped'] in mygroups:
# clear ungrouped of any incorrectly stored by parser
if set(mygroups).difference(set([self.groups['all'], self.groups['ungrouped']])):
self.groups['ungrouped'].remove_host(host)
elif not host.implicit:
# add ungrouped hosts to ungrouped, except implicit
length = len(mygroups)
if length == 0 or (length == 1 and self.groups['all'] in mygroups):
self.add_child('ungrouped', host.name)
# special case for implicit hosts
if host.implicit:
host.vars = combine_vars(self.groups['all'].get_vars(), host.vars)
# warn if overloading identifier as both group and host
for conflict in group_names.intersection(host_names):
display.warning("Found both group and host with same name: %s" % conflict)
self._groups_dict_cache = {}
def get_host(self, hostname):
        ''' fetch host object using name; deal with implicit localhost '''
matching_host = self.hosts.get(hostname, None)
# if host is not in hosts dict
if matching_host is None and hostname in C.LOCALHOST:
# might need to create implicit localhost
matching_host = self._create_implicit_localhost(hostname)
return matching_host
def add_group(self, group):
''' adds a group to inventory if not there already '''
if group:
if group not in self.groups:
g = Group(group)
self.groups[group] = g
self._groups_dict_cache = {}
display.debug("Added group %s to inventory" % group)
else:
display.debug("group %s already in inventory" % group)
else:
raise AnsibleError("Invalid empty/false group name provided: %s" % group)
def remove_group(self, group):
if group in self.groups:
del self.groups[group]
display.debug("Removed group %s from inventory" % group)
self._groups_dict_cache = {}
for host in self.hosts:
h = self.hosts[host]
h.remove_group(group)
def add_host(self, host, group=None, port=None):
''' adds a host to inventory and possibly a group if not there already '''
if host:
g = None
if group:
if group in self.groups:
g = self.groups[group]
else:
raise AnsibleError("Could not find group %s in inventory" % group)
if host not in self.hosts:
h = Host(host, port)
self.hosts[host] = h
if self.current_source: # set to 'first source' in which host was encountered
self.set_variable(host, 'inventory_file', self.current_source)
self.set_variable(host, 'inventory_dir', basedir(self.current_source))
else:
self.set_variable(host, 'inventory_file', None)
self.set_variable(host, 'inventory_dir', None)
display.debug("Added host %s to inventory" % (host))
# set default localhost from inventory to avoid creating an implicit one. Last localhost defined 'wins'.
if host in C.LOCALHOST:
if self.localhost is None:
self.localhost = self.hosts[host]
display.vvvv("Set default localhost to %s" % h)
else:
display.warning("A duplicate localhost-like entry was found (%s). First found localhost was %s" % (h, self.localhost.name))
else:
h = self.hosts[host]
if g:
g.add_host(h)
self._groups_dict_cache = {}
display.debug("Added host %s to group %s" % (host, group))
else:
raise AnsibleError("Invalid empty host name provided: %s" % host)
def remove_host(self, host):
if host in self.hosts:
del self.hosts[host]
for group in self.groups:
g = self.groups[group]
g.remove_host(host)
def set_variable(self, entity, varname, value):
        ''' sets a variable for an inventory object '''
if entity in self.groups:
inv_object = self.groups[entity]
elif entity in self.hosts:
inv_object = self.hosts[entity]
else:
raise AnsibleError("Could not identify group or host named %s" % entity)
inv_object.set_variable(varname, value)
display.debug('set %s for %s' % (varname, entity))
def add_child(self, group, child):
''' Add host or group to group '''
if group in self.groups:
g = self.groups[group]
if child in self.groups:
g.add_child_group(self.groups[child])
elif child in self.hosts:
g.add_host(self.hosts[child])
else:
raise AnsibleError("%s is not a known host nor group" % child)
self._groups_dict_cache = {}
display.debug('Group %s now contains %s' % (group, child))
else:
raise AnsibleError("%s is not a known group" % group)
def get_groups_dict(self):
"""
We merge a 'magic' var 'groups' with group name keys and hostname list values into every host variable set. Cache for speed.
"""
if not self._groups_dict_cache:
for (group_name, group) in iteritems(self.groups):
self._groups_dict_cache[group_name] = [h.name for h in group.get_hosts()]
return self._groups_dict_cache
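# Illustrative sketch (not part of the original module): a minimal use of
# InventoryData, with made-up host and group names, might look like this.
#
#   inv = InventoryData()
#   inv.add_group('webservers')
#   inv.add_host('web01.example.com', group='webservers')
#   inv.reconcile_inventory()            # ensures 'webservers' inherits from 'all'
#   host = inv.get_host('web01.example.com')
#   inv.set_variable('web01.example.com', 'http_port', 8080)
#   inv.get_groups_dict()['webservers']  # -> ['web01.example.com']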
|
mozilla/zamboni
|
refs/heads/master
|
mkt/monolith/management/commands/__init__.py
|
12133432
| |
carsongee/edx-platform
|
refs/heads/master
|
lms/djangoapps/bulk_email/migrations/__init__.py
|
12133432
| |
runekaagaard/django-contrib-locking
|
refs/heads/master
|
tests/save_delete_hooks/__init__.py
|
12133432
| |
mohamed-mamdouh95/pedestrainTracker
|
refs/heads/master
|
darkflow/net/mnist/run.py
|
12133432
| |
Jimmy-Morzaria/scikit-learn
|
refs/heads/master
|
doc/sphinxext/github_link.py
|
314
|
from operator import attrgetter
import inspect
import subprocess
import os
import sys
from functools import partial
REVISION_CMD = 'git rev-parse --short HEAD'
def _get_git_revision():
try:
revision = subprocess.check_output(REVISION_CMD.split()).strip()
except subprocess.CalledProcessError:
print('Failed to execute git to get revision')
return None
return revision.decode('utf-8')
def _linkcode_resolve(domain, info, package, url_fmt, revision):
"""Determine a link to online source for a class/method/function
This is called by sphinx.ext.linkcode
An example with a long-untouched module that everyone has
>>> _linkcode_resolve('py', {'module': 'tty',
... 'fullname': 'setraw'},
... package='tty',
... url_fmt='http://hg.python.org/cpython/file/'
... '{revision}/Lib/{package}/{path}#L{lineno}',
... revision='xxxx')
'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'
"""
if revision is None:
return
if domain not in ('py', 'pyx'):
return
if not info.get('module') or not info.get('fullname'):
return
class_name = info['fullname'].split('.')[0]
if type(class_name) != str:
# Python 2 only
class_name = class_name.encode('utf-8')
module = __import__(info['module'], fromlist=[class_name])
obj = attrgetter(info['fullname'])(module)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except Exception:
fn = None
if not fn:
return
fn = os.path.relpath(fn,
start=os.path.dirname(__import__(package).__file__))
try:
lineno = inspect.getsourcelines(obj)[1]
except Exception:
lineno = ''
return url_fmt.format(revision=revision, package=package,
path=fn, lineno=lineno)
def make_linkcode_resolve(package, url_fmt):
"""Returns a linkcode_resolve function for the given URL format
revision is a git commit reference (hash or name)
package is the name of the root module of the package
url_fmt is along the lines of ('https://github.com/USER/PROJECT/'
'blob/{revision}/{package}/'
'{path}#L{lineno}')
"""
revision = _get_git_revision()
return partial(_linkcode_resolve, revision=revision, package=package,
url_fmt=url_fmt)
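# Illustrative sketch (not part of the original module): in a Sphinx conf.py
# this helper is typically wired up as the linkcode extension's resolver.
# The package name and URL below are placeholders, not the project's real values.
#
#   linkcode_resolve = make_linkcode_resolve(
#       'mypackage',
#       'https://github.com/USER/PROJECT/blob/{revision}/mypackage/{path}#L{lineno}')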
|
dhylands/micropython
|
refs/heads/master
|
tests/basics/builtin_slice.py
|
56
|
# test builtin slice
# print slice
class A:
def __getitem__(self, idx):
print(idx)
A()[1:2:3]
|
izhan/Stream-Framework
|
refs/heads/master
|
feedly/serializers/aggregated_activity_serializer.py
|
4
|
from feedly.activity import AggregatedActivity, Activity
from feedly.exceptions import SerializationException
from feedly.serializers.activity_serializer import ActivitySerializer
from feedly.serializers.utils import check_reserved
from feedly.utils import epoch_to_datetime, datetime_to_epoch
from feedly.serializers.base import BaseAggregatedSerializer
class AggregatedActivitySerializer(BaseAggregatedSerializer):
'''
Optimized version of the Activity serializer for AggregatedActivities
v3group;;created_at;;updated_at;;seen_at;;read_at;;aggregated_activities
Main advantage is that it prevents you from increasing the storage of
a notification without realizing you are adding the extra data
Depending on dehydrate it will either dump dehydrated aggregated activities
or store the full aggregated activity
'''
#: indicates if dumps returns dehydrated aggregated activities
dehydrate = True
identifier = 'v3'
reserved_characters = [';', ',', ';;']
date_fields = ['created_at', 'updated_at', 'seen_at', 'read_at']
activity_serializer_class = ActivitySerializer
def dumps(self, aggregated):
self.check_type(aggregated)
activity_serializer = self.activity_serializer_class(Activity)
# start by storing the group
parts = [aggregated.group]
check_reserved(aggregated.group, [';;'])
# store the dates
for date_field in self.date_fields:
value = getattr(aggregated, date_field)
epoch = datetime_to_epoch(value) if value is not None else -1
parts += [epoch]
# add the activities serialization
serialized_activities = []
if self.dehydrate:
if not aggregated.dehydrated:
aggregated = aggregated.get_dehydrated()
serialized_activities = map(str, aggregated._activity_ids)
else:
for activity in aggregated.activities:
serialized = activity_serializer.dumps(activity)
check_reserved(serialized, [';', ';;'])
serialized_activities.append(serialized)
serialized_activities_part = ';'.join(serialized_activities)
parts.append(serialized_activities_part)
# add the minified activities
parts.append(aggregated.minimized_activities)
# stick everything together
serialized_aggregated = ';;'.join(map(str, parts))
serialized = '%s%s' % (self.identifier, serialized_aggregated)
return serialized
def loads(self, serialized_aggregated):
activity_serializer = self.activity_serializer_class(Activity)
try:
serialized_aggregated = serialized_aggregated[2:]
parts = serialized_aggregated.split(';;')
# start with the group
group = parts[0]
aggregated = self.aggregated_activity_class(group)
# get the date and activities
date_dict = dict(zip(self.date_fields, parts[1:5]))
for k, v in date_dict.items():
date_value = None
if v != '-1':
date_value = epoch_to_datetime(float(v))
setattr(aggregated, k, date_value)
# write the activities
serializations = parts[5].split(';')
if self.dehydrate:
activity_ids = map(int, serializations)
aggregated._activity_ids = activity_ids
aggregated.dehydrated = True
else:
activities = [activity_serializer.loads(s)
for s in serializations]
aggregated.activities = activities
aggregated.dehydrated = False
# write the minimized activities
minimized = int(parts[6])
aggregated.minimized_activities = minimized
return aggregated
except Exception, e:
msg = unicode(e)
raise SerializationException(msg)
class NotificationSerializer(AggregatedActivitySerializer):
#: indicates if dumps returns dehydrated aggregated activities
dehydrate = False
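# Illustrative sketch (not part of the original module): with dehydrate=True,
# dumps() produces a string of the form described in the class docstring,
# e.g. (group name, timestamps and ids made up):
#
#   'v3notification_group;;1389439590.0;;1389439590.0;;-1;;-1;;101;102;103;;0'
#
# i.e. the identifier, the group, four epoch timestamps (-1 for unset dates),
# the ';'-joined activity ids and the minimized-activities count, all joined
# by ';;'. loads() strips the two-character identifier and splits on ';;'
# to rebuild the AggregatedActivity.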
|
jeasoft/odoo
|
refs/heads/marcos-8.0
|
comunity_modules/product_historical_price/__openerp__.py
|
2
|
# -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
# Copyright (C) Vauxoo (<http://vauxoo.com>).
# All Rights Reserved
###############Credits######################################################
# Coded by: Vauxoo C.A.
# Planified by: Nhomar Hernandez
# Audited by: Vauxoo C.A.
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
# "security/groups.xml",
{
"name": "Product Historical Price",
"version": "0.2",
"author": "Vauxoo",
"category": "Generic Modules/Product",
"description": """
        What this module does:
This module gets the historical price of a product
""",
"website": "http://Vauxoo.com",
"license": "",
"depends": [
"product",
"decimal_precision",
"account",
"sale",
],
"demo": [],
"data": [
"view/product_view.xml",
"data/product_data.xml",
"security/ir.model.access.csv"
],
"test": [],
"js": [],
"css": [],
"qweb": [],
"installable": True,
"auto_install": False,
"active": False
}
|
kennethreitz/pipenv
|
refs/heads/master
|
pipenv/patched/notpip/_vendor/distlib/util.py
|
33
|
#
# Copyright (C) 2012-2017 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import socket
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
import subprocess
import sys
import tarfile
import tempfile
import textwrap
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
splittype, HTTPHandler, BaseConfigurator, valid_ident,
Container, configparser, URLError, ZipFile, fsdecode,
unquote, urlparse)
logger = logging.getLogger(__name__)
#
# Requirement parsing code as per PEP 508
#
IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')
VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')
COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')
MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')
OR = re.compile(r'^or\b\s*')
AND = re.compile(r'^and\b\s*')
NON_SPACE = re.compile(r'(\S+)\s*')
STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')
def parse_marker(marker_string):
"""
Parse a marker string and return a dictionary containing a marker expression.
The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in
the expression grammar, or strings. A string contained in quotes is to be
interpreted as a literal string, and a string not contained in quotes is a
variable (such as os_name).
"""
def marker_var(remaining):
# either identifier, or literal string
m = IDENTIFIER.match(remaining)
if m:
result = m.groups()[0]
remaining = remaining[m.end():]
elif not remaining:
raise SyntaxError('unexpected end of input')
else:
q = remaining[0]
if q not in '\'"':
raise SyntaxError('invalid expression: %s' % remaining)
oq = '\'"'.replace(q, '')
remaining = remaining[1:]
parts = [q]
while remaining:
# either a string chunk, or oq, or q to terminate
if remaining[0] == q:
break
elif remaining[0] == oq:
parts.append(oq)
remaining = remaining[1:]
else:
m = STRING_CHUNK.match(remaining)
if not m:
raise SyntaxError('error in string literal: %s' % remaining)
parts.append(m.groups()[0])
remaining = remaining[m.end():]
else:
s = ''.join(parts)
raise SyntaxError('unterminated string: %s' % s)
parts.append(q)
result = ''.join(parts)
remaining = remaining[1:].lstrip() # skip past closing quote
return result, remaining
def marker_expr(remaining):
if remaining and remaining[0] == '(':
result, remaining = marker(remaining[1:].lstrip())
if remaining[0] != ')':
raise SyntaxError('unterminated parenthesis: %s' % remaining)
remaining = remaining[1:].lstrip()
else:
lhs, remaining = marker_var(remaining)
while remaining:
m = MARKER_OP.match(remaining)
if not m:
break
op = m.groups()[0]
remaining = remaining[m.end():]
rhs, remaining = marker_var(remaining)
lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}
result = lhs
return result, remaining
def marker_and(remaining):
lhs, remaining = marker_expr(remaining)
while remaining:
m = AND.match(remaining)
if not m:
break
remaining = remaining[m.end():]
rhs, remaining = marker_expr(remaining)
lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}
return lhs, remaining
def marker(remaining):
lhs, remaining = marker_and(remaining)
while remaining:
m = OR.match(remaining)
if not m:
break
remaining = remaining[m.end():]
rhs, remaining = marker_and(remaining)
lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs}
return lhs, remaining
return marker(marker_string)
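# Illustrative sketch (not part of the original code): parse_marker returns an
# (expression, remaining) tuple; quoted literals keep their quotes. Roughly:
#
#   parse_marker("python_version >= '3.6' and os_name == 'posix'")
#   # -> ({'op': 'and',
#   #      'lhs': {'op': '>=', 'lhs': 'python_version', 'rhs': "'3.6'"},
#   #      'rhs': {'op': '==', 'lhs': 'os_name', 'rhs': "'posix'"}}, '')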
def parse_requirement(req):
"""
Parse a requirement passed in as a string. Return a Container
whose attributes contain the various parts of the requirement.
"""
remaining = req.strip()
if not remaining or remaining.startswith('#'):
return None
m = IDENTIFIER.match(remaining)
if not m:
raise SyntaxError('name expected: %s' % remaining)
distname = m.groups()[0]
remaining = remaining[m.end():]
extras = mark_expr = versions = uri = None
if remaining and remaining[0] == '[':
i = remaining.find(']', 1)
if i < 0:
raise SyntaxError('unterminated extra: %s' % remaining)
s = remaining[1:i]
remaining = remaining[i + 1:].lstrip()
extras = []
while s:
m = IDENTIFIER.match(s)
if not m:
raise SyntaxError('malformed extra: %s' % s)
extras.append(m.groups()[0])
s = s[m.end():]
if not s:
break
if s[0] != ',':
raise SyntaxError('comma expected in extras: %s' % s)
s = s[1:].lstrip()
if not extras:
extras = None
if remaining:
if remaining[0] == '@':
# it's a URI
remaining = remaining[1:].lstrip()
m = NON_SPACE.match(remaining)
if not m:
raise SyntaxError('invalid URI: %s' % remaining)
uri = m.groups()[0]
t = urlparse(uri)
# there are issues with Python and URL parsing, so this test
# is a bit crude. See bpo-20271, bpo-23505. Python doesn't
# always parse invalid URLs correctly - it should raise
# exceptions for malformed URLs
if not (t.scheme and t.netloc):
raise SyntaxError('Invalid URL: %s' % uri)
remaining = remaining[m.end():].lstrip()
else:
def get_versions(ver_remaining):
"""
Return a list of operator, version tuples if any are
specified, else None.
"""
m = COMPARE_OP.match(ver_remaining)
versions = None
if m:
versions = []
while True:
op = m.groups()[0]
ver_remaining = ver_remaining[m.end():]
m = VERSION_IDENTIFIER.match(ver_remaining)
if not m:
raise SyntaxError('invalid version: %s' % ver_remaining)
v = m.groups()[0]
versions.append((op, v))
ver_remaining = ver_remaining[m.end():]
if not ver_remaining or ver_remaining[0] != ',':
break
ver_remaining = ver_remaining[1:].lstrip()
m = COMPARE_OP.match(ver_remaining)
if not m:
raise SyntaxError('invalid constraint: %s' % ver_remaining)
if not versions:
versions = None
return versions, ver_remaining
if remaining[0] != '(':
versions, remaining = get_versions(remaining)
else:
i = remaining.find(')', 1)
if i < 0:
raise SyntaxError('unterminated parenthesis: %s' % remaining)
s = remaining[1:i]
remaining = remaining[i + 1:].lstrip()
# As a special diversion from PEP 508, allow a version number
# a.b.c in parentheses as a synonym for ~= a.b.c (because this
# is allowed in earlier PEPs)
if COMPARE_OP.match(s):
versions, _ = get_versions(s)
else:
m = VERSION_IDENTIFIER.match(s)
if not m:
raise SyntaxError('invalid constraint: %s' % s)
v = m.groups()[0]
s = s[m.end():].lstrip()
if s:
raise SyntaxError('invalid constraint: %s' % s)
versions = [('~=', v)]
if remaining:
if remaining[0] != ';':
raise SyntaxError('invalid requirement: %s' % remaining)
remaining = remaining[1:].lstrip()
mark_expr, remaining = parse_marker(remaining)
if remaining and remaining[0] != '#':
raise SyntaxError('unexpected trailing data: %s' % remaining)
if not versions:
rs = distname
else:
rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
return Container(name=distname, extras=extras, constraints=versions,
marker=mark_expr, url=uri, requirement=rs)
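# Illustrative sketch (not part of the original code): a made-up requirement
# string parses into a Container roughly like this:
#
#   r = parse_requirement("requests[security] (>= 2.8.1); python_version < '3'")
#   # r.name        -> 'requests'
#   # r.extras      -> ['security']
#   # r.constraints -> [('>=', '2.8.1')]
#   # r.marker      -> {'op': '<', 'lhs': 'python_version', 'rhs': "'3'"}
#   # r.url         -> None
#   # r.requirement -> 'requests >= 2.8.1'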
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(root, path):
# normalizes and returns a lstripped-/-separated path
root = root.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(root)
return path[len(root):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_executable():
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on OS X
# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
# in os.environ):
# result = os.environ['__PYVENV_LAUNCHER__']
# else:
# result = sys.executable
# return result
result = os.path.normcase(sys.executable)
if not isinstance(result, text_type):
result = fsdecode(result)
return result
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
jdata = json.load(stream)
result = jdata['extensions']['python.exports']['exports']
for group, entries in result.items():
for k, v in entries.items():
s = '%s = %s' % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
def read_stream(cp, stream):
if hasattr(cp, 'read_file'):
cp.read_file(stream)
else:
cp.readfp(stream)
cp = configparser.ConfigParser()
try:
read_stream(cp, stream)
except configparser.MissingSectionHeaderError:
stream.close()
data = textwrap.dedent(data)
stream = StringIO(data)
read_stream(cp, stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
#entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter('utf-8')(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
#for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
#obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
than 'source'. Raise PackagingFileError if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
if os.path.exists(path):
os.remove(path)
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.write_binary_file(path, data.encode(encoding))
def set_mode(self, bits, mask, files):
if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
compile_kwargs = {}
if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'):
compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH
py_compile.compile(path, dpath, diagpath, True, **compile_kwargs) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ['__pycache__']
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self): # pragma: no cover
return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
self.suffix, self.flags)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (self.name == other.name and
self.prefix == other.prefix and
self.suffix == other.suffix and
self.flags == other.flags)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if '[' in specification or ']' in specification:
raise DistlibException("Invalid specification "
"'%s'" % specification)
else:
d = m.groupdict()
name = d['name']
path = d['callable']
colons = path.count(':')
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException("Invalid specification "
"'%s'" % specification)
prefix, suffix = path.split(':')
flags = d['flags']
if flags is None:
if '[' in specification or ']' in specification:
raise DistlibException("Invalid specification "
"'%s'" % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(',')]
result = ExportEntry(name, prefix, suffix, flags)
return result
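# Illustrative sketch (not part of the original code): an export specification
# in the "name = module:callable [flags]" form parses roughly as follows
# (the names below are placeholders):
#
#   e = get_export_entry('mycmd = mypkg.scripts:main [gui]')
#   # e.name -> 'mycmd', e.prefix -> 'mypkg.scripts',
#   # e.suffix -> 'main', e.flags -> ['gui']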
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.path.expanduser('~') - will be the parent directory of
the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = '.distlib'
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
result = os.path.expandvars('$localappdata')
else:
# Assume posix, or old Windows
result = os.path.expanduser('~')
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning('Directory exists but is not writable: %s', result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning('Unable to create %s', result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning('Default location unusable, using %s', result)
return os.path.join(result, suffix)
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(':', '---')
p = p.replace(os.sep, '--')
return d + p + '.cache'
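# Illustrative example (not part of the original code), on a POSIX system:
#
#   path_to_cache_dir('/home/user/.distlib/wheels')
#   # -> '--home--user--.distlib--wheels.cache'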
def ensure_slash(s):
if not s.endswith('/'):
return s + '/'
return s
def parse_credentials(netloc):
username = password = None
if '@' in netloc:
prefix, netloc = netloc.rsplit('@', 1)
if ':' not in prefix:
username = prefix
else:
username, password = prefix.split(':', 1)
if username:
username = unquote(username)
if password:
password = unquote(password)
return username, password, netloc
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
'([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
filename = unquote(filename).replace(' ', '-')
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[:m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r'\b', filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1:], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string.
From e.g. a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
d = m.groupdict()
return d['name'].strip().lower(), d['ver']
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if '*' in requested:
requested.remove('*')
result |= available
for r in requested:
if r == '-':
result.add(r)
elif r.startswith('-'):
unwanted = r[1:]
if unwanted not in available:
logger.warning('undeclared extra: %s' % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning('undeclared extra: %s' % r)
result.add(r)
return result
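# Illustrative example (not part of the original code): '*' requests every
# declared extra and a leading '-' removes one again:
#
#   get_extras(['*', '-tests'], ['docs', 'tests'])
#   # -> {'docs'}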
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
ct = headers.get('Content-Type')
if not ct.startswith('application/json'):
logger.debug('Unexpected response for JSON request: %s', ct)
else:
reader = codecs.getreader('utf-8')(resp)
#data = reader.read().decode('utf-8')
#result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception('Failed to get external data for %s: %s', url, e)
return result
_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
def get_project_data(name):
url = '%s/%s/project.json' % (name[0].upper(), name)
url = urljoin(_external_data_base_url, url)
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
url = urljoin(_external_data_base_url, url)
return _get_external_data(url)
class Cache(object):
"""
A class implementing a cache for resources that need to live in the file system
e.g. shared libraries. This class was moved from resources to here because it
could be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base): # pragma: no cover
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning('Directory \'%s\' is not private', base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
        Publish an event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
event, args, kwargs, result)
return result
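# A minimal sketch of how EventMixin is intended to be used; the Builder class
# and the 'before-build' event name are made up for illustration.
def _event_mixin_sketch():
    class Builder(EventMixin):
        def build(self):
            # subscribers get the event name plus whatever the publisher passes
            return self.publish('before-build', target='wheel')
    def on_before_build(event, **kwargs):
        return '%s:%s' % (event, kwargs.get('target'))
    b = Builder()
    b.add('before-build', on_before_build)      # subscribe
    results = b.build()                         # ['before-build:wheel']
    b.remove('before-build', on_before_build)   # unsubscribe again
    return results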
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError: # pragma: no cover
raise ValueError('%r not a successor of anything' % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError: # pragma: no cover
raise ValueError('%r not a successor of %r' % (succ, pred))
def is_step(self, step):
return (step in self._preds or step in self._succs or
step in self._nodes)
def get_steps(self, final):
if not self.is_step(final):
raise ValueError('Unknown: %r' % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
#http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ['digraph G {']
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(' %s -> %s;' % (pred, succ))
for node in self._nodes:
result.append(' %s;' % node)
result.append('}')
return '\n'.join(result)
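# A minimal sketch of the Sequencer above; the step names are illustrative.
# add(pred, succ) records "pred must run before succ", and get_steps(final)
# returns an ordering that ends at the requested step.
def _sequencer_sketch():
    seq = Sequencer()
    seq.add('compile', 'link')
    seq.add('link', 'package')
    seq.add_node('docs')                         # isolated step with no edges
    ordered = list(seq.get_steps('package'))     # ['compile', 'link', 'package']
    graph_source = seq.dot                       # DOT text for the dependency graph
    return ordered, graph_source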
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode('utf-8')
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError('path outside destination: %r' % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
if format is None:
if archive_filename.endswith(('.zip', '.whl')):
format = 'zip'
elif archive_filename.endswith(('.tar.gz', '.tgz')):
format = 'tgz'
mode = 'r:gz'
elif archive_filename.endswith(('.tar.bz2', '.tbz')):
format = 'tbz'
mode = 'r:bz2'
elif archive_filename.endswith('.tar'):
format = 'tar'
mode = 'r'
else: # pragma: no cover
raise ValueError('Unknown format for %r' % archive_filename)
try:
if format == 'zip':
archive = ZipFile(archive_filename, 'r')
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != 'zip' and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode('utf-8')
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
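# A minimal sketch tying zip_dir() and unarchive() together; the paths are
# illustrative only. zip_dir() returns an in-memory ZIP of a tree, while
# unarchive() infers the format from the extension and rejects member paths
# that would escape dest_dir.
def _archive_sketch(src_dir='/tmp/example-src',
                    archive='/tmp/example.tar.gz',
                    dest_dir='/tmp/example-dest'):
    buf = zip_dir(src_dir)             # io.BytesIO containing a ZIP of src_dir
    unarchive(archive, dest_dir)       # extracts the .tar.gz into dest_dir, checking paths
    return len(buf.getvalue())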
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G', 'T', 'P')
class Progress(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
        if ((duration <= 0) and self.max is None) or self.cur == self.min:
result = '??:??:??'
#elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
#import pdb; pdb.set_trace()
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
#import pdb; pdb.set_trace()
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
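# A minimal sketch of the Progress helper; the counts are illustrative.
# update()/increment() feed the counters that percentage, ETA and speed
# are derived from.
def _progress_sketch(total=1000):
    p = Progress(maxval=total).start()
    p.increment(250)                             # a quarter of the work done
    snapshot = (p.percentage, p.ETA, p.speed)    # e.g. (' 25 %', 'ETA : ...', '... B/s')
    p.stop()                                     # forces cur == max and done == True
    return snapshot, p.percentage                # second value is '100 %'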
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
# we support both
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
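# A minimal sketch of the extended glob syntax accepted by iglob(); the
# patterns are illustrative. '**' recurses into subdirectories and '{a,b}'
# expands to alternatives, on top of ordinary fnmatch-style wildcards.
def _iglob_sketch():
    py_sources = list(iglob('src/**/*.py'))           # recursive match under src/
    configs = list(iglob('etc/{dev,prod}/app.cfg'))   # brace alternatives
    return py_sources, configs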
if ssl:
from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
CertificateError)
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, 'SSLContext'):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs)
else: # pragma: no cover
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
if hasattr(ssl, 'OP_NO_SSLv2'):
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError: # pragma: no cover
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
    # To protect against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError('Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
if ssl:
class HTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
if ssl:
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None,
**kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
# Python 3 determines encoding from locale. Force 'utf-8'
# file encoding to match other forced utf-8 encoding
kwargs['encoding'] = 'utf-8'
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
'delimiter': str(','), # The strs are used because we need native
'quotechar': str('"'), # str in the csv API (2.x won't take
'lineterminator': str('\n') # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
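# A minimal sketch of the CSV helpers above; the file name and rows are
# illustrative. Both classes hide the 2.x/3.x bytes-vs-text differences
# behind a plain unicode row interface.
def _csv_sketch(path='/tmp/example-record.csv'):
    with CSVWriter(path) as writer:
        writer.writerow(['name', 'version'])
        writer.writerow(['distlib', '0.3.0'])
    with CSVReader(path=path) as reader:
        rows = [row for row in reader]
    return rows        # [['name', 'version'], ['distlib', '0.3.0']]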
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters['inc'] = 'inc_convert'
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if '()' in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
args = config.pop('[]', ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and '()' in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
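# A minimal sketch of how Configurator resolves '()' callable specs; the
# dotted path and keys are illustrative. A dict value containing a '()' key
# is instantiated lazily the first time it is looked up via __getitem__.
def _configurator_sketch():
    config = {
        'pipeline': {
            '()': 'collections.OrderedDict',    # any resolvable callable
            '[]': [[('stage', 'build')]],       # positional args, converted first
        },
    }
    cfg = Configurator(config)
    return cfg['pipeline']                      # OrderedDict([('stage', 'build')])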
class SubprocessMixin(object):
"""
Mixin for running subprocesses and capturing their output
"""
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
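# A few illustrative inputs for normalize_name(); per PEP 503, runs of '-',
# '_' and '.' collapse to a single '-' and the result is lower-cased.
def _normalize_name_sketch():
    return [
        normalize_name('Django'),            # 'django'
        normalize_name('zope.interface'),    # 'zope-interface'
        normalize_name('oslo__utils'),       # 'oslo-utils'
    ]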
|
datalogics-robb/scons
|
refs/heads/master
|
test/option-r.py
|
2
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
import string
import sys
test = TestSCons.TestSCons()
test.write('SConstruct', "")
test.run(arguments = '-r .',
stderr = "Warning: the -r option is not yet implemented\n")
test.run(arguments = '--no-builtin-rules .',
stderr = "Warning: the --no-builtin-rules option is not yet implemented\n")
test.pass_test()
|
endlessm/chromium-browser
|
refs/heads/master
|
third_party/catapult/third_party/gsutil/gslib/vendored/boto/boto/dynamodb2/fields.py
|
163
|
from boto.dynamodb2.types import STRING
class BaseSchemaField(object):
"""
An abstract class for defining schema fields.
Contains most of the core functionality for the field. Subclasses must
define an ``attr_type`` to pass to DynamoDB.
"""
attr_type = None
def __init__(self, name, data_type=STRING):
"""
Creates a Python schema field, to represent the data to pass to
DynamoDB.
Requires a ``name`` parameter, which should be a string name of the
field.
Optionally accepts a ``data_type`` parameter, which should be a
constant from ``boto.dynamodb2.types``. (Default: ``STRING``)
"""
self.name = name
self.data_type = data_type
def definition(self):
"""
Returns the attribute definition structure DynamoDB expects.
Example::
>>> field.definition()
{
'AttributeName': 'username',
'AttributeType': 'S',
}
"""
return {
'AttributeName': self.name,
'AttributeType': self.data_type,
}
def schema(self):
"""
Returns the schema structure DynamoDB expects.
Example::
>>> field.schema()
{
'AttributeName': 'username',
'KeyType': 'HASH',
}
"""
return {
'AttributeName': self.name,
'KeyType': self.attr_type,
}
class HashKey(BaseSchemaField):
"""
    A field representing a hash key.
Example::
>>> from boto.dynamodb2.types import NUMBER
>>> HashKey('username')
>>> HashKey('date_joined', data_type=NUMBER)
"""
attr_type = 'HASH'
class RangeKey(BaseSchemaField):
"""
    A field representing a range key.
    Example::
        >>> from boto.dynamodb2.types import NUMBER
        >>> RangeKey('username')
        >>> RangeKey('date_joined', data_type=NUMBER)
"""
attr_type = 'RANGE'
class BaseIndexField(object):
"""
An abstract class for defining schema indexes.
Contains most of the core functionality for the index. Subclasses must
define a ``projection_type`` to pass to DynamoDB.
"""
def __init__(self, name, parts):
self.name = name
self.parts = parts
def definition(self):
"""
Returns the attribute definition structure DynamoDB expects.
Example::
>>> index.definition()
{
'AttributeName': 'username',
'AttributeType': 'S',
}
"""
definition = []
for part in self.parts:
definition.append({
'AttributeName': part.name,
'AttributeType': part.data_type,
})
return definition
def schema(self):
"""
Returns the schema structure DynamoDB expects.
Example::
>>> index.schema()
{
'IndexName': 'LastNameIndex',
'KeySchema': [
{
'AttributeName': 'username',
'KeyType': 'HASH',
},
],
'Projection': {
'ProjectionType': 'KEYS_ONLY',
}
}
"""
key_schema = []
for part in self.parts:
key_schema.append(part.schema())
return {
'IndexName': self.name,
'KeySchema': key_schema,
'Projection': {
'ProjectionType': self.projection_type,
}
}
class AllIndex(BaseIndexField):
"""
An index signifying all fields should be in the index.
Example::
>>> AllIndex('MostRecentlyJoined', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ])
"""
projection_type = 'ALL'
class KeysOnlyIndex(BaseIndexField):
"""
An index signifying only key fields should be in the index.
Example::
>>> KeysOnlyIndex('MostRecentlyJoined', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ])
"""
projection_type = 'KEYS_ONLY'
class IncludeIndex(BaseIndexField):
"""
An index signifying only certain fields should be in the index.
Example::
>>> IncludeIndex('GenderIndex', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ], includes=['gender'])
"""
projection_type = 'INCLUDE'
def __init__(self, *args, **kwargs):
self.includes_fields = kwargs.pop('includes', [])
super(IncludeIndex, self).__init__(*args, **kwargs)
def schema(self):
schema_data = super(IncludeIndex, self).schema()
schema_data['Projection']['NonKeyAttributes'] = self.includes_fields
return schema_data
class GlobalBaseIndexField(BaseIndexField):
"""
An abstract class for defining global indexes.
Contains most of the core functionality for the index. Subclasses must
define a ``projection_type`` to pass to DynamoDB.
"""
throughput = {
'read': 5,
'write': 5,
}
def __init__(self, *args, **kwargs):
throughput = kwargs.pop('throughput', None)
if throughput is not None:
self.throughput = throughput
super(GlobalBaseIndexField, self).__init__(*args, **kwargs)
def schema(self):
"""
Returns the schema structure DynamoDB expects.
Example::
>>> index.schema()
{
'IndexName': 'LastNameIndex',
'KeySchema': [
{
'AttributeName': 'username',
'KeyType': 'HASH',
},
],
'Projection': {
'ProjectionType': 'KEYS_ONLY',
},
'ProvisionedThroughput': {
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
}
"""
schema_data = super(GlobalBaseIndexField, self).schema()
schema_data['ProvisionedThroughput'] = {
'ReadCapacityUnits': int(self.throughput['read']),
'WriteCapacityUnits': int(self.throughput['write']),
}
return schema_data
class GlobalAllIndex(GlobalBaseIndexField):
"""
An index signifying all fields should be in the index.
Example::
>>> GlobalAllIndex('MostRecentlyJoined', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ],
... throughput={
... 'read': 2,
... 'write': 1,
... })
"""
projection_type = 'ALL'
class GlobalKeysOnlyIndex(GlobalBaseIndexField):
"""
An index signifying only key fields should be in the index.
Example::
>>> GlobalKeysOnlyIndex('MostRecentlyJoined', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ],
... throughput={
... 'read': 2,
... 'write': 1,
... })
"""
projection_type = 'KEYS_ONLY'
class GlobalIncludeIndex(GlobalBaseIndexField, IncludeIndex):
"""
An index signifying only certain fields should be in the index.
Example::
>>> GlobalIncludeIndex('GenderIndex', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ],
... includes=['gender'],
... throughput={
... 'read': 2,
... 'write': 1,
... })
"""
projection_type = 'INCLUDE'
def __init__(self, *args, **kwargs):
throughput = kwargs.pop('throughput', None)
IncludeIndex.__init__(self, *args, **kwargs)
if throughput:
kwargs['throughput'] = throughput
GlobalBaseIndexField.__init__(self, *args, **kwargs)
def schema(self):
# Pick up the includes.
schema_data = IncludeIndex.schema(self)
# Also the throughput.
schema_data.update(GlobalBaseIndexField.schema(self))
return schema_data
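# A minimal sketch of how the field and index classes combine; the index and
# attribute names are illustrative. definition() and schema() produce the
# structures handed to DynamoDB when a table is created.
def _global_index_sketch():
    recent = GlobalAllIndex('MostRecentlyJoined', parts=[
        HashKey('account_type'),
        RangeKey('date_joined'),
    ], throughput={'read': 2, 'write': 1})
    return recent.definition(), recent.schema()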
|
LunarLanding/moviepy
|
refs/heads/master
|
moviepy/audio/io/__init__.py
|
19
|
"""
Classes and methods to read, write and preview audio files.
"""
|
cmichal/python-social-auth
|
refs/heads/master
|
social/tests/backends/__init__.py
|
12133432
| |
ESS-LLP/erpnext-healthcare
|
refs/heads/master
|
erpnext/stock/report/__init__.py
|
12133432
| |
mattcongy/itshop
|
refs/heads/master
|
docker-images/taigav2/taiga-back/taiga/base/formats/es/__init__.py
|
12133432
| |
BT-fgarbely/e-commerce
|
refs/heads/8.0
|
__unported__/sale_payment_method_transaction_id/sale.py
|
18
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
class sale_order(orm.Model):
_inherit = 'sale.order'
def _prepare_payment_move_line(self, cr, uid, move_name, sale, journal,
period, amount, date, context=None):
debit_line, credit_line = super(sale_order, self).\
_prepare_payment_move_line(cr, uid, move_name, sale, journal,
period, amount, date, context=context)
if sale.transaction_id:
debit_line['transaction_ref'] = sale.transaction_id
credit_line['transaction_ref'] = sale.transaction_id
return debit_line, credit_line
|
ptisserand/ansible
|
refs/heads/devel
|
lib/ansible/modules/windows/win_domain_group.py
|
52
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_domain_group
version_added: '2.4'
short_description: Creates, modifies or removes domain groups
description:
- Creates, modifies or removes groups in Active Directory.
- For local groups, use the M(win_group) module instead.
options:
attributes:
description:
- A dict of custom LDAP attributes to set on the group.
- This can be used to set custom attributes that are not exposed as module
parameters, e.g. C(mail).
- See the examples on how to format this parameter.
category:
description:
- The category of the group, this is the value to assign to the LDAP
C(groupType) attribute.
- If a new group is created then C(security) will be used by default.
choices: [ distribution, security ]
description:
description:
- The value to be assigned to the LDAP C(description) attribute.
display_name:
description:
- The value to assign to the LDAP C(displayName) attribute.
domain_username:
description:
- The username to use when interacting with AD.
- If this is not set then the user Ansible used to log in with will be
used instead.
domain_password:
description:
- The password for C(username).
domain_server:
description:
- Specifies the Active Directory Domain Services instance to connect to.
- Can be in the form of an FQDN or NetBIOS name.
- If not specified then the value is based on the domain of the computer
running PowerShell.
version_added: '2.5'
ignore_protection:
description:
- Will ignore the C(ProtectedFromAccidentalDeletion) flag when deleting or
moving a group.
    - The module will fail if one of these actions needs to occur and this value
is set to C(no).
type: bool
default: 'no'
managed_by:
description:
- The value to be assigned to the LDAP C(managedBy) attribute.
- This value can be in the forms C(Distinguished Name), C(objectGUID),
C(objectSid) or C(sAMAccountName), see examples for more details.
name:
description:
- The name of the group to create, modify or remove.
- This value can be in the forms C(Distinguished Name), C(objectGUID),
C(objectSid) or C(sAMAccountName), see examples for more details.
required: yes
organizational_unit:
description:
- The full LDAP path to create or move the group to.
- This should be the path to the parent object to create or move the group
to.
- See examples for details of how this path is formed.
aliases: [ ou, path ]
protect:
description:
- Will set the C(ProtectedFromAccidentalDeletion) flag based on this value.
- This flag stops a user from deleting or moving a group to a different
path.
type: bool
scope:
description:
- The scope of the group.
- If C(state=present) and the group doesn't exist then this must be set.
choices: [domainlocal, global, universal]
state:
description:
- If C(state=present) this module will ensure the group is created and is
configured accordingly.
    - If C(state=absent) this module will delete the group if it exists.
choices: [ absent, present ]
default: present
notes:
- This must be run on a host that has the ActiveDirectory powershell module
installed.
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: Ensure the group Cow exists using sAMAccountName
win_domain_group:
name: Cow
scope: global
path: OU=groups,DC=ansible,DC=local
- name: Ensure the group Cow doesn't exist using the Distinguished Name
win_domain_group:
name: CN=Cow,OU=groups,DC=ansible,DC=local
state: absent
- name: Delete group ignoring the protection flag
win_domain_group:
name: Cow
state: absent
ignore_protection: yes
- name: Create group with delete protection enabled and custom attributes
win_domain_group:
name: Ansible Users
scope: domainlocal
category: security
attributes:
mail: helpdesk@ansible.com
wWWHomePage: www.ansible.com
ignore_protection: yes
- name: Change the OU of a group using the SID and ignore the protection flag
win_domain_group:
name: S-1-5-21-2171456218-3732823212-122182344-1189
scope: global
organizational_unit: OU=groups,DC=ansible,DC=local
ignore_protection: yes
- name: Add managed_by user
win_domain_group:
name: Group Name Here
managed_by: Domain Admins
- name: Add group and specify the AD domain services to use for the create
win_domain_group:
name: Test Group
domain_username: user@CORP.ANSIBLE.COM
domain_password: Password01!
domain_server: corp-DC12.corp.ansible.com
scope: domainlocal
'''
RETURN = r'''
attributes:
    description: Custom attributes that were set by the module. This does not
      show all the custom attributes, only the ones that were set by the
      module.
returned: group exists and attributes are set on the module invocation
type: dict
sample:
mail: 'helpdesk@ansible.com'
wWWHomePage: 'www.ansible.com'
canonical_name:
description: The canonical name of the group.
returned: group exists
type: string
sample: ansible.local/groups/Cow
category:
description: The Group type value of the group, i.e. Security or Distribution.
returned: group exists
type: string
sample: Security
description:
description: The Description of the group.
returned: group exists
type: string
sample: Group Description
display_name:
description: The Display name of the group.
returned: group exists
type: string
sample: Users who connect through RDP
distinguished_name:
description: The full Distinguished Name of the group.
returned: group exists
type: string
sample: CN=Cow,OU=groups,DC=ansible,DC=local
group_scope:
description: The Group scope value of the group.
returned: group exists
type: string
sample: Universal
guid:
description: The guid of the group.
returned: group exists
type: string
sample: 512a9adb-3fc0-4a26-9df0-e6ea1740cf45
managed_by:
description: The full Distinguished Name of the AD object that is set on the
managedBy attribute.
returned: group exists
type: string
sample: CN=Domain Admins,CN=Users,DC=ansible,DC=local
name:
description: The name of the group.
returned: group exists
type: string
sample: Cow
protected_from_accidental_deletion:
description: Whether the group is protected from accidental deletion.
returned: group exists
type: bool
sample: True
sid:
description: The Security ID of the group.
returned: group exists
type: string
sample: S-1-5-21-2171456218-3732823212-122182344-1189
'''
|
shenlong3030/asv-django-guestbook
|
refs/heads/master
|
django/views/i18n.py
|
10
|
import os
import gettext as gettext_module
from django import http
from django.conf import settings
from django.utils import importlib
from django.utils.translation import check_for_language, activate, to_locale, get_language
from django.utils.text import javascript_quote
from django.utils.encoding import smart_unicode
from django.utils.formats import get_format_modules
def set_language(request):
"""
Redirect to a given url while setting the chosen language in the
session or cookie. The url and the language code need to be
specified in the request parameters.
Since this view changes how the user will see the rest of the site, it must
only be accessed as a POST request. If called as a GET request, it will
redirect to the page in the request (the 'next' parameter) without changing
any state.
"""
next = request.REQUEST.get('next', None)
if not next:
next = request.META.get('HTTP_REFERER', None)
if not next:
next = '/'
response = http.HttpResponseRedirect(next)
if request.method == 'POST':
lang_code = request.POST.get('language', None)
if lang_code and check_for_language(lang_code):
if hasattr(request, 'session'):
request.session['django_language'] = lang_code
else:
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
return response
def get_formats():
"""
Returns all formats strings required for i18n to work
"""
FORMAT_SETTINGS = (
'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
)
result = {}
for module in [settings] + get_format_modules(reverse=True):
for attr in FORMAT_SETTINGS:
try:
result[attr] = getattr(module, attr)
except AttributeError:
pass
src = []
for k, v in result.items():
if isinstance(v, (basestring, int)):
src.append("formats['%s'] = '%s';\n" % (javascript_quote(k), javascript_quote(smart_unicode(v))))
elif isinstance(v, (tuple, list)):
v = [javascript_quote(smart_unicode(value)) for value in v]
src.append("formats['%s'] = ['%s'];\n" % (javascript_quote(k), "', '".join(v)))
return ''.join(src)
NullSource = """
/* gettext identity library */
function gettext(msgid) { return msgid; }
function ngettext(singular, plural, count) { return (count == 1) ? singular : plural; }
function gettext_noop(msgid) { return msgid; }
function pgettext(context, msgid) { return msgid; }
function npgettext(context, singular, plural, count) { return (count == 1) ? singular : plural; }
"""
LibHead = """
/* gettext library */
var catalog = new Array();
"""
LibFoot = """
function gettext(msgid) {
var value = catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
}
function ngettext(singular, plural, count) {
value = catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[pluralidx(count)];
}
}
function gettext_noop(msgid) { return msgid; }
function pgettext(context, msgid) {
var value = gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
}
function npgettext(context, singular, plural, count) {
var value = ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = ngettext(singular, plural, count);
}
return value;
}
"""
LibFormatHead = """
/* formatting library */
var formats = new Array();
"""
LibFormatFoot = """
function get_format(format_type) {
var value = formats[format_type];
if (typeof(value) == 'undefined') {
    return format_type;
} else {
return value;
}
}
"""
SimplePlural = """
function pluralidx(count) { return (count == 1) ? 0 : 1; }
"""
InterPolate = r"""
function interpolate(fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
}
"""
PluralIdx = r"""
function pluralidx(n) {
var v=%s;
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
}
"""
def null_javascript_catalog(request, domain=None, packages=None):
"""
Returns "identity" versions of the JavaScript i18n functions -- i.e.,
versions that don't actually do anything.
"""
src = [NullSource, InterPolate, LibFormatHead, get_formats(), LibFormatFoot]
return http.HttpResponse(''.join(src), 'text/javascript')
def javascript_catalog(request, domain='djangojs', packages=None):
"""
Returns the selected language catalog as a javascript library.
Receives the list of packages to check for translations in the
packages parameter either from an infodict or as a +-delimited
string from the request. Default is 'django.conf'.
Additionally you can override the gettext domain for this view,
but usually you don't want to do that, as JavaScript messages
go to the djangojs domain. But this might be needed if you
deliver your JavaScript source from Django templates.
"""
if request.GET:
if 'language' in request.GET:
if check_for_language(request.GET['language']):
activate(request.GET['language'])
if packages is None:
packages = ['django.conf']
if isinstance(packages, basestring):
packages = packages.split('+')
packages = [p for p in packages if p == 'django.conf' or p in settings.INSTALLED_APPS]
default_locale = to_locale(settings.LANGUAGE_CODE)
locale = to_locale(get_language())
t = {}
paths = []
en_catalog_missing = False
# first load all english languages files for defaults
for package in packages:
p = importlib.import_module(package)
path = os.path.join(os.path.dirname(p.__file__), 'locale')
paths.append(path)
try:
catalog = gettext_module.translation(domain, path, ['en'])
t.update(catalog._catalog)
except IOError:
# 'en' catalog was missing.
if locale.startswith('en'):
# If 'en' is the selected language this would cause issues
# later on if default_locale is something other than 'en'.
en_catalog_missing = True
# Otherwise it is harmless.
pass
# next load the settings.LANGUAGE_CODE translations if it isn't english
if default_locale != 'en':
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [default_locale])
except IOError:
catalog = None
if catalog is not None:
t.update(catalog._catalog)
# last load the currently selected language, if it isn't identical to the default.
if locale != default_locale:
# If the flag en_catalog_missing has been set, the currently
# selected language is English but it doesn't have a translation
# catalog (presumably due to being the language translated from).
# If that is the case, a wrong language catalog might have been
# loaded in the previous step. It needs to be discarded.
if en_catalog_missing:
t = {}
else:
locale_t = {}
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [locale])
except IOError:
catalog = None
if catalog is not None:
locale_t.update(catalog._catalog)
if locale_t:
t = locale_t
src = [LibHead]
plural = None
if '' in t:
for l in t[''].split('\n'):
if l.startswith('Plural-Forms:'):
plural = l.split(':',1)[1].strip()
if plural is not None:
# this should actually be a compiled function of a typical plural-form:
# Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=',1)[1]
src.append(PluralIdx % plural)
else:
src.append(SimplePlural)
csrc = []
pdict = {}
for k, v in t.items():
if k == '':
continue
if isinstance(k, basestring):
csrc.append("catalog['%s'] = '%s';\n" % (javascript_quote(k), javascript_quote(v)))
elif isinstance(k, tuple):
if k[0] not in pdict:
pdict[k[0]] = k[1]
else:
pdict[k[0]] = max(k[1], pdict[k[0]])
csrc.append("catalog['%s'][%d] = '%s';\n" % (javascript_quote(k[0]), k[1], javascript_quote(v)))
else:
raise TypeError(k)
csrc.sort()
for k, v in pdict.items():
src.append("catalog['%s'] = [%s];\n" % (javascript_quote(k), ','.join(["''"]*(v+1))))
src.extend(csrc)
src.append(LibFoot)
src.append(InterPolate)
src.append(LibFormatHead)
src.append(get_formats())
src.append(LibFormatFoot)
src = ''.join(src)
return http.HttpResponse(src, 'text/javascript')
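# A minimal sketch of exposing javascript_catalog through a project URLconf so
# templates can load it with a <script src="/jsi18n/"></script> tag; the
# package name is illustrative, and this snippet belongs in urls.py rather
# than in this module.
#
#   from django.conf.urls.defaults import patterns, url
#
#   js_info_dict = {'packages': ('myproject.myapp',)}
#
#   urlpatterns = patterns('',
#       url(r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict),
#   )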
|
datenbetrieb/odoo
|
refs/heads/8.0
|
addons/hr_gamification/wizard/__init__.py
|
388
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import grant_badge
|
wolfelee/zkdash
|
refs/heads/master
|
lib/zyqconf/hooks.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (c) 2014,掌阅科技
All rights reserved.
Summary: hooks.py
Author: zhuangshixiong
Created: 2015-09-22
'''
# pylint: disable=invalid-name, missing-docstring
from collections import defaultdict
class Hook(object):
"""
A single hook that can be listened for.
"""
def __init__(self):
self.subscribers = []
def attach(self, task):
"""attach a task to this hook.
"""
self.subscribers.append(task)
def detach(self, task):
"""detach a task from this hook
"""
self.subscribers.remove(task)
def send(self, **kwargs):
"""send msg to tasks and return their results.
"""
return [task(**kwargs) for task in self.subscribers]
_HOOKS = defaultdict(Hook)
def all_hooks():
"""
Return all registered hooks.
"""
return _HOOKS
def get_hook(name):
"""
Return hook with given name, creating it if necessary.
"""
return _HOOKS[name]
def on(name):
"""Return a decorator that attach the wrapped function to the hook with given name.
"""
hook = get_hook(name)
def hook_decorator(func):
hook.attach(func)
return func
return hook_decorator
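# A minimal usage sketch of the hook registry above; the hook name, handler
# and arguments are illustrative only.
def _hooks_sketch():
    @on('node_changed')
    def record_change(path=None, value=None):
        return (path, value)
    hook = get_hook('node_changed')
    results = hook.send(path='/config/db', value='readonly')   # [('/config/db', 'readonly')]
    hook.detach(record_change)
    return results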
|
yangcwGIT/jstorm
|
refs/heads/master
|
jstorm-core/src/main/py/storm/ttypes.py
|
19
|
#
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:utf8strings
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class TopologyInitialStatus:
ACTIVE = 1
INACTIVE = 2
_VALUES_TO_NAMES = {
1: "ACTIVE",
2: "INACTIVE",
}
_NAMES_TO_VALUES = {
"ACTIVE": 1,
"INACTIVE": 2,
}
class JavaObjectArg:
"""
Attributes:
- int_arg
- long_arg
- string_arg
- bool_arg
- binary_arg
- double_arg
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'int_arg', None, None, ), # 1
(2, TType.I64, 'long_arg', None, None, ), # 2
(3, TType.STRING, 'string_arg', None, None, ), # 3
(4, TType.BOOL, 'bool_arg', None, None, ), # 4
(5, TType.STRING, 'binary_arg', None, None, ), # 5
(6, TType.DOUBLE, 'double_arg', None, None, ), # 6
)
def __init__(self, int_arg=None, long_arg=None, string_arg=None, bool_arg=None, binary_arg=None, double_arg=None,):
self.int_arg = int_arg
self.long_arg = long_arg
self.string_arg = string_arg
self.bool_arg = bool_arg
self.binary_arg = binary_arg
self.double_arg = double_arg
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.int_arg = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.long_arg = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.string_arg = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.bool_arg = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.binary_arg = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.DOUBLE:
self.double_arg = iprot.readDouble();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('JavaObjectArg')
if self.int_arg is not None:
oprot.writeFieldBegin('int_arg', TType.I32, 1)
oprot.writeI32(self.int_arg)
oprot.writeFieldEnd()
if self.long_arg is not None:
oprot.writeFieldBegin('long_arg', TType.I64, 2)
oprot.writeI64(self.long_arg)
oprot.writeFieldEnd()
if self.string_arg is not None:
oprot.writeFieldBegin('string_arg', TType.STRING, 3)
oprot.writeString(self.string_arg.encode('utf-8'))
oprot.writeFieldEnd()
if self.bool_arg is not None:
oprot.writeFieldBegin('bool_arg', TType.BOOL, 4)
oprot.writeBool(self.bool_arg)
oprot.writeFieldEnd()
if self.binary_arg is not None:
oprot.writeFieldBegin('binary_arg', TType.STRING, 5)
oprot.writeString(self.binary_arg)
oprot.writeFieldEnd()
if self.double_arg is not None:
oprot.writeFieldBegin('double_arg', TType.DOUBLE, 6)
oprot.writeDouble(self.double_arg)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.int_arg)
value = (value * 31) ^ hash(self.long_arg)
value = (value * 31) ^ hash(self.string_arg)
value = (value * 31) ^ hash(self.bool_arg)
value = (value * 31) ^ hash(self.binary_arg)
value = (value * 31) ^ hash(self.double_arg)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class JavaObject:
"""
Attributes:
- full_class_name
- args_list
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'full_class_name', None, None, ), # 1
(2, TType.LIST, 'args_list', (TType.STRUCT,(JavaObjectArg, JavaObjectArg.thrift_spec)), None, ), # 2
)
def __init__(self, full_class_name=None, args_list=None,):
self.full_class_name = full_class_name
self.args_list = args_list
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.full_class_name = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.args_list = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in xrange(_size0):
_elem5 = JavaObjectArg()
_elem5.read(iprot)
self.args_list.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('JavaObject')
if self.full_class_name is not None:
oprot.writeFieldBegin('full_class_name', TType.STRING, 1)
oprot.writeString(self.full_class_name.encode('utf-8'))
oprot.writeFieldEnd()
if self.args_list is not None:
oprot.writeFieldBegin('args_list', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.args_list))
for iter6 in self.args_list:
iter6.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.full_class_name is None:
raise TProtocol.TProtocolException(message='Required field full_class_name is unset!')
if self.args_list is None:
raise TProtocol.TProtocolException(message='Required field args_list is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.full_class_name)
value = (value * 31) ^ hash(self.args_list)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class NullStruct:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NullStruct')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class GlobalStreamId:
"""
Attributes:
- componentId
- streamId
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'componentId', None, None, ), # 1
(2, TType.STRING, 'streamId', None, None, ), # 2
)
def __init__(self, componentId=None, streamId=None,):
self.componentId = componentId
self.streamId = streamId
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.componentId = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.streamId = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('GlobalStreamId')
if self.componentId is not None:
oprot.writeFieldBegin('componentId', TType.STRING, 1)
oprot.writeString(self.componentId.encode('utf-8'))
oprot.writeFieldEnd()
if self.streamId is not None:
oprot.writeFieldBegin('streamId', TType.STRING, 2)
oprot.writeString(self.streamId.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.componentId is None:
raise TProtocol.TProtocolException(message='Required field componentId is unset!')
if self.streamId is None:
raise TProtocol.TProtocolException(message='Required field streamId is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.componentId)
value = (value * 31) ^ hash(self.streamId)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Grouping:
"""
Attributes:
- fields
- shuffle
- all
- none
- direct
- custom_object
- custom_serialized
- local_or_shuffle
- localFirst
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'fields', (TType.STRING,None), None, ), # 1
(2, TType.STRUCT, 'shuffle', (NullStruct, NullStruct.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'all', (NullStruct, NullStruct.thrift_spec), None, ), # 3
(4, TType.STRUCT, 'none', (NullStruct, NullStruct.thrift_spec), None, ), # 4
(5, TType.STRUCT, 'direct', (NullStruct, NullStruct.thrift_spec), None, ), # 5
(6, TType.STRUCT, 'custom_object', (JavaObject, JavaObject.thrift_spec), None, ), # 6
(7, TType.STRING, 'custom_serialized', None, None, ), # 7
(8, TType.STRUCT, 'local_or_shuffle', (NullStruct, NullStruct.thrift_spec), None, ), # 8
(9, TType.STRUCT, 'localFirst', (NullStruct, NullStruct.thrift_spec), None, ), # 9
)
def __init__(self, fields=None, shuffle=None, all=None, none=None, direct=None, custom_object=None, custom_serialized=None, local_or_shuffle=None, localFirst=None,):
self.fields = fields
self.shuffle = shuffle
self.all = all
self.none = none
self.direct = direct
self.custom_object = custom_object
self.custom_serialized = custom_serialized
self.local_or_shuffle = local_or_shuffle
self.localFirst = localFirst
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.fields = []
(_etype10, _size7) = iprot.readListBegin()
for _i11 in xrange(_size7):
_elem12 = iprot.readString().decode('utf-8')
self.fields.append(_elem12)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.shuffle = NullStruct()
self.shuffle.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.all = NullStruct()
self.all.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.none = NullStruct()
self.none.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.direct = NullStruct()
self.direct.read(iprot)
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.custom_object = JavaObject()
self.custom_object.read(iprot)
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.custom_serialized = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRUCT:
self.local_or_shuffle = NullStruct()
self.local_or_shuffle.read(iprot)
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.STRUCT:
self.localFirst = NullStruct()
self.localFirst.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Grouping')
if self.fields is not None:
oprot.writeFieldBegin('fields', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.fields))
for iter13 in self.fields:
oprot.writeString(iter13.encode('utf-8'))
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.shuffle is not None:
oprot.writeFieldBegin('shuffle', TType.STRUCT, 2)
self.shuffle.write(oprot)
oprot.writeFieldEnd()
if self.all is not None:
oprot.writeFieldBegin('all', TType.STRUCT, 3)
self.all.write(oprot)
oprot.writeFieldEnd()
if self.none is not None:
oprot.writeFieldBegin('none', TType.STRUCT, 4)
self.none.write(oprot)
oprot.writeFieldEnd()
if self.direct is not None:
oprot.writeFieldBegin('direct', TType.STRUCT, 5)
self.direct.write(oprot)
oprot.writeFieldEnd()
if self.custom_object is not None:
oprot.writeFieldBegin('custom_object', TType.STRUCT, 6)
self.custom_object.write(oprot)
oprot.writeFieldEnd()
if self.custom_serialized is not None:
oprot.writeFieldBegin('custom_serialized', TType.STRING, 7)
oprot.writeString(self.custom_serialized)
oprot.writeFieldEnd()
if self.local_or_shuffle is not None:
oprot.writeFieldBegin('local_or_shuffle', TType.STRUCT, 8)
self.local_or_shuffle.write(oprot)
oprot.writeFieldEnd()
if self.localFirst is not None:
oprot.writeFieldBegin('localFirst', TType.STRUCT, 9)
self.localFirst.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.fields)
value = (value * 31) ^ hash(self.shuffle)
value = (value * 31) ^ hash(self.all)
value = (value * 31) ^ hash(self.none)
value = (value * 31) ^ hash(self.direct)
value = (value * 31) ^ hash(self.custom_object)
value = (value * 31) ^ hash(self.custom_serialized)
value = (value * 31) ^ hash(self.local_or_shuffle)
value = (value * 31) ^ hash(self.localFirst)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
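# StreamInfo describes one declared output stream: the tuple field names it emits
# and whether the stream is a direct stream.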
class StreamInfo:
"""
Attributes:
- output_fields
- direct
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'output_fields', (TType.STRING,None), None, ), # 1
(2, TType.BOOL, 'direct', None, None, ), # 2
)
def __init__(self, output_fields=None, direct=None,):
self.output_fields = output_fields
self.direct = direct
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.output_fields = []
(_etype17, _size14) = iprot.readListBegin()
for _i18 in xrange(_size14):
_elem19 = iprot.readString().decode('utf-8')
self.output_fields.append(_elem19)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.direct = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('StreamInfo')
if self.output_fields is not None:
oprot.writeFieldBegin('output_fields', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.output_fields))
for iter20 in self.output_fields:
oprot.writeString(iter20.encode('utf-8'))
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.direct is not None:
oprot.writeFieldBegin('direct', TType.BOOL, 2)
oprot.writeBool(self.direct)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.output_fields is None:
raise TProtocol.TProtocolException(message='Required field output_fields is unset!')
if self.direct is None:
raise TProtocol.TProtocolException(message='Required field direct is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.output_fields)
value = (value * 31) ^ hash(self.direct)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
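# ShellComponent holds the command and script used to launch a non-JVM (multilang)
# spout or bolt; both fields are plain strings.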
class ShellComponent:
"""
Attributes:
- execution_command
- script
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'execution_command', None, None, ), # 1
(2, TType.STRING, 'script', None, None, ), # 2
)
def __init__(self, execution_command=None, script=None,):
self.execution_command = execution_command
self.script = script
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.execution_command = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.script = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ShellComponent')
if self.execution_command is not None:
oprot.writeFieldBegin('execution_command', TType.STRING, 1)
oprot.writeString(self.execution_command.encode('utf-8'))
oprot.writeFieldEnd()
if self.script is not None:
oprot.writeFieldBegin('script', TType.STRING, 2)
oprot.writeString(self.script.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.execution_command)
value = (value * 31) ^ hash(self.script)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
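# ComponentObject carries the actual component implementation in one of three forms:
# a serialized Java object (bytes), a ShellComponent, or a JavaObject constructor
# description. Like Grouping, it behaves as a union, so only one field should be set.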
class ComponentObject:
"""
Attributes:
- serialized_java
- shell
- java_object
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'serialized_java', None, None, ), # 1
(2, TType.STRUCT, 'shell', (ShellComponent, ShellComponent.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'java_object', (JavaObject, JavaObject.thrift_spec), None, ), # 3
)
def __init__(self, serialized_java=None, shell=None, java_object=None,):
self.serialized_java = serialized_java
self.shell = shell
self.java_object = java_object
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.serialized_java = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.shell = ShellComponent()
self.shell.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.java_object = JavaObject()
self.java_object.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ComponentObject')
if self.serialized_java is not None:
oprot.writeFieldBegin('serialized_java', TType.STRING, 1)
oprot.writeString(self.serialized_java)
oprot.writeFieldEnd()
if self.shell is not None:
oprot.writeFieldBegin('shell', TType.STRUCT, 2)
self.shell.write(oprot)
oprot.writeFieldEnd()
if self.java_object is not None:
oprot.writeFieldBegin('java_object', TType.STRUCT, 3)
self.java_object.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.serialized_java)
value = (value * 31) ^ hash(self.shell)
value = (value * 31) ^ hash(self.java_object)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
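# ComponentCommon is the metadata shared by every component: `inputs` maps an upstream
# GlobalStreamId to the Grouping used to consume it, `streams` maps a stream id to its
# StreamInfo, plus an optional parallelism hint and a JSON-encoded component config.
# Illustrative sketch only (the component and field names are invented, not part of
# the generated code):
#   common = ComponentCommon(
#       inputs={GlobalStreamId(componentId='spout-1', streamId='default'):
#               Grouping(fields=['word'])},
#       streams={'default': StreamInfo(output_fields=['word', 'count'], direct=False)},
#       parallelism_hint=4)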
class ComponentCommon:
"""
Attributes:
- inputs
- streams
- parallelism_hint
- json_conf
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'inputs', (TType.STRUCT,(GlobalStreamId, GlobalStreamId.thrift_spec),TType.STRUCT,(Grouping, Grouping.thrift_spec)), None, ), # 1
(2, TType.MAP, 'streams', (TType.STRING,None,TType.STRUCT,(StreamInfo, StreamInfo.thrift_spec)), None, ), # 2
(3, TType.I32, 'parallelism_hint', None, None, ), # 3
(4, TType.STRING, 'json_conf', None, None, ), # 4
)
def __init__(self, inputs=None, streams=None, parallelism_hint=None, json_conf=None,):
self.inputs = inputs
self.streams = streams
self.parallelism_hint = parallelism_hint
self.json_conf = json_conf
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.inputs = {}
(_ktype22, _vtype23, _size21 ) = iprot.readMapBegin()
for _i25 in xrange(_size21):
_key26 = GlobalStreamId()
_key26.read(iprot)
_val27 = Grouping()
_val27.read(iprot)
self.inputs[_key26] = _val27
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.streams = {}
(_ktype29, _vtype30, _size28 ) = iprot.readMapBegin()
for _i32 in xrange(_size28):
_key33 = iprot.readString().decode('utf-8')
_val34 = StreamInfo()
_val34.read(iprot)
self.streams[_key33] = _val34
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.parallelism_hint = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.json_conf = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ComponentCommon')
if self.inputs is not None:
oprot.writeFieldBegin('inputs', TType.MAP, 1)
oprot.writeMapBegin(TType.STRUCT, TType.STRUCT, len(self.inputs))
for kiter35,viter36 in self.inputs.items():
kiter35.write(oprot)
viter36.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.streams is not None:
oprot.writeFieldBegin('streams', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.streams))
for kiter37,viter38 in self.streams.items():
oprot.writeString(kiter37.encode('utf-8'))
viter38.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.parallelism_hint is not None:
oprot.writeFieldBegin('parallelism_hint', TType.I32, 3)
oprot.writeI32(self.parallelism_hint)
oprot.writeFieldEnd()
if self.json_conf is not None:
oprot.writeFieldBegin('json_conf', TType.STRING, 4)
oprot.writeString(self.json_conf.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.inputs is None:
raise TProtocol.TProtocolException(message='Required field inputs is unset!')
if self.streams is None:
raise TProtocol.TProtocolException(message='Required field streams is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.inputs)
value = (value * 31) ^ hash(self.streams)
value = (value * 31) ^ hash(self.parallelism_hint)
value = (value * 31) ^ hash(self.json_conf)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
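# SpoutSpec pairs a ComponentObject (the implementation) with its ComponentCommon
# metadata. Bolt and StateSpoutSpec below follow exactly the same two-field shape.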
class SpoutSpec:
"""
Attributes:
- spout_object
- common
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'spout_object', (ComponentObject, ComponentObject.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'common', (ComponentCommon, ComponentCommon.thrift_spec), None, ), # 2
)
def __init__(self, spout_object=None, common=None,):
self.spout_object = spout_object
self.common = common
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.spout_object = ComponentObject()
self.spout_object.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.common = ComponentCommon()
self.common.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('SpoutSpec')
if self.spout_object is not None:
oprot.writeFieldBegin('spout_object', TType.STRUCT, 1)
self.spout_object.write(oprot)
oprot.writeFieldEnd()
if self.common is not None:
oprot.writeFieldBegin('common', TType.STRUCT, 2)
self.common.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.spout_object is None:
raise TProtocol.TProtocolException(message='Required field spout_object is unset!')
if self.common is None:
raise TProtocol.TProtocolException(message='Required field common is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.spout_object)
value = (value * 31) ^ hash(self.common)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Bolt:
"""
Attributes:
- bolt_object
- common
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'bolt_object', (ComponentObject, ComponentObject.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'common', (ComponentCommon, ComponentCommon.thrift_spec), None, ), # 2
)
def __init__(self, bolt_object=None, common=None,):
self.bolt_object = bolt_object
self.common = common
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.bolt_object = ComponentObject()
self.bolt_object.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.common = ComponentCommon()
self.common.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Bolt')
if self.bolt_object is not None:
oprot.writeFieldBegin('bolt_object', TType.STRUCT, 1)
self.bolt_object.write(oprot)
oprot.writeFieldEnd()
if self.common is not None:
oprot.writeFieldBegin('common', TType.STRUCT, 2)
self.common.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.bolt_object is None:
raise TProtocol.TProtocolException(message='Required field bolt_object is unset!')
if self.common is None:
raise TProtocol.TProtocolException(message='Required field common is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.bolt_object)
value = (value * 31) ^ hash(self.common)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class StateSpoutSpec:
"""
Attributes:
- state_spout_object
- common
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'state_spout_object', (ComponentObject, ComponentObject.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'common', (ComponentCommon, ComponentCommon.thrift_spec), None, ), # 2
)
def __init__(self, state_spout_object=None, common=None,):
self.state_spout_object = state_spout_object
self.common = common
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.state_spout_object = ComponentObject()
self.state_spout_object.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.common = ComponentCommon()
self.common.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('StateSpoutSpec')
if self.state_spout_object is not None:
oprot.writeFieldBegin('state_spout_object', TType.STRUCT, 1)
self.state_spout_object.write(oprot)
oprot.writeFieldEnd()
if self.common is not None:
oprot.writeFieldBegin('common', TType.STRUCT, 2)
self.common.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.state_spout_object is None:
raise TProtocol.TProtocolException(message='Required field state_spout_object is unset!')
if self.common is None:
raise TProtocol.TProtocolException(message='Required field common is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.state_spout_object)
value = (value * 31) ^ hash(self.common)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
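# StormTopology is the full topology definition: maps from component id to SpoutSpec,
# Bolt and StateSpoutSpec. Illustrative sketch only (the component ids and the
# spout_spec/bolt variables are invented):
#   topology = StormTopology(spouts={'spout-1': spout_spec},
#                            bolts={'count-bolt': bolt},
#                            state_spouts={})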
class StormTopology:
"""
Attributes:
- spouts
- bolts
- state_spouts
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'spouts', (TType.STRING,None,TType.STRUCT,(SpoutSpec, SpoutSpec.thrift_spec)), None, ), # 1
(2, TType.MAP, 'bolts', (TType.STRING,None,TType.STRUCT,(Bolt, Bolt.thrift_spec)), None, ), # 2
(3, TType.MAP, 'state_spouts', (TType.STRING,None,TType.STRUCT,(StateSpoutSpec, StateSpoutSpec.thrift_spec)), None, ), # 3
)
def __init__(self, spouts=None, bolts=None, state_spouts=None,):
self.spouts = spouts
self.bolts = bolts
self.state_spouts = state_spouts
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.spouts = {}
(_ktype40, _vtype41, _size39 ) = iprot.readMapBegin()
for _i43 in xrange(_size39):
_key44 = iprot.readString().decode('utf-8')
_val45 = SpoutSpec()
_val45.read(iprot)
self.spouts[_key44] = _val45
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.bolts = {}
(_ktype47, _vtype48, _size46 ) = iprot.readMapBegin()
for _i50 in xrange(_size46):
_key51 = iprot.readString().decode('utf-8')
_val52 = Bolt()
_val52.read(iprot)
self.bolts[_key51] = _val52
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.state_spouts = {}
(_ktype54, _vtype55, _size53 ) = iprot.readMapBegin()
for _i57 in xrange(_size53):
_key58 = iprot.readString().decode('utf-8')
_val59 = StateSpoutSpec()
_val59.read(iprot)
self.state_spouts[_key58] = _val59
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('StormTopology')
if self.spouts is not None:
oprot.writeFieldBegin('spouts', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.spouts))
for kiter60,viter61 in self.spouts.items():
oprot.writeString(kiter60.encode('utf-8'))
viter61.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.bolts is not None:
oprot.writeFieldBegin('bolts', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.bolts))
for kiter62,viter63 in self.bolts.items():
oprot.writeString(kiter62.encode('utf-8'))
viter63.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.state_spouts is not None:
oprot.writeFieldBegin('state_spouts', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.state_spouts))
for kiter64,viter65 in self.state_spouts.items():
oprot.writeString(kiter64.encode('utf-8'))
viter65.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.spouts is None:
raise TProtocol.TProtocolException(message='Required field spouts is unset!')
if self.bolts is None:
raise TProtocol.TProtocolException(message='Required field bolts is unset!')
if self.state_spouts is None:
raise TProtocol.TProtocolException(message='Required field state_spouts is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.spouts)
value = (value * 31) ^ hash(self.bolts)
value = (value * 31) ^ hash(self.state_spouts)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
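# TopologySummary is a read-only overview row for one running topology (id, name,
# status, uptime and task/worker counts, plus any error info).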
class TopologySummary:
"""
Attributes:
- id
- name
- status
- uptime_secs
- num_tasks
- num_workers
- error_info
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
(2, TType.STRING, 'name', None, None, ), # 2
(3, TType.STRING, 'status', None, None, ), # 3
(4, TType.I32, 'uptime_secs', None, None, ), # 4
(5, TType.I32, 'num_tasks', None, None, ), # 5
(6, TType.I32, 'num_workers', None, None, ), # 6
(7, TType.STRING, 'error_info', None, None, ), # 7
)
def __init__(self, id=None, name=None, status=None, uptime_secs=None, num_tasks=None, num_workers=None, error_info=None,):
self.id = id
self.name = name
self.status = status
self.uptime_secs = uptime_secs
self.num_tasks = num_tasks
self.num_workers = num_workers
self.error_info = error_info
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.status = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.uptime_secs = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.num_tasks = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.num_workers = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.error_info = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TopologySummary')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id.encode('utf-8'))
oprot.writeFieldEnd()
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 2)
oprot.writeString(self.name.encode('utf-8'))
oprot.writeFieldEnd()
if self.status is not None:
oprot.writeFieldBegin('status', TType.STRING, 3)
oprot.writeString(self.status.encode('utf-8'))
oprot.writeFieldEnd()
if self.uptime_secs is not None:
oprot.writeFieldBegin('uptime_secs', TType.I32, 4)
oprot.writeI32(self.uptime_secs)
oprot.writeFieldEnd()
if self.num_tasks is not None:
oprot.writeFieldBegin('num_tasks', TType.I32, 5)
oprot.writeI32(self.num_tasks)
oprot.writeFieldEnd()
if self.num_workers is not None:
oprot.writeFieldBegin('num_workers', TType.I32, 6)
oprot.writeI32(self.num_workers)
oprot.writeFieldEnd()
if self.error_info is not None:
oprot.writeFieldBegin('error_info', TType.STRING, 7)
oprot.writeString(self.error_info.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.id is None:
raise TProtocol.TProtocolException(message='Required field id is unset!')
if self.name is None:
raise TProtocol.TProtocolException(message='Required field name is unset!')
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
if self.uptime_secs is None:
raise TProtocol.TProtocolException(message='Required field uptime_secs is unset!')
if self.num_tasks is None:
raise TProtocol.TProtocolException(message='Required field num_tasks is unset!')
if self.num_workers is None:
raise TProtocol.TProtocolException(message='Required field num_workers is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.id)
value = (value * 31) ^ hash(self.name)
value = (value * 31) ^ hash(self.status)
value = (value * 31) ^ hash(self.uptime_secs)
value = (value * 31) ^ hash(self.num_tasks)
value = (value * 31) ^ hash(self.num_workers)
value = (value * 31) ^ hash(self.error_info)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
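# SupervisorSummary reports one supervisor host and how many of its worker slots
# are in use.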
class SupervisorSummary:
"""
Attributes:
- host
- supervisor_id
- uptime_secs
- num_workers
- num_used_workers
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'host', None, None, ), # 1
(2, TType.STRING, 'supervisor_id', None, None, ), # 2
(3, TType.I32, 'uptime_secs', None, None, ), # 3
(4, TType.I32, 'num_workers', None, None, ), # 4
(5, TType.I32, 'num_used_workers', None, None, ), # 5
)
def __init__(self, host=None, supervisor_id=None, uptime_secs=None, num_workers=None, num_used_workers=None,):
self.host = host
self.supervisor_id = supervisor_id
self.uptime_secs = uptime_secs
self.num_workers = num_workers
self.num_used_workers = num_used_workers
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.host = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.supervisor_id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.uptime_secs = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.num_workers = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.num_used_workers = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('SupervisorSummary')
if self.host is not None:
oprot.writeFieldBegin('host', TType.STRING, 1)
oprot.writeString(self.host.encode('utf-8'))
oprot.writeFieldEnd()
if self.supervisor_id is not None:
oprot.writeFieldBegin('supervisor_id', TType.STRING, 2)
oprot.writeString(self.supervisor_id.encode('utf-8'))
oprot.writeFieldEnd()
if self.uptime_secs is not None:
oprot.writeFieldBegin('uptime_secs', TType.I32, 3)
oprot.writeI32(self.uptime_secs)
oprot.writeFieldEnd()
if self.num_workers is not None:
oprot.writeFieldBegin('num_workers', TType.I32, 4)
oprot.writeI32(self.num_workers)
oprot.writeFieldEnd()
if self.num_used_workers is not None:
oprot.writeFieldBegin('num_used_workers', TType.I32, 5)
oprot.writeI32(self.num_used_workers)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.host is None:
raise TProtocol.TProtocolException(message='Required field host is unset!')
if self.supervisor_id is None:
raise TProtocol.TProtocolException(message='Required field supervisor_id is unset!')
if self.uptime_secs is None:
raise TProtocol.TProtocolException(message='Required field uptime_secs is unset!')
if self.num_workers is None:
raise TProtocol.TProtocolException(message='Required field num_workers is unset!')
if self.num_used_workers is None:
raise TProtocol.TProtocolException(message='Required field num_used_workers is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.host)
value = (value * 31) ^ hash(self.supervisor_id)
value = (value * 31) ^ hash(self.uptime_secs)
value = (value * 31) ^ hash(self.num_workers)
value = (value * 31) ^ hash(self.num_used_workers)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
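# NimbusStat and NimbusSummary (below) describe the nimbus master and, presumably in
# an HA setup, its standby ("slave") nimbuses. Note that NimbusStat.uptime_secs is
# declared as a string, unlike the integer uptimes used elsewhere in this file.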
class NimbusStat:
"""
Attributes:
- host
- uptime_secs
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'host', None, None, ), # 1
(2, TType.STRING, 'uptime_secs', None, None, ), # 2
)
def __init__(self, host=None, uptime_secs=None,):
self.host = host
self.uptime_secs = uptime_secs
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.host = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.uptime_secs = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NimbusStat')
if self.host is not None:
oprot.writeFieldBegin('host', TType.STRING, 1)
oprot.writeString(self.host.encode('utf-8'))
oprot.writeFieldEnd()
if self.uptime_secs is not None:
oprot.writeFieldBegin('uptime_secs', TType.STRING, 2)
oprot.writeString(self.uptime_secs.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.host is None:
raise TProtocol.TProtocolException(message='Required field host is unset!')
if self.uptime_secs is None:
raise TProtocol.TProtocolException(message='Required field uptime_secs is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.host)
value = (value * 31) ^ hash(self.uptime_secs)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class NimbusSummary:
"""
Attributes:
- nimbus_master
- nimbus_slaves
- supervisor_num
- total_port_num
- used_port_num
- free_port_num
- version
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'nimbus_master', (NimbusStat, NimbusStat.thrift_spec), None, ), # 1
(2, TType.LIST, 'nimbus_slaves', (TType.STRUCT,(NimbusStat, NimbusStat.thrift_spec)), None, ), # 2
(3, TType.I32, 'supervisor_num', None, None, ), # 3
(4, TType.I32, 'total_port_num', None, None, ), # 4
(5, TType.I32, 'used_port_num', None, None, ), # 5
(6, TType.I32, 'free_port_num', None, None, ), # 6
(7, TType.STRING, 'version', None, None, ), # 7
)
def __init__(self, nimbus_master=None, nimbus_slaves=None, supervisor_num=None, total_port_num=None, used_port_num=None, free_port_num=None, version=None,):
self.nimbus_master = nimbus_master
self.nimbus_slaves = nimbus_slaves
self.supervisor_num = supervisor_num
self.total_port_num = total_port_num
self.used_port_num = used_port_num
self.free_port_num = free_port_num
self.version = version
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.nimbus_master = NimbusStat()
self.nimbus_master.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.nimbus_slaves = []
(_etype69, _size66) = iprot.readListBegin()
for _i70 in xrange(_size66):
_elem71 = NimbusStat()
_elem71.read(iprot)
self.nimbus_slaves.append(_elem71)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.supervisor_num = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.total_port_num = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.used_port_num = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.free_port_num = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.version = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NimbusSummary')
if self.nimbus_master is not None:
oprot.writeFieldBegin('nimbus_master', TType.STRUCT, 1)
self.nimbus_master.write(oprot)
oprot.writeFieldEnd()
if self.nimbus_slaves is not None:
oprot.writeFieldBegin('nimbus_slaves', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.nimbus_slaves))
for iter72 in self.nimbus_slaves:
iter72.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.supervisor_num is not None:
oprot.writeFieldBegin('supervisor_num', TType.I32, 3)
oprot.writeI32(self.supervisor_num)
oprot.writeFieldEnd()
if self.total_port_num is not None:
oprot.writeFieldBegin('total_port_num', TType.I32, 4)
oprot.writeI32(self.total_port_num)
oprot.writeFieldEnd()
if self.used_port_num is not None:
oprot.writeFieldBegin('used_port_num', TType.I32, 5)
oprot.writeI32(self.used_port_num)
oprot.writeFieldEnd()
if self.free_port_num is not None:
oprot.writeFieldBegin('free_port_num', TType.I32, 6)
oprot.writeI32(self.free_port_num)
oprot.writeFieldEnd()
if self.version is not None:
oprot.writeFieldBegin('version', TType.STRING, 7)
oprot.writeString(self.version.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.nimbus_master is None:
raise TProtocol.TProtocolException(message='Required field nimbus_master is unset!')
if self.nimbus_slaves is None:
raise TProtocol.TProtocolException(message='Required field nimbus_slaves is unset!')
if self.supervisor_num is None:
raise TProtocol.TProtocolException(message='Required field supervisor_num is unset!')
if self.total_port_num is None:
raise TProtocol.TProtocolException(message='Required field total_port_num is unset!')
if self.used_port_num is None:
raise TProtocol.TProtocolException(message='Required field used_port_num is unset!')
if self.free_port_num is None:
raise TProtocol.TProtocolException(message='Required field free_port_num is unset!')
if self.version is None:
raise TProtocol.TProtocolException(message='Required field version is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.nimbus_master)
value = (value * 31) ^ hash(self.nimbus_slaves)
value = (value * 31) ^ hash(self.supervisor_num)
value = (value * 31) ^ hash(self.total_port_num)
value = (value * 31) ^ hash(self.used_port_num)
value = (value * 31) ^ hash(self.free_port_num)
value = (value * 31) ^ hash(self.version)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
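# ClusterSummary aggregates the whole cluster view: the NimbusSummary plus the lists
# of SupervisorSummary and TopologySummary entries.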
class ClusterSummary:
"""
Attributes:
- nimbus
- supervisors
- topologies
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'nimbus', (NimbusSummary, NimbusSummary.thrift_spec), None, ), # 1
(2, TType.LIST, 'supervisors', (TType.STRUCT,(SupervisorSummary, SupervisorSummary.thrift_spec)), None, ), # 2
(3, TType.LIST, 'topologies', (TType.STRUCT,(TopologySummary, TopologySummary.thrift_spec)), None, ), # 3
)
def __init__(self, nimbus=None, supervisors=None, topologies=None,):
self.nimbus = nimbus
self.supervisors = supervisors
self.topologies = topologies
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.nimbus = NimbusSummary()
self.nimbus.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.supervisors = []
(_etype76, _size73) = iprot.readListBegin()
for _i77 in xrange(_size73):
_elem78 = SupervisorSummary()
_elem78.read(iprot)
self.supervisors.append(_elem78)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.topologies = []
(_etype82, _size79) = iprot.readListBegin()
for _i83 in xrange(_size79):
_elem84 = TopologySummary()
_elem84.read(iprot)
self.topologies.append(_elem84)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ClusterSummary')
if self.nimbus is not None:
oprot.writeFieldBegin('nimbus', TType.STRUCT, 1)
self.nimbus.write(oprot)
oprot.writeFieldEnd()
if self.supervisors is not None:
oprot.writeFieldBegin('supervisors', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.supervisors))
for iter85 in self.supervisors:
iter85.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.topologies is not None:
oprot.writeFieldBegin('topologies', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.topologies))
for iter86 in self.topologies:
iter86.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.nimbus is None:
raise TProtocol.TProtocolException(message='Required field nimbus is unset!')
if self.supervisors is None:
raise TProtocol.TProtocolException(message='Required field supervisors is unset!')
if self.topologies is None:
raise TProtocol.TProtocolException(message='Required field topologies is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.nimbus)
value = (value * 31) ^ hash(self.supervisors)
value = (value * 31) ^ hash(self.topologies)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
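# TaskComponent is a simple (taskId, component name) pair.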
class TaskComponent:
"""
Attributes:
- taskId
- component
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'taskId', None, None, ), # 1
(2, TType.STRING, 'component', None, None, ), # 2
)
def __init__(self, taskId=None, component=None,):
self.taskId = taskId
self.component = component
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.taskId = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.component = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TaskComponent')
if self.taskId is not None:
oprot.writeFieldBegin('taskId', TType.I32, 1)
oprot.writeI32(self.taskId)
oprot.writeFieldEnd()
if self.component is not None:
oprot.writeFieldBegin('component', TType.STRING, 2)
oprot.writeString(self.component.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.taskId is None:
raise TProtocol.TProtocolException(message='Required field taskId is unset!')
if self.component is None:
raise TProtocol.TProtocolException(message='Required field component is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.taskId)
value = (value * 31) ^ hash(self.component)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
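# WorkerSummary describes one worker process: its port, uptime, owning topology and
# the TaskComponent entries running inside it.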
class WorkerSummary:
"""
Attributes:
- port
- uptime
- topology
- tasks
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'port', None, None, ), # 1
(2, TType.I32, 'uptime', None, None, ), # 2
(3, TType.STRING, 'topology', None, None, ), # 3
(4, TType.LIST, 'tasks', (TType.STRUCT,(TaskComponent, TaskComponent.thrift_spec)), None, ), # 4
)
def __init__(self, port=None, uptime=None, topology=None, tasks=None,):
self.port = port
self.uptime = uptime
self.topology = topology
self.tasks = tasks
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.port = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.uptime = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.topology = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.tasks = []
(_etype90, _size87) = iprot.readListBegin()
for _i91 in xrange(_size87):
_elem92 = TaskComponent()
_elem92.read(iprot)
self.tasks.append(_elem92)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('WorkerSummary')
if self.port is not None:
oprot.writeFieldBegin('port', TType.I32, 1)
oprot.writeI32(self.port)
oprot.writeFieldEnd()
if self.uptime is not None:
oprot.writeFieldBegin('uptime', TType.I32, 2)
oprot.writeI32(self.uptime)
oprot.writeFieldEnd()
if self.topology is not None:
oprot.writeFieldBegin('topology', TType.STRING, 3)
oprot.writeString(self.topology.encode('utf-8'))
oprot.writeFieldEnd()
if self.tasks is not None:
oprot.writeFieldBegin('tasks', TType.LIST, 4)
oprot.writeListBegin(TType.STRUCT, len(self.tasks))
for iter93 in self.tasks:
iter93.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.port is None:
raise TProtocol.TProtocolException(message='Required field port is unset!')
if self.uptime is None:
raise TProtocol.TProtocolException(message='Required field uptime is unset!')
if self.topology is None:
raise TProtocol.TProtocolException(message='Required field topology is unset!')
if self.tasks is None:
raise TProtocol.TProtocolException(message='Required field tasks is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.port)
value = (value * 31) ^ hash(self.uptime)
value = (value * 31) ^ hash(self.topology)
value = (value * 31) ^ hash(self.tasks)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
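# MetricWindow holds one metric sampled over several time windows: a map from the
# window key (an i32, presumably the window length in seconds) to the metric value.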
class MetricWindow:
"""
Attributes:
- metricWindow
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'metricWindow', (TType.I32,None,TType.DOUBLE,None), None, ), # 1
)
def __init__(self, metricWindow=None,):
self.metricWindow = metricWindow
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.metricWindow = {}
(_ktype95, _vtype96, _size94 ) = iprot.readMapBegin()
for _i98 in xrange(_size94):
_key99 = iprot.readI32();
_val100 = iprot.readDouble();
self.metricWindow[_key99] = _val100
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('MetricWindow')
if self.metricWindow is not None:
oprot.writeFieldBegin('metricWindow', TType.MAP, 1)
oprot.writeMapBegin(TType.I32, TType.DOUBLE, len(self.metricWindow))
for kiter101,viter102 in self.metricWindow.items():
oprot.writeI32(kiter101)
oprot.writeDouble(viter102)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.metricWindow is None:
raise TProtocol.TProtocolException(message='Required field metricWindow is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.metricWindow)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
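# MetricInfo groups MetricWindow entries by metric name (`baseMetric`) and, for
# `inputMetric`/`outputMetric`, under an additional outer string key; the spec only
# shows that both keys are strings, so reading the outer key as a stream or component
# id is an assumption.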
class MetricInfo:
"""
Attributes:
- baseMetric
- inputMetric
- outputMetric
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'baseMetric', (TType.STRING,None,TType.STRUCT,(MetricWindow, MetricWindow.thrift_spec)), None, ), # 1
(2, TType.MAP, 'inputMetric', (TType.STRING,None,TType.MAP,(TType.STRING,None,TType.STRUCT,(MetricWindow, MetricWindow.thrift_spec))), None, ), # 2
(3, TType.MAP, 'outputMetric', (TType.STRING,None,TType.MAP,(TType.STRING,None,TType.STRUCT,(MetricWindow, MetricWindow.thrift_spec))), None, ), # 3
)
def __init__(self, baseMetric=None, inputMetric=None, outputMetric=None,):
self.baseMetric = baseMetric
self.inputMetric = inputMetric
self.outputMetric = outputMetric
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.baseMetric = {}
(_ktype104, _vtype105, _size103 ) = iprot.readMapBegin()
for _i107 in xrange(_size103):
_key108 = iprot.readString().decode('utf-8')
_val109 = MetricWindow()
_val109.read(iprot)
self.baseMetric[_key108] = _val109
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.inputMetric = {}
(_ktype111, _vtype112, _size110 ) = iprot.readMapBegin()
for _i114 in xrange(_size110):
_key115 = iprot.readString().decode('utf-8')
_val116 = {}
(_ktype118, _vtype119, _size117 ) = iprot.readMapBegin()
for _i121 in xrange(_size117):
_key122 = iprot.readString().decode('utf-8')
_val123 = MetricWindow()
_val123.read(iprot)
_val116[_key122] = _val123
iprot.readMapEnd()
self.inputMetric[_key115] = _val116
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.outputMetric = {}
(_ktype125, _vtype126, _size124 ) = iprot.readMapBegin()
for _i128 in xrange(_size124):
_key129 = iprot.readString().decode('utf-8')
_val130 = {}
(_ktype132, _vtype133, _size131 ) = iprot.readMapBegin()
for _i135 in xrange(_size131):
_key136 = iprot.readString().decode('utf-8')
_val137 = MetricWindow()
_val137.read(iprot)
_val130[_key136] = _val137
iprot.readMapEnd()
self.outputMetric[_key129] = _val130
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('MetricInfo')
if self.baseMetric is not None:
oprot.writeFieldBegin('baseMetric', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.baseMetric))
for kiter138,viter139 in self.baseMetric.items():
oprot.writeString(kiter138.encode('utf-8'))
viter139.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.inputMetric is not None:
oprot.writeFieldBegin('inputMetric', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.inputMetric))
for kiter140,viter141 in self.inputMetric.items():
oprot.writeString(kiter140.encode('utf-8'))
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(viter141))
for kiter142,viter143 in viter141.items():
oprot.writeString(kiter142.encode('utf-8'))
viter143.write(oprot)
oprot.writeMapEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.outputMetric is not None:
oprot.writeFieldBegin('outputMetric', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.outputMetric))
for kiter144,viter145 in self.outputMetric.items():
oprot.writeString(kiter144.encode('utf-8'))
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(viter145))
for kiter146,viter147 in viter145.items():
oprot.writeString(kiter146.encode('utf-8'))
viter147.write(oprot)
oprot.writeMapEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.baseMetric is None:
raise TProtocol.TProtocolException(message='Required field baseMetric is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.baseMetric)
value = (value * 31) ^ hash(self.inputMetric)
value = (value * 31) ^ hash(self.outputMetric)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TopologyMetric:
"""
Attributes:
- topologyMetric
- componentMetric
- workerMetric
- taskMetric
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'topologyMetric', (MetricInfo, MetricInfo.thrift_spec), None, ), # 1
(2, TType.MAP, 'componentMetric', (TType.STRING,None,TType.STRUCT,(MetricInfo, MetricInfo.thrift_spec)), None, ), # 2
(3, TType.MAP, 'workerMetric', (TType.STRING,None,TType.STRUCT,(MetricInfo, MetricInfo.thrift_spec)), None, ), # 3
(4, TType.MAP, 'taskMetric', (TType.I32,None,TType.STRUCT,(MetricInfo, MetricInfo.thrift_spec)), None, ), # 4
)
def __init__(self, topologyMetric=None, componentMetric=None, workerMetric=None, taskMetric=None,):
self.topologyMetric = topologyMetric
self.componentMetric = componentMetric
self.workerMetric = workerMetric
self.taskMetric = taskMetric
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.topologyMetric = MetricInfo()
self.topologyMetric.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.componentMetric = {}
(_ktype149, _vtype150, _size148 ) = iprot.readMapBegin()
for _i152 in xrange(_size148):
_key153 = iprot.readString().decode('utf-8')
_val154 = MetricInfo()
_val154.read(iprot)
self.componentMetric[_key153] = _val154
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.workerMetric = {}
(_ktype156, _vtype157, _size155 ) = iprot.readMapBegin()
for _i159 in xrange(_size155):
_key160 = iprot.readString().decode('utf-8')
_val161 = MetricInfo()
_val161.read(iprot)
self.workerMetric[_key160] = _val161
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.taskMetric = {}
(_ktype163, _vtype164, _size162 ) = iprot.readMapBegin()
for _i166 in xrange(_size162):
_key167 = iprot.readI32();
_val168 = MetricInfo()
_val168.read(iprot)
self.taskMetric[_key167] = _val168
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TopologyMetric')
if self.topologyMetric is not None:
oprot.writeFieldBegin('topologyMetric', TType.STRUCT, 1)
self.topologyMetric.write(oprot)
oprot.writeFieldEnd()
if self.componentMetric is not None:
oprot.writeFieldBegin('componentMetric', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.componentMetric))
for kiter169,viter170 in self.componentMetric.items():
oprot.writeString(kiter169.encode('utf-8'))
viter170.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.workerMetric is not None:
oprot.writeFieldBegin('workerMetric', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.workerMetric))
for kiter171,viter172 in self.workerMetric.items():
oprot.writeString(kiter171.encode('utf-8'))
viter172.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.taskMetric is not None:
oprot.writeFieldBegin('taskMetric', TType.MAP, 4)
oprot.writeMapBegin(TType.I32, TType.STRUCT, len(self.taskMetric))
for kiter173,viter174 in self.taskMetric.items():
oprot.writeI32(kiter173)
viter174.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.topologyMetric is None:
raise TProtocol.TProtocolException(message='Required field topologyMetric is unset!')
if self.componentMetric is None:
raise TProtocol.TProtocolException(message='Required field componentMetric is unset!')
if self.workerMetric is None:
raise TProtocol.TProtocolException(message='Required field workerMetric is unset!')
if self.taskMetric is None:
raise TProtocol.TProtocolException(message='Required field taskMetric is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.topologyMetric)
value = (value * 31) ^ hash(self.componentMetric)
value = (value * 31) ^ hash(self.workerMetric)
value = (value * 31) ^ hash(self.taskMetric)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class SupervisorWorkers:
"""
Attributes:
- supervisor
- workers
- workerMetric
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'supervisor', (SupervisorSummary, SupervisorSummary.thrift_spec), None, ), # 1
(2, TType.LIST, 'workers', (TType.STRUCT,(WorkerSummary, WorkerSummary.thrift_spec)), None, ), # 2
(3, TType.MAP, 'workerMetric', (TType.STRING,None,TType.STRUCT,(MetricInfo, MetricInfo.thrift_spec)), None, ), # 3
)
def __init__(self, supervisor=None, workers=None, workerMetric=None,):
self.supervisor = supervisor
self.workers = workers
self.workerMetric = workerMetric
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.supervisor = SupervisorSummary()
self.supervisor.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.workers = []
(_etype178, _size175) = iprot.readListBegin()
for _i179 in xrange(_size175):
_elem180 = WorkerSummary()
_elem180.read(iprot)
self.workers.append(_elem180)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.workerMetric = {}
(_ktype182, _vtype183, _size181 ) = iprot.readMapBegin()
for _i185 in xrange(_size181):
_key186 = iprot.readString().decode('utf-8')
_val187 = MetricInfo()
_val187.read(iprot)
self.workerMetric[_key186] = _val187
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('SupervisorWorkers')
if self.supervisor is not None:
oprot.writeFieldBegin('supervisor', TType.STRUCT, 1)
self.supervisor.write(oprot)
oprot.writeFieldEnd()
if self.workers is not None:
oprot.writeFieldBegin('workers', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.workers))
for iter188 in self.workers:
iter188.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.workerMetric is not None:
oprot.writeFieldBegin('workerMetric', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.workerMetric))
for kiter189,viter190 in self.workerMetric.items():
oprot.writeString(kiter189.encode('utf-8'))
viter190.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.supervisor is None:
raise TProtocol.TProtocolException(message='Required field supervisor is unset!')
if self.workers is None:
raise TProtocol.TProtocolException(message='Required field workers is unset!')
if self.workerMetric is None:
raise TProtocol.TProtocolException(message='Required field workerMetric is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.supervisor)
value = (value * 31) ^ hash(self.workers)
value = (value * 31) ^ hash(self.workerMetric)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ErrorInfo:
"""
Attributes:
- error
- error_time_secs
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'error', None, None, ), # 1
(2, TType.I32, 'error_time_secs', None, None, ), # 2
)
def __init__(self, error=None, error_time_secs=None,):
self.error = error
self.error_time_secs = error_time_secs
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.error = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.error_time_secs = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ErrorInfo')
if self.error is not None:
oprot.writeFieldBegin('error', TType.STRING, 1)
oprot.writeString(self.error.encode('utf-8'))
oprot.writeFieldEnd()
if self.error_time_secs is not None:
oprot.writeFieldBegin('error_time_secs', TType.I32, 2)
oprot.writeI32(self.error_time_secs)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.error is None:
raise TProtocol.TProtocolException(message='Required field error is unset!')
if self.error_time_secs is None:
raise TProtocol.TProtocolException(message='Required field error_time_secs is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.error)
value = (value * 31) ^ hash(self.error_time_secs)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
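# Illustrative sketch, not produced by the Thrift code generator: ErrorInfo
# declares both fields as required, so validate() raises a TProtocolException
# until both are populated.  The error text and timestamp below are purely
# illustrative assumptions; TProtocol is the module already imported above.
def _example_error_info_validation():
    info = ErrorInfo(error=u'worker died unexpectedly')
    try:
        info.validate()                      # error_time_secs is still unset
    except TProtocol.TProtocolException:
        pass
    info.error_time_secs = 1400000000        # illustrative epoch seconds
    info.validate()                          # now passes
    return info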
class ComponentSummary:
"""
Attributes:
- name
- parallel
- type
- task_ids
- errors
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.I32, 'parallel', None, None, ), # 2
(3, TType.STRING, 'type', None, None, ), # 3
(4, TType.LIST, 'task_ids', (TType.I32,None), None, ), # 4
(5, TType.LIST, 'errors', (TType.STRUCT,(ErrorInfo, ErrorInfo.thrift_spec)), None, ), # 5
)
def __init__(self, name=None, parallel=None, type=None, task_ids=None, errors=None,):
self.name = name
self.parallel = parallel
self.type = type
self.task_ids = task_ids
self.errors = errors
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.parallel = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.type = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.task_ids = []
(_etype194, _size191) = iprot.readListBegin()
for _i195 in xrange(_size191):
_elem196 = iprot.readI32();
self.task_ids.append(_elem196)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.errors = []
(_etype200, _size197) = iprot.readListBegin()
for _i201 in xrange(_size197):
_elem202 = ErrorInfo()
_elem202.read(iprot)
self.errors.append(_elem202)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ComponentSummary')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8'))
oprot.writeFieldEnd()
if self.parallel is not None:
oprot.writeFieldBegin('parallel', TType.I32, 2)
oprot.writeI32(self.parallel)
oprot.writeFieldEnd()
if self.type is not None:
oprot.writeFieldBegin('type', TType.STRING, 3)
oprot.writeString(self.type.encode('utf-8'))
oprot.writeFieldEnd()
if self.task_ids is not None:
oprot.writeFieldBegin('task_ids', TType.LIST, 4)
oprot.writeListBegin(TType.I32, len(self.task_ids))
for iter203 in self.task_ids:
oprot.writeI32(iter203)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.errors is not None:
oprot.writeFieldBegin('errors', TType.LIST, 5)
oprot.writeListBegin(TType.STRUCT, len(self.errors))
for iter204 in self.errors:
iter204.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.name is None:
raise TProtocol.TProtocolException(message='Required field name is unset!')
if self.parallel is None:
raise TProtocol.TProtocolException(message='Required field parallel is unset!')
if self.type is None:
raise TProtocol.TProtocolException(message='Required field type is unset!')
if self.task_ids is None:
raise TProtocol.TProtocolException(message='Required field task_ids is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.name)
value = (value * 31) ^ hash(self.parallel)
value = (value * 31) ^ hash(self.type)
value = (value * 31) ^ hash(self.task_ids)
value = (value * 31) ^ hash(self.errors)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TaskSummary:
"""
Attributes:
- task_id
- uptime
- status
- host
- port
- errors
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'task_id', None, None, ), # 1
(2, TType.I32, 'uptime', None, None, ), # 2
(3, TType.STRING, 'status', None, None, ), # 3
(4, TType.STRING, 'host', None, None, ), # 4
(5, TType.I32, 'port', None, None, ), # 5
(6, TType.LIST, 'errors', (TType.STRUCT,(ErrorInfo, ErrorInfo.thrift_spec)), None, ), # 6
)
def __init__(self, task_id=None, uptime=None, status=None, host=None, port=None, errors=None,):
self.task_id = task_id
self.uptime = uptime
self.status = status
self.host = host
self.port = port
self.errors = errors
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.task_id = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.uptime = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.status = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.host = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.port = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.LIST:
self.errors = []
(_etype208, _size205) = iprot.readListBegin()
for _i209 in xrange(_size205):
_elem210 = ErrorInfo()
_elem210.read(iprot)
self.errors.append(_elem210)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TaskSummary')
if self.task_id is not None:
oprot.writeFieldBegin('task_id', TType.I32, 1)
oprot.writeI32(self.task_id)
oprot.writeFieldEnd()
if self.uptime is not None:
oprot.writeFieldBegin('uptime', TType.I32, 2)
oprot.writeI32(self.uptime)
oprot.writeFieldEnd()
if self.status is not None:
oprot.writeFieldBegin('status', TType.STRING, 3)
oprot.writeString(self.status.encode('utf-8'))
oprot.writeFieldEnd()
if self.host is not None:
oprot.writeFieldBegin('host', TType.STRING, 4)
oprot.writeString(self.host.encode('utf-8'))
oprot.writeFieldEnd()
if self.port is not None:
oprot.writeFieldBegin('port', TType.I32, 5)
oprot.writeI32(self.port)
oprot.writeFieldEnd()
if self.errors is not None:
oprot.writeFieldBegin('errors', TType.LIST, 6)
oprot.writeListBegin(TType.STRUCT, len(self.errors))
for iter211 in self.errors:
iter211.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.task_id is None:
raise TProtocol.TProtocolException(message='Required field task_id is unset!')
if self.uptime is None:
raise TProtocol.TProtocolException(message='Required field uptime is unset!')
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
if self.host is None:
raise TProtocol.TProtocolException(message='Required field host is unset!')
if self.port is None:
raise TProtocol.TProtocolException(message='Required field port is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.task_id)
value = (value * 31) ^ hash(self.uptime)
value = (value * 31) ^ hash(self.status)
value = (value * 31) ^ hash(self.host)
value = (value * 31) ^ hash(self.port)
value = (value * 31) ^ hash(self.errors)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TopologyInfo:
"""
Attributes:
- topology
- components
- tasks
- metrics
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'topology', (TopologySummary, TopologySummary.thrift_spec), None, ), # 1
(2, TType.LIST, 'components', (TType.STRUCT,(ComponentSummary, ComponentSummary.thrift_spec)), None, ), # 2
(3, TType.LIST, 'tasks', (TType.STRUCT,(TaskSummary, TaskSummary.thrift_spec)), None, ), # 3
(4, TType.STRUCT, 'metrics', (TopologyMetric, TopologyMetric.thrift_spec), None, ), # 4
)
def __init__(self, topology=None, components=None, tasks=None, metrics=None,):
self.topology = topology
self.components = components
self.tasks = tasks
self.metrics = metrics
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.topology = TopologySummary()
self.topology.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.components = []
(_etype215, _size212) = iprot.readListBegin()
for _i216 in xrange(_size212):
_elem217 = ComponentSummary()
_elem217.read(iprot)
self.components.append(_elem217)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.tasks = []
(_etype221, _size218) = iprot.readListBegin()
for _i222 in xrange(_size218):
_elem223 = TaskSummary()
_elem223.read(iprot)
self.tasks.append(_elem223)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.metrics = TopologyMetric()
self.metrics.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TopologyInfo')
if self.topology is not None:
oprot.writeFieldBegin('topology', TType.STRUCT, 1)
self.topology.write(oprot)
oprot.writeFieldEnd()
if self.components is not None:
oprot.writeFieldBegin('components', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.components))
for iter224 in self.components:
iter224.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.tasks is not None:
oprot.writeFieldBegin('tasks', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.tasks))
for iter225 in self.tasks:
iter225.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.metrics is not None:
oprot.writeFieldBegin('metrics', TType.STRUCT, 4)
self.metrics.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.topology is None:
raise TProtocol.TProtocolException(message='Required field topology is unset!')
if self.components is None:
raise TProtocol.TProtocolException(message='Required field components is unset!')
if self.tasks is None:
raise TProtocol.TProtocolException(message='Required field tasks is unset!')
if self.metrics is None:
raise TProtocol.TProtocolException(message='Required field metrics is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.topology)
value = (value * 31) ^ hash(self.components)
value = (value * 31) ^ hash(self.tasks)
value = (value * 31) ^ hash(self.metrics)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TopologyAssignException(TException):
"""
Attributes:
- msg
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'msg', None, None, ), # 1
)
def __init__(self, msg=None,):
self.msg = msg
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.msg = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TopologyAssignException')
if self.msg is not None:
oprot.writeFieldBegin('msg', TType.STRING, 1)
oprot.writeString(self.msg.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.msg is None:
raise TProtocol.TProtocolException(message='Required field msg is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.msg)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AlreadyAliveException(TException):
"""
Attributes:
- msg
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'msg', None, None, ), # 1
)
def __init__(self, msg=None,):
self.msg = msg
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.msg = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AlreadyAliveException')
if self.msg is not None:
oprot.writeFieldBegin('msg', TType.STRING, 1)
oprot.writeString(self.msg.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.msg is None:
raise TProtocol.TProtocolException(message='Required field msg is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.msg)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class NotAliveException(TException):
"""
Attributes:
- msg
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'msg', None, None, ), # 1
)
def __init__(self, msg=None,):
self.msg = msg
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.msg = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NotAliveException')
if self.msg is not None:
oprot.writeFieldBegin('msg', TType.STRING, 1)
oprot.writeString(self.msg.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.msg is None:
raise TProtocol.TProtocolException(message='Required field msg is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.msg)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
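# Illustrative sketch, not produced by the Thrift code generator: the exception
# structs in this module extend TException, so they can be raised and caught
# like ordinary Python exceptions, and __str__ delegates to the generated
# __repr__.  The topology name used here is an illustrative assumption.
def _example_handle_not_alive():
    try:
        raise NotAliveException(msg=u'topology "wordcount" is not alive')
    except NotAliveException as e:
        return str(e)    # e.g. NotAliveException(msg=u'topology "wordcount" is not alive')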
class InvalidTopologyException(TException):
"""
Attributes:
- msg
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'msg', None, None, ), # 1
)
def __init__(self, msg=None,):
self.msg = msg
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.msg = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('InvalidTopologyException')
if self.msg is not None:
oprot.writeFieldBegin('msg', TType.STRING, 1)
oprot.writeString(self.msg.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.msg is None:
raise TProtocol.TProtocolException(message='Required field msg is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.msg)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AuthorizationException(TException):
"""
Attributes:
- msg
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'msg', None, None, ), # 1
)
def __init__(self, msg=None,):
self.msg = msg
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.msg = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AuthorizationException')
if self.msg is not None:
oprot.writeFieldBegin('msg', TType.STRING, 1)
oprot.writeString(self.msg.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.msg is None:
raise TProtocol.TProtocolException(message='Required field msg is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.msg)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class KillOptions:
"""
Attributes:
- wait_secs
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'wait_secs', None, None, ), # 1
)
def __init__(self, wait_secs=None,):
self.wait_secs = wait_secs
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.wait_secs = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('KillOptions')
if self.wait_secs is not None:
oprot.writeFieldBegin('wait_secs', TType.I32, 1)
oprot.writeI32(self.wait_secs)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.wait_secs)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
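# Illustrative sketch, not produced by the Thrift code generator: a minimal
# round trip of KillOptions through the plain binary protocol, relying only on
# the TTransport and TBinaryProtocol modules this file already imports.  The
# wait_secs value is an illustrative assumption.
def _example_kill_options_roundtrip():
    options = KillOptions(wait_secs=30)
    buf = TTransport.TMemoryBuffer()
    options.write(TBinaryProtocol.TBinaryProtocol(buf))         # serialize into the buffer
    decoded = KillOptions()
    decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
    assert decoded == options                                    # generated __eq__ compares __dict__
    return decoded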
class RebalanceOptions:
"""
Attributes:
- wait_secs
- reassign
- conf
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'wait_secs', None, None, ), # 1
(2, TType.BOOL, 'reassign', None, None, ), # 2
(3, TType.STRING, 'conf', None, None, ), # 3
)
def __init__(self, wait_secs=None, reassign=None, conf=None,):
self.wait_secs = wait_secs
self.reassign = reassign
self.conf = conf
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.wait_secs = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.reassign = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.conf = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('RebalanceOptions')
if self.wait_secs is not None:
oprot.writeFieldBegin('wait_secs', TType.I32, 1)
oprot.writeI32(self.wait_secs)
oprot.writeFieldEnd()
if self.reassign is not None:
oprot.writeFieldBegin('reassign', TType.BOOL, 2)
oprot.writeBool(self.reassign)
oprot.writeFieldEnd()
if self.conf is not None:
oprot.writeFieldBegin('conf', TType.STRING, 3)
oprot.writeString(self.conf.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.wait_secs)
value = (value * 31) ^ hash(self.reassign)
value = (value * 31) ^ hash(self.conf)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class SubmitOptions:
"""
Attributes:
- initial_status
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'initial_status', None, None, ), # 1
)
def __init__(self, initial_status=None,):
self.initial_status = initial_status
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.initial_status = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('SubmitOptions')
if self.initial_status is not None:
oprot.writeFieldBegin('initial_status', TType.I32, 1)
oprot.writeI32(self.initial_status)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.initial_status is None:
raise TProtocol.TProtocolException(message='Required field initial_status is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.initial_status)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class MonitorOptions:
"""
Attributes:
- isEnable
"""
thrift_spec = (
None, # 0
(1, TType.BOOL, 'isEnable', None, None, ), # 1
)
def __init__(self, isEnable=None,):
self.isEnable = isEnable
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.isEnable = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('MonitorOptions')
if self.isEnable is not None:
oprot.writeFieldBegin('isEnable', TType.BOOL, 1)
oprot.writeBool(self.isEnable)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.isEnable)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Credentials:
"""
Attributes:
- creds
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'creds', (TType.STRING,None,TType.STRING,None), None, ), # 1
)
def __init__(self, creds=None,):
self.creds = creds
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.creds = {}
(_ktype227, _vtype228, _size226 ) = iprot.readMapBegin()
for _i230 in xrange(_size226):
_key231 = iprot.readString().decode('utf-8')
_val232 = iprot.readString().decode('utf-8')
self.creds[_key231] = _val232
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Credentials')
if self.creds is not None:
oprot.writeFieldBegin('creds', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.creds))
for kiter233,viter234 in self.creds.items():
oprot.writeString(kiter233.encode('utf-8'))
oprot.writeString(viter234.encode('utf-8'))
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.creds is None:
raise TProtocol.TProtocolException(message='Required field creds is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.creds)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ThriftSerializedObject:
"""
Attributes:
- name
- bits
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRING, 'bits', None, None, ), # 2
)
def __init__(self, name=None, bits=None,):
self.name = name
self.bits = bits
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.bits = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ThriftSerializedObject')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8'))
oprot.writeFieldEnd()
if self.bits is not None:
oprot.writeFieldBegin('bits', TType.STRING, 2)
oprot.writeString(self.bits)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.name is None:
raise TProtocol.TProtocolException(message='Required field name is unset!')
if self.bits is None:
raise TProtocol.TProtocolException(message='Required field bits is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.name)
value = (value * 31) ^ hash(self.bits)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class LocalStateData:
"""
Attributes:
- serialized_parts
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'serialized_parts', (TType.STRING,None,TType.STRUCT,(ThriftSerializedObject, ThriftSerializedObject.thrift_spec)), None, ), # 1
)
def __init__(self, serialized_parts=None,):
self.serialized_parts = serialized_parts
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.serialized_parts = {}
(_ktype236, _vtype237, _size235 ) = iprot.readMapBegin()
for _i239 in xrange(_size235):
_key240 = iprot.readString().decode('utf-8')
_val241 = ThriftSerializedObject()
_val241.read(iprot)
self.serialized_parts[_key240] = _val241
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('LocalStateData')
if self.serialized_parts is not None:
oprot.writeFieldBegin('serialized_parts', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.serialized_parts))
for kiter242,viter243 in self.serialized_parts.items():
oprot.writeString(kiter242.encode('utf-8'))
viter243.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.serialized_parts is None:
raise TProtocol.TProtocolException(message='Required field serialized_parts is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.serialized_parts)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class NettyMetric:
"""
Attributes:
- connections
- connectionNum
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'connections', (TType.STRING,None,TType.STRUCT,(MetricInfo, MetricInfo.thrift_spec)), None, ), # 1
(2, TType.I32, 'connectionNum', None, None, ), # 2
)
def __init__(self, connections=None, connectionNum=None,):
self.connections = connections
self.connectionNum = connectionNum
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.connections = {}
(_ktype245, _vtype246, _size244 ) = iprot.readMapBegin()
for _i248 in xrange(_size244):
_key249 = iprot.readString().decode('utf-8')
_val250 = MetricInfo()
_val250.read(iprot)
self.connections[_key249] = _val250
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.connectionNum = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NettyMetric')
if self.connections is not None:
oprot.writeFieldBegin('connections', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.connections))
for kiter251,viter252 in self.connections.items():
oprot.writeString(kiter251.encode('utf-8'))
viter252.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.connectionNum is not None:
oprot.writeFieldBegin('connectionNum', TType.I32, 2)
oprot.writeI32(self.connectionNum)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.connections is None:
raise TProtocol.TProtocolException(message='Required field connections is unset!')
if self.connectionNum is None:
raise TProtocol.TProtocolException(message='Required field connectionNum is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.connections)
value = (value * 31) ^ hash(self.connectionNum)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class WorkerUploadMetrics:
"""
Attributes:
- topology_id
- supervisor_id
- port
- workerMetric
- nettyMetric
- taskMetric
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'topology_id', None, None, ), # 1
(2, TType.STRING, 'supervisor_id', None, None, ), # 2
(3, TType.I32, 'port', None, None, ), # 3
(4, TType.STRUCT, 'workerMetric', (MetricInfo, MetricInfo.thrift_spec), None, ), # 4
(5, TType.STRUCT, 'nettyMetric', (NettyMetric, NettyMetric.thrift_spec), None, ), # 5
(6, TType.MAP, 'taskMetric', (TType.I32,None,TType.STRUCT,(MetricInfo, MetricInfo.thrift_spec)), None, ), # 6
)
def __init__(self, topology_id=None, supervisor_id=None, port=None, workerMetric=None, nettyMetric=None, taskMetric=None,):
self.topology_id = topology_id
self.supervisor_id = supervisor_id
self.port = port
self.workerMetric = workerMetric
self.nettyMetric = nettyMetric
self.taskMetric = taskMetric
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.topology_id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.supervisor_id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.port = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.workerMetric = MetricInfo()
self.workerMetric.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.nettyMetric = NettyMetric()
self.nettyMetric.read(iprot)
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.MAP:
self.taskMetric = {}
(_ktype254, _vtype255, _size253 ) = iprot.readMapBegin()
for _i257 in xrange(_size253):
_key258 = iprot.readI32();
_val259 = MetricInfo()
_val259.read(iprot)
self.taskMetric[_key258] = _val259
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('WorkerUploadMetrics')
if self.topology_id is not None:
oprot.writeFieldBegin('topology_id', TType.STRING, 1)
oprot.writeString(self.topology_id.encode('utf-8'))
oprot.writeFieldEnd()
if self.supervisor_id is not None:
oprot.writeFieldBegin('supervisor_id', TType.STRING, 2)
oprot.writeString(self.supervisor_id.encode('utf-8'))
oprot.writeFieldEnd()
if self.port is not None:
oprot.writeFieldBegin('port', TType.I32, 3)
oprot.writeI32(self.port)
oprot.writeFieldEnd()
if self.workerMetric is not None:
oprot.writeFieldBegin('workerMetric', TType.STRUCT, 4)
self.workerMetric.write(oprot)
oprot.writeFieldEnd()
if self.nettyMetric is not None:
oprot.writeFieldBegin('nettyMetric', TType.STRUCT, 5)
self.nettyMetric.write(oprot)
oprot.writeFieldEnd()
if self.taskMetric is not None:
oprot.writeFieldBegin('taskMetric', TType.MAP, 6)
oprot.writeMapBegin(TType.I32, TType.STRUCT, len(self.taskMetric))
for kiter260,viter261 in self.taskMetric.items():
oprot.writeI32(kiter260)
viter261.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.topology_id is None:
raise TProtocol.TProtocolException(message='Required field topology_id is unset!')
if self.supervisor_id is None:
raise TProtocol.TProtocolException(message='Required field supervisor_id is unset!')
if self.port is None:
raise TProtocol.TProtocolException(message='Required field port is unset!')
if self.workerMetric is None:
raise TProtocol.TProtocolException(message='Required field workerMetric is unset!')
if self.nettyMetric is None:
raise TProtocol.TProtocolException(message='Required field nettyMetric is unset!')
if self.taskMetric is None:
raise TProtocol.TProtocolException(message='Required field taskMetric is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.topology_id)
value = (value * 31) ^ hash(self.supervisor_id)
value = (value * 31) ^ hash(self.port)
value = (value * 31) ^ hash(self.workerMetric)
value = (value * 31) ^ hash(self.nettyMetric)
value = (value * 31) ^ hash(self.taskMetric)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class DRPCRequest:
"""
Attributes:
- func_args
- request_id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'func_args', None, None, ), # 1
(2, TType.STRING, 'request_id', None, None, ), # 2
)
def __init__(self, func_args=None, request_id=None,):
self.func_args = func_args
self.request_id = request_id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.func_args = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.request_id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('DRPCRequest')
if self.func_args is not None:
oprot.writeFieldBegin('func_args', TType.STRING, 1)
oprot.writeString(self.func_args.encode('utf-8'))
oprot.writeFieldEnd()
if self.request_id is not None:
oprot.writeFieldBegin('request_id', TType.STRING, 2)
oprot.writeString(self.request_id.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.func_args is None:
raise TProtocol.TProtocolException(message='Required field func_args is unset!')
if self.request_id is None:
raise TProtocol.TProtocolException(message='Required field request_id is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.func_args)
value = (value * 31) ^ hash(self.request_id)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class DRPCExecutionException(TException):
"""
Attributes:
- msg
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'msg', None, None, ), # 1
)
def __init__(self, msg=None,):
self.msg = msg
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.msg = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('DRPCExecutionException')
if self.msg is not None:
oprot.writeFieldBegin('msg', TType.STRING, 1)
oprot.writeString(self.msg.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.msg is None:
raise TProtocol.TProtocolException(message='Required field msg is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.msg)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
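# --- Hedged usage sketch (editor's addition, not emitted by the Thrift compiler) ---
# The structs above are plain value objects: build them with keyword arguments,
# call validate() to enforce required fields, and serialize them through the
# TTransport/TBinaryProtocol classes this generated module already imports.
# The func_args/request_id values below are illustrative only.
if __name__ == '__main__':
    req = DRPCRequest(func_args='word=ipsum', request_id='req-1')
    req.validate()  # raises TProtocol.TProtocolException if a required field is unset
    buf = TTransport.TMemoryBuffer()
    req.write(TBinaryProtocol.TBinaryProtocol(buf))       # serialize into memory
    raw = buf.getvalue()
    decoded = DRPCRequest()
    decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(raw)))
    assert decoded == req                                  # round-trip is lossless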
|
toddpalino/kafka-tools
|
refs/heads/master
|
tests/tools/client/test_configuration.py
|
1
|
import os
import ssl
import unittest
from mock import MagicMock, patch, call
from kafka.tools.configuration import ClientConfiguration, eval_boolean, check_file_access
from kafka.tools.exceptions import ConfigurationError
class ConfigurationTests(unittest.TestCase):
def test_eval_boolean(self):
assert eval_boolean(True)
assert not eval_boolean(False)
assert eval_boolean(1)
assert not eval_boolean(0)
assert eval_boolean('True')
assert not eval_boolean('False')
@patch('kafka.tools.configuration.os.access')
def test_check_file_access(self, mock_access):
mock_access.side_effect = [True, False]
check_file_access('file1')
self.assertRaises(ConfigurationError, check_file_access, 'file2')
mock_access.assert_has_calls([call('file1', os.R_OK), call('file2', os.R_OK)])
def test_create(self):
config = ClientConfiguration()
assert config.ssl_context is None
def test_create_both_zk_and_hosts(self):
self.assertRaises(ConfigurationError, ClientConfiguration, zkconnect='foo', broker_list='bar')
def test_create_invalid_name(self):
self.assertRaises(ConfigurationError, ClientConfiguration, invalidconfig='foo')
def test_client_id(self):
config = ClientConfiguration(client_id="testid")
assert config.client_id == "testid"
self.assertRaises(TypeError, ClientConfiguration, client_id=1)
self.assertRaises(TypeError, ClientConfiguration, client_id=None)
def test_metadata_refresh(self):
config = ClientConfiguration(metadata_refresh=2345)
assert config.metadata_refresh == 2345
self.assertRaises(TypeError, ClientConfiguration, metadata_refresh='foo')
self.assertRaises(TypeError, ClientConfiguration, metadata_refresh=-1)
def test_max_request_size(self):
config = ClientConfiguration(max_request_size=2345)
assert config.max_request_size == 2345
self.assertRaises(TypeError, ClientConfiguration, max_request_size='foo')
self.assertRaises(TypeError, ClientConfiguration, max_request_size=-1)
def test_num_retries(self):
config = ClientConfiguration(num_retries=5)
assert config.num_retries == 5
self.assertRaises(TypeError, ClientConfiguration, num_retries='foo')
self.assertRaises(TypeError, ClientConfiguration, num_retries=-1)
def test_retry_backoff(self):
config = ClientConfiguration(retry_backoff=5.4)
assert config.retry_backoff == 5.4
self.assertRaises(TypeError, ClientConfiguration, retry_backoff='foo')
self.assertRaises(TypeError, ClientConfiguration, retry_backoff=-1)
def test_broker_threads(self):
config = ClientConfiguration(broker_threads=31)
assert config.broker_threads == 31
self.assertRaises(TypeError, ClientConfiguration, broker_threads='foo')
self.assertRaises(TypeError, ClientConfiguration, broker_threads=-1)
def test_broker_list(self):
config = ClientConfiguration(broker_list='broker1.example.com:9091,broker2.example.com:9092')
assert config.broker_list == [('broker1.example.com', 9091), ('broker2.example.com', 9092)]
self.assertRaises(TypeError, ClientConfiguration, broker_list=1)
def test_zkconnect(self):
config = ClientConfiguration(zkconnect='zk.example.com:2181/kafka-cluster')
assert config.zkconnect == 'zk.example.com:2181/kafka-cluster'
self.assertRaises(TypeError, ClientConfiguration, zkconnect=1)
def test_verify_certificates(self):
config = ClientConfiguration(tls_verify_certificates=True)
assert config.tls_verify_certificates
config = ClientConfiguration(tls_verify_certificates=False)
assert not config.tls_verify_certificates
def test_verify_hostnames(self):
config = ClientConfiguration(tls_verify_hostnames=True)
assert config.tls_verify_hostnames
config = ClientConfiguration(tls_verify_hostnames=False)
assert not config.tls_verify_hostnames
@patch('kafka.tools.configuration.check_file_access')
def test_root_certificates(self, mock_access):
mock_access.return_value = True
config = ClientConfiguration(tls_root_certificates='filename')
assert config.tls_root_certificates == 'filename'
mock_access.assert_called_once_with('filename')
@patch('kafka.tools.configuration.check_file_access')
def test_client_certificate(self, mock_access):
mock_access.return_value = True
config = ClientConfiguration(tls_client_certificate='filename')
assert config.tls_client_certificate == 'filename'
mock_access.assert_called_once_with('filename')
@patch('kafka.tools.configuration.check_file_access')
def test_client_keyfile(self, mock_access):
mock_access.return_value = True
config = ClientConfiguration(tls_client_keyfile='filename')
assert config.tls_client_keyfile == 'filename'
mock_access.assert_called_once_with('filename')
def test_client_key_password(self):
def testfunc():
return 'foo'
config = ClientConfiguration(tls_client_key_password_callback=testfunc)
assert config.tls_client_key_password_callback == testfunc
self.assertRaises(TypeError, ClientConfiguration, tls_client_key_password_callback='notcallable')
def test_verify_ssl_configuration(self):
config = ClientConfiguration(tls_verify_certificates=False, tls_verify_hostnames=True)
self.assertRaises(ConfigurationError, config._verify_ssl_configuration)
config.tls_verify_certificates = True
config._verify_ssl_configuration()
def test_enable_tls_default(self):
config = ClientConfiguration(enable_tls=True)
assert isinstance(config.ssl_context, ssl.SSLContext)
assert config.ssl_context.protocol == ssl.PROTOCOL_SSLv23
assert config.ssl_context.verify_mode == ssl.CERT_REQUIRED
assert config.ssl_context.check_hostname is True
@patch('kafka.tools.configuration.ssl.SSLContext')
@patch('kafka.tools.configuration.check_file_access')
def test_enable_tls_custom_certs(self, mock_access, mock_ssl):
def testfunc():
return 'foo'
config = ClientConfiguration(enable_tls=True,
tls_verify_certificates=False,
tls_verify_hostnames=False,
tls_root_certificates='example_root_cert_file',
tls_client_certificate='example_client_cert_file',
tls_client_keyfile='example_client_key_file',
tls_client_key_password_callback=testfunc)
mock_ssl.assert_called_once_with(ssl.PROTOCOL_SSLv23)
assert config.ssl_context.verify_mode == ssl.CERT_NONE
assert config.ssl_context.check_hostname is False
config.ssl_context.load_verify_locations.assert_called_once_with(cafile='example_root_cert_file')
config.ssl_context.load_cert_chain.assert_called_once_with('example_client_cert_file',
keyfile='example_client_key_file',
password=testfunc)
@patch('kafka.tools.configuration.ssl.SSLContext')
@patch('kafka.tools.configuration.check_file_access')
def test_enable_tls_error(self, mock_access, mock_ssl):
def testfunc():
return 'foo'
ssl_instance = MagicMock()
ssl_instance.load_cert_chain.side_effect = ssl.SSLError
mock_ssl.return_value = ssl_instance
self.assertRaises(ConfigurationError, ClientConfiguration, enable_tls=True, tls_verify_certificates=False,
tls_verify_hostnames=False, tls_root_certificates='example_root_cert_file',
tls_client_certificate='example_client_cert_file',
tls_client_keyfile='example_client_key_file', tls_client_key_password_callback=testfunc)
|
radekg/mesos-toolbox
|
refs/heads/master
|
lib/configs/vagrant_config.py
|
2
|
import hashlib, os, sys, time
from lib.utils import Utils
from lib.configs.defaults import Defaults
class VagrantConfigMeta(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(VagrantConfigMeta, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class VagrantConfig(object):
__metaclass__ = VagrantConfigMeta
@staticmethod
def setup(program):
from lib.config import Config
Config.add_argument( "command",
help="Command to execute.",
metavar="COMMAND",
default="",
choices=[ "destroy-f", "halt", "provision",
"resume", "ssh", "status", "suspend", "up",
"check-this-system" ] )
Config.add_argument( "--mesos-build",
dest="mesos_build",
help="Mesos build to use for Vagrant cluster.",
metavar="MESOS_BUILD",
default=Utils.env_with_default("MESOS_BUILD","") )
Config.add_argument( "--marathon-build",
dest="marathon_build",
help="Marathon build to use for Vagrant cluster.",
metavar="MARATHON_BUILD",
default=Utils.env_with_default("MARATHON_BUILD","") )
Config.add_argument( "--mesos-packages-dir",
dest="mesos_packages_dir",
help="Directory in which packaged versions of Mesos are stored.",
metavar="MESOS_PACKAGES_DIR",
default=Utils.env_with_default("MESOS_PACKAGES_DIR", Defaults.mesos_packages_dir() ) )
Config.add_argument( "--marathon-packages-dir",
dest="marathon_packages_dir",
help="Directory in which packaged versions of Mesos are stored.",
metavar="MARATHON_PACKAGES_DIR",
default=Utils.env_with_default("MARATHON_PACKAGES_DIR", Defaults.marathon_packages_dir() ) )
Config.add_argument( "--os",
dest="operating_system",
help="Operating system to build mesos for.",
metavar="OPERATING_SYSTEM",
default=Utils.env_with_default("OPERATING_SYSTEM","") )
Config.add_argument( "--docker-templates",
dest="docker_templates_dir",
help="Docker templates base directory.",
metavar="DOCKER_TEMPLATES_DIR",
default=Utils.env_with_default("DOCKER_TEMPLATES_DIR", "{}/docker/mesos".format(
os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
)))
Config.add_argument( "--machine",
dest="machine",
help="Optional machine for the Vagrant command.",
metavar="MACHINE",
default=Utils.env_with_default("MACHINE","") )
Config.add_argument( "--deployment-name",
dest="deployment_name",
help="Deployment name.",
metavar="DEPLOYMENT_NAME",
default=Utils.env_with_default("DEPLOYMENT_NAME","vagrant") )
Config.add_argument( "--consensus-ips",
dest="consensus_ips",
help="Consensus servers IP addresses. Comma delimited list of addresses to give to consensus servers.",
metavar="CONSENSUS_IPS",
default=Utils.env_with_default("CONSENSUS_IPS","192.168.33.99") )
Config.add_argument( "--master-ips",
dest="master_ips",
help="Master IP addresses. Comma delimited list of addresses to give to masters.",
metavar="MASTER_IPS",
default=Utils.env_with_default("MASTER_IPS","192.168.33.100") )
Config.add_argument( "--agent-ips",
dest="agent_ips",
help="Agent IP addresses. Comma delimited list of addresses to give to agents.",
metavar="AGENT_IPSS",
default=Utils.env_with_default("AGENT_IPS","192.168.33.101,192.168.33.102") )
Config.add_argument( "--consensus-memory",
dest="consensus_memory",
help="Consensus machine RAM amount.",
metavar="CONSENSUS_MEMORY",
default=Utils.env_with_default("CONSENSUS_MEMORY","512") )
Config.add_argument( "--master-memory",
dest="master_memory",
help="Master RAM amount.",
metavar="MASTER_MEMORY",
default=Utils.env_with_default("MASTER_MEMORY","1024") )
Config.add_argument( "--agent-memory",
dest="agent_memory",
help="Agent RAM amount.",
metavar="AGENT_MEMORY",
default=Utils.env_with_default("AGENT_MEMORY","2048") )
return Config.ready(program)
@staticmethod
def command():
from lib.config import Config
return Config.args().command
@staticmethod
def mesos_build():
from lib.config import Config
return Config.args().mesos_build
@staticmethod
def mesos_packages_dir():
from lib.config import Config
path = "{}/".format(Config.args().mesos_packages_dir)
Utils.cmd("mkdir -p {}".format(path))
return path
@staticmethod
def marathon_build():
from lib.config import Config
return Config.args().marathon_build
@staticmethod
def marathon_packages_dir():
from lib.config import Config
path = "{}/".format(Config.args().marathon_packages_dir)
Utils.cmd("mkdir -p {}".format(path))
return path
@staticmethod
def operating_system():
from lib.config import Config
return Config.args().operating_system
@staticmethod
def docker_templates_dir():
from lib.config import Config
return Config.args().docker_templates_dir
@staticmethod
def machine():
from lib.config import Config
return Config.args().machine
@staticmethod
def deployment_name():
from lib.config import Config
return Config.args().deployment_name
@staticmethod
def consensus_ips():
from lib.config import Config
return Config.args().consensus_ips
@staticmethod
def master_ips():
from lib.config import Config
return Config.args().master_ips
@staticmethod
def agent_ips():
from lib.config import Config
return Config.args().agent_ips
@staticmethod
def consensus_memory():
from lib.config import Config
return Config.args().consensus_memory
@staticmethod
def master_memory():
from lib.config import Config
return Config.args().master_memory
@staticmethod
def agent_memory():
from lib.config import Config
return Config.args().agent_memory
@staticmethod
def supported_operating_systems():
from lib.config import Config
return Utils.list_supported_operating_systems(Config.args().docker_templates_dir)
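# --- Hedged usage sketch (editor's addition) ---
# VagrantConfig is a process-wide singleton (enforced by VagrantConfigMeta) whose
# static setup() registers the CLI arguments above on lib.config.Config and then
# calls Config.ready(program). A driver script inside this toolbox would use it
# roughly as follows; the "mesos-toolbox-vagrant" program name is illustrative:
#
#   from lib.configs.vagrant_config import VagrantConfig
#   config = VagrantConfig.setup("mesos-toolbox-vagrant")  # parses args / env defaults
#   if VagrantConfig.command() == "up":
#       master_ips = VagrantConfig.master_ips().split(",")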
|
matrix-org/synapse
|
refs/heads/master
|
synapse/util/caches/lrucache.py
|
1
|
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from functools import wraps
from typing import (
Any,
Callable,
Collection,
Generic,
Iterable,
List,
Optional,
Type,
TypeVar,
Union,
cast,
overload,
)
from typing_extensions import Literal
from synapse.config import cache as cache_config
from synapse.util import caches
from synapse.util.caches import CacheMetric, register_cache
from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry
try:
from pympler.asizeof import Asizer
def _get_size_of(val: Any, *, recurse=True) -> int:
"""Get an estimate of the size in bytes of the object.
Args:
val: The object to size.
recurse: If true will include referenced values in the size,
otherwise only sizes the given object.
"""
# Ignore singleton values when calculating memory usage.
if val in ((), None, ""):
return 0
sizer = Asizer()
sizer.exclude_refs((), None, "")
return sizer.asizeof(val, limit=100 if recurse else 0)
except ImportError:
def _get_size_of(val: Any, *, recurse=True) -> int:
return 0
# Function type: the type used for invalidation callbacks
FT = TypeVar("FT", bound=Callable[..., Any])
# Key and Value type for the cache
KT = TypeVar("KT")
VT = TypeVar("VT")
# a general type var, distinct from either KT or VT
T = TypeVar("T")
def enumerate_leaves(node, depth):
if depth == 0:
yield node
else:
for n in node.values():
for m in enumerate_leaves(n, depth - 1):
yield m
class _Node:
__slots__ = ["prev_node", "next_node", "key", "value", "callbacks", "memory"]
def __init__(
self,
prev_node,
next_node,
key,
value,
callbacks: Collection[Callable[[], None]] = (),
):
self.prev_node = prev_node
self.next_node = next_node
self.key = key
self.value = value
# Set of callbacks to run when the node gets deleted. We store as a list
# rather than a set to keep memory usage down (and since we expect few
# entries per node, the performance of checking for duplication in a
# list vs using a set is negligible).
#
# Note that we store this as an optional list to keep the memory
# footprint down. Storing `None` is free as it's a singleton, while empty
# lists are 56 bytes (and empty sets are 216 bytes, if we did the naive
# thing and used sets).
self.callbacks = None # type: Optional[List[Callable[[], None]]]
self.add_callbacks(callbacks)
self.memory = 0
if caches.TRACK_MEMORY_USAGE:
self.memory = (
_get_size_of(key)
+ _get_size_of(value)
+ _get_size_of(self.callbacks, recurse=False)
+ _get_size_of(self, recurse=False)
)
self.memory += _get_size_of(self.memory, recurse=False)
def add_callbacks(self, callbacks: Collection[Callable[[], None]]) -> None:
"""Add to stored list of callbacks, removing duplicates."""
if not callbacks:
return
if not self.callbacks:
self.callbacks = []
for callback in callbacks:
if callback not in self.callbacks:
self.callbacks.append(callback)
def run_and_clear_callbacks(self) -> None:
"""Run all callbacks and clear the stored list of callbacks. Used when
the node is being deleted.
"""
if not self.callbacks:
return
for callback in self.callbacks:
callback()
self.callbacks = None
class LruCache(Generic[KT, VT]):
"""
Least-recently-used cache, supporting prometheus metrics and invalidation callbacks.
If cache_type=TreeCache, all keys must be tuples.
"""
def __init__(
self,
max_size: int,
cache_name: Optional[str] = None,
cache_type: Type[Union[dict, TreeCache]] = dict,
size_callback: Optional[Callable] = None,
metrics_collection_callback: Optional[Callable[[], None]] = None,
apply_cache_factor_from_config: bool = True,
):
"""
Args:
max_size: The maximum amount of entries the cache can hold
cache_name: The name of this cache, for the prometheus metrics. If unset,
no metrics will be reported on this cache.
cache_type (type):
type of underlying cache to be used. Typically one of dict
or TreeCache.
size_callback (func(V) -> int | None):
metrics_collection_callback:
metrics collection callback. This is called early in the metrics
collection process, before any of the metrics registered with the
prometheus Registry are collected, so can be used to update any dynamic
metrics.
Ignored if cache_name is None.
apply_cache_factor_from_config (bool): If true, `max_size` will be
multiplied by a cache factor derived from the homeserver config
"""
cache = cache_type()
self.cache = cache # Used for introspection.
self.apply_cache_factor_from_config = apply_cache_factor_from_config
# Save the original max size, and apply the default size factor.
self._original_max_size = max_size
# We previously didn't apply the cache factor here, and as such some caches were
# not affected by the global cache factor. Add an option here to disable applying
# the cache factor when a cache is created
if apply_cache_factor_from_config:
self.max_size = int(max_size * cache_config.properties.default_factor_size)
else:
self.max_size = int(max_size)
# register_cache might call our "set_cache_factor" callback; there's nothing to
# do yet when we get resized.
self._on_resize = None # type: Optional[Callable[[],None]]
if cache_name is not None:
metrics = register_cache(
"lru_cache",
cache_name,
self,
collect_callback=metrics_collection_callback,
) # type: Optional[CacheMetric]
else:
metrics = None
# this is exposed for access from outside this class
self.metrics = metrics
list_root = _Node(None, None, None, None)
list_root.next_node = list_root
list_root.prev_node = list_root
lock = threading.Lock()
def evict():
while cache_len() > self.max_size:
todelete = list_root.prev_node
evicted_len = delete_node(todelete)
cache.pop(todelete.key, None)
if metrics:
metrics.inc_evictions(evicted_len)
def synchronized(f: FT) -> FT:
@wraps(f)
def inner(*args, **kwargs):
with lock:
return f(*args, **kwargs)
return cast(FT, inner)
cached_cache_len = [0]
if size_callback is not None:
def cache_len():
return cached_cache_len[0]
else:
def cache_len():
return len(cache)
self.len = synchronized(cache_len)
def add_node(key, value, callbacks: Collection[Callable[[], None]] = ()):
prev_node = list_root
next_node = prev_node.next_node
node = _Node(prev_node, next_node, key, value, callbacks)
prev_node.next_node = node
next_node.prev_node = node
cache[key] = node
if size_callback:
cached_cache_len[0] += size_callback(node.value)
if caches.TRACK_MEMORY_USAGE and metrics:
metrics.inc_memory_usage(node.memory)
def move_node_to_front(node):
prev_node = node.prev_node
next_node = node.next_node
prev_node.next_node = next_node
next_node.prev_node = prev_node
prev_node = list_root
next_node = prev_node.next_node
node.prev_node = prev_node
node.next_node = next_node
prev_node.next_node = node
next_node.prev_node = node
def delete_node(node):
prev_node = node.prev_node
next_node = node.next_node
prev_node.next_node = next_node
next_node.prev_node = prev_node
deleted_len = 1
if size_callback:
deleted_len = size_callback(node.value)
cached_cache_len[0] -= deleted_len
node.run_and_clear_callbacks()
if caches.TRACK_MEMORY_USAGE and metrics:
metrics.dec_memory_usage(node.memory)
return deleted_len
@overload
def cache_get(
key: KT,
default: Literal[None] = None,
callbacks: Collection[Callable[[], None]] = ...,
update_metrics: bool = ...,
) -> Optional[VT]:
...
@overload
def cache_get(
key: KT,
default: T,
callbacks: Collection[Callable[[], None]] = ...,
update_metrics: bool = ...,
) -> Union[T, VT]:
...
@synchronized
def cache_get(
key: KT,
default: Optional[T] = None,
callbacks: Collection[Callable[[], None]] = (),
update_metrics: bool = True,
):
node = cache.get(key, None)
if node is not None:
move_node_to_front(node)
node.add_callbacks(callbacks)
if update_metrics and metrics:
metrics.inc_hits()
return node.value
else:
if update_metrics and metrics:
metrics.inc_misses()
return default
@synchronized
def cache_set(key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = ()):
node = cache.get(key, None)
if node is not None:
# We sometimes store large objects, e.g. dicts, which cause
# the inequality check to take a long time. So let's only do
# the check if we have some callbacks to call.
if node.callbacks and value != node.value:
node.run_and_clear_callbacks()
# We don't bother to protect this by value != node.value as
# generally size_callback will be cheap compared with equality
# checks. (For example, taking the size of two dicts is quicker
# than comparing them for equality.)
if size_callback:
cached_cache_len[0] -= size_callback(node.value)
cached_cache_len[0] += size_callback(value)
node.add_callbacks(callbacks)
move_node_to_front(node)
node.value = value
else:
add_node(key, value, set(callbacks))
evict()
@synchronized
def cache_set_default(key: KT, value: VT) -> VT:
node = cache.get(key, None)
if node is not None:
return node.value
else:
add_node(key, value)
evict()
return value
@overload
def cache_pop(key: KT, default: Literal[None] = None) -> Optional[VT]:
...
@overload
def cache_pop(key: KT, default: T) -> Union[T, VT]:
...
@synchronized
def cache_pop(key: KT, default: Optional[T] = None):
node = cache.get(key, None)
if node:
delete_node(node)
cache.pop(node.key, None)
return node.value
else:
return default
@synchronized
def cache_del_multi(key: KT) -> None:
"""Delete an entry, or tree of entries
If the LruCache is backed by a regular dict, then "key" must be of
the right type for this cache
If the LruCache is backed by a TreeCache, then "key" must be a tuple, but
may be of lower cardinality than the TreeCache - in which case the whole
subtree is deleted.
"""
popped = cache.pop(key, None)
if popped is None:
return
# for each deleted node, we now need to remove it from the linked list
# and run its callbacks.
for leaf in iterate_tree_cache_entry(popped):
delete_node(leaf)
@synchronized
def cache_clear() -> None:
list_root.next_node = list_root
list_root.prev_node = list_root
for node in cache.values():
node.run_and_clear_callbacks()
cache.clear()
if size_callback:
cached_cache_len[0] = 0
if caches.TRACK_MEMORY_USAGE and metrics:
metrics.clear_memory_usage()
@synchronized
def cache_contains(key: KT) -> bool:
return key in cache
self.sentinel = object()
# make sure that we clear out any excess entries after we get resized.
self._on_resize = evict
self.get = cache_get
self.set = cache_set
self.setdefault = cache_set_default
self.pop = cache_pop
self.del_multi = cache_del_multi
# `invalidate` is exposed for consistency with DeferredCache, so that it can be
# invalidated by the cache invalidation replication stream.
self.invalidate = cache_del_multi
self.len = synchronized(cache_len)
self.contains = cache_contains
self.clear = cache_clear
def __getitem__(self, key):
result = self.get(key, self.sentinel)
if result is self.sentinel:
raise KeyError()
else:
return result
def __setitem__(self, key, value):
self.set(key, value)
def __delitem__(self, key):
result = self.pop(key, self.sentinel)
if result is self.sentinel:
raise KeyError()
def __len__(self):
return self.len()
def __contains__(self, key):
return self.contains(key)
def set_cache_factor(self, factor: float) -> bool:
"""
Set the cache factor for this individual cache.
This will trigger a resize if it changes, which may require evicting
items from the cache.
Returns:
bool: Whether the cache changed size or not.
"""
if not self.apply_cache_factor_from_config:
return False
new_size = int(self._original_max_size * factor)
if new_size != self.max_size:
self.max_size = new_size
if self._on_resize:
self._on_resize()
return True
return False
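# --- Hedged usage sketch (editor's addition, not part of the synapse module) ---
# LruCache behaves like a bounded dict with optional per-entry invalidation
# callbacks. A minimal, self-contained example, sized in entries and detached
# from the homeserver config so the global cache factor is not applied:
if __name__ == "__main__":
    cache = LruCache(max_size=2, apply_cache_factor_from_config=False)
    cache["a"] = 1
    cache.set("b", 2, callbacks=[lambda: print("'b' invalidated or evicted")])
    cache["c"] = 3                # exceeds max_size, evicting "a" (least recently used)
    assert "a" not in cache and cache.get("b") == 2
    cache.invalidate("b")         # runs the callback registered above
    assert len(cache) == 1        # only "c" remains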
|
dessHub/bc-14-online-store-application
|
refs/heads/master
|
flask/lib/python2.7/site-packages/pymongo/pool.py
|
17
|
# Copyright 2011-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import contextlib
import os
import platform
import socket
import sys
import threading
try:
import ssl
from ssl import SSLError
_HAVE_SNI = getattr(ssl, 'HAS_SNI', False)
except ImportError:
_HAVE_SNI = False
class SSLError(socket.error):
pass
from bson import DEFAULT_CODEC_OPTIONS
from bson.py3compat import imap, itervalues, _unicode
from bson.son import SON
from pymongo import auth, helpers, thread_util, __version__
from pymongo.common import MAX_MESSAGE_SIZE
from pymongo.errors import (AutoReconnect,
ConnectionFailure,
ConfigurationError,
DocumentTooLarge,
NetworkTimeout,
NotMasterError,
OperationFailure)
from pymongo.ismaster import IsMaster
from pymongo.monotonic import time as _time
from pymongo.network import (command,
receive_message,
SocketChecker)
from pymongo.read_concern import DEFAULT_READ_CONCERN
from pymongo.read_preferences import ReadPreference
from pymongo.server_type import SERVER_TYPE
# Always use our backport so we always have support for IP address matching
from pymongo.ssl_match_hostname import match_hostname, CertificateError
# For SNI support. According to RFC6066, section 3, IPv4 and IPv6 literals are
# not permitted for SNI hostname.
try:
from ipaddress import ip_address
def is_ip_address(address):
try:
ip_address(_unicode(address))
return True
except (ValueError, UnicodeError):
return False
except ImportError:
if hasattr(socket, 'inet_pton') and socket.has_ipv6:
# Most *nix, recent Windows
def is_ip_address(address):
try:
# inet_pton rejects IPv4 literals with leading zeros
# (e.g. 192.168.0.01), inet_aton does not, and we
# can connect to them without issue. Use inet_aton.
socket.inet_aton(address)
return True
except socket.error:
try:
socket.inet_pton(socket.AF_INET6, address)
return True
except socket.error:
return False
else:
# No inet_pton
def is_ip_address(address):
try:
socket.inet_aton(address)
return True
except socket.error:
if ':' in address:
# ':' is not a valid character for a hostname. If we get
# here a few things have to be true:
# - We're on a recent version of python 2.7 (2.7.9+).
# 2.6 and older 2.7 versions don't support SNI.
# - We're on Windows XP or some unusual Unix that doesn't
# have inet_pton.
# - The application is using IPv6 literals with TLS, which
# is pretty unusual.
return True
return False
try:
from fcntl import fcntl, F_GETFD, F_SETFD, FD_CLOEXEC
def _set_non_inheritable_non_atomic(fd):
"""Set the close-on-exec flag on the given file descriptor."""
flags = fcntl(fd, F_GETFD)
fcntl(fd, F_SETFD, flags | FD_CLOEXEC)
except ImportError:
# Windows, various platforms we don't claim to support
# (Jython, IronPython, ...), systems that don't provide
# everything we need from fcntl, etc.
def _set_non_inheritable_non_atomic(dummy):
"""Dummy function for platforms that don't provide fcntl."""
pass
_METADATA = SON([
('driver', SON([('name', 'PyMongo'), ('version', __version__)])),
])
if sys.platform.startswith('linux'):
_METADATA['os'] = SON([
('type', platform.system()),
# Distro name and version (e.g. Ubuntu 16.04 xenial)
('name', ' '.join([part for part in
platform.linux_distribution() if part])),
('architecture', platform.machine()),
# Kernel version (e.g. 4.4.0-17-generic).
('version', platform.release())
])
elif sys.platform == 'darwin':
_METADATA['os'] = SON([
('type', platform.system()),
('name', platform.system()),
('architecture', platform.machine()),
# (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin
# kernel version.
('version', platform.mac_ver()[0])
])
elif sys.platform == 'win32':
_METADATA['os'] = SON([
('type', platform.system()),
# "Windows XP", "Windows 7", "Windows 10", etc.
('name', ' '.join((platform.system(), platform.release()))),
('architecture', platform.machine()),
# Windows patch level (e.g. 5.1.2600-SP3)
('version', '-'.join(platform.win32_ver()[1:3]))
])
elif sys.platform.startswith('java'):
_name, _ver, _arch = platform.java_ver()[-1]
_METADATA['os'] = SON([
# Linux, Windows 7, Mac OS X, etc.
('type', _name),
('name', _name),
# x86, x86_64, AMD64, etc.
('architecture', _arch),
# Linux kernel version, OSX version, etc.
('version', _ver)
])
else:
# Get potential alias (e.g. SunOS 5.11 becomes Solaris 2.11)
_aliased = platform.system_alias(
platform.system(), platform.release(), platform.version())
_METADATA['os'] = SON([
('type', platform.system()),
('name', ' '.join([part for part in _aliased[:2] if part])),
('architecture', platform.machine()),
('version', _aliased[2])
])
if platform.python_implementation().startswith('PyPy'):
_METADATA['platform'] = ' '.join(
(platform.python_implementation(),
'.'.join(imap(str, sys.pypy_version_info)),
'(Python %s)' % '.'.join(imap(str, sys.version_info))))
elif sys.platform.startswith('java'):
_METADATA['platform'] = ' '.join(
(platform.python_implementation(),
'.'.join(imap(str, sys.version_info)),
'(%s)' % ' '.join((platform.system(), platform.release()))))
else:
_METADATA['platform'] = ' '.join(
(platform.python_implementation(),
'.'.join(imap(str, sys.version_info))))
# If the first getaddrinfo call of this interpreter's life is on a thread,
# while the main thread holds the import lock, getaddrinfo deadlocks trying
# to import the IDNA codec. Import it here, where presumably we're on the
# main thread, to avoid the deadlock. See PYTHON-607.
u'foo'.encode('idna')
def _raise_connection_failure(address, error):
"""Convert a socket.error to ConnectionFailure and raise it."""
host, port = address
# If connecting to a Unix socket, port will be None.
if port is not None:
msg = '%s:%d: %s' % (host, port, error)
else:
msg = '%s: %s' % (host, error)
if isinstance(error, socket.timeout):
raise NetworkTimeout(msg)
elif isinstance(error, SSLError) and 'timed out' in str(error):
# CPython 2.6, 2.7, PyPy 2.x, and PyPy3 do not distinguish network
# timeouts from other SSLErrors (https://bugs.python.org/issue10272).
# Luckily, we can work around this limitation because the phrase
# 'timed out' appears in all the timeout related SSLErrors raised
# on the above platforms. CPython >= 3.2 and PyPy3.3 correctly raise
# socket.timeout.
raise NetworkTimeout(msg)
else:
raise AutoReconnect(msg)
class PoolOptions(object):
__slots__ = ('__max_pool_size', '__min_pool_size', '__max_idle_time_ms',
'__connect_timeout', '__socket_timeout',
'__wait_queue_timeout', '__wait_queue_multiple',
'__ssl_context', '__ssl_match_hostname', '__socket_keepalive',
'__event_listeners', '__appname', '__metadata')
def __init__(self, max_pool_size=100, min_pool_size=0,
max_idle_time_ms=None, connect_timeout=None,
socket_timeout=None, wait_queue_timeout=None,
wait_queue_multiple=None, ssl_context=None,
ssl_match_hostname=True, socket_keepalive=False,
event_listeners=None, appname=None):
self.__max_pool_size = max_pool_size
self.__min_pool_size = min_pool_size
self.__max_idle_time_ms = max_idle_time_ms
self.__connect_timeout = connect_timeout
self.__socket_timeout = socket_timeout
self.__wait_queue_timeout = wait_queue_timeout
self.__wait_queue_multiple = wait_queue_multiple
self.__ssl_context = ssl_context
self.__ssl_match_hostname = ssl_match_hostname
self.__socket_keepalive = socket_keepalive
self.__event_listeners = event_listeners
self.__appname = appname
self.__metadata = _METADATA.copy()
if appname:
self.__metadata['application'] = {'name': appname}
@property
def max_pool_size(self):
"""The maximum allowable number of concurrent connections to each
connected server. Requests to a server will block if there are
`maxPoolSize` outstanding connections to the requested server.
Defaults to 100. Cannot be 0.
When a server's pool has reached `max_pool_size`, operations for that
server block waiting for a socket to be returned to the pool. If
``waitQueueTimeoutMS`` is set, a blocked operation will raise
:exc:`~pymongo.errors.ConnectionFailure` after a timeout.
By default ``waitQueueTimeoutMS`` is not set.
"""
return self.__max_pool_size
@property
def min_pool_size(self):
"""The minimum required number of concurrent connections that the pool
will maintain to each connected server. Default is 0.
"""
return self.__min_pool_size
@property
def max_idle_time_ms(self):
"""The maximum number of milliseconds that a connection can remain
idle in the pool before being removed and replaced. Defaults to
`None` (no limit).
"""
return self.__max_idle_time_ms
@property
def connect_timeout(self):
"""How long a connection can take to be opened before timing out.
"""
return self.__connect_timeout
@property
def socket_timeout(self):
"""How long a send or receive on a socket can take before timing out.
"""
return self.__socket_timeout
@property
def wait_queue_timeout(self):
"""How long a thread will wait for a socket from the pool if the pool
has no free sockets.
"""
return self.__wait_queue_timeout
@property
def wait_queue_multiple(self):
"""Multiplied by max_pool_size to give the number of threads allowed
to wait for a socket at one time.
"""
return self.__wait_queue_multiple
@property
def ssl_context(self):
"""An SSLContext instance or None.
"""
return self.__ssl_context
@property
def ssl_match_hostname(self):
"""Call ssl.match_hostname if cert_reqs is not ssl.CERT_NONE.
"""
return self.__ssl_match_hostname
@property
def socket_keepalive(self):
"""Whether to send periodic messages to determine if a connection
is closed.
"""
return self.__socket_keepalive
@property
def event_listeners(self):
"""An instance of pymongo.monitoring._EventListeners.
"""
return self.__event_listeners
@property
def appname(self):
"""The application name, for sending with ismaster in server handshake.
"""
return self.__appname
@property
def metadata(self):
"""A dict of metadata about the application, driver, os, and platform.
"""
return self.__metadata.copy()
class SocketInfo(object):
"""Store a socket with some metadata.
:Parameters:
- `sock`: a raw socket object
- `pool`: a Pool instance
- `ismaster`: optional IsMaster instance, response to ismaster on `sock`
- `address`: the server's (host, port)
"""
def __init__(self, sock, pool, ismaster, address):
self.sock = sock
self.address = address
self.authset = set()
self.closed = False
self.last_checkout = _time()
self.is_writable = ismaster.is_writable if ismaster else None
self.max_wire_version = ismaster.max_wire_version if ismaster else None
self.max_bson_size = ismaster.max_bson_size if ismaster else None
self.max_message_size = (
ismaster.max_message_size if ismaster else MAX_MESSAGE_SIZE)
self.max_write_batch_size = (
ismaster.max_write_batch_size if ismaster else None)
self.listeners = pool.opts.event_listeners
if ismaster:
self.is_mongos = ismaster.server_type == SERVER_TYPE.Mongos
else:
self.is_mongos = None
# The pool's pool_id changes with each reset() so we can close sockets
# created before the last reset.
self.pool_id = pool.pool_id
def command(self, dbname, spec, slave_ok=False,
read_preference=ReadPreference.PRIMARY,
codec_options=DEFAULT_CODEC_OPTIONS, check=True,
allowable_errors=None, check_keys=False,
read_concern=DEFAULT_READ_CONCERN,
write_concern=None,
parse_write_concern_error=False,
collation=None):
"""Execute a command or raise ConnectionFailure or OperationFailure.
:Parameters:
- `dbname`: name of the database on which to run the command
- `spec`: a command document as a dict, SON, or mapping object
- `slave_ok`: whether to set the SlaveOkay wire protocol bit
- `read_preference`: a read preference
- `codec_options`: a CodecOptions instance
- `check`: raise OperationFailure if there are errors
- `allowable_errors`: errors to ignore if `check` is True
- `check_keys`: if True, check `spec` for invalid keys
- `read_concern`: The read concern for this command.
- `write_concern`: The write concern for this command.
- `parse_write_concern_error`: Whether to parse the
``writeConcernError`` field in the command response.
- `collation`: The collation for this command.
"""
if self.max_wire_version < 4 and not read_concern.ok_for_legacy:
raise ConfigurationError(
'read concern level of %s is not valid '
'with a max wire version of %d.'
% (read_concern.level, self.max_wire_version))
if not (write_concern is None or write_concern.acknowledged or
collation is None):
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
if self.max_wire_version >= 5 and write_concern:
spec['writeConcern'] = write_concern.document
elif self.max_wire_version < 5 and collation is not None:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use a collation.')
try:
return command(self.sock, dbname, spec, slave_ok,
self.is_mongos, read_preference, codec_options,
check, allowable_errors, self.address,
check_keys, self.listeners, self.max_bson_size,
read_concern,
parse_write_concern_error=parse_write_concern_error,
collation=collation)
except OperationFailure:
raise
# Catch socket.error, KeyboardInterrupt, etc. and close ourselves.
except BaseException as error:
self._raise_connection_failure(error)
def send_message(self, message, max_doc_size):
"""Send a raw BSON message or raise ConnectionFailure.
If a network exception is raised, the socket is closed.
"""
if (self.max_bson_size is not None
and max_doc_size > self.max_bson_size):
raise DocumentTooLarge(
"BSON document too large (%d bytes) - the connected server "
"supports BSON document sizes up to %d bytes." %
(max_doc_size, self.max_bson_size))
try:
self.sock.sendall(message)
except BaseException as error:
self._raise_connection_failure(error)
def receive_message(self, operation, request_id):
"""Receive a raw BSON message or raise ConnectionFailure.
If any exception is raised, the socket is closed.
"""
try:
return receive_message(
self.sock, operation, request_id, self.max_message_size)
except BaseException as error:
self._raise_connection_failure(error)
def legacy_write(self, request_id, msg, max_doc_size, with_last_error):
"""Send OP_INSERT, etc., optionally returning response as a dict.
Can raise ConnectionFailure or OperationFailure.
:Parameters:
- `request_id`: an int.
- `msg`: bytes, an OP_INSERT, OP_UPDATE, or OP_DELETE message,
perhaps with a getlasterror command appended.
- `max_doc_size`: size in bytes of the largest document in `msg`.
- `with_last_error`: True if a getlasterror command is appended.
"""
if not with_last_error and not self.is_writable:
# Write won't succeed, bail as if we'd done a getlasterror.
raise NotMasterError("not master")
self.send_message(msg, max_doc_size)
if with_last_error:
response = self.receive_message(1, request_id)
return helpers._check_gle_response(response)
def write_command(self, request_id, msg):
"""Send "insert" etc. command, returning response as a dict.
Can raise ConnectionFailure or OperationFailure.
:Parameters:
- `request_id`: an int.
- `msg`: bytes, the command message.
"""
self.send_message(msg, 0)
response = helpers._unpack_response(self.receive_message(1, request_id))
assert response['number_returned'] == 1
result = response['data'][0]
# Raises NotMasterError or OperationFailure.
helpers._check_command_response(result)
return result
def check_auth(self, all_credentials):
"""Update this socket's authentication.
Log in or out to bring this socket's credentials up to date with
those provided. Can raise ConnectionFailure or OperationFailure.
:Parameters:
- `all_credentials`: dict, maps auth source to MongoCredential.
"""
if all_credentials or self.authset:
cached = set(itervalues(all_credentials))
authset = self.authset.copy()
# Logout any credentials that no longer exist in the cache.
for credentials in authset - cached:
auth.logout(credentials.source, self)
self.authset.discard(credentials)
for credentials in cached - authset:
auth.authenticate(credentials, self)
self.authset.add(credentials)
def authenticate(self, credentials):
"""Log in to the server and store these credentials in `authset`.
Can raise ConnectionFailure or OperationFailure.
:Parameters:
- `credentials`: A MongoCredential.
"""
auth.authenticate(credentials, self)
self.authset.add(credentials)
def close(self):
self.closed = True
# Avoid exceptions on interpreter shutdown.
try:
self.sock.close()
except:
pass
def _raise_connection_failure(self, error):
# Catch *all* exceptions from socket methods and close the socket. In
# regular Python, socket operations only raise socket.error, even if
# the underlying cause was a Ctrl-C: a signal raised during socket.recv
# is expressed as an EINTR error from poll. See internal_select_ex() in
# socketmodule.c. All error codes from poll become socket.error at
# first. Eventually in PyEval_EvalFrameEx the interpreter checks for
# signals and throws KeyboardInterrupt into the current frame on the
# main thread.
#
# But in Gevent and Eventlet, the polling mechanism (epoll, kqueue,
# ...) is called in Python code, which experiences the signal as a
# KeyboardInterrupt from the start, rather than as an initial
# socket.error, so we catch that, close the socket, and reraise it.
self.close()
if isinstance(error, socket.error):
_raise_connection_failure(self.address, error)
else:
raise error
def __eq__(self, other):
return self.sock == other.sock
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.sock)
def __repr__(self):
return "SocketInfo(%s)%s at %s" % (
repr(self.sock),
self.closed and " CLOSED" or "",
id(self)
)
def _create_connection(address, options):
"""Given (host, port) and PoolOptions, connect and return a socket object.
Can raise socket.error.
This is a modified version of create_connection from CPython >= 2.6.
"""
host, port = address
# Check if dealing with a unix domain socket
if host.endswith('.sock'):
if not hasattr(socket, "AF_UNIX"):
raise ConnectionFailure("UNIX-sockets are not supported "
"on this system")
sock = socket.socket(socket.AF_UNIX)
# SOCK_CLOEXEC not supported for Unix sockets.
_set_non_inheritable_non_atomic(sock.fileno())
try:
sock.connect(host)
return sock
except socket.error:
sock.close()
raise
# Don't try IPv6 if we don't support it. Also skip it if host
# is 'localhost' (::1 is fine). Avoids slow connect issues
# like PYTHON-356.
family = socket.AF_INET
if socket.has_ipv6 and host != 'localhost':
family = socket.AF_UNSPEC
err = None
for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
af, socktype, proto, dummy, sa = res
# SOCK_CLOEXEC was new in CPython 3.2, and only available on a limited
# number of platforms (newer Linux and *BSD). Starting with CPython 3.4
# all file descriptors are created non-inheritable. See PEP 446.
try:
sock = socket.socket(
af, socktype | getattr(socket, 'SOCK_CLOEXEC', 0), proto)
except socket.error:
# Can SOCK_CLOEXEC be defined even if the kernel doesn't support
# it?
sock = socket.socket(af, socktype, proto)
# Fallback when SOCK_CLOEXEC isn't available.
_set_non_inheritable_non_atomic(sock.fileno())
try:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sock.settimeout(options.connect_timeout)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE,
options.socket_keepalive)
sock.connect(sa)
return sock
except socket.error as e:
err = e
sock.close()
if err is not None:
raise err
else:
# This likely means we tried to connect to an IPv6 only
# host with an OS/kernel or Python interpreter that doesn't
# support IPv6. The test case is Jython2.5.1 which doesn't
# support IPv6 at all.
raise socket.error('getaddrinfo failed')
def _configured_socket(address, options):
"""Given (host, port) and PoolOptions, return a configured socket.
Can raise socket.error, ConnectionFailure, or CertificateError.
Sets socket's SSL and timeout options.
"""
sock = _create_connection(address, options)
ssl_context = options.ssl_context
if ssl_context is not None:
host = address[0]
try:
# According to RFC6066, section 3, IPv4 and IPv6 literals are
# not permitted for SNI hostname.
if _HAVE_SNI and not is_ip_address(host):
sock = ssl_context.wrap_socket(sock, server_hostname=host)
else:
sock = ssl_context.wrap_socket(sock)
except IOError as exc:
sock.close()
raise ConnectionFailure("SSL handshake failed: %s" % (str(exc),))
if ssl_context.verify_mode and options.ssl_match_hostname:
try:
match_hostname(sock.getpeercert(), hostname=host)
except CertificateError:
sock.close()
raise
sock.settimeout(options.socket_timeout)
return sock
# Do *not* explicitly inherit from object or Jython won't call __del__
# http://bugs.jython.org/issue1057
class Pool:
def __init__(self, address, options, handshake=True):
"""
:Parameters:
- `address`: a (hostname, port) tuple
- `options`: a PoolOptions instance
- `handshake`: whether to call ismaster for each new SocketInfo
"""
# Check a socket's health with socket_closed() every once in a while.
# Can override for testing: 0 to always check, None to never check.
self._check_interval_seconds = 1
self.sockets = set()
self.lock = threading.Lock()
self.active_sockets = 0
# Keep track of resets, so we notice sockets created before the most
# recent reset and close them.
self.pool_id = 0
self.pid = os.getpid()
self.address = address
self.opts = options
self.handshake = handshake
if (self.opts.wait_queue_multiple is None or
self.opts.max_pool_size is None):
max_waiters = None
else:
max_waiters = (
self.opts.max_pool_size * self.opts.wait_queue_multiple)
self._socket_semaphore = thread_util.create_semaphore(
self.opts.max_pool_size, max_waiters)
self.socket_checker = SocketChecker()
def reset(self):
with self.lock:
self.pool_id += 1
self.pid = os.getpid()
sockets, self.sockets = self.sockets, set()
self.active_sockets = 0
for sock_info in sockets:
sock_info.close()
def remove_stale_sockets(self):
with self.lock:
if self.opts.max_idle_time_ms is not None:
for sock_info in self.sockets.copy():
age = _time() - sock_info.last_checkout
if age > self.opts.max_idle_time_ms:
self.sockets.remove(sock_info)
sock_info.close()
while len(
self.sockets) + self.active_sockets < self.opts.min_pool_size:
sock_info = self.connect()
with self.lock:
self.sockets.add(sock_info)
def connect(self):
"""Connect to Mongo and return a new SocketInfo.
Can raise ConnectionFailure or CertificateError.
Note that the pool does not keep a reference to the socket -- you
must call return_socket() when you're done with it.
"""
sock = None
try:
sock = _configured_socket(self.address, self.opts)
if self.handshake:
cmd = SON([
('ismaster', 1),
('client', self.opts.metadata)
])
ismaster = IsMaster(
command(sock,
'admin',
cmd,
False,
False,
ReadPreference.PRIMARY,
DEFAULT_CODEC_OPTIONS))
else:
ismaster = None
return SocketInfo(sock, self, ismaster, self.address)
except socket.error as error:
if sock is not None:
sock.close()
_raise_connection_failure(self.address, error)
@contextlib.contextmanager
def get_socket(self, all_credentials, checkout=False):
"""Get a socket from the pool. Use with a "with" statement.
Returns a :class:`SocketInfo` object wrapping a connected
:class:`socket.socket`.
This method should always be used in a with-statement::
with pool.get_socket(credentials, checkout) as socket_info:
socket_info.send_message(msg)
data = socket_info.receive_message(op_code, request_id)
The socket is logged in or out as needed to match ``all_credentials``
using the correct authentication mechanism for the server's wire
protocol version.
Can raise ConnectionFailure or OperationFailure.
:Parameters:
- `all_credentials`: dict, maps auth source to MongoCredential.
- `checkout` (optional): keep socket checked out.
"""
# First get a socket, then attempt authentication. Simplifies
# semaphore management in the face of network errors during auth.
sock_info = self._get_socket_no_auth()
try:
sock_info.check_auth(all_credentials)
yield sock_info
except:
# Exception in caller. Decrement semaphore.
self.return_socket(sock_info)
raise
else:
if not checkout:
self.return_socket(sock_info)
def _get_socket_no_auth(self):
"""Get or create a SocketInfo. Can raise ConnectionFailure."""
# We use the pid here to avoid issues with fork / multiprocessing.
# See test.test_client:TestClient.test_fork for an example of
# what could go wrong otherwise
if self.pid != os.getpid():
self.reset()
# Get a free socket or create one.
if not self._socket_semaphore.acquire(
True, self.opts.wait_queue_timeout):
self._raise_wait_queue_timeout()
with self.lock:
self.active_sockets += 1
# We've now acquired the semaphore and must release it on error.
try:
try:
# set.pop() isn't atomic in Jython less than 2.7, see
# http://bugs.jython.org/issue1854
with self.lock:
sock_info, from_pool = self.sockets.pop(), True
except KeyError:
# Can raise ConnectionFailure or CertificateError.
sock_info, from_pool = self.connect(), False
# If socket is idle, open a new one.
if self.opts.max_idle_time_ms is not None:
age = _time() - sock_info.last_checkout
if age > self.opts.max_idle_time_ms:
sock_info.close()
sock_info, from_pool = self.connect(), False
if from_pool:
# Can raise ConnectionFailure.
sock_info = self._check(sock_info)
except:
self._socket_semaphore.release()
with self.lock:
self.active_sockets -= 1
raise
sock_info.last_checkout = _time()
return sock_info
def return_socket(self, sock_info):
"""Return the socket to the pool, or if it's closed discard it."""
if self.pid != os.getpid():
self.reset()
else:
if sock_info.pool_id != self.pool_id:
sock_info.close()
elif not sock_info.closed:
with self.lock:
self.sockets.add(sock_info)
self._socket_semaphore.release()
with self.lock:
self.active_sockets -= 1
def _check(self, sock_info):
"""This side-effecty function checks if this pool has been reset since
the last time this socket was used, or if the socket has been closed by
some external network error, and if so, attempts to create a new socket.
If this connection attempt fails we reset the pool and reraise the
ConnectionFailure.
Checking sockets lets us avoid seeing *some*
:class:`~pymongo.errors.AutoReconnect` exceptions on server
hiccups, etc. We only do this if it's been > 1 second since
the last socket checkout, to keep performance reasonable - we
can't avoid AutoReconnects completely anyway.
"""
error = False
# How long since socket was last checked out.
age = _time() - sock_info.last_checkout
if (self._check_interval_seconds is not None
and (
0 == self._check_interval_seconds
or age > self._check_interval_seconds)):
if self.socket_checker.socket_closed(sock_info.sock):
sock_info.close()
error = True
if not error:
return sock_info
else:
return self.connect()
def _raise_wait_queue_timeout(self):
raise ConnectionFailure(
'Timed out waiting for socket from pool with max_size %r and'
' wait_queue_timeout %r' % (
self.opts.max_pool_size, self.opts.wait_queue_timeout))
def __del__(self):
# Avoid ResourceWarnings in Python 3
for sock_info in self.sockets:
sock_info.close()
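# --- Hedged usage sketch (editor's addition, not part of the pymongo module) ---
# Pool is normally constructed by higher layers of pymongo, but the pieces above
# compose directly: PoolOptions carries the limits and timeouts, Pool owns the
# sockets, and get_socket() is the checkout context manager described in its
# docstring. The host/port below are illustrative and require a reachable server:
#
#   opts = PoolOptions(max_pool_size=10, connect_timeout=5, socket_timeout=5)
#   pool = Pool(('mongodb.example.com', 27017), opts)
#   with pool.get_socket(all_credentials={}) as sock_info:
#       reply = sock_info.command('admin', SON([('ismaster', 1)]))
#   pool.reset()   # e.g. after a network error; closes the pooled sockets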
|
elmerdpadilla/iv
|
refs/heads/8.0
|
addons/website_payment/__init__.py
|
389
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
|
esatterwhite/django-tastypie
|
refs/heads/master
|
tests/content_gfk/tests/fields.py
|
31
|
from __future__ import with_statement
from django.test import TestCase
from tastypie.contrib.contenttypes.fields import GenericForeignKeyField
from tastypie.bundle import Bundle
from content_gfk.models import Note, Quote, Rating, Definition
from content_gfk.api.resources import NoteResource, DefinitionResource, \
QuoteResource, RatingResource
class ContentTypeFieldTestCase(TestCase):
def test_init(self):
# Test that you have to pass a dict, not some other container of resources
with self.assertRaises(ValueError):
GenericForeignKeyField(((Note, NoteResource)), 'nofield')
# Test that you must register at least one resource
with self.assertRaises(ValueError):
GenericForeignKeyField({}, 'nofield')
# Test that the mapping keys must be models, not resources
with self.assertRaises(ValueError):
GenericForeignKeyField({NoteResource: Note}, 'nofield')
def test_get_related_resource(self):
gfk_field = GenericForeignKeyField({
Note: NoteResource,
Quote: QuoteResource
}, 'nofield')
definition_1 = Definition.objects.create(
word='toast',
content="Cook or brown (food, esp. bread or cheese)"
)
# Test that you can not link to a model that does not have a resource
with self.assertRaises(TypeError):
gfk_field.get_related_resource(definition_1)
note_1 = Note.objects.create(
title='All aboard the rest train',
content='Sometimes it is just better to lorem ipsum'
)
self.assertTrue(isinstance(gfk_field.get_related_resource(note_1), NoteResource))
def test_resource_from_uri(self):
note_2 = Note.objects.create(
title='Generic and such',
content='Sometimes it is to lorem ipsum'
)
gfk_field = GenericForeignKeyField({
Note: NoteResource,
Quote: QuoteResource
}, 'nofield')
self.assertEqual(
gfk_field.resource_from_uri(
gfk_field.to_class(),
'/api/v1/notes/%s/' % note_2.pk
).obj,
note_2
)
def test_build_related_resource(self):
gfk_field = GenericForeignKeyField({
Note: NoteResource,
Quote: QuoteResource
}, 'nofield')
quote_1 = Quote.objects.create(
byline='Issac Kelly',
content='To ipsum or not to ipsum, that is the cliche'
)
qr = QuoteResource()
qr.build_bundle(obj=quote_1)
bundle = gfk_field.build_related_resource(
'/api/v1/quotes/%s/' % quote_1.pk
)
# Test that the GFK field builds the same as the QuoteResource
self.assertEqual(bundle.obj, quote_1)
|
ivyl/patchwork
|
refs/heads/master
|
patchwork/views/patch.py
|
2
|
# Patchwork - automated patch tracking system
# Copyright (C) 2008 Jeremy Kerr <jk@ozlabs.org>
#
# This file is part of the Patchwork package.
#
# Patchwork is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Patchwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchwork; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from __future__ import absolute_import
from django.http import HttpResponse, HttpResponseForbidden, Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.utils import six
from patchwork.forms import PatchForm, CreateBundleForm
from patchwork.models import Patch, Project, Bundle, TestResult
from patchwork.views import generic_list, patch_to_mbox
from patchwork.permissions import Can
def patch(request, patch_id):
patch = get_object_or_404(Patch, id=patch_id)
editable = Can(request.user).edit(patch)
messages = []
form = None
createbundleform = None
if editable:
form = PatchForm(instance=patch)
if request.user.is_authenticated():
createbundleform = CreateBundleForm()
if request.method == 'POST':
action = request.POST.get('action', None)
if action:
action = action.lower()
if action == 'createbundle':
bundle = Bundle(owner=request.user, project=patch.project)
createbundleform = CreateBundleForm(instance=bundle,
data=request.POST)
if createbundleform.is_valid():
createbundleform.save()
bundle.append_patch(patch)
bundle.save()
createbundleform = CreateBundleForm()
messages += ['Bundle %s created' % bundle.name]
elif action == 'addtobundle':
bundle = get_object_or_404(
Bundle, id=request.POST.get('bundle_id'))
try:
bundle.append_patch(patch)
bundle.save()
messages += ['Patch added to bundle "%s"' % bundle.name]
except Exception as ex:
                    messages += ["Couldn't add patch '%s' to bundle %s: %s"
                                 % (patch.name, bundle.name, ex)]
# all other actions require edit privs
elif not editable:
return HttpResponseForbidden()
elif action is None:
form = PatchForm(data=request.POST, instance=patch)
if form.is_valid():
form.save()
messages += ['Patch updated']
context = {
'series': patch.series(),
'patch': patch,
'patchform': form,
'createbundleform': createbundleform,
'project': patch.project,
'messages': messages,
'test_results': TestResult.objects
.filter(revision=None, patch=patch)
.order_by('test__name').select_related('test')}
return render(request, 'patchwork/patch.html', context)
def content(request, patch_id):
patch = get_object_or_404(Patch, id=patch_id)
response = HttpResponse(content_type="text/x-patch")
response.write(patch.content)
response['Content-Disposition'] = 'attachment; filename=' + \
patch.filename().replace(';', '').replace('\n', '')
return response
def mbox(request, patch_id):
patch = get_object_or_404(Patch, id=patch_id)
options = {
'patch-link': request.GET.get('link', None),
'request': request,
}
response = HttpResponse(content_type="text/plain")
# NOTE(stephenfin) http://stackoverflow.com/a/28584090/613428
if six.PY3:
response.write(patch_to_mbox(patch, options).as_bytes(True).decode())
else:
response.write(patch_to_mbox(patch, options).as_string(True))
response['Content-Disposition'] = 'attachment; filename=' + \
patch.filename().replace(';', '').replace('\n', '')
return response
def list(request, project_id):
project = get_object_or_404(Project, linkname=project_id)
context = generic_list(request, project, 'patch_list',
view_args={'project_id': project.linkname})
return render(request, 'patchwork/list.html', context)
def _get_patch_or_404(request, msgid):
patch = Patch.objects.filter(msgid='<' + msgid + '>').first()
if patch is None:
raise Http404("Patch not found")
return patch
def msgid(request, msgid):
    patch = _get_patch_or_404(request, msgid)
return redirect(patch)
|
lilydjwg/you-get
|
refs/heads/develop
|
src/you_get/cli_wrapper/player/dragonplayer.py
|
12133432
| |
a10networks/a10sdk-python
|
refs/heads/master
|
a10sdk/core/ip/__init__.py
|
12133432
| |
pigeonflight/strider-plone
|
refs/heads/master
|
docker/appengine/lib/django-1.5/tests/regressiontests/test_runner/__init__.py
|
12133432
| |
cfriedt/gnuradio
|
refs/heads/master
|
gr-filter/python/filter/qa_filterbank.py
|
47
|
#!/usr/bin/env python
#
# Copyright 2012,2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import time
import random
import math
from gnuradio import gr, gr_unittest, filter, blocks
def convolution(A, B):
"""
    Returns the convolution of the A and B vectors, which has length
    len(A) - len(B) + 1.
"""
rs = []
for i in range(len(B)-1, len(A)):
r = 0
for j, b in enumerate(B):
r += A[i-j] * b
rs.append(r)
return rs
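# Illustrative example, not part of the original test: with A = [1, 2, 3, 4]
# and B = [1, 1], convolution(A, B) returns [3, 5, 7], i.e.
# len(A) - len(B) + 1 = 3 output points.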
class test_filterbank_vcvcf(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_000(self):
"""
Generates nfilts sets of random complex data.
Generates two sets of random taps for each filter.
Applies one set of the random taps, gets some output,
        applies the second set of random taps, and gets some more output.
The output is then compared with a python-implemented
convolution.
"""
myrand = random.Random(123).random
nfilts = 10
ntaps = 5
# Sets some of the taps to be all zeros.
zero_filts1 = (3, 7)
zero_filts2 = (1, 6, 9)
ndatapoints = 100
# Generate some random sets of data
data_sets = []
for i in range(0, nfilts):
data_sets.append([(myrand()-0.5) + (myrand()-0.5)*(0+1j)
for k in range(0, ndatapoints)])
# Join them together to pass to vector_source block
data = []
for dp in zip(*data_sets):
data += dp
# Generate some random taps.
taps1 = []
taps2 = []
for i in range(0, nfilts):
if i in zero_filts1:
taps1.append([0]*ntaps)
else:
taps1.append([myrand()-0.5 for k in range(0, ntaps)])
if i in zero_filts2:
taps2.append([0]*ntaps)
else:
taps2.append([myrand()-0.5 for k in range(0, ntaps)])
# Calculate results with a python-implemented convolution.
results = []
results2 = []
for ds, ts, ts2 in zip(data_sets, taps1, taps2):
results.append(convolution(ds[-len(ts):]+ds[:-1], ts))
results2.append(convolution(ds[-len(ts):]+ds[:-1], ts2))
# Convert results from 2D arrays to 1D arrays for ease of comparison.
comb_results = []
for rs in zip(*results):
comb_results += rs
comb_results2 = []
for rs in zip(*results2):
comb_results2 += rs
# Construct the signal-processing chain.
src = blocks.vector_source_c(data, True, nfilts)
fb = filter.filterbank_vcvcf(taps1)
v2s = blocks.vector_to_stream(gr.sizeof_gr_complex, nfilts)
s2v = blocks.stream_to_vector(gr.sizeof_gr_complex, nfilts*ndatapoints)
snk = blocks.probe_signal_vc(nfilts*ndatapoints)
self.tb.connect(src, fb, v2s, s2v, snk)
# Run the signal-processing chain.
self.tb.start()
all_zero = True
outdata = None
waittime = 0.001
# Wait until we have some data.
while (not outdata) or outdata[0]==0:
time.sleep(waittime)
outdata = snk.level()
# Apply the second set of taps.
fb.set_taps(taps2)
outdata2 = None
# Wait until we have new data.
while (not outdata2) or outdata[0] == outdata2[0]:
time.sleep(waittime)
outdata2 = snk.level()
self.tb.stop()
# Compare the datasets.
self.assertComplexTuplesAlmostEqual(comb_results, outdata, 6)
self.assertComplexTuplesAlmostEqual(comb_results2, outdata2, 6)
if __name__ == '__main__':
gr_unittest.run(test_filterbank_vcvcf, "test_filterbank_vcvcf.xml")
|
mbernasocchi/QGIS
|
refs/heads/master
|
tests/src/python/test_qgssourceselectprovider.py
|
30
|
# -*- coding: utf-8 -*-
"""
Test the QgsSourceSelectProvider
and QgsSourceSelectProviderRegistry classes
Run with: ctest -V -R PyQgsSourceSelectProvider
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import os
import tempfile
from qgis.gui import (QgsGui, QgsSourceSelectProvider, QgsSourceSelectProviderRegistry, QgsAbstractDataSourceWidget)
from qgis.testing import start_app, unittest
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtWidgets import QWidget
__author__ = 'Alessandro Pasotti'
__date__ = '01/09/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
start_app()
class ConcreteDataSourceWidget(QgsAbstractDataSourceWidget):
pass
class ConcreteSourceSelectProvider(QgsSourceSelectProvider):
def providerKey(self):
return "MyTestProviderKey"
def text(self):
return "MyTestProviderText"
def icon(self):
return QIcon()
def createDataSourceWidget(self):
return ConcreteDataSourceWidget()
def ordering(self):
return 1
class ConcreteSourceSelectProvider2(QgsSourceSelectProvider):
def providerKey(self):
return "MyTestProviderKey2"
def text(self):
return "MyTestProviderText2"
def name(self):
return "MyName"
def toolTip(self):
return "MyToolTip"
def icon(self):
return QIcon()
def createDataSourceWidget(self):
return ConcreteDataSourceWidget()
def ordering(self):
return 2
class TestQgsSourceSelectProvider(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testConcreteClass(self):
provider = ConcreteSourceSelectProvider()
self.assertTrue(isinstance(provider, ConcreteSourceSelectProvider))
widget = provider.createDataSourceWidget()
self.assertTrue(isinstance(widget, ConcreteDataSourceWidget))
self.assertEqual(provider.providerKey(), "MyTestProviderKey")
self.assertEqual(provider.name(), "MyTestProviderKey")
self.assertEqual(provider.text(), "MyTestProviderText")
self.assertEqual(provider.toolTip(), "")
self.assertEqual(provider.ordering(), 1)
self.assertTrue(isinstance(provider.icon(), QIcon))
# test toolTip
provider = ConcreteSourceSelectProvider2()
self.assertEqual(provider.toolTip(), "MyToolTip")
def _testRegistry(self, registry):
registry.addProvider(ConcreteSourceSelectProvider())
registry.addProvider(ConcreteSourceSelectProvider2())
# Check order
self.assertEqual(['MyTestProviderKey', 'MyName'],
[p.name() for p in registry.providers() if p.providerKey().startswith('MyTestProviderKey')])
# Get provider by name
self.assertTrue(registry.providerByName('MyTestProviderKey'))
self.assertTrue(registry.providerByName('MyName'))
# Get not existent by name
self.assertFalse(registry.providerByName('Oh This Is Missing!'))
# Get providers by data provider key
self.assertGreater(len(registry.providersByKey('MyTestProviderKey')), 0)
self.assertGreater(len(registry.providersByKey('MyTestProviderKey2')), 0)
# Get not existent by key
self.assertEqual(len(registry.providersByKey('Oh This Is Missing!')), 0)
def testRemoveProvider(self):
registry = QgsSourceSelectProviderRegistry()
registry.addProvider(ConcreteSourceSelectProvider())
registry.addProvider(ConcreteSourceSelectProvider2())
self.assertEqual(['MyTestProviderKey', 'MyName'],
[p.name() for p in registry.providers() if p.providerKey().startswith('MyTestProviderKey')])
self.assertTrue(registry.removeProvider(registry.providerByName('MyName')))
self.assertEqual(['MyTestProviderKey'],
[p.name() for p in registry.providers() if p.providerKey().startswith('MyTestProviderKey')])
self.assertTrue(registry.removeProvider(registry.providerByName('MyTestProviderKey')))
self.assertEqual([],
[p.name() for p in registry.providers() if p.providerKey().startswith('MyTestProviderKey')])
def testRegistry(self):
registry = QgsSourceSelectProviderRegistry()
self._testRegistry(registry)
def testRegistrySingleton(self):
registry = QgsGui.sourceSelectProviderRegistry()
self._testRegistry(registry)
# Check that at least OGR and GDAL are here
self.assertTrue(registry.providersByKey('ogr'))
self.assertTrue(registry.providersByKey('gdal'))
if __name__ == '__main__':
unittest.main()
|
abaditsegay/arangodb
|
refs/heads/devel
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/encodings/__init__.py
|
60
|
""" Standard "encodings" Package
Standard Python encoding modules are stored in this package
directory.
Codec modules must have names corresponding to normalized encoding
names as defined in the normalize_encoding() function below, e.g.
'utf-8' must be implemented by the module 'utf_8.py'.
Each codec module must export the following interface:
* getregentry() -> codecs.CodecInfo object
    The getregentry() API must return a CodecInfo object with encoder, decoder,
    incrementalencoder, incrementaldecoder, streamwriter and streamreader
    attributes which adhere to the Python Codec Interface Standard.
In addition, a module may optionally also define the following
APIs which are then used by the package's codec search function:
* getaliases() -> sequence of encoding name strings to use as aliases
Alias names returned by getaliases() must be normalized encoding
names as defined by normalize_encoding().
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
from encodings import aliases
import __builtin__
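# Illustrative sketch, not part of the original module: a minimal codec module
# satisfying the getregentry() interface described in the docstring above,
# assuming it simply reuses the built-in Latin-1 codec functions:
#
#     import codecs
#
#     def getregentry():
#         return codecs.CodecInfo(
#             name='my-codec',
#             encode=codecs.latin_1_encode,
#             decode=codecs.latin_1_decode,
#         )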
_cache = {}
_unknown = '--unknown--'
_import_tail = ['*']
# 256-byte translation table used by normalize_encoding(): keeps '.', digits
# and ASCII letters, and maps every other byte to a space.
_norm_encoding_map = ('                                              . '
                      '0123456789       ABCDEFGHIJKLMNOPQRSTUVWXYZ     '
                      ' abcdefghijklmnopqrstuvwxyz                     '
                      '                                                '
                      '                                                '
                      '                ')
_aliases = aliases.aliases
class CodecRegistryError(LookupError, SystemError):
pass
def normalize_encoding(encoding):
""" Normalize an encoding name.
Normalization works as follows: all non-alphanumeric
characters except the dot used for Python package names are
collapsed and replaced with a single underscore, e.g. ' -;#'
becomes '_'. Leading and trailing underscores are removed.
Note that encoding names should be ASCII only; if they do use
non-ASCII characters, these must be Latin-1 compatible.
"""
# Make sure we have an 8-bit string, because .translate() works
# differently for Unicode strings.
if hasattr(__builtin__, "unicode") and isinstance(encoding, unicode):
# Note that .encode('latin-1') does *not* use the codec
# registry, so this call doesn't recurse. (See unicodeobject.c
# PyUnicode_AsEncodedString() for details)
encoding = encoding.encode('latin-1')
return '_'.join(encoding.translate(_norm_encoding_map).split())
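# Illustrative examples, not from the original source:
#     normalize_encoding('latin-1')    -> 'latin_1'
#     normalize_encoding('ISO 8859-1') -> 'ISO_8859_1'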
def search_function(encoding):
# Cache lookup
entry = _cache.get(encoding, _unknown)
if entry is not _unknown:
return entry
# Import the module:
#
# First try to find an alias for the normalized encoding
# name and lookup the module using the aliased name, then try to
# lookup the module using the standard import scheme, i.e. first
# try in the encodings package, then at top-level.
#
norm_encoding = normalize_encoding(encoding)
aliased_encoding = _aliases.get(norm_encoding) or \
_aliases.get(norm_encoding.replace('.', '_'))
if aliased_encoding is not None:
modnames = [aliased_encoding,
norm_encoding]
else:
modnames = [norm_encoding]
for modname in modnames:
if not modname or '.' in modname:
continue
try:
# Import is absolute to prevent the possibly malicious import of a
# module with side-effects that is not in the 'encodings' package.
mod = __import__('encodings.' + modname, fromlist=_import_tail,
level=0)
except ImportError:
pass
else:
break
else:
mod = None
try:
getregentry = mod.getregentry
except AttributeError:
# Not a codec module
mod = None
if mod is None:
# Cache misses
_cache[encoding] = None
return None
# Now ask the module for the registry entry
entry = getregentry()
if not isinstance(entry, codecs.CodecInfo):
if not 4 <= len(entry) <= 7:
raise CodecRegistryError,\
'module "%s" (%s) failed to register' % \
(mod.__name__, mod.__file__)
if not callable(entry[0]) or \
not callable(entry[1]) or \
(entry[2] is not None and not callable(entry[2])) or \
(entry[3] is not None and not callable(entry[3])) or \
(len(entry) > 4 and entry[4] is not None and not callable(entry[4])) or \
(len(entry) > 5 and entry[5] is not None and not callable(entry[5])):
raise CodecRegistryError,\
'incompatible codecs in module "%s" (%s)' % \
(mod.__name__, mod.__file__)
if len(entry)<7 or entry[6] is None:
entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],)
entry = codecs.CodecInfo(*entry)
# Cache the codec registry entry
_cache[encoding] = entry
# Register its aliases (without overwriting previously registered
# aliases)
try:
codecaliases = mod.getaliases()
except AttributeError:
pass
else:
for alias in codecaliases:
if not _aliases.has_key(alias):
_aliases[alias] = modname
# Return the registry entry
return entry
# Register the search_function in the Python codec registry
codecs.register(search_function)
|
MoritzS/django
|
refs/heads/master
|
tests/contenttypes_tests/operations_migrations/0002_rename_foo.py
|
133
|
from django.db import migrations
def assert_foo_contenttype_not_cached(apps, schema_editor):
ContentType = apps.get_model('contenttypes', 'ContentType')
try:
content_type = ContentType.objects.get_by_natural_key('contenttypes_tests', 'foo')
except ContentType.DoesNotExist:
pass
else:
if not ContentType.objects.filter(app_label='contenttypes_tests', model='foo').exists():
raise AssertionError('The contenttypes_tests.Foo ContentType should not be cached.')
elif content_type.model != 'foo':
raise AssertionError(
"The cached contenttypes_tests.Foo ContentType should have "
"its model set to 'foo'."
)
class Migration(migrations.Migration):
dependencies = [
('contenttypes_tests', '0001_initial'),
]
operations = [
migrations.RenameModel('Foo', 'RenamedFoo'),
migrations.RunPython(assert_foo_contenttype_not_cached, migrations.RunPython.noop)
]
|
ProjectSWGCore/NGECore2
|
refs/heads/master
|
scripts/mobiles/endor/domesticated_merek.py
|
2
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('domesticated_merek')
mobileTemplate.setLevel(50)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(.8)
mobileTemplate.setHideType("Leathery Hide")
mobileTemplate.setHideAmount(45)
mobileTemplate.setSocialGroup("self")
mobileTemplate.setAssistRange(12)
mobileTemplate.setStalker(False)
mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_merek.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_claw_4')
attacks.add('bm_damage_poison_4')
attacks.add('bm_dampen_pain_4')
attacks.add('bm_slash_4')
mobileTemplate.setDefaultAttack('creatureRangedAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('domesticated_merek', mobileTemplate)
return
|
orbitfp7/nova
|
refs/heads/master
|
nova/tests/unit/test_quota.py
|
11
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_config import cfg
from oslo_utils import timeutils
from nova import compute
from nova.compute import flavors
from nova import context
from nova import db
from nova.db.sqlalchemy import api as sqa_api
from nova.db.sqlalchemy import models as sqa_models
from nova import exception
from nova import quota
from nova import test
import nova.tests.unit.image.fake
CONF = cfg.CONF
CONF.import_opt('compute_driver', 'nova.virt.driver')
class QuotaIntegrationTestCase(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(QuotaIntegrationTestCase, self).setUp()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
quota_instances=2,
quota_cores=4,
quota_floating_ips=1,
network_manager='nova.network.manager.FlatDHCPManager')
# Apparently needed by the RPC tests...
self.network = self.start_service('network')
self.user_id = 'admin'
self.project_id = 'admin'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
self.compute_api = compute.API()
def tearDown(self):
super(QuotaIntegrationTestCase, self).tearDown()
nova.tests.unit.image.fake.FakeImageService_reset()
def _create_instance(self, cores=2):
"""Create a test instance."""
inst = {}
inst['image_id'] = 'cedef40a-ed67-4d10-800e-17455edce175'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['instance_type_id'] = '3' # m1.large
inst['vcpus'] = cores
return db.instance_create(self.context, inst)
def test_too_many_instances(self):
instance_uuids = []
for i in range(CONF.quota_instances):
instance = self._create_instance()
instance_uuids.append(instance['uuid'])
inst_type = flavors.get_flavor_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
try:
self.compute_api.create(self.context, min_count=1, max_count=1,
instance_type=inst_type,
image_href=image_uuid)
except exception.QuotaError as e:
expected_kwargs = {'code': 413, 'resource': 'cores', 'req': 1,
'used': 4, 'allowed': 4, 'overs': 'cores,instances'}
self.assertEqual(e.kwargs, expected_kwargs)
else:
self.fail('Expected QuotaError exception')
for instance_uuid in instance_uuids:
db.instance_destroy(self.context, instance_uuid)
def test_too_many_cores(self):
instance = self._create_instance(cores=4)
inst_type = flavors.get_flavor_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
try:
self.compute_api.create(self.context, min_count=1, max_count=1,
instance_type=inst_type,
image_href=image_uuid)
except exception.QuotaError as e:
expected_kwargs = {'code': 413, 'resource': 'cores', 'req': 1,
'used': 4, 'allowed': 4, 'overs': 'cores'}
self.assertEqual(e.kwargs, expected_kwargs)
else:
self.fail('Expected QuotaError exception')
db.instance_destroy(self.context, instance['uuid'])
def test_many_cores_with_unlimited_quota(self):
# Setting cores quota to unlimited:
self.flags(quota_cores=-1)
instance = self._create_instance(cores=4)
db.instance_destroy(self.context, instance['uuid'])
def test_too_many_addresses(self):
address = '192.168.0.100'
db.floating_ip_create(context.get_admin_context(),
{'address': address,
'pool': 'nova',
'project_id': self.project_id})
self.assertRaises(exception.QuotaError,
self.network.allocate_floating_ip,
self.context,
self.project_id)
db.floating_ip_destroy(context.get_admin_context(), address)
def test_auto_assigned(self):
address = '192.168.0.100'
db.floating_ip_create(context.get_admin_context(),
{'address': address,
'pool': 'nova',
'project_id': self.project_id})
# auto allocated addresses should not be counted
self.assertRaises(exception.NoMoreFloatingIps,
self.network.allocate_floating_ip,
self.context,
self.project_id,
True)
db.floating_ip_destroy(context.get_admin_context(), address)
def test_too_many_metadata_items(self):
metadata = {}
for i in range(CONF.quota_metadata_items + 1):
metadata['key%s' % i] = 'value%s' % i
inst_type = flavors.get_flavor_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
self.assertRaises(exception.QuotaError, self.compute_api.create,
self.context,
min_count=1,
max_count=1,
instance_type=inst_type,
image_href=image_uuid,
metadata=metadata)
def _create_with_injected_files(self, files):
api = self.compute_api
inst_type = flavors.get_flavor_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
api.create(self.context, min_count=1, max_count=1,
instance_type=inst_type, image_href=image_uuid,
injected_files=files)
def test_no_injected_files(self):
api = self.compute_api
inst_type = flavors.get_flavor_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
api.create(self.context,
instance_type=inst_type,
image_href=image_uuid)
def test_max_injected_files(self):
files = []
for i in xrange(CONF.quota_injected_files):
files.append(('/my/path%d' % i, 'config = test\n'))
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_files(self):
files = []
for i in xrange(CONF.quota_injected_files + 1):
files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_max_injected_file_content_bytes(self):
max = CONF.quota_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max)])
files = [('/test/path', content)]
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_file_content_bytes(self):
max = CONF.quota_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max + 1)])
files = [('/test/path', content)]
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_max_injected_file_path_bytes(self):
max = CONF.quota_injected_file_path_length
path = ''.join(['a' for i in xrange(max)])
files = [(path, 'config = quotatest')]
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_file_path_bytes(self):
max = CONF.quota_injected_file_path_length
path = ''.join(['a' for i in xrange(max + 1)])
files = [(path, 'config = quotatest')]
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_reservation_expire(self):
self.useFixture(test.TimeOverride())
def assertInstancesReserved(reserved):
result = quota.QUOTAS.get_project_quotas(self.context,
self.context.project_id)
self.assertEqual(result['instances']['reserved'], reserved)
quota.QUOTAS.reserve(self.context,
expire=60,
instances=2)
assertInstancesReserved(2)
timeutils.advance_time_seconds(80)
quota.QUOTAS.expire(self.context)
assertInstancesReserved(0)
class FakeContext(object):
def __init__(self, project_id, quota_class):
self.is_admin = False
self.user_id = 'fake_user'
self.project_id = project_id
self.quota_class = quota_class
self.read_deleted = 'no'
def elevated(self):
elevated = self.__class__(self.project_id, self.quota_class)
elevated.is_admin = True
return elevated
class FakeDriver(object):
def __init__(self, by_project=None, by_user=None, by_class=None,
reservations=None):
self.called = []
self.by_project = by_project or {}
self.by_user = by_user or {}
self.by_class = by_class or {}
self.reservations = reservations or []
def get_by_project_and_user(self, context, project_id, user_id, resource):
self.called.append(('get_by_project_and_user',
context, project_id, user_id, resource))
try:
return self.by_user[user_id][resource]
except KeyError:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
def get_by_project(self, context, project_id, resource):
self.called.append(('get_by_project', context, project_id, resource))
try:
return self.by_project[project_id][resource]
except KeyError:
raise exception.ProjectQuotaNotFound(project_id=project_id)
def get_by_class(self, context, quota_class, resource):
self.called.append(('get_by_class', context, quota_class, resource))
try:
return self.by_class[quota_class][resource]
except KeyError:
raise exception.QuotaClassNotFound(class_name=quota_class)
def get_defaults(self, context, resources):
self.called.append(('get_defaults', context, resources))
return resources
def get_class_quotas(self, context, resources, quota_class,
defaults=True):
self.called.append(('get_class_quotas', context, resources,
quota_class, defaults))
return resources
def get_user_quotas(self, context, resources, project_id, user_id,
quota_class=None, defaults=True, usages=True):
self.called.append(('get_user_quotas', context, resources,
project_id, user_id, quota_class, defaults,
usages))
return resources
def get_project_quotas(self, context, resources, project_id,
quota_class=None, defaults=True, usages=True,
remains=False):
self.called.append(('get_project_quotas', context, resources,
project_id, quota_class, defaults, usages,
remains))
return resources
def limit_check(self, context, resources, values, project_id=None,
user_id=None):
self.called.append(('limit_check', context, resources,
values, project_id, user_id))
def reserve(self, context, resources, deltas, expire=None,
project_id=None, user_id=None):
self.called.append(('reserve', context, resources, deltas,
expire, project_id, user_id))
return self.reservations
def commit(self, context, reservations, project_id=None, user_id=None):
self.called.append(('commit', context, reservations, project_id,
user_id))
def rollback(self, context, reservations, project_id=None, user_id=None):
self.called.append(('rollback', context, reservations, project_id,
user_id))
def usage_reset(self, context, resources):
self.called.append(('usage_reset', context, resources))
def destroy_all_by_project_and_user(self, context, project_id, user_id):
self.called.append(('destroy_all_by_project_and_user', context,
project_id, user_id))
def destroy_all_by_project(self, context, project_id):
self.called.append(('destroy_all_by_project', context, project_id))
def expire(self, context):
self.called.append(('expire', context))
class BaseResourceTestCase(test.TestCase):
def test_no_flag(self):
resource = quota.BaseResource('test_resource')
self.assertEqual(resource.name, 'test_resource')
self.assertIsNone(resource.flag)
self.assertEqual(resource.default, -1)
def test_with_flag(self):
# We know this flag exists, so use it...
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
self.assertEqual(resource.name, 'test_resource')
self.assertEqual(resource.flag, 'quota_instances')
self.assertEqual(resource.default, 10)
def test_with_flag_no_quota(self):
self.flags(quota_instances=-1)
resource = quota.BaseResource('test_resource', 'quota_instances')
self.assertEqual(resource.name, 'test_resource')
self.assertEqual(resource.flag, 'quota_instances')
self.assertEqual(resource.default, -1)
def test_quota_no_project_no_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver()
context = FakeContext(None, None)
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 10)
def test_quota_with_project_no_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=15),
))
context = FakeContext('test_project', None)
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 15)
def test_quota_no_project_with_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_class=dict(
test_class=dict(test_resource=20),
))
context = FakeContext(None, 'test_class')
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 20)
def test_quota_with_project_with_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=15),
),
by_class=dict(
test_class=dict(test_resource=20),
))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 15)
def test_quota_override_project_with_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=15),
override_project=dict(test_resource=20),
))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context,
project_id='override_project')
self.assertEqual(quota_value, 20)
def test_quota_with_project_override_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_class=dict(
test_class=dict(test_resource=15),
override_class=dict(test_resource=20),
))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context,
quota_class='override_class')
self.assertEqual(quota_value, 20)
def test_valid_method_call_check_invalid_input(self):
resources = {'dummy': 1}
self.assertRaises(exception.InvalidQuotaMethodUsage,
quota._valid_method_call_check_resources,
resources, 'limit')
def test_valid_method_call_check_invalid_method(self):
resources = {'key_pairs': 1}
self.assertRaises(exception.InvalidQuotaMethodUsage,
quota._valid_method_call_check_resources,
resources, 'dummy')
def test_valid_method_call_check_multiple(self):
resources = {'key_pairs': 1, 'dummy': 2}
self.assertRaises(exception.InvalidQuotaMethodUsage,
quota._valid_method_call_check_resources,
resources, 'check')
resources = {'key_pairs': 1, 'instances': 2, 'dummy': 3}
self.assertRaises(exception.InvalidQuotaMethodUsage,
quota._valid_method_call_check_resources,
resources, 'check')
def test_valid_method_call_check_wrong_method_reserve(self):
resources = {'key_pairs': 1}
self.assertRaises(exception.InvalidQuotaMethodUsage,
quota._valid_method_call_check_resources,
resources, 'reserve')
def test_valid_method_call_check_wrong_method_check(self):
resources = {'fixed_ips': 1}
self.assertRaises(exception.InvalidQuotaMethodUsage,
quota._valid_method_call_check_resources,
resources, 'check')
class QuotaEngineTestCase(test.TestCase):
def test_init(self):
quota_obj = quota.QuotaEngine()
self.assertEqual(quota_obj._resources, {})
self.assertIsInstance(quota_obj._driver, quota.DbQuotaDriver)
def test_init_override_string(self):
quota_obj = quota.QuotaEngine(
quota_driver_class='nova.tests.unit.test_quota.FakeDriver')
self.assertEqual(quota_obj._resources, {})
self.assertIsInstance(quota_obj._driver, FakeDriver)
def test_init_override_obj(self):
quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver)
self.assertEqual(quota_obj._resources, {})
self.assertEqual(quota_obj._driver, FakeDriver)
def test_register_resource(self):
quota_obj = quota.QuotaEngine()
resource = quota.AbsoluteResource('test_resource')
quota_obj.register_resource(resource)
self.assertEqual(quota_obj._resources, dict(test_resource=resource))
def test_register_resources(self):
quota_obj = quota.QuotaEngine()
resources = [
quota.AbsoluteResource('test_resource1'),
quota.AbsoluteResource('test_resource2'),
quota.AbsoluteResource('test_resource3'),
]
quota_obj.register_resources(resources)
self.assertEqual(quota_obj._resources, dict(
test_resource1=resources[0],
test_resource2=resources[1],
test_resource3=resources[2],
))
def test_get_by_project_and_user(self):
context = FakeContext('test_project', 'test_class')
driver = FakeDriver(by_user=dict(
fake_user=dict(test_resource=42)))
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
result = quota_obj.get_by_project_and_user(context, 'test_project',
'fake_user', 'test_resource')
self.assertEqual(driver.called, [
('get_by_project_and_user', context, 'test_project',
'fake_user', 'test_resource'),
])
self.assertEqual(result, 42)
def test_get_by_project(self):
context = FakeContext('test_project', 'test_class')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=42)))
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
result = quota_obj.get_by_project(context, 'test_project',
'test_resource')
self.assertEqual(driver.called, [
('get_by_project', context, 'test_project', 'test_resource'),
])
self.assertEqual(result, 42)
def test_get_by_class(self):
context = FakeContext('test_project', 'test_class')
driver = FakeDriver(by_class=dict(
test_class=dict(test_resource=42)))
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
result = quota_obj.get_by_class(context, 'test_class', 'test_resource')
self.assertEqual(driver.called, [
('get_by_class', context, 'test_class', 'test_resource'),
])
self.assertEqual(result, 42)
def _make_quota_obj(self, driver):
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
resources = [
quota.AbsoluteResource('test_resource4'),
quota.AbsoluteResource('test_resource3'),
quota.AbsoluteResource('test_resource2'),
quota.AbsoluteResource('test_resource1'),
]
quota_obj.register_resources(resources)
return quota_obj
def test_get_defaults(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result = quota_obj.get_defaults(context)
self.assertEqual(driver.called, [
('get_defaults', context, quota_obj._resources),
])
self.assertEqual(result, quota_obj._resources)
def test_get_class_quotas(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.get_class_quotas(context, 'test_class')
result2 = quota_obj.get_class_quotas(context, 'test_class', False)
self.assertEqual(driver.called, [
('get_class_quotas', context, quota_obj._resources,
'test_class', True),
('get_class_quotas', context, quota_obj._resources,
'test_class', False),
])
self.assertEqual(result1, quota_obj._resources)
self.assertEqual(result2, quota_obj._resources)
def test_get_user_quotas(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.get_user_quotas(context, 'test_project',
'fake_user')
result2 = quota_obj.get_user_quotas(context, 'test_project',
'fake_user',
quota_class='test_class',
defaults=False,
usages=False)
self.assertEqual(driver.called, [
('get_user_quotas', context, quota_obj._resources,
'test_project', 'fake_user', None, True, True),
('get_user_quotas', context, quota_obj._resources,
'test_project', 'fake_user', 'test_class', False, False),
])
self.assertEqual(result1, quota_obj._resources)
self.assertEqual(result2, quota_obj._resources)
def test_get_project_quotas(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.get_project_quotas(context, 'test_project')
result2 = quota_obj.get_project_quotas(context, 'test_project',
quota_class='test_class',
defaults=False,
usages=False)
self.assertEqual(driver.called, [
('get_project_quotas', context, quota_obj._resources,
'test_project', None, True, True, False),
('get_project_quotas', context, quota_obj._resources,
'test_project', 'test_class', False, False, False),
])
self.assertEqual(result1, quota_obj._resources)
self.assertEqual(result2, quota_obj._resources)
def test_count_no_resource(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
self.assertRaises(exception.QuotaResourceUnknown,
quota_obj.count, context, 'test_resource5',
True, foo='bar')
def test_count_wrong_resource(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
self.assertRaises(exception.QuotaResourceUnknown,
quota_obj.count, context, 'test_resource1',
True, foo='bar')
def test_count(self):
def fake_count(context, *args, **kwargs):
self.assertEqual(args, (True,))
self.assertEqual(kwargs, dict(foo='bar'))
return 5
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.register_resource(quota.CountableResource('test_resource5',
fake_count))
result = quota_obj.count(context, 'test_resource5', True, foo='bar')
self.assertEqual(result, 5)
def test_limit_check(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.limit_check(context, test_resource1=4, test_resource2=3,
test_resource3=2, test_resource4=1)
self.assertEqual(driver.called, [
('limit_check', context, quota_obj._resources, dict(
test_resource1=4,
test_resource2=3,
test_resource3=2,
test_resource4=1,
), None, None),
])
def test_reserve(self):
context = FakeContext(None, None)
driver = FakeDriver(reservations=[
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.reserve(context, test_resource1=4,
test_resource2=3, test_resource3=2,
test_resource4=1)
result2 = quota_obj.reserve(context, expire=3600,
test_resource1=1, test_resource2=2,
test_resource3=3, test_resource4=4)
result3 = quota_obj.reserve(context, project_id='fake_project',
test_resource1=1, test_resource2=2,
test_resource3=3, test_resource4=4)
self.assertEqual(driver.called, [
('reserve', context, quota_obj._resources, dict(
test_resource1=4,
test_resource2=3,
test_resource3=2,
test_resource4=1,
), None, None, None),
('reserve', context, quota_obj._resources, dict(
test_resource1=1,
test_resource2=2,
test_resource3=3,
test_resource4=4,
), 3600, None, None),
('reserve', context, quota_obj._resources, dict(
test_resource1=1,
test_resource2=2,
test_resource3=3,
test_resource4=4,
), None, 'fake_project', None),
])
self.assertEqual(result1, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
self.assertEqual(result2, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
self.assertEqual(result3, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
def test_commit(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual(driver.called, [
('commit', context, ['resv-01', 'resv-02', 'resv-03'], None,
None),
])
def test_rollback(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual(driver.called, [
('rollback', context, ['resv-01', 'resv-02', 'resv-03'], None,
None),
])
def test_usage_reset(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.usage_reset(context, ['res1', 'res2', 'res3'])
self.assertEqual(driver.called, [
('usage_reset', context, ['res1', 'res2', 'res3']),
])
def test_destroy_all_by_project_and_user(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.destroy_all_by_project_and_user(context,
'test_project', 'fake_user')
self.assertEqual(driver.called, [
('destroy_all_by_project_and_user', context, 'test_project',
'fake_user'),
])
def test_destroy_all_by_project(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.destroy_all_by_project(context, 'test_project')
self.assertEqual(driver.called, [
('destroy_all_by_project', context, 'test_project'),
])
def test_expire(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.expire(context)
self.assertEqual(driver.called, [
('expire', context),
])
def test_resources(self):
quota_obj = self._make_quota_obj(None)
self.assertEqual(quota_obj.resources,
['test_resource1', 'test_resource2',
'test_resource3', 'test_resource4'])
class DbQuotaDriverTestCase(test.TestCase):
def setUp(self):
super(DbQuotaDriverTestCase, self).setUp()
self.flags(quota_instances=10,
quota_cores=20,
quota_ram=50 * 1024,
quota_floating_ips=10,
quota_fixed_ips=10,
quota_metadata_items=128,
quota_injected_files=5,
quota_injected_file_content_bytes=10 * 1024,
quota_injected_file_path_length=255,
quota_security_groups=10,
quota_security_group_rules=20,
quota_server_groups=10,
quota_server_group_members=10,
reservation_expire=86400,
until_refresh=0,
max_age=0,
)
self.driver = quota.DbQuotaDriver()
self.calls = []
self.useFixture(test.TimeOverride())
def test_get_defaults(self):
# Use our pre-defined resources
self._stub_quota_class_get_default()
result = self.driver.get_defaults(None, quota.QUOTAS._resources)
self.assertEqual(result, dict(
instances=5,
cores=20,
ram=25 * 1024,
floating_ips=10,
fixed_ips=10,
metadata_items=64,
injected_files=5,
injected_file_content_bytes=5 * 1024,
injected_file_path_bytes=255,
security_groups=10,
security_group_rules=20,
key_pairs=100,
server_groups=10,
server_group_members=10,
))
def _stub_quota_class_get_default(self):
# Stub out quota_class_get_default
def fake_qcgd(context):
self.calls.append('quota_class_get_default')
return dict(
instances=5,
ram=25 * 1024,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
)
self.stubs.Set(db, 'quota_class_get_default', fake_qcgd)
def _stub_quota_class_get_all_by_name(self):
# Stub out quota_class_get_all_by_name
def fake_qcgabn(context, quota_class):
self.calls.append('quota_class_get_all_by_name')
self.assertEqual(quota_class, 'test_class')
return dict(
instances=5,
ram=25 * 1024,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
)
self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn)
def test_get_class_quotas(self):
self._stub_quota_class_get_all_by_name()
result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
'test_class')
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
self.assertEqual(result, dict(
instances=5,
cores=20,
ram=25 * 1024,
floating_ips=10,
fixed_ips=10,
metadata_items=64,
injected_files=5,
injected_file_content_bytes=5 * 1024,
injected_file_path_bytes=255,
security_groups=10,
security_group_rules=20,
key_pairs=100,
server_groups=10,
server_group_members=10,
))
def test_get_class_quotas_no_defaults(self):
self._stub_quota_class_get_all_by_name()
result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
'test_class', False)
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
self.assertEqual(result, dict(
instances=5,
ram=25 * 1024,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
))
def _stub_get_by_project_and_user(self):
def fake_qgabpau(context, project_id, user_id):
self.calls.append('quota_get_all_by_project_and_user')
self.assertEqual(project_id, 'test_project')
self.assertEqual(user_id, 'fake_user')
return dict(
cores=10,
injected_files=2,
injected_file_path_bytes=127,
)
def fake_qgabp(context, project_id):
self.calls.append('quota_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return {
'cores': 10,
'injected_files': 2,
'injected_file_path_bytes': 127,
}
def fake_qugabpau(context, project_id, user_id):
self.calls.append('quota_usage_get_all_by_project_and_user')
self.assertEqual(project_id, 'test_project')
self.assertEqual(user_id, 'fake_user')
return dict(
instances=dict(in_use=2, reserved=2),
cores=dict(in_use=4, reserved=4),
ram=dict(in_use=10 * 1024, reserved=0),
floating_ips=dict(in_use=2, reserved=0),
metadata_items=dict(in_use=0, reserved=0),
injected_files=dict(in_use=0, reserved=0),
injected_file_content_bytes=dict(in_use=0, reserved=0),
injected_file_path_bytes=dict(in_use=0, reserved=0),
)
self.stubs.Set(db, 'quota_get_all_by_project_and_user', fake_qgabpau)
self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
self.stubs.Set(db, 'quota_usage_get_all_by_project_and_user',
fake_qugabpau)
self._stub_quota_class_get_all_by_name()
def test_get_user_quotas(self):
self.maxDiff = None
self._stub_get_by_project_and_user()
result = self.driver.get_user_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', 'fake_user')
self.assertEqual(self.calls, [
'quota_get_all_by_project_and_user',
'quota_get_all_by_project',
'quota_usage_get_all_by_project_and_user',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
server_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
server_group_members=dict(
limit=10,
in_use=0,
reserved=0,
),
))
def _stub_get_by_project_and_user_specific(self):
def fake_quota_get(context, project_id, resource, user_id=None):
self.calls.append('quota_get')
self.assertEqual(project_id, 'test_project')
self.assertEqual(user_id, 'fake_user')
self.assertEqual(resource, 'test_resource')
return dict(
test_resource=dict(in_use=20, reserved=10),
)
self.stubs.Set(db, 'quota_get', fake_quota_get)
def test_get_by_project_and_user(self):
self._stub_get_by_project_and_user_specific()
result = self.driver.get_by_project_and_user(
FakeContext('test_project', 'test_class'),
'test_project', 'fake_user', 'test_resource')
self.assertEqual(self.calls, ['quota_get'])
self.assertEqual(result, dict(
test_resource=dict(in_use=20, reserved=10),
))
def _stub_get_by_project(self):
def fake_qgabp(context, project_id):
self.calls.append('quota_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return dict(
cores=10,
injected_files=2,
injected_file_path_bytes=127,
)
def fake_qugabp(context, project_id):
self.calls.append('quota_usage_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return dict(
instances=dict(in_use=2, reserved=2),
cores=dict(in_use=4, reserved=4),
ram=dict(in_use=10 * 1024, reserved=0),
floating_ips=dict(in_use=2, reserved=0),
metadata_items=dict(in_use=0, reserved=0),
injected_files=dict(in_use=0, reserved=0),
injected_file_content_bytes=dict(in_use=0, reserved=0),
injected_file_path_bytes=dict(in_use=0, reserved=0),
)
def fake_quota_get_all(context, project_id):
self.calls.append('quota_get_all')
self.assertEqual(project_id, 'test_project')
return [sqa_models.ProjectUserQuota(resource='instances',
hard_limit=5),
sqa_models.ProjectUserQuota(resource='cores',
hard_limit=2)]
self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp)
self.stubs.Set(db, 'quota_get_all', fake_quota_get_all)
self._stub_quota_class_get_all_by_name()
self._stub_quota_class_get_default()
def test_get_project_quotas(self):
self.maxDiff = None
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
'quota_class_get_default',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
server_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
server_group_members=dict(
limit=10,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_with_remains(self):
self.maxDiff = None
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', remains=True)
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
'quota_class_get_default',
'quota_get_all',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
remains=0,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
remains=8,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
remains=25 * 1024,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
remains=10,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
remains=10,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
remains=64,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
remains=2,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
remains=5 * 1024,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
remains=127,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
remains=10,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
remains=20,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
remains=100,
),
server_groups=dict(
limit=10,
in_use=0,
reserved=0,
remains=10,
),
server_group_members=dict(
limit=10,
in_use=0,
reserved=0,
remains=10,
),
))
def test_get_user_quotas_alt_context_no_class(self):
self.maxDiff = None
self._stub_get_by_project_and_user()
result = self.driver.get_user_quotas(
FakeContext('test_project', None),
quota.QUOTAS._resources, 'test_project', 'fake_user')
self.assertEqual(self.calls, [
'quota_get_all_by_project_and_user',
'quota_get_all_by_project',
'quota_usage_get_all_by_project_and_user',
])
self.assertEqual(result, dict(
instances=dict(
limit=10,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=50 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=128,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=10 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
server_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
server_group_members=dict(
limit=10,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_alt_context_no_class(self):
self.maxDiff = None
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('other_project', 'other_class'),
quota.QUOTAS._resources, 'test_project')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_default',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
server_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
server_group_members=dict(
limit=10,
in_use=0,
reserved=0,
),
))
def test_get_user_quotas_alt_context_with_class(self):
self.maxDiff = None
self._stub_get_by_project_and_user()
result = self.driver.get_user_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', 'fake_user',
quota_class='test_class')
self.assertEqual(self.calls, [
'quota_get_all_by_project_and_user',
'quota_get_all_by_project',
'quota_usage_get_all_by_project_and_user',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
server_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
server_group_members=dict(
limit=10,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_alt_context_with_class(self):
self.maxDiff = None
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('other_project', 'other_class'),
quota.QUOTAS._resources, 'test_project', quota_class='test_class')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
'quota_class_get_default',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
server_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
server_group_members=dict(
limit=10,
in_use=0,
reserved=0,
),
))
def test_get_user_quotas_no_defaults(self):
self._stub_get_by_project_and_user()
result = self.driver.get_user_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', 'fake_user',
defaults=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project_and_user',
'quota_get_all_by_project',
'quota_usage_get_all_by_project_and_user',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_no_defaults(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', defaults=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
'quota_class_get_default',
])
self.assertEqual(result, dict(
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
))
def test_get_user_quotas_no_usages(self):
self._stub_get_by_project_and_user()
result = self.driver.get_user_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', 'fake_user', usages=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project_and_user',
'quota_get_all_by_project',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
),
cores=dict(
limit=10,
),
ram=dict(
limit=25 * 1024,
),
floating_ips=dict(
limit=10,
),
fixed_ips=dict(
limit=10,
),
metadata_items=dict(
limit=64,
),
injected_files=dict(
limit=2,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
),
injected_file_path_bytes=dict(
limit=127,
),
security_groups=dict(
limit=10,
),
security_group_rules=dict(
limit=20,
),
key_pairs=dict(
limit=100,
),
server_groups=dict(
limit=10,
),
server_group_members=dict(
limit=10,
),
))
def test_get_project_quotas_no_usages(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', usages=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_class_get_all_by_name',
'quota_class_get_default',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
),
cores=dict(
limit=10,
),
ram=dict(
limit=25 * 1024,
),
floating_ips=dict(
limit=10,
),
fixed_ips=dict(
limit=10,
),
metadata_items=dict(
limit=64,
),
injected_files=dict(
limit=2,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
),
injected_file_path_bytes=dict(
limit=127,
),
security_groups=dict(
limit=10,
),
security_group_rules=dict(
limit=20,
),
key_pairs=dict(
limit=100,
),
server_groups=dict(
limit=10,
),
server_group_members=dict(
limit=10,
),
))
def _stub_get_settable_quotas(self):
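        """Stub the db and driver helpers so get_settable_quotas sees fixed
        project/user quotas and usages instead of hitting the database."""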
def fake_quota_get_all_by_project(context, project_id):
self.calls.append('quota_get_all_by_project')
return {'floating_ips': 20}
def fake_get_project_quotas(context, resources, project_id,
quota_class=None, defaults=True,
usages=True, remains=False,
project_quotas=None):
self.calls.append('get_project_quotas')
result = {}
for k, v in resources.items():
limit = v.default
reserved = 0
if k == 'instances':
remains = v.default - 5
in_use = 1
elif k == 'cores':
remains = -1
in_use = 5
limit = -1
elif k == 'floating_ips':
remains = 20
in_use = 0
limit = 20
else:
remains = v.default
in_use = 0
result[k] = {'limit': limit, 'in_use': in_use,
'reserved': reserved, 'remains': remains}
return result
def fake_process_quotas_in_get_user_quotas(context, resources,
project_id, quotas,
quota_class=None,
defaults=True, usages=None,
remains=False):
self.calls.append('_process_quotas')
result = {}
for k, v in resources.items():
reserved = 0
if k == 'instances':
in_use = 1
elif k == 'cores':
in_use = 5
reserved = 10
else:
in_use = 0
result[k] = {'limit': v.default,
'in_use': in_use, 'reserved': reserved}
return result
def fake_qgabpau(context, project_id, user_id):
self.calls.append('quota_get_all_by_project_and_user')
return {'instances': 2, 'cores': -1}
self.stubs.Set(db, 'quota_get_all_by_project',
fake_quota_get_all_by_project)
self.stubs.Set(self.driver, 'get_project_quotas',
fake_get_project_quotas)
self.stubs.Set(self.driver, '_process_quotas',
fake_process_quotas_in_get_user_quotas)
self.stubs.Set(db, 'quota_get_all_by_project_and_user',
fake_qgabpau)
def test_get_settable_quotas_with_user(self):
self._stub_get_settable_quotas()
result = self.driver.get_settable_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', user_id='test_user')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'get_project_quotas',
'quota_get_all_by_project_and_user',
'_process_quotas',
])
self.assertEqual(result, {
'instances': {
'minimum': 1,
'maximum': 7,
},
'cores': {
'minimum': 15,
'maximum': -1,
},
'ram': {
'minimum': 0,
'maximum': 50 * 1024,
},
'floating_ips': {
'minimum': 0,
'maximum': 20,
},
'fixed_ips': {
'minimum': 0,
'maximum': 10,
},
'metadata_items': {
'minimum': 0,
'maximum': 128,
},
'injected_files': {
'minimum': 0,
'maximum': 5,
},
'injected_file_content_bytes': {
'minimum': 0,
'maximum': 10 * 1024,
},
'injected_file_path_bytes': {
'minimum': 0,
'maximum': 255,
},
'security_groups': {
'minimum': 0,
'maximum': 10,
},
'security_group_rules': {
'minimum': 0,
'maximum': 20,
},
'key_pairs': {
'minimum': 0,
'maximum': 100,
},
'server_groups': {
'minimum': 0,
'maximum': 10,
},
'server_group_members': {
'minimum': 0,
'maximum': 10,
},
})
def test_get_settable_quotas_without_user(self):
self._stub_get_settable_quotas()
result = self.driver.get_settable_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'get_project_quotas',
])
self.assertEqual(result, {
'instances': {
'minimum': 5,
'maximum': -1,
},
'cores': {
'minimum': 5,
'maximum': -1,
},
'ram': {
'minimum': 0,
'maximum': -1,
},
'floating_ips': {
'minimum': 0,
'maximum': -1,
},
'fixed_ips': {
'minimum': 0,
'maximum': -1,
},
'metadata_items': {
'minimum': 0,
'maximum': -1,
},
'injected_files': {
'minimum': 0,
'maximum': -1,
},
'injected_file_content_bytes': {
'minimum': 0,
'maximum': -1,
},
'injected_file_path_bytes': {
'minimum': 0,
'maximum': -1,
},
'security_groups': {
'minimum': 0,
'maximum': -1,
},
'security_group_rules': {
'minimum': 0,
'maximum': -1,
},
'key_pairs': {
'minimum': 0,
'maximum': -1,
},
'server_groups': {
'minimum': 0,
'maximum': -1,
},
'server_group_members': {
'minimum': 0,
'maximum': -1,
},
})
def test_get_settable_quotas_by_user_with_unlimited_value(self):
self._stub_get_settable_quotas()
result = self.driver.get_settable_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', user_id='test_user')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'get_project_quotas',
'quota_get_all_by_project_and_user',
'_process_quotas',
])
self.assertEqual(result, {
'instances': {
'minimum': 1,
'maximum': 7,
},
'cores': {
'minimum': 15,
'maximum': -1,
},
'ram': {
'minimum': 0,
'maximum': 50 * 1024,
},
'floating_ips': {
'minimum': 0,
'maximum': 20,
},
'fixed_ips': {
'minimum': 0,
'maximum': 10,
},
'metadata_items': {
'minimum': 0,
'maximum': 128,
},
'injected_files': {
'minimum': 0,
'maximum': 5,
},
'injected_file_content_bytes': {
'minimum': 0,
'maximum': 10 * 1024,
},
'injected_file_path_bytes': {
'minimum': 0,
'maximum': 255,
},
'security_groups': {
'minimum': 0,
'maximum': 10,
},
'security_group_rules': {
'minimum': 0,
'maximum': 20,
},
'key_pairs': {
'minimum': 0,
'maximum': 100,
},
'server_groups': {
'minimum': 0,
'maximum': 10,
},
'server_group_members': {
'minimum': 0,
'maximum': 10,
},
})
def _stub_get_project_quotas(self):
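        """Stub driver.get_project_quotas to record the call and return the
        default limit for every resource."""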
def fake_get_project_quotas(context, resources, project_id,
quota_class=None, defaults=True,
usages=True, remains=False,
project_quotas=None):
self.calls.append('get_project_quotas')
return {k: dict(limit=v.default) for k, v in resources.items()}
self.stubs.Set(self.driver, 'get_project_quotas',
fake_get_project_quotas)
def test_get_quotas_has_sync_unknown(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['unknown'], True)
self.assertEqual(self.calls, [])
def test_get_quotas_no_sync_unknown(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['unknown'], False)
self.assertEqual(self.calls, [])
def test_get_quotas_has_sync_no_sync_resource(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['metadata_items'], True)
self.assertEqual(self.calls, [])
def test_get_quotas_no_sync_has_sync_resource(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['instances'], False)
self.assertEqual(self.calls, [])
def test_get_quotas_has_sync(self):
self._stub_get_project_quotas()
result = self.driver._get_quotas(FakeContext('test_project',
'test_class'),
quota.QUOTAS._resources,
['instances', 'cores', 'ram',
'floating_ips', 'security_groups',
'server_groups'],
True,
project_id='test_project')
self.assertEqual(self.calls, ['get_project_quotas'])
self.assertEqual(result, dict(
instances=10,
cores=20,
ram=50 * 1024,
floating_ips=10,
security_groups=10,
server_groups=10,
))
def test_get_quotas_no_sync(self):
self._stub_get_project_quotas()
result = self.driver._get_quotas(FakeContext('test_project',
'test_class'),
quota.QUOTAS._resources,
['metadata_items', 'injected_files',
'injected_file_content_bytes',
'injected_file_path_bytes',
'security_group_rules',
'server_group_members'], False,
project_id='test_project')
self.assertEqual(self.calls, ['get_project_quotas'])
self.assertEqual(result, dict(
metadata_items=128,
injected_files=5,
injected_file_content_bytes=10 * 1024,
injected_file_path_bytes=255,
security_group_rules=20,
server_group_members=10,
))
def test_limit_check_under(self):
self._stub_get_project_quotas()
self.assertRaises(exception.InvalidQuotaValue,
self.driver.limit_check,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=-1))
def test_limit_check_over(self):
self._stub_get_project_quotas()
self.assertRaises(exception.OverQuota,
self.driver.limit_check,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=129))
def test_limit_check_project_overs(self):
self._stub_get_project_quotas()
self.assertRaises(exception.OverQuota,
self.driver.limit_check,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(injected_file_content_bytes=10241,
injected_file_path_bytes=256))
def test_limit_check_unlimited(self):
self.flags(quota_metadata_items=-1)
self._stub_get_project_quotas()
self.driver.limit_check(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=32767))
def test_limit_check(self):
self._stub_get_project_quotas()
self.driver.limit_check(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=128))
def _stub_quota_reserve(self):
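        """Stub db.quota_reserve to record its refresh arguments and return
        a fixed set of reservation IDs."""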
def fake_quota_reserve(context, resources, quotas, user_quotas, deltas,
expire, until_refresh, max_age, project_id=None,
user_id=None):
self.calls.append(('quota_reserve', expire, until_refresh,
max_age))
return ['resv-1', 'resv-2', 'resv-3']
self.stubs.Set(db, 'quota_reserve', fake_quota_reserve)
def test_reserve_bad_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.assertRaises(exception.InvalidReservationExpiration,
self.driver.reserve,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire='invalid')
self.assertEqual(self.calls, [])
def test_reserve_default_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2))
expire = timeutils.utcnow() + datetime.timedelta(seconds=86400)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_int_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=3600)
expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_timedelta_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
expire_delta = datetime.timedelta(seconds=60)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire_delta)
expire = timeutils.utcnow() + expire_delta
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_datetime_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_until_refresh(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(until_refresh=500)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 500, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_max_age(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(max_age=86400)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 86400),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_usage_reset(self):
calls = []
def fake_quota_usage_update(context, project_id, user_id, resource,
**kwargs):
calls.append(('quota_usage_update', context, project_id, user_id,
resource, kwargs))
if resource == 'nonexist':
raise exception.QuotaUsageNotFound(project_id=project_id)
self.stubs.Set(db, 'quota_usage_update', fake_quota_usage_update)
ctx = FakeContext('test_project', 'test_class')
resources = ['res1', 'res2', 'nonexist', 'res4']
self.driver.usage_reset(ctx, resources)
# Make sure we had some calls
self.assertEqual(len(calls), len(resources))
# Extract the elevated context that was used and do some
# sanity checks
elevated = calls[0][1]
self.assertEqual(elevated.project_id, ctx.project_id)
self.assertEqual(elevated.quota_class, ctx.quota_class)
self.assertEqual(elevated.is_admin, True)
# Now check that all the expected calls were made
exemplar = [('quota_usage_update', elevated, 'test_project',
'fake_user', res, dict(in_use=-1)) for res in resources]
self.assertEqual(calls, exemplar)
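# Minimal stand-ins for a SQLAlchemy session and QuotaUsage record, used by
# QuotaReserveSqlAlchemyTestCase below so quota_reserve can run without a
# real database.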
class FakeSession(object):
def begin(self):
return self
def add(self, instance):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
return False
class FakeUsage(sqa_models.QuotaUsage):
def save(self, *args, **kwargs):
pass
class QuotaReserveSqlAlchemyTestCase(test.TestCase):
# nova.db.sqlalchemy.api.quota_reserve is so complex it needs its
# own test case, and since it's a quota manipulator, this is the
# best place to put it...
def setUp(self):
super(QuotaReserveSqlAlchemyTestCase, self).setUp()
self.sync_called = set()
self.quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
fixed_ips=5,
)
self.deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
fixed_ips=2,
)
def make_sync(res_name):
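            # Fake per-resource sync function: record that it ran and report
            # a refreshed in_use of one less than the stored usage (2 if the
            # stored usage was negative, 0 if there is no usage record).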
def sync(context, project_id, user_id, session):
self.sync_called.add(res_name)
if res_name in self.usages:
if self.usages[res_name].in_use < 0:
return {res_name: 2}
else:
return {res_name: self.usages[res_name].in_use - 1}
return {res_name: 0}
return sync
self.resources = {}
_existing_quota_sync_func_dict = dict(sqa_api.QUOTA_SYNC_FUNCTIONS)
def restore_sync_functions():
sqa_api.QUOTA_SYNC_FUNCTIONS.clear()
sqa_api.QUOTA_SYNC_FUNCTIONS.update(_existing_quota_sync_func_dict)
self.addCleanup(restore_sync_functions)
for res_name in ('instances', 'cores', 'ram', 'fixed_ips'):
method_name = '_sync_%s' % res_name
sqa_api.QUOTA_SYNC_FUNCTIONS[method_name] = make_sync(res_name)
res = quota.ReservableResource(res_name, '_sync_%s' % res_name)
self.resources[res_name] = res
self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
self.usages = {}
self.usages_created = {}
self.reservations_created = {}
self.usages_list = [
dict(resource='instances',
project_id='test_project',
user_id='fake_user',
in_use=2,
reserved=2,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
user_id='fake_user',
in_use=2,
reserved=4,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
user_id='fake_user',
in_use=2,
reserved=2 * 1024,
until_refresh=None),
dict(resource='fixed_ips',
project_id='test_project',
user_id=None,
in_use=2,
reserved=2,
until_refresh=None),
]
def fake_get_session():
return FakeSession()
def fake_get_project_user_quota_usages(context, session, project_id,
user_id):
return self.usages.copy(), self.usages.copy()
def fake_quota_usage_create(project_id, user_id, resource,
in_use, reserved, until_refresh,
session=None, save=True):
quota_usage_ref = self._make_quota_usage(
project_id, user_id, resource, in_use, reserved, until_refresh,
timeutils.utcnow(), timeutils.utcnow())
self.usages_created[resource] = quota_usage_ref
return quota_usage_ref
def fake_reservation_create(uuid, usage_id, project_id,
user_id, resource, delta, expire,
session=None):
reservation_ref = self._make_reservation(
uuid, usage_id, project_id, user_id, resource, delta, expire,
timeutils.utcnow(), timeutils.utcnow())
self.reservations_created[resource] = reservation_ref
return reservation_ref
self.stubs.Set(sqa_api, 'get_session', fake_get_session)
self.stubs.Set(sqa_api, '_get_project_user_quota_usages',
fake_get_project_user_quota_usages)
self.stubs.Set(sqa_api, '_quota_usage_create', fake_quota_usage_create)
self.stubs.Set(sqa_api, '_reservation_create', fake_reservation_create)
self.useFixture(test.TimeOverride())
def _make_quota_usage(self, project_id, user_id, resource, in_use,
reserved, until_refresh, created_at, updated_at):
quota_usage_ref = FakeUsage()
quota_usage_ref.id = len(self.usages) + len(self.usages_created)
quota_usage_ref.project_id = project_id
quota_usage_ref.user_id = user_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
quota_usage_ref.created_at = created_at
quota_usage_ref.updated_at = updated_at
quota_usage_ref.deleted_at = None
quota_usage_ref.deleted = False
return quota_usage_ref
def init_usage(self, project_id, user_id, resource, in_use, reserved=0,
until_refresh=None, created_at=None, updated_at=None):
if created_at is None:
created_at = timeutils.utcnow()
if updated_at is None:
updated_at = timeutils.utcnow()
if resource == 'fixed_ips':
user_id = None
quota_usage_ref = self._make_quota_usage(project_id, user_id, resource,
in_use, reserved,
until_refresh,
created_at, updated_at)
self.usages[resource] = quota_usage_ref
def compare_usage(self, usage_dict, expected):
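        """Check each expected usage dict against the attributes of the
        corresponding stored usage object."""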
for usage in expected:
resource = usage['resource']
for key, value in usage.items():
actual = getattr(usage_dict[resource], key)
self.assertEqual(actual, value,
"%s != %s on usage for resource %s" %
(actual, value, resource))
def _make_reservation(self, uuid, usage_id, project_id, user_id, resource,
delta, expire, created_at, updated_at):
reservation_ref = sqa_models.Reservation()
reservation_ref.id = len(self.reservations_created)
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage_id
reservation_ref.project_id = project_id
reservation_ref.user_id = user_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.created_at = created_at
reservation_ref.updated_at = updated_at
reservation_ref.deleted_at = None
reservation_ref.deleted = False
return reservation_ref
def compare_reservation(self, reservations, expected):
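        """Check that every expected reservation was created with the right
        attributes and that no extra reservation UUIDs were returned."""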
reservations = set(reservations)
for resv in expected:
resource = resv['resource']
resv_obj = self.reservations_created[resource]
self.assertIn(resv_obj.uuid, reservations)
reservations.discard(resv_obj.uuid)
for key, value in resv.items():
actual = getattr(resv_obj, key)
self.assertEqual(actual, value,
"%s != %s on reservation for resource %s" %
(actual, value, resource))
self.assertEqual(len(reservations), 0)
def _update_reservations_list(self, usage_id_change=False,
delta_change=False):
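        """Build the expected reservation dicts, optionally pointing usage_id
        at the freshly created usages and/or negating the deltas."""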
reservations_list = [
dict(resource='instances',
project_id='test_project',
delta=2),
dict(resource='cores',
project_id='test_project',
delta=4),
dict(resource='ram',
delta=2 * 1024),
dict(resource='fixed_ips',
project_id='test_project',
delta=2),
]
if usage_id_change:
reservations_list[0]["usage_id"] = self.usages_created['instances']
reservations_list[1]["usage_id"] = self.usages_created['cores']
reservations_list[2]["usage_id"] = self.usages_created['ram']
reservations_list[3]["usage_id"] = self.usages_created['fixed_ips']
else:
reservations_list[0]["usage_id"] = self.usages['instances']
reservations_list[1]["usage_id"] = self.usages['cores']
reservations_list[2]["usage_id"] = self.usages['ram']
reservations_list[3]["usage_id"] = self.usages['fixed_ips']
if delta_change:
reservations_list[0]["delta"] = -2
reservations_list[1]["delta"] = -4
reservations_list[2]["delta"] = -2 * 1024
reservations_list[3]["delta"] = -2
return reservations_list
def _init_usages(self, *in_use, **kwargs):
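        """Seed one usage record per resource and return a FakeContext for
        'test_project'."""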
for i, option in enumerate(('instances', 'cores', 'ram', 'fixed_ips')):
self.init_usage('test_project', 'fake_user',
option, in_use[i], **kwargs)
return FakeContext('test_project', 'test_class')
def test_quota_reserve_create_usages(self):
context = FakeContext('test_project', 'test_class')
result = sqa_api.quota_reserve(context, self.resources, self.quotas,
self.quotas, self.deltas, self.expire,
0, 0)
self.assertEqual(self.sync_called, set(['instances', 'cores',
'ram', 'fixed_ips']))
self.usages_list[0]["in_use"] = 0
self.usages_list[1]["in_use"] = 0
self.usages_list[2]["in_use"] = 0
self.usages_list[3]["in_use"] = 0
self.compare_usage(self.usages_created, self.usages_list)
reservations_list = self._update_reservations_list(True)
self.compare_reservation(result, reservations_list)
def test_quota_reserve_negative_in_use(self):
context = self._init_usages(-1, -1, -1, -1, until_refresh=1)
result = sqa_api.quota_reserve(context, self.resources, self.quotas,
self.quotas, self.deltas, self.expire,
5, 0)
self.assertEqual(self.sync_called, set(['instances', 'cores',
'ram', 'fixed_ips']))
self.usages_list[0]["until_refresh"] = 5
self.usages_list[1]["until_refresh"] = 5
self.usages_list[2]["until_refresh"] = 5
self.usages_list[3]["until_refresh"] = 5
self.compare_usage(self.usages, self.usages_list)
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, self._update_reservations_list())
def test_quota_reserve_until_refresh(self):
context = self._init_usages(3, 3, 3, 3, until_refresh=1)
result = sqa_api.quota_reserve(context, self.resources, self.quotas,
self.quotas, self.deltas, self.expire,
5, 0)
self.assertEqual(self.sync_called, set(['instances', 'cores',
'ram', 'fixed_ips']))
self.usages_list[0]["until_refresh"] = 5
self.usages_list[1]["until_refresh"] = 5
self.usages_list[2]["until_refresh"] = 5
self.usages_list[3]["until_refresh"] = 5
self.compare_usage(self.usages, self.usages_list)
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, self._update_reservations_list())
def test_quota_reserve_max_age(self):
max_age = 3600
record_created = (timeutils.utcnow() -
datetime.timedelta(seconds=max_age))
context = self._init_usages(3, 3, 3, 3, created_at=record_created,
updated_at=record_created)
result = sqa_api.quota_reserve(context, self.resources, self.quotas,
self.quotas, self.deltas, self.expire,
0, max_age)
self.assertEqual(self.sync_called, set(['instances', 'cores',
'ram', 'fixed_ips']))
self.compare_usage(self.usages, self.usages_list)
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, self._update_reservations_list())
def test_quota_reserve_no_refresh(self):
context = self._init_usages(3, 3, 3, 3)
result = sqa_api.quota_reserve(context, self.resources, self.quotas,
self.quotas, self.deltas, self.expire,
0, 0)
self.assertEqual(self.sync_called, set([]))
self.usages_list[0]["in_use"] = 3
self.usages_list[1]["in_use"] = 3
self.usages_list[2]["in_use"] = 3
self.usages_list[3]["in_use"] = 3
self.compare_usage(self.usages, self.usages_list)
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, self._update_reservations_list())
def test_quota_reserve_unders(self):
context = self._init_usages(1, 3, 1 * 1024, 1)
self.deltas["instances"] = -2
self.deltas["cores"] = -4
self.deltas["ram"] = -2 * 1024
self.deltas["fixed_ips"] = -2
result = sqa_api.quota_reserve(context, self.resources, self.quotas,
self.quotas, self.deltas, self.expire,
0, 0)
self.assertEqual(self.sync_called, set([]))
self.usages_list[0]["in_use"] = 1
self.usages_list[0]["reserved"] = 0
self.usages_list[1]["in_use"] = 3
self.usages_list[1]["reserved"] = 0
self.usages_list[2]["in_use"] = 1 * 1024
self.usages_list[2]["reserved"] = 0
self.usages_list[3]["in_use"] = 1
self.usages_list[3]["reserved"] = 0
self.compare_usage(self.usages, self.usages_list)
self.assertEqual(self.usages_created, {})
reservations_list = self._update_reservations_list(False, True)
self.compare_reservation(result, reservations_list)
def test_quota_reserve_overs(self):
context = self._init_usages(4, 8, 10 * 1024, 4)
try:
sqa_api.quota_reserve(context, self.resources, self.quotas,
self.quotas, self.deltas, self.expire, 0, 0)
except exception.OverQuota as e:
expected_kwargs = {'code': 500,
'usages': {'instances': {'reserved': 0, 'in_use': 4},
'ram': {'reserved': 0, 'in_use': 10240},
'fixed_ips': {'reserved': 0, 'in_use': 4},
'cores': {'reserved': 0, 'in_use': 8}},
'overs': ['cores', 'fixed_ips', 'instances', 'ram'],
'quotas': {'cores': 10, 'ram': 10240,
'fixed_ips': 5, 'instances': 5}}
self.assertEqual(e.kwargs, expected_kwargs)
else:
self.fail('Expected OverQuota failure')
self.assertEqual(self.sync_called, set([]))
self.usages_list[0]["in_use"] = 4
self.usages_list[0]["reserved"] = 0
self.usages_list[1]["in_use"] = 8
self.usages_list[1]["reserved"] = 0
self.usages_list[2]["in_use"] = 10 * 1024
self.usages_list[2]["reserved"] = 0
self.usages_list[3]["in_use"] = 4
self.usages_list[3]["reserved"] = 0
self.compare_usage(self.usages, self.usages_list)
self.assertEqual(self.usages_created, {})
self.assertEqual(self.reservations_created, {})
def test_quota_reserve_cores_unlimited(self):
# Requesting 8 cores, quota_cores set to unlimited:
self.flags(quota_cores=-1)
self._init_usages(1, 8, 1 * 1024, 1)
self.assertEqual(self.sync_called, set([]))
self.usages_list[0]["in_use"] = 1
self.usages_list[0]["reserved"] = 0
self.usages_list[1]["in_use"] = 8
self.usages_list[1]["reserved"] = 0
self.usages_list[2]["in_use"] = 1 * 1024
self.usages_list[2]["reserved"] = 0
self.usages_list[3]["in_use"] = 1
self.usages_list[3]["reserved"] = 0
self.compare_usage(self.usages, self.usages_list)
self.assertEqual(self.usages_created, {})
self.assertEqual(self.reservations_created, {})
def test_quota_reserve_ram_unlimited(self):
# Requesting 10*1024 ram, quota_ram set to unlimited:
self.flags(quota_ram=-1)
self._init_usages(1, 1, 10 * 1024, 1)
self.assertEqual(self.sync_called, set([]))
self.usages_list[0]["in_use"] = 1
self.usages_list[0]["reserved"] = 0
self.usages_list[1]["in_use"] = 1
self.usages_list[1]["reserved"] = 0
self.usages_list[2]["in_use"] = 10 * 1024
self.usages_list[2]["reserved"] = 0
self.usages_list[3]["in_use"] = 1
self.usages_list[3]["reserved"] = 0
self.compare_usage(self.usages, self.usages_list)
self.assertEqual(self.usages_created, {})
self.assertEqual(self.reservations_created, {})
def test_quota_reserve_reduction(self):
context = self._init_usages(10, 20, 20 * 1024, 10)
self.deltas["instances"] = -2
self.deltas["cores"] = -4
self.deltas["ram"] = -2 * 1024
self.deltas["fixed_ips"] = -2
result = sqa_api.quota_reserve(context, self.resources, self.quotas,
self.quotas, self.deltas, self.expire,
0, 0)
self.assertEqual(self.sync_called, set([]))
self.usages_list[0]["in_use"] = 10
self.usages_list[0]["reserved"] = 0
self.usages_list[1]["in_use"] = 20
self.usages_list[1]["reserved"] = 0
self.usages_list[2]["in_use"] = 20 * 1024
self.usages_list[2]["reserved"] = 0
self.usages_list[3]["in_use"] = 10
self.usages_list[3]["reserved"] = 0
self.compare_usage(self.usages, self.usages_list)
self.assertEqual(self.usages_created, {})
reservations_list = self._update_reservations_list(False, True)
self.compare_reservation(result, reservations_list)
class NoopQuotaDriverTestCase(test.TestCase):
def setUp(self):
super(NoopQuotaDriverTestCase, self).setUp()
self.flags(quota_instances=10,
quota_cores=20,
quota_ram=50 * 1024,
quota_floating_ips=10,
quota_metadata_items=128,
quota_injected_files=5,
quota_injected_file_content_bytes=10 * 1024,
quota_injected_file_path_length=255,
quota_security_groups=10,
quota_security_group_rules=20,
reservation_expire=86400,
until_refresh=0,
max_age=0,
)
self.expected_with_usages = {}
self.expected_without_usages = {}
self.expected_without_dict = {}
self.expected_settable_quotas = {}
for r in quota.QUOTAS._resources:
self.expected_with_usages[r] = dict(limit=-1,
in_use=-1,
reserved=-1)
self.expected_without_usages[r] = dict(limit=-1)
self.expected_without_dict[r] = -1
self.expected_settable_quotas[r] = dict(minimum=0, maximum=-1)
self.driver = quota.NoopQuotaDriver()
def test_get_defaults(self):
# Use our pre-defined resources
result = self.driver.get_defaults(None, quota.QUOTAS._resources)
self.assertEqual(self.expected_without_dict, result)
def test_get_class_quotas(self):
result = self.driver.get_class_quotas(None,
quota.QUOTAS._resources,
'test_class')
self.assertEqual(self.expected_without_dict, result)
def test_get_class_quotas_no_defaults(self):
result = self.driver.get_class_quotas(None,
quota.QUOTAS._resources,
'test_class',
False)
self.assertEqual(self.expected_without_dict, result)
def test_get_project_quotas(self):
result = self.driver.get_project_quotas(None,
quota.QUOTAS._resources,
'test_project')
self.assertEqual(self.expected_with_usages, result)
def test_get_user_quotas(self):
result = self.driver.get_user_quotas(None,
quota.QUOTAS._resources,
'test_project',
'fake_user')
self.assertEqual(self.expected_with_usages, result)
def test_get_project_quotas_no_defaults(self):
result = self.driver.get_project_quotas(None,
quota.QUOTAS._resources,
'test_project',
defaults=False)
self.assertEqual(self.expected_with_usages, result)
def test_get_user_quotas_no_defaults(self):
result = self.driver.get_user_quotas(None,
quota.QUOTAS._resources,
'test_project',
'fake_user',
defaults=False)
self.assertEqual(self.expected_with_usages, result)
def test_get_project_quotas_no_usages(self):
result = self.driver.get_project_quotas(None,
quota.QUOTAS._resources,
'test_project',
usages=False)
self.assertEqual(self.expected_without_usages, result)
def test_get_user_quotas_no_usages(self):
result = self.driver.get_user_quotas(None,
quota.QUOTAS._resources,
'test_project',
'fake_user',
usages=False)
self.assertEqual(self.expected_without_usages, result)
def test_get_settable_quotas_with_user(self):
result = self.driver.get_settable_quotas(None,
quota.QUOTAS._resources,
'test_project',
'fake_user')
self.assertEqual(self.expected_settable_quotas, result)
def test_get_settable_quotas_without_user(self):
result = self.driver.get_settable_quotas(None,
quota.QUOTAS._resources,
'test_project')
self.assertEqual(self.expected_settable_quotas, result)
|
skosukhin/spack
|
refs/heads/esiwace
|
var/spack/repos/builtin/packages/xdm/package.py
|
1
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Xdm(AutotoolsPackage):
"""X Display Manager / XDMCP server."""
homepage = "http://cgit.freedesktop.org/xorg/app/xdm"
url = "https://www.x.org/archive/individual/app/xdm-1.1.11.tar.gz"
version('1.1.11', 'aaf8c3d05d4a1e689d2d789c99a6023c')
depends_on('libxmu')
depends_on('libx11')
depends_on('libxau')
depends_on('libxinerama')
depends_on('libxft')
depends_on('libxpm')
depends_on('libxaw')
depends_on('libxdmcp')
depends_on('libxt')
depends_on('libxext')
depends_on('pkg-config@0.9.0:', type='build')
depends_on('util-macros', type='build')
|
nikesh-mahalka/cinder
|
refs/heads/master
|
tools/lintstack.py
|
3
|
#!/usr/bin/env python
# Copyright (c) 2013, AT&T Labs, Yun Mao <yunmao@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""pylint error checking."""
from __future__ import print_function
import json
import re
import sys
from pylint import lint
from pylint.reporters import text
from six.moves import cStringIO as StringIO
# Note(maoy): E1103 is error code related to partial type inference
ignore_codes = ["E1103"]
# Note(maoy): the error message is the pattern of E0202. It should be ignored
# for cinder.tests modules
# Note(fengqian): the second error message is the pattern of [E0611].
# It should be ignored because six is used to keep py3.X compatibility.
# Note(e0ne): the third error message is for SQLAlchemy update() calls
# in DB schema migrations.
# Note(xyang): the fourth and fifth error messages are for the code [E1101].
# They should be ignored because 'sha256' and 'sha224' are functions in
# 'hashlib'.
# Note(aarefiev): the sixth error message is for SQLAlchemy rename calls in
# DB migration(033_add_encryption_unique_key).
ignore_messages = ["An attribute affected in cinder.tests",
"No name 'urllib' in module '_MovedItems'",
"No value passed for parameter 'dml'",
"Module 'hashlib' has no 'sha256' member",
"Module 'hashlib' has no 'sha224' member",
"Instance of 'Table' has no 'rename' member"]
# Note(maoy): we ignore all errors in openstack.common because it should be
# checked elsewhere. We also ignore cinder.tests for now due to high false
# positive rate.
ignore_modules = ["cinder/openstack/common/", "cinder/tests/"]
# Note(thangp): E0213, E1101, and E1102 should be ignored for only
# cinder.object modules. E0213 and E1102 are error codes related to
# the first argument of a method, but should be ignored because the method
# is a remotable class method. E1101 is error code related to accessing a
# non-existent member of an object, but should be ignored because the object
# member is created dynamically.
objects_ignore_codes = ["E0213", "E1101", "E1102"]
# Note(thangp): The error messages are for codes [E1120, E1101] appearing in
# the cinder code base using objects. E1120 is an error code related no value
# passed for a parameter in function call, but should be ignored because it is
# reporting false positives. E1101 is error code related to accessing a
# non-existent member of an object, but should be ignored because the object
# member is created dynamically.
objects_ignore_messages = [
"No value passed for parameter 'id' in function call",
"Module 'cinder.objects' has no 'Snapshot' member",
"Module 'cinder.objects' has no 'SnapshotList' member",
"Module 'cinder.objects' has no 'Backup' member",
"Module 'cinder.objects' has no 'BackupList' member",
"Module 'cinder.objects' has no 'Service' member",
"Module 'cinder.objects' has no 'ServiceList' member",
"Module 'cinder.objects' has no 'BackupImport' member",
]
objects_ignore_modules = ["cinder/objects/"]
KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions"
class LintOutput(object):
_cached_filename = None
_cached_content = None
def __init__(self, filename, lineno, line_content, code, message,
lintoutput):
self.filename = filename
self.lineno = lineno
self.line_content = line_content
self.code = code
self.message = message
self.lintoutput = lintoutput
@classmethod
def from_line(cls, line):
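        """Parse one line of pylint output into a LintOutput, caching the
        offending file's contents so the source line can be attached."""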
m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] (.*)", line)
matched = m.groups()
filename, lineno, code, message = (matched[0], int(matched[1]),
matched[2], matched[-1])
if cls._cached_filename != filename:
with open(filename) as f:
cls._cached_content = list(f.readlines())
cls._cached_filename = filename
line_content = cls._cached_content[lineno - 1].rstrip()
return cls(filename, lineno, line_content, code, message,
line.rstrip())
@classmethod
def from_msg_to_dict(cls, msg):
"""From the output of pylint msg, to a dict, where each key
is a unique error identifier, value is a list of LintOutput
"""
result = {}
for line in msg.splitlines():
obj = cls.from_line(line)
if obj.is_ignored():
continue
key = obj.key()
if key not in result:
result[key] = []
result[key].append(obj)
return result
def is_ignored(self):
if self.code in ignore_codes:
return True
if any(self.filename.startswith(name) for name in ignore_modules):
return True
if any(msg in self.message for msg in
(ignore_messages + objects_ignore_messages)):
return True
if (self.code in objects_ignore_codes and
any(self.filename.startswith(name)
for name in objects_ignore_modules)):
return True
return False
def key(self):
if self.code in ["E1101", "E1103"]:
# These two types of errors are like Foo class has no member bar.
# We discard the source code so that the error will be ignored
# next time another Foo.bar is encountered.
return self.message, ""
return self.message, self.line_content.strip()
def json(self):
return json.dumps(self.__dict__)
def review_str(self):
return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n"
"%(code)s: %(message)s" % self.__dict__)
class ErrorKeys(object):
@classmethod
def print_json(cls, errors, output=sys.stdout):
print("# automatically generated by tools/lintstack.py", file=output)
for i in sorted(errors.keys()):
print(json.dumps(i), file=output)
@classmethod
def from_file(cls, filename):
keys = set()
for line in open(filename):
if line and line[0] != "#":
d = json.loads(line)
keys.add(tuple(d))
return keys
def run_pylint():
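    """Run pylint in errors-only mode over the cinder package and return
    the raw parseable report text."""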
buff = StringIO()
reporter = text.ParseableTextReporter(output=buff)
args = ["--include-ids=y", "-E", "cinder"]
lint.Run(args, reporter=reporter, exit=False)
val = buff.getvalue()
buff.close()
return val
def generate_error_keys(msg=None):
print("Generating", KNOWN_PYLINT_EXCEPTIONS_FILE)
if msg is None:
msg = run_pylint()
errors = LintOutput.from_msg_to_dict(msg)
with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f:
ErrorKeys.print_json(errors, output=f)
def validate(newmsg=None):
print("Loading", KNOWN_PYLINT_EXCEPTIONS_FILE)
known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE)
if newmsg is None:
print("Running pylint. Be patient...")
newmsg = run_pylint()
errors = LintOutput.from_msg_to_dict(newmsg)
print("Unique errors reported by pylint: was %d, now %d."
% (len(known), len(errors)))
passed = True
for err_key, err_list in errors.items():
for err in err_list:
if err_key not in known:
print(err.lintoutput)
print()
passed = False
if passed:
print("Congrats! pylint check passed.")
redundant = known - set(errors.keys())
if redundant:
print("Extra credit: some known pylint exceptions disappeared.")
for i in sorted(redundant):
print(json.dumps(i))
print("Consider regenerating the exception file if you will.")
else:
print("Please fix the errors above. If you believe they are false "
"positives, run 'tools/lintstack.py generate' to overwrite.")
sys.exit(1)
def usage():
print("""Usage: tools/lintstack.py [generate|validate]
To generate pylint_exceptions file: tools/lintstack.py generate
To validate the current commit: tools/lintstack.py
""")
def main():
option = "validate"
if len(sys.argv) > 1:
option = sys.argv[1]
if option == "generate":
generate_error_keys()
elif option == "validate":
validate()
else:
usage()
if __name__ == "__main__":
main()
|
ortutay/23andme-phenotypes-hackathon
|
refs/heads/master
|
my_app/my_app/utils.py
|
1
|
import os
import json
import requests
from my_app.my_app.models import Phenotype
from joblib import Memory
CACHE_DIR = '/tmp/ttam-hack-cache'
os.makedirs(CACHE_DIR, exist_ok=True)
memory = Memory(cachedir=CACHE_DIR)
TTAM_API_SERVER = 'https://api.23andme.com'
def to_url(path):
return '%s%s' % (TTAM_API_SERVER, path)
def headers(ttam_token):
return {'Authorization': 'Bearer %s' % ttam_token}
@memory.cache
def get_variant_from_marker(marker_id):
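    """Look up a marker on the 23andMe API and return its (accession_id,
    start) coordinates; results are memoized on disk."""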
url = to_url('/3/marker/%s/' % marker_id)
resp = requests.get(url, verify=False)
d = resp.json()
return d['accession_id'], d['start']
@memory.cache
def get_profile_id(ttam_token):
url = to_url('/3/account/')
resp = requests.get(url, verify=False, headers=headers(ttam_token))
print(resp)
print(resp.json())
return resp.json()['data'][0]['profiles'][0]['id']
def get_allele(ttam_token, accession_id, start, end):
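    """Fetch the profile's variants in the given range and return the
    genotype as a sorted allele string (each allele repeated by dosage)."""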
profile_id = get_profile_id(ttam_token)
url = to_url('/3/profile/%s/variant/?accession_id=%s&start=%s&end=%s' % (profile_id, accession_id, start, end))
print(url)
resp = requests.get(url, verify=False, headers=headers(ttam_token))
print('ALLELES')
print(resp.json())
d = resp.json()
alleles = []
for v in d['data']:
for i in range(int(v['dosage'])):
alleles.append(v['allele'])
alleles.sort()
return ''.join(alleles)
def get_phenotypes(ttam_token, phenotype_ids):
profile_id = get_profile_id(ttam_token)
# import pdb; pdb.set_trace()
phenotypes = Phenotype.objects.filter(
profile_id=profile_id,
phenotype_id__in=phenotype_ids)
print('phenotypes', phenotypes)
return {p.phenotype_id: p.value for p in phenotypes}
def set_phenotype(user, phenotype_id, value):
profile_id = get_profile_id(user.profile.ttam_token)
if Phenotype.objects.filter(profile_id=profile_id, phenotype_id=phenotype_id).exists():
return
Phenotype(
user=user,
profile_id=profile_id,
phenotype_id=phenotype_id,
value=value).save()
def get_phenotypes_old(ttam_token, phenotype_ids):
print('ttam_token', ttam_token)
profile_id = get_profile_id(ttam_token)
url = to_url('/3/profile/%s/phenotype/?id=%s' % (profile_id, ','.join(phenotype_ids)))
resp = requests.get(url, verify=False, headers=headers(ttam_token))
print('got resp')
print(resp.json())
return {x['id']: x['value'] for x in resp.json()['data']}
def set_phenotype_old(ttam_token, phenotype_id, value):
profile_id = get_profile_id(ttam_token)
data = {}
data[phenotype_id] = value
url = to_url('/3/profile/%s/phenotype/' % profile_id)
requests.post(url, verify=False, data=data, headers=headers(ttam_token))
if __name__ == '__main__':
print(get_variant_from_marker('rs1558902'))
# token = '4c210fc3755b298a2d84bbacf2b0d4ab'
# print(get_variant(token, 'NC_000016.9', 53803574, 53803575))
# set_phenotype(token, 'fitbit_num_steps', 90)
# print(get_phenotypes(token, ['fitbit_num_steps']))
|
macat/Piratetitler
|
refs/heads/master
|
app/apps/pages/handlers.py
|
1
|
# -*- coding: utf-8 -*-
from tipfy import RequestHandler
from tipfy.ext.jinja2 import render_response
class HomeHandler(RequestHandler):
""" Home page """
def get(self):
return render_response('pages/home.html')
|
mith1979/ansible_automation
|
refs/heads/master
|
applied_python/applied_python/lib/python2.7/site-packages/setuptools/tests/test_upload_docs.py
|
151
|
import os
import zipfile
import contextlib
import pytest
from setuptools.command.upload_docs import upload_docs
from setuptools.dist import Distribution
from .textwrap import DALS
from . import contexts
SETUP_PY = DALS(
"""
from setuptools import setup
setup(name='foo')
""")
@pytest.fixture
def sample_project(tmpdir_cwd):
# setup.py
with open('setup.py', 'wt') as f:
f.write(SETUP_PY)
os.mkdir('build')
# A test document.
with open('build/index.html', 'w') as f:
f.write("Hello world.")
# An empty folder.
os.mkdir('build/empty')
@pytest.mark.usefixtures('sample_project')
@pytest.mark.usefixtures('user_override')
class TestUploadDocsTest:
def test_create_zipfile(self):
"""
Ensure zipfile creation handles common cases, including a folder
containing an empty folder.
"""
dist = Distribution()
cmd = upload_docs(dist)
cmd.target_dir = cmd.upload_dir = 'build'
with contexts.tempdir() as tmp_dir:
tmp_file = os.path.join(tmp_dir, 'foo.zip')
zip_file = cmd.create_zipfile(tmp_file)
assert zipfile.is_zipfile(tmp_file)
with contextlib.closing(zipfile.ZipFile(tmp_file)) as zip_file:
assert zip_file.namelist() == ['index.html']
|
Mafarricos/Mafarricos-modded-xbmc-addons
|
refs/heads/master
|
plugin.video.xbmctorrentV2/resources/site-packages/html5lib/treebuilders/_base.py
|
715
|
from __future__ import absolute_import, division, unicode_literals
from six import text_type
from ..constants import scopingElements, tableInsertModeElements, namespaces
# The scope markers are inserted when entering object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, object elements, and marquees.
Marker = None
listElementsMap = {
None: (frozenset(scopingElements), False),
"button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
"list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
(namespaces["html"], "ul")])), False),
"table": (frozenset([(namespaces["html"], "html"),
(namespaces["html"], "table")]), False),
"select": (frozenset([(namespaces["html"], "optgroup"),
(namespaces["html"], "option")]), True)
}
class Node(object):
def __init__(self, name):
"""Node representing an item in the tree.
name - The tag name associated with the node
parent - The parent of the current node (or None for the document node)
value - The value of the current node (applies to text nodes and
        comments)
attributes - a dict holding name, value pairs for attributes of the node
childNodes - a list of child nodes of the current node. This must
include all elements but not necessarily other node types
_flags - A list of miscellaneous flags that can be set on the node
"""
self.name = name
self.parent = None
self.value = None
self.attributes = {}
self.childNodes = []
self._flags = []
def __str__(self):
attributesStr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in
self.attributes.items()])
if attributesStr:
return "<%s %s>" % (self.name, attributesStr)
else:
return "<%s>" % (self.name)
def __repr__(self):
return "<%s>" % (self.name)
def appendChild(self, node):
"""Insert node as a child of the current node
"""
raise NotImplementedError
def insertText(self, data, insertBefore=None):
"""Insert data as text in the current node, positioned before the
start of node insertBefore or to the end of the node's text.
"""
raise NotImplementedError
def insertBefore(self, node, refNode):
"""Insert node as a child of the current node, before refNode in the
list of child nodes. Raises ValueError if refNode is not a child of
the current node"""
raise NotImplementedError
def removeChild(self, node):
"""Remove node from the children of the current node
"""
raise NotImplementedError
def reparentChildren(self, newParent):
"""Move all the children of the current node to newParent.
This is needed so that trees that don't store text as nodes move the
text in the correct way
"""
# XXX - should this method be made more general?
for child in self.childNodes:
newParent.appendChild(child)
self.childNodes = []
def cloneNode(self):
"""Return a shallow copy of the current node i.e. a node with the same
name and attributes but with no parent or child nodes
"""
raise NotImplementedError
def hasContent(self):
"""Return true if the node has children or text, false otherwise
"""
raise NotImplementedError
class ActiveFormattingElements(list):
def append(self, node):
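        # "Noah's Ark" clause: scan back to the last marker and, if three
        # equal entries are already present, drop the earliest of them
        # before appending the new node.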
equalCount = 0
if node != Marker:
for element in self[::-1]:
if element == Marker:
break
if self.nodesEqual(element, node):
equalCount += 1
if equalCount == 3:
self.remove(element)
break
list.append(self, node)
def nodesEqual(self, node1, node2):
if not node1.nameTuple == node2.nameTuple:
return False
if not node1.attributes == node2.attributes:
return False
return True
class TreeBuilder(object):
"""Base treebuilder implementation
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
"""
# Document class
documentClass = None
# The class to use for creating a node
elementClass = None
# The class to use for creating comments
commentClass = None
# The class to use for creating doctypes
doctypeClass = None
# Fragment class
fragmentClass = None
def __init__(self, namespaceHTMLElements):
if namespaceHTMLElements:
self.defaultNamespace = "http://www.w3.org/1999/xhtml"
else:
self.defaultNamespace = None
self.reset()
def reset(self):
self.openElements = []
self.activeFormattingElements = ActiveFormattingElements()
# XXX - rename these to headElement, formElement
self.headPointer = None
self.formPointer = None
self.insertFromTable = False
self.document = self.documentClass()
def elementInScope(self, target, variant=None):
        # If we pass a node in we match that. If we pass a string,
        # match any node with that name.
exactNode = hasattr(target, "nameTuple")
listElements, invert = listElementsMap[variant]
for node in reversed(self.openElements):
if (node.name == target and not exactNode or
node == target and exactNode):
return True
elif (invert ^ (node.nameTuple in listElements)):
return False
assert False # We should never reach this point
def reconstructActiveFormattingElements(self):
# Within this algorithm the order of steps described in the
# specification is not quite the same as the order of steps in the
# code. It should still do the same though.
# Step 1: stop the algorithm when there's nothing to do.
if not self.activeFormattingElements:
return
# Step 2 and step 3: we start with the last element. So i is -1.
i = len(self.activeFormattingElements) - 1
entry = self.activeFormattingElements[i]
if entry == Marker or entry in self.openElements:
return
# Step 6
while entry != Marker and entry not in self.openElements:
if i == 0:
# This will be reset to 0 below
i = -1
break
i -= 1
# Step 5: let entry be one earlier in the list.
entry = self.activeFormattingElements[i]
while True:
# Step 7
i += 1
# Step 8
entry = self.activeFormattingElements[i]
clone = entry.cloneNode() # Mainly to get a new copy of the attributes
# Step 9
element = self.insertElement({"type": "StartTag",
"name": clone.name,
"namespace": clone.namespace,
"data": clone.attributes})
# Step 10
self.activeFormattingElements[i] = element
# Step 11
if element == self.activeFormattingElements[-1]:
break
def clearActiveFormattingElements(self):
entry = self.activeFormattingElements.pop()
while self.activeFormattingElements and entry != Marker:
entry = self.activeFormattingElements.pop()
def elementInActiveFormattingElements(self, name):
"""Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false"""
for item in self.activeFormattingElements[::-1]:
# Check for Marker first because if it's a Marker it doesn't have a
# name attribute.
if item == Marker:
break
elif item.name == name:
return item
return False
def insertRoot(self, token):
element = self.createElement(token)
self.openElements.append(element)
self.document.appendChild(element)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = self.doctypeClass(name, publicId, systemId)
self.document.appendChild(doctype)
def insertComment(self, token, parent=None):
if parent is None:
parent = self.openElements[-1]
parent.appendChild(self.commentClass(token["data"]))
def createElement(self, token):
"""Create an element but don't insert it anywhere"""
name = token["name"]
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
return element
def _getInsertFromTable(self):
return self._insertFromTable
def _setInsertFromTable(self, value):
"""Switch the function used to insert an element from the
normal one to the misnested table one and back again"""
self._insertFromTable = value
if value:
self.insertElement = self.insertElementTable
else:
self.insertElement = self.insertElementNormal
insertFromTable = property(_getInsertFromTable, _setInsertFromTable)
def insertElementNormal(self, token):
name = token["name"]
assert isinstance(name, text_type), "Element %s not unicode" % name
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
self.openElements[-1].appendChild(element)
self.openElements.append(element)
return element
def insertElementTable(self, token):
"""Create an element and insert it into the tree"""
element = self.createElement(token)
if self.openElements[-1].name not in tableInsertModeElements:
return self.insertElementNormal(token)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
if insertBefore is None:
parent.appendChild(element)
else:
parent.insertBefore(element, insertBefore)
self.openElements.append(element)
return element
def insertText(self, data, parent=None):
"""Insert text data."""
if parent is None:
parent = self.openElements[-1]
if (not self.insertFromTable or (self.insertFromTable and
self.openElements[-1].name
not in tableInsertModeElements)):
parent.insertText(data)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
parent.insertText(data, insertBefore)
def getTableMisnestedNodePosition(self):
"""Get the foster parent element, and sibling to insert before
(or None) when inserting a misnested table node"""
# The foster parent element is the one which comes before the most
# recently opened table element
# XXX - this is really inelegant
lastTable = None
fosterParent = None
insertBefore = None
for elm in self.openElements[::-1]:
if elm.name == "table":
lastTable = elm
break
if lastTable:
# XXX - we should really check that this parent is actually a
# node here
if lastTable.parent:
fosterParent = lastTable.parent
insertBefore = lastTable
else:
fosterParent = self.openElements[
self.openElements.index(lastTable) - 1]
else:
fosterParent = self.openElements[0]
return fosterParent, insertBefore
def generateImpliedEndTags(self, exclude=None):
name = self.openElements[-1].name
# XXX td, th and tr are not actually needed
if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt"))
and name != exclude):
self.openElements.pop()
# XXX This is not entirely what the specification says. We should
# investigate it more closely.
self.generateImpliedEndTags(exclude)
def getDocument(self):
"Return the final tree"
return self.document
def getFragment(self):
"Return the final fragment"
# assert self.innerHTML
fragment = self.fragmentClass()
self.openElements[0].reparentChildren(fragment)
return fragment
def testSerializer(self, node):
"""Serialize the subtree of node in the format required by unit tests
node - the node from which to start serializing"""
raise NotImplementedError
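# Illustrative note (added; not part of the original source): a concrete tree
# builder is expected to subclass the base builder defined above and plug in
# its own node classes, roughly like this (all names below are hypothetical):
#
#     class ConcreteTreeBuilder(<base builder above>):
#         documentClass = ConcreteDocument
#         elementClass = ConcreteElement
#         commentClass = ConcreteComment
#         doctypeClass = ConcreteDoctype
#         fragmentClass = ConcreteFragment
#
# getDocument() and getFragment() then return instances of documentClass and
# fragmentClass respectively, as implemented above.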
|
savoirfairelinux/sous-chef
|
refs/heads/dev
|
src/order/migrations/0013_orderstatuschange.py
|
2
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-11-30 20:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('order', '0012_auto_20161122_0458'),
]
operations = [
migrations.CreateModel(
name='OrderStatusChange',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status_from', models.CharField(choices=[('O', 'Ordered'), ('D', 'Delivered'), ('N', 'No Charge'), ('C', 'Cancelled'), ('B', 'Billed'), ('P', 'Paid')], max_length=1)),
('status_to', models.CharField(choices=[('O', 'Ordered'), ('D', 'Delivered'), ('N', 'No Charge'), ('C', 'Cancelled'), ('B', 'Billed'), ('P', 'Paid')], max_length=1)),
('reason', models.CharField(blank=True, default='', max_length=200)),
('change_time', models.DateTimeField(auto_now_add=True)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='status_changes', to='order.Order')),
],
options={
'ordering': ['change_time'],
},
),
]
|
mocne/PycharmProjects
|
refs/heads/master
|
HanderCode/aidaiwangApp/aidaiwangApp/LogOut_aidiawangApp.py
|
1
|
# -*- coding: utf-8 -*-
import time
import Register_aidaiwangApp
def start_to_logout():
driver = Register_aidaiwangApp.driver
print(u'logout')
time.sleep(3)
try:
driver.find_element_by_id('cn.phaidai.loan:id/rb_mine')
print('id')
except:
try:
driver.find_element_by_android_uiautomator('new UiSelector().text("我的")')
except:
return 'can not jump to mine'
else:
driver.find_element_by_android_uiautomator('new UiSelector().text("我的")').click()
print('text')
else:
driver.find_element_by_id('cn.phaidai.loan:id/rb_mine').click()
try:
driver.find_element_by_id('cn.phaidai.loan:id/iv_right')
except:
return 'can not jump into setting page'
else:
driver.find_element_by_id('cn.phaidai.loan:id/iv_right').click()
try:
driver.find_element_by_id('cn.phaidai.loan:id/tv_exit')
except:
return 'can not find logout key'
else:
driver.find_element_by_id('cn.phaidai.loan:id/tv_exit').click()
driver.find_element_by_id('cn.phaidai.loan:id/iv_left').click()
try:
driver.find_element_by_android_uiautomator('new UiSelector().text("首页")').click()
except:
try:
driver.find_element_by_id('cn.phaidai.loan:id/rb_home').click()
except:
return 'can not jump to home page after logout'
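# Note (added for clarity): each step above probes for an element inside a
# try/except and only clicks it in the corresponding else branch; when a
# resource id such as 'cn.phaidai.loan:id/rb_mine' is not found, the script
# falls back to a uiautomator text selector (the "我的" / "首页" tab labels).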
|
kbrebanov/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/netscaler/netscaler.py
|
141
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
import re
import sys
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils._text import to_native
class ConfigProxy(object):
def __init__(self, actual, client, attribute_values_dict, readwrite_attrs, transforms=None, readonly_attrs=None, immutable_attrs=None, json_encodes=None):
transforms = {} if transforms is None else transforms
readonly_attrs = [] if readonly_attrs is None else readonly_attrs
immutable_attrs = [] if immutable_attrs is None else immutable_attrs
json_encodes = [] if json_encodes is None else json_encodes
# Actual config object from nitro sdk
self.actual = actual
# nitro client
self.client = client
# ansible attribute_values_dict
self.attribute_values_dict = attribute_values_dict
self.readwrite_attrs = readwrite_attrs
self.readonly_attrs = readonly_attrs
self.immutable_attrs = immutable_attrs
self.json_encodes = json_encodes
self.transforms = transforms
self.attribute_values_processed = {}
for attribute, value in self.attribute_values_dict.items():
if value is None:
continue
if attribute in transforms:
for transform in self.transforms[attribute]:
if transform == 'bool_yes_no':
if value is True:
value = 'YES'
elif value is False:
value = 'NO'
elif transform == 'bool_on_off':
if value is True:
value = 'ON'
elif value is False:
value = 'OFF'
elif callable(transform):
value = transform(value)
else:
raise Exception('Invalid transform %s' % transform)
self.attribute_values_processed[attribute] = value
self._copy_attributes_to_actual()
def _copy_attributes_to_actual(self):
for attribute in self.readwrite_attrs:
if attribute in self.attribute_values_processed:
attribute_value = self.attribute_values_processed[attribute]
if attribute_value is None:
continue
# Fallthrough
if attribute in self.json_encodes:
attribute_value = json.JSONEncoder().encode(attribute_value).strip('"')
setattr(self.actual, attribute, attribute_value)
def __getattr__(self, name):
if name in self.attribute_values_dict:
return self.attribute_values_dict[name]
else:
raise AttributeError('No attribute %s found' % name)
def add(self):
self.actual.__class__.add(self.client, self.actual)
def update(self):
return self.actual.__class__.update(self.client, self.actual)
def delete(self):
self.actual.__class__.delete(self.client, self.actual)
def get(self, *args, **kwargs):
result = self.actual.__class__.get(self.client, *args, **kwargs)
return result
def has_equal_attributes(self, other):
if self.diff_object(other) == {}:
return True
else:
return False
def diff_object(self, other):
diff_dict = {}
for attribute in self.attribute_values_processed:
# Skip readonly attributes
if attribute not in self.readwrite_attrs:
continue
# Skip attributes not present in module arguments
if self.attribute_values_processed[attribute] is None:
continue
# Check existence
if hasattr(other, attribute):
attribute_value = getattr(other, attribute)
else:
diff_dict[attribute] = 'missing from other'
continue
# Compare values
param_type = self.attribute_values_processed[attribute].__class__
if attribute_value is None or param_type(attribute_value) != self.attribute_values_processed[attribute]:
str_tuple = (
type(self.attribute_values_processed[attribute]),
self.attribute_values_processed[attribute],
type(attribute_value),
attribute_value,
)
diff_dict[attribute] = 'difference. ours: (%s) %s other: (%s) %s' % str_tuple
return diff_dict
def get_actual_rw_attributes(self, filter='name'):
if self.actual.__class__.count_filtered(self.client, '%s:%s' % (filter, self.attribute_values_dict[filter])) == 0:
return {}
server_list = self.actual.__class__.get_filtered(self.client, '%s:%s' % (filter, self.attribute_values_dict[filter]))
actual_instance = server_list[0]
ret_val = {}
for attribute in self.readwrite_attrs:
if not hasattr(actual_instance, attribute):
continue
ret_val[attribute] = getattr(actual_instance, attribute)
return ret_val
def get_actual_ro_attributes(self, filter='name'):
if self.actual.__class__.count_filtered(self.client, '%s:%s' % (filter, self.attribute_values_dict[filter])) == 0:
return {}
server_list = self.actual.__class__.get_filtered(self.client, '%s:%s' % (filter, self.attribute_values_dict[filter]))
actual_instance = server_list[0]
ret_val = {}
for attribute in self.readonly_attrs:
if not hasattr(actual_instance, attribute):
continue
ret_val[attribute] = getattr(actual_instance, attribute)
return ret_val
def get_missing_rw_attributes(self):
return list(set(self.readwrite_attrs) - set(self.get_actual_rw_attributes().keys()))
def get_missing_ro_attributes(self):
return list(set(self.readonly_attrs) - set(self.get_actual_ro_attributes().keys()))
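# Illustrative sketch (added; an assumed usage pattern, not part of the
# original module): a NetScaler module would typically wrap a nitro config
# object in a ConfigProxy and drive it through the methods above, e.g.
#
#   proxy = ConfigProxy(
#       actual=server(),                        # hypothetical nitro SDK object
#       client=get_nitro_client(module),
#       attribute_values_dict=module.params,
#       readwrite_attrs=['name', 'ipaddress'],
#       transforms={'state': ['bool_on_off']},  # True -> 'ON', False -> 'OFF'
#   )
#   if not proxy.has_equal_attributes(existing):
#       proxy.update()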
def get_immutables_intersection(config_proxy, keys):
immutables_set = set(config_proxy.immutable_attrs)
keys_set = set(keys)
# Return list of sets' intersection
return list(immutables_set & keys_set)
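# Illustrative note (added; assumed usage pattern): callers typically use the
# intersection above to refuse changes to immutable attributes, e.g.
#
#   immutables_changed = get_immutables_intersection(proxy, list(diff.keys()))
#   if immutables_changed != []:
#       module.fail_json(msg='Cannot update immutable attributes %s' % (immutables_changed,))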
def ensure_feature_is_enabled(client, feature_str):
enabled_features = client.get_enabled_features()
if enabled_features is None:
enabled_features = []
if feature_str not in enabled_features:
client.enable_features(feature_str)
client.save_config()
def get_nitro_client(module):
from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
client = nitro_service(module.params['nsip'], module.params['nitro_protocol'])
client.set_credential(module.params['nitro_user'], module.params['nitro_pass'])
client.timeout = float(module.params['nitro_timeout'])
client.certvalidation = module.params['validate_certs']
return client
netscaler_common_arguments = dict(
nsip=dict(
required=True,
fallback=(env_fallback, ['NETSCALER_NSIP']),
),
nitro_user=dict(
required=True,
fallback=(env_fallback, ['NETSCALER_NITRO_USER']),
no_log=True
),
nitro_pass=dict(
required=True,
fallback=(env_fallback, ['NETSCALER_NITRO_PASS']),
no_log=True
),
nitro_protocol=dict(
choices=['http', 'https'],
fallback=(env_fallback, ['NETSCALER_NITRO_PROTOCOL']),
default='http'
),
validate_certs=dict(
default=True,
type='bool'
),
nitro_timeout=dict(default=310, type='float'),
state=dict(
choices=[
'present',
'absent',
],
default='present',
),
save_config=dict(
type='bool',
default=True,
),
)
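# Illustrative note (added; assumed usage pattern): individual modules would
# normally copy this common spec and extend it with their own options before
# constructing the AnsibleModule, e.g.
#
#   argument_spec = copy.deepcopy(netscaler_common_arguments)
#   argument_spec.update(dict(comment=dict(type='str')))  # hypothetical module option
#   module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)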
loglines = []
def complete_missing_attributes(actual, attrs_list, fill_value=None):
for attribute in attrs_list:
if not hasattr(actual, attribute):
setattr(actual, attribute, fill_value)
def log(msg):
loglines.append(msg)
def get_ns_version(client):
from nssrc.com.citrix.netscaler.nitro.resource.config.ns.nsversion import nsversion
result = nsversion.get(client)
m = re.match(r'^.*NS(\d+)\.(\d+).*$', result[0].version)
if m is None:
return None
else:
return int(m.group(1)), int(m.group(2))
def get_ns_hardware(client):
from nssrc.com.citrix.netscaler.nitro.resource.config.ns.nshardware import nshardware
result = nshardware.get(client)
return result
def monkey_patch_nitro_api():
from nssrc.com.citrix.netscaler.nitro.resource.base.Json import Json
def new_resource_to_string_convert(self, resrc):
# Line below is the actual patch
dict_valid_values = dict((k.replace('_', '', 1), v) for k, v in resrc.__dict__.items() if v)
return json.dumps(dict_valid_values)
Json.resource_to_string_convert = new_resource_to_string_convert
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
@classmethod
def object_to_string_new(cls, obj):
output = []
flds = obj.__dict__
for k, v in ((k.replace('_', '', 1), v) for k, v in flds.items() if v):
if isinstance(v, bool):
output.append('"%s":%s' % (k, v))
elif isinstance(v, (binary_type, text_type)):
v = to_native(v, errors='surrogate_or_strict')
output.append('"%s":"%s"' % (k, v))
elif isinstance(v, int):
output.append('"%s":"%s"' % (k, v))
return ','.join(output)
@classmethod
def object_to_string_withoutquotes_new(cls, obj):
output = []
flds = obj.__dict__
for k, v in ((k.replace('_', '', 1), v) for k, v in flds.items() if v):
if isinstance(v, (int, bool)):
output.append('%s:%s' % (k, v))
elif isinstance(v, (binary_type, text_type)):
v = to_native(v, errors='surrogate_or_strict')
output.append('%s:%s' % (k, cls.encode(v)))
return ','.join(output)
nitro_util.object_to_string = object_to_string_new
nitro_util.object_to_string_withoutquotes = object_to_string_withoutquotes_new
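# Note (added for clarity): the patched serializers above mirror the SDK
# originals but strip the single leading underscore the SDK adds to attribute
# names and skip falsy values, so only explicitly set attributes are sent to
# the NITRO API. Modules are assumed to call monkey_patch_nitro_api() once,
# early, before building any ConfigProxy objects.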
|
mzdaniel/oh-mainline
|
refs/heads/master
|
vendor/packages/celery/celery/task/__init__.py
|
18
|
# -*- coding: utf-8 -*-
"""
celery.task
~~~~~~~~~~~
Creating tasks, subtasks, sets and chords.
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import warnings
from ..app import app_or_default
from ..exceptions import CDeprecationWarning
from .base import Task, PeriodicTask # noqa
from .sets import TaskSet, subtask # noqa
from .chords import chord # noqa
from .control import discard_all # noqa
def task(*args, **kwargs):
"""Decorator to create a task class out of any callable.
**Examples**
.. code-block:: python
@task
def refresh_feed(url):
return Feed.objects.get(url=url).refresh()
Setting extra options and using retry:
.. code-block:: python
@task(max_retries=10)
def refresh_feed(url):
try:
return Feed.objects.get(url=url).refresh()
except socket.error, exc:
refresh_feed.retry(exc=exc)
Calling the resulting task:
>>> refresh_feed("http://example.com/rss") # Regular
<Feed: http://example.com/rss>
>>> refresh_feed.delay("http://example.com/rss") # Async
<AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d>
"""
kwargs.setdefault("accept_magic_kwargs", False)
return app_or_default().task(*args, **kwargs)
def periodic_task(*args, **options):
"""Decorator to create a task class out of any callable.
.. admonition:: Examples
.. code-block:: python
@task
def refresh_feed(url):
return Feed.objects.get(url=url).refresh()
Setting extra options and using retry:
.. code-block:: python
@task(exchange="feeds")
def refresh_feed(url, **kwargs):
try:
return Feed.objects.get(url=url).refresh()
except socket.error, exc:
refresh_feed.retry(args=[url], kwargs=kwargs, exc=exc)
Calling the resulting task:
>>> refresh_feed("http://example.com/rss") # Regular
<Feed: http://example.com/rss>
>>> refresh_feed.delay("http://example.com/rss") # Async
<AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d>
"""
return task(**dict({"base": PeriodicTask}, **options))
@task(name="celery.backend_cleanup")
def backend_cleanup():
backend_cleanup.backend.cleanup()
class PingTask(Task): # ✞
name = "celery.ping"
def run(self, **kwargs):
return "pong"
def ping(): # ✞
"""Deprecated and scheduled for removal in Celery 2.3.
Please use :meth:`celery.task.control.ping` instead.
"""
warnings.warn(CDeprecationWarning(
"The ping task has been deprecated and will be removed in Celery "
"v2.3. Please use inspect.ping instead."))
return PingTask.apply_async().get()
|
chrismeyersfsu/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/illumos/ipadm_if.py
|
48
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Adam Števko <adam.stevko@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ipadm_if
short_description: Manage IP interfaces on Solaris/illumos systems.
description:
- Create, delete, enable or disable IP interfaces on Solaris/illumos
systems.
version_added: "2.2"
author: Adam Števko (@xen0l)
options:
name:
description:
- IP interface name.
required: true
temporary:
description:
- Specifies that the IP interface is temporary. Temporary IP
interfaces do not persist across reboots.
required: false
default: false
choices: [ "true", "false" ]
state:
description:
- Create or delete Solaris/illumos IP interfaces.
required: false
default: "present"
choices: [ "present", "absent", "enabled", "disabled" ]
'''
EXAMPLES = '''
# Create vnic0 interface
- ipadm_if:
name: vnic0
state: enabled
# Disable vnic0 interface
- ipadm_if:
name: vnic0
state: disabled
'''
RETURN = '''
name:
description: IP interface name
returned: always
type: string
sample: "vnic0"
state:
description: state of the target
returned: always
type: string
sample: "present"
temporary:
description: persistence of an IP interface
returned: always
type: boolean
sample: "True"
'''
class IPInterface(object):
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.temporary = module.params['temporary']
self.state = module.params['state']
def interface_exists(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('show-if')
cmd.append(self.name)
(rc, _, _) = self.module.run_command(cmd)
if rc == 0:
return True
else:
return False
def interface_is_disabled(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('show-if')
cmd.append('-o')
cmd.append('state')
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(name=self.name, rc=rc, msg=err)
return 'disabled' in out
def create_interface(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('create-if')
if self.temporary:
cmd.append('-t')
cmd.append(self.name)
return self.module.run_command(cmd)
def delete_interface(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('delete-if')
if self.temporary:
cmd.append('-t')
cmd.append(self.name)
return self.module.run_command(cmd)
def enable_interface(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('enable-if')
cmd.append('-t')
cmd.append(self.name)
return self.module.run_command(cmd)
def disable_interface(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('disable-if')
cmd.append('-t')
cmd.append(self.name)
return self.module.run_command(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
temporary=dict(default=False, type='bool'),
state=dict(default='present', choices=['absent',
'present',
'enabled',
'disabled']),
),
supports_check_mode=True
)
interface = IPInterface(module)
rc = None
out = ''
err = ''
result = {}
result['name'] = interface.name
result['state'] = interface.state
result['temporary'] = interface.temporary
if interface.state == 'absent':
if interface.interface_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = interface.delete_interface()
if rc != 0:
module.fail_json(name=interface.name, msg=err, rc=rc)
elif interface.state == 'present':
if not interface.interface_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = interface.create_interface()
if rc is not None and rc != 0:
module.fail_json(name=interface.name, msg=err, rc=rc)
elif interface.state == 'enabled':
if interface.interface_is_disabled():
(rc, out, err) = interface.enable_interface()
if rc is not None and rc != 0:
module.fail_json(name=interface.name, msg=err, rc=rc)
elif interface.state == 'disabled':
if not interface.interface_is_disabled():
(rc, out, err) = interface.disable_interface()
if rc is not None and rc != 0:
module.fail_json(name=interface.name, msg=err, rc=rc)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
heiths/allura
|
refs/heads/master
|
Allura/allura/ext/project_home/__init__.py
|
5
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .project_main import ProjectHomeApp
|
rrrene/django
|
refs/heads/master
|
django/db/backends/utils.py
|
430
|
from __future__ import unicode_literals
import datetime
import decimal
import hashlib
import logging
from time import time
from django.conf import settings
from django.utils.encoding import force_bytes
from django.utils.timezone import utc
logger = logging.getLogger('django.db.backends')
class CursorWrapper(object):
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall', 'nextset'])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
with self.db.wrap_database_errors:
for item in self.cursor:
yield item
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Ticket #17671 - Close instead of passing thru to avoid backend
# specific behavior. Catch errors liberally because errors in cleanup
# code aren't useful.
try:
self.close()
except self.db.Database.Error:
pass
# The following methods cannot be implemented in __getattr__, because the
# code must run when the method is invoked, not just when it is accessed.
def callproc(self, procname, params=None):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None:
return self.cursor.callproc(procname)
else:
return self.cursor.callproc(procname, params)
def execute(self, sql, params=None):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None:
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
class CursorDebugWrapper(CursorWrapper):
# XXX callproc isn't instrumented at this time.
def execute(self, sql, params=None):
start = time()
try:
return super(CursorDebugWrapper, self).execute(sql, params)
finally:
stop = time()
duration = stop - start
sql = self.db.ops.last_executed_query(self.cursor, sql, params)
self.db.queries_log.append({
'sql': sql,
'time': "%.3f" % duration,
})
logger.debug('(%.3f) %s; args=%s' % (duration, sql, params),
extra={'duration': duration, 'sql': sql, 'params': params}
)
def executemany(self, sql, param_list):
start = time()
try:
return super(CursorDebugWrapper, self).executemany(sql, param_list)
finally:
stop = time()
duration = stop - start
try:
times = len(param_list)
except TypeError: # param_list could be an iterator
times = '?'
self.db.queries_log.append({
'sql': '%s times: %s' % (times, sql),
'time': "%.3f" % duration,
})
logger.debug('(%.3f) %s; args=%s' % (duration, sql, param_list),
extra={'duration': duration, 'sql': sql, 'params': param_list}
)
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
return datetime.date(*map(int, s.split('-'))) if s else None # returns None if s is null
def typecast_time(s): # does NOT store time zone information
if not s:
return None
hour, minutes, seconds = s.split(':')
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
return datetime.time(int(hour), int(minutes), int(seconds), int(float('.' + microseconds) * 1000000))
def typecast_timestamp(s): # does NOT store time zone information
# "2005-07-29 15:48:00.590358-05"
# "2005-07-29 09:56:00-05"
if not s:
return None
if ' ' not in s:
return typecast_date(s)
d, t = s.split()
# Extract timezone information, if it exists. Currently we just throw
# it away, but in the future we may make use of it.
if '-' in t:
t, tz = t.split('-', 1)
tz = '-' + tz
elif '+' in t:
t, tz = t.split('+', 1)
tz = '+' + tz
else:
tz = ''
dates = d.split('-')
times = t.split(':')
seconds = times[2]
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
tzinfo = utc if settings.USE_TZ else None
return datetime.datetime(int(dates[0]), int(dates[1]), int(dates[2]),
int(times[0]), int(times[1]), int(seconds),
int((microseconds + '000000')[:6]), tzinfo)
def typecast_decimal(s):
if s is None or s == '':
return None
return decimal.Decimal(s)
###############################################
# Converters from Python to database (string) #
###############################################
def rev_typecast_decimal(d):
if d is None:
return None
return str(d)
def truncate_name(name, length=None, hash_len=4):
"""Shortens a string to a repeatable mangled version with the given length.
"""
if length is None or len(name) <= length:
return name
hsh = hashlib.md5(force_bytes(name)).hexdigest()[:hash_len]
return '%s%s' % (name[:length - hash_len], hsh)
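# Illustrative example (added): with the default hash_len of 4,
# truncate_name('really_long_table_name', length=10) returns the first six
# characters of the name followed by a 4-character md5 suffix, i.e. a
# repeatable 10-character identifier.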
def format_number(value, max_digits, decimal_places):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
if value is None:
return None
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
if max_digits is not None:
context.prec = max_digits
if decimal_places is not None:
value = value.quantize(decimal.Decimal(".1") ** decimal_places, context=context)
else:
context.traps[decimal.Rounded] = 1
value = context.create_decimal(value)
return "{:f}".format(value)
if decimal_places is not None:
return "%.*f" % (decimal_places, value)
return "{:f}".format(value)
|
bitmazk/django-flipbook
|
refs/heads/master
|
flipbook/migrations/__init__.py
|
12133432
| |
evanma92/routeh
|
refs/heads/master
|
flask/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/__init__.py
|
12133432
| |
ksrajkumar/openerp-6.1
|
refs/heads/master
|
openerp/addons/mrp_repair/mrp_repair.py
|
9
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields,osv
import netsvc
from datetime import datetime
from dateutil.relativedelta import relativedelta
from tools.translate import _
import decimal_precision as dp
class mrp_repair(osv.osv):
_name = 'mrp.repair'
_description = 'Repair Order'
def _amount_untaxed(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates untaxed amount.
@param self: The object pointer
@param cr: The current row, from the database cursor,
@param uid: The current user ID for security checks
@param ids: List of selected IDs
@param field_name: Name of field.
@param arg: Argument
@param context: A standard dictionary for contextual values
@return: Dictionary of values.
"""
res = {}
cur_obj = self.pool.get('res.currency')
for repair in self.browse(cr, uid, ids, context=context):
res[repair.id] = 0.0
for line in repair.operations:
res[repair.id] += line.price_subtotal
for line in repair.fees_lines:
res[repair.id] += line.price_subtotal
cur = repair.pricelist_id.currency_id
res[repair.id] = cur_obj.round(cr, uid, cur, res[repair.id])
return res
def _amount_tax(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates taxed amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
#return {}.fromkeys(ids, 0)
cur_obj = self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
for repair in self.browse(cr, uid, ids, context=context):
val = 0.0
cur = repair.pricelist_id.currency_id
for line in repair.operations:
#manage prices with tax included use compute_all instead of compute
if line.to_invoice:
tax_calculate = tax_obj.compute_all(cr, uid, line.tax_id, line.price_unit, line.product_uom_qty, repair.partner_invoice_id.id, line.product_id, repair.partner_id)
for c in tax_calculate['taxes']:
val += c['amount']
for line in repair.fees_lines:
if line.to_invoice:
tax_calculate = tax_obj.compute_all(cr, uid, line.tax_id, line.price_unit, line.product_uom_qty, repair.partner_invoice_id.id, line.product_id, repair.partner_id)
for c in tax_calculate['taxes']:
val += c['amount']
res[repair.id] = cur_obj.round(cr, uid, cur, val)
return res
def _amount_total(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates total amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
untax = self._amount_untaxed(cr, uid, ids, field_name, arg, context=context)
tax = self._amount_tax(cr, uid, ids, field_name, arg, context=context)
cur_obj = self.pool.get('res.currency')
for id in ids:
repair = self.browse(cr, uid, id, context=context)
cur = repair.pricelist_id.currency_id
res[id] = cur_obj.round(cr, uid, cur, untax.get(id, 0.0) + tax.get(id, 0.0))
return res
def _get_default_address(self, cr, uid, ids, field_name, arg, context=None):
res = {}
partner_obj = self.pool.get('res.partner')
for data in self.browse(cr, uid, ids, context=context):
adr_id = False
if data.partner_id:
adr_id = partner_obj.address_get(cr, uid, [data.partner_id.id], ['default'])['default']
res[data.id] = adr_id
return res
def _get_lines(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('mrp.repair.line').browse(cr, uid, ids, context=context):
result[line.repair_id.id] = True
return result.keys()
_columns = {
'name': fields.char('Repair Reference',size=24, required=True),
'product_id': fields.many2one('product.product', string='Product to Repair', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'partner_id' : fields.many2one('res.partner', 'Partner', select=True, help='This field allows you to choose the partner that will be invoiced and delivered'),
'address_id': fields.many2one('res.partner.address', 'Delivery Address', domain="[('partner_id','=',partner_id)]"),
'default_address_id': fields.function(_get_default_address, type="many2one", relation="res.partner.address"),
'prodlot_id': fields.many2one('stock.production.lot', 'Lot Number', select=True, domain="[('product_id','=',product_id)]"),
'state': fields.selection([
('draft','Quotation'),
('confirmed','Confirmed'),
('ready','Ready to Repair'),
('under_repair','Under Repair'),
('2binvoiced','To be Invoiced'),
('invoice_except','Invoice Exception'),
('done','Done'),
('cancel','Cancel')
], 'State', readonly=True,
help=' * The \'Draft\' state is used when a user is encoding a new and unconfirmed repair order. \
\n* The \'Confirmed\' state is used when a user confirms the repair order. \
\n* The \'Ready to Repair\' state is used to start the repair; the user can start repairing only after the repair order is confirmed. \
\n* The \'To be Invoiced\' state is used to generate the invoice before or after the repair is done. \
\n* The \'Done\' state is set when the repair is completed.\
\n* The \'Cancelled\' state is used when the user cancels the repair order.'),
'location_id': fields.many2one('stock.location', 'Current Location', select=True, readonly=True, states={'draft':[('readonly',False)]}),
'location_dest_id': fields.many2one('stock.location', 'Delivery Location', readonly=True, states={'draft':[('readonly',False)]}),
'move_id': fields.many2one('stock.move', 'Move',required=True, domain="[('product_id','=',product_id)]", readonly=True, states={'draft':[('readonly',False)]}),
'guarantee_limit': fields.date('Guarantee limit', help="The guarantee limit is computed as: last move date + warranty defined on the selected product. If the current date is below the guarantee limit, each operation and fee you add will be set as 'not to be invoiced' by default. Note that you can change this manually afterwards."),
'operations' : fields.one2many('mrp.repair.line', 'repair_id', 'Operation Lines', readonly=True, states={'draft':[('readonly',False)]}),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', help='The pricelist comes from the selected partner, by default.'),
'partner_invoice_id':fields.many2one('res.partner.address', 'Invoicing Address', domain="[('partner_id','=',partner_id)]"),
'invoice_method':fields.selection([
("none","No Invoice"),
("b4repair","Before Repair"),
("after_repair","After Repair")
], "Invoice Method",
select=True, required=True, states={'draft':[('readonly',False)]}, readonly=True, help='This field allows you to change the workflow of the repair order. If the selected value is different from \'No Invoice\', it also allows you to select the pricelist and invoicing address.'),
'invoice_id': fields.many2one('account.invoice', 'Invoice', readonly=True),
'picking_id': fields.many2one('stock.picking', 'Picking',readonly=True),
'fees_lines': fields.one2many('mrp.repair.fee', 'repair_id', 'Fees Lines', readonly=True, states={'draft':[('readonly',False)]}),
'internal_notes': fields.text('Internal Notes'),
'quotation_notes': fields.text('Quotation Notes'),
'company_id': fields.many2one('res.company', 'Company'),
'deliver_bool': fields.boolean('Deliver', help="Check this box if you want to manage the delivery once the product is repaired. If checked, it will create a picking with the selected product. Note that you can select the locations in the Info tab, if you have the extended view."),
'invoiced': fields.boolean('Invoiced', readonly=True),
'repaired': fields.boolean('Repaired', readonly=True),
'amount_untaxed': fields.function(_amount_untaxed, string='Untaxed Amount',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
'amount_tax': fields.function(_amount_tax, string='Taxes',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
'amount_total': fields.function(_amount_total, string='Total',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
}
_defaults = {
'state': lambda *a: 'draft',
'deliver_bool': lambda *a: True,
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'mrp.repair'),
'invoice_method': lambda *a: 'none',
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.repair', context=context),
'pricelist_id': lambda self, cr, uid,context : self.pool.get('product.pricelist').search(cr, uid, [('type','=','sale')])[0]
}
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({
'state':'draft',
'repaired':False,
'invoiced':False,
'invoice_id': False,
'picking_id': False,
'name': self.pool.get('ir.sequence').get(cr, uid, 'mrp.repair'),
})
return super(mrp_repair, self).copy(cr, uid, id, default, context)
def onchange_product_id(self, cr, uid, ids, product_id=None):
""" On change of product sets some values.
@param product_id: Changed product
@return: Dictionary of values.
"""
return {'value': {
'prodlot_id': False,
'move_id': False,
'guarantee_limit' :False,
'location_id': False,
'location_dest_id': False,
}
}
def onchange_move_id(self, cr, uid, ids, prod_id=False, move_id=False):
""" On change of move id sets values of guarantee limit, source location,
destination location, partner and partner address.
@param prod_id: Id of product in current record.
@param move_id: Changed move.
@return: Dictionary of values.
"""
data = {}
data['value'] = {}
if not prod_id:
return data
if move_id:
move = self.pool.get('stock.move').browse(cr, uid, move_id)
product = self.pool.get('product.product').browse(cr, uid, prod_id)
limit = datetime.strptime(move.date_expected, '%Y-%m-%d %H:%M:%S') + relativedelta(months=int(product.warranty))
data['value']['guarantee_limit'] = limit.strftime('%Y-%m-%d')
data['value']['location_id'] = move.location_dest_id.id
data['value']['location_dest_id'] = move.location_dest_id.id
if move.address_id:
data['value']['partner_id'] = move.address_id.partner_id and move.address_id.partner_id.id
else:
data['value']['partner_id'] = False
data['value']['address_id'] = move.address_id and move.address_id.id
d = self.onchange_partner_id(cr, uid, ids, data['value']['partner_id'], data['value']['address_id'])
data['value'].update(d['value'])
return data
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_partner_id(self, cr, uid, ids, part, address_id):
""" On change of partner sets the values of partner address,
partner invoice address and pricelist.
@param part: Changed id of partner.
@param address_id: Address id from current record.
@return: Dictionary of values.
"""
part_obj = self.pool.get('res.partner')
pricelist_obj = self.pool.get('product.pricelist')
if not part:
return {'value': {
'address_id': False,
'partner_invoice_id': False,
'pricelist_id': pricelist_obj.search(cr, uid, [('type','=','sale')])[0]
}
}
addr = part_obj.address_get(cr, uid, [part], ['delivery', 'invoice', 'default'])
partner = part_obj.browse(cr, uid, part)
pricelist = partner.property_product_pricelist and partner.property_product_pricelist.id or False
return {'value': {
'address_id': address_id or addr['delivery'],
'partner_invoice_id': addr['invoice'],
'pricelist_id': pricelist
}
}
def onchange_lot_id(self, cr, uid, ids, lot, product_id):
""" On change of production lot sets the values of source location,
destination location, move and guarantee limit.
@param lot: Changed id of production lot.
@param product_id: Product id from current record.
@return: Dictionary of values.
"""
move_obj = self.pool.get('stock.move')
data = {}
data['value'] = {
'location_id': False,
'location_dest_id': False,
'move_id': False,
'guarantee_limit': False
}
if not lot:
return data
move_ids = move_obj.search(cr, uid, [('prodlot_id', '=', lot)])
if not len(move_ids):
return data
def get_last_move(lst_move):
while lst_move.move_dest_id and lst_move.move_dest_id.state == 'done':
lst_move = lst_move.move_dest_id
return lst_move
move_id = move_ids[0]
move = get_last_move(move_obj.browse(cr, uid, move_id))
data['value']['move_id'] = move.id
d = self.onchange_move_id(cr, uid, ids, product_id, move.id)
data['value'].update(d['value'])
return data
def action_cancel_draft(self, cr, uid, ids, *args):
""" Cancels repair order when it is in 'Draft' state.
@param *arg: Arguments
@return: True
"""
if not len(ids):
return False
mrp_line_obj = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids):
mrp_line_obj.write(cr, uid, [l.id for l in repair.operations], {'state': 'draft'})
self.write(cr, uid, ids, {'state':'draft'})
wf_service = netsvc.LocalService("workflow")
for id in ids:
wf_service.trg_create(uid, 'mrp.repair', id, cr)
return True
def action_confirm(self, cr, uid, ids, *args):
""" Repair order state is set to 'To be invoiced' when invoice method
is 'Before repair' else state becomes 'Confirmed'.
@param *arg: Arguments
@return: True
"""
mrp_line_obj = self.pool.get('mrp.repair.line')
for o in self.browse(cr, uid, ids):
if (o.invoice_method == 'b4repair'):
self.write(cr, uid, [o.id], {'state': '2binvoiced'})
else:
self.write(cr, uid, [o.id], {'state': 'confirmed'})
if not o.operations:
raise osv.except_osv(_('Error !'),_('You cannot confirm a repair order which has no line.'))
for line in o.operations:
if line.product_id.track_production and not line.prodlot_id:
raise osv.except_osv(_('Warning'), _("Production lot is required for operation line with product '%s'") % (line.product_id.name))
mrp_line_obj.write(cr, uid, [l.id for l in o.operations], {'state': 'confirmed'})
return True
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels repair order.
@return: True
"""
mrp_line_obj = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids, context=context):
mrp_line_obj.write(cr, uid, [l.id for l in repair.operations], {'state': 'cancel'}, context=context)
self.write(cr,uid,ids,{'state':'cancel'})
return True
def wkf_invoice_create(self, cr, uid, ids, *args):
return self.action_invoice_create(cr, uid, ids)
def action_invoice_create(self, cr, uid, ids, group=False, context=None):
""" Creates invoice(s) for repair order.
@param group: It is set to true when group invoice is to be generated.
@return: Invoice Ids.
"""
res = {}
invoices_group = {}
inv_line_obj = self.pool.get('account.invoice.line')
inv_obj = self.pool.get('account.invoice')
repair_line_obj = self.pool.get('mrp.repair.line')
repair_fee_obj = self.pool.get('mrp.repair.fee')
for repair in self.browse(cr, uid, ids, context=context):
res[repair.id] = False
if repair.state in ('draft','cancel') or repair.invoice_id:
continue
if not (repair.partner_id.id and repair.partner_invoice_id.id):
raise osv.except_osv(_('No partner !'),_('You have to select a Partner Invoice Address in the repair form !'))
comment = repair.quotation_notes
if (repair.invoice_method != 'none'):
if group and repair.partner_invoice_id.id in invoices_group:
inv_id = invoices_group[repair.partner_invoice_id.id]
invoice = inv_obj.browse(cr, uid, inv_id)
invoice_vals = {
'name': invoice.name +', '+repair.name,
'origin': invoice.origin+', '+repair.name,
'comment':(comment and (invoice.comment and invoice.comment+"\n"+comment or comment)) or (invoice.comment and invoice.comment or ''),
}
inv_obj.write(cr, uid, [inv_id], invoice_vals, context=context)
else:
if not repair.partner_id.property_account_receivable:
raise osv.except_osv(_('Error !'), _('No account defined for partner "%s".') % repair.partner_id.name )
account_id = repair.partner_id.property_account_receivable.id
inv = {
'name': repair.name,
'origin':repair.name,
'type': 'out_invoice',
'account_id': account_id,
'partner_id': repair.partner_id.id,
'address_invoice_id': repair.address_id.id,
'currency_id': repair.pricelist_id.currency_id.id,
'comment': repair.quotation_notes,
'fiscal_position': repair.partner_id.property_account_position.id
}
inv_id = inv_obj.create(cr, uid, inv)
invoices_group[repair.partner_invoice_id.id] = inv_id
self.write(cr, uid, repair.id, {'invoiced': True, 'invoice_id': inv_id})
for operation in repair.operations:
if operation.to_invoice == True:
if group:
name = repair.name + '-' + operation.name
else:
name = operation.name
if operation.product_id.property_account_income:
account_id = operation.product_id.property_account_income.id
elif operation.product_id.categ_id.property_account_income_categ:
account_id = operation.product_id.categ_id.property_account_income_categ.id
else:
raise osv.except_osv(_('Error !'), _('No account defined for product "%s".') % operation.product_id.name )
invoice_line_id = inv_line_obj.create(cr, uid, {
'invoice_id': inv_id,
'name': name,
'origin': repair.name,
'account_id': account_id,
'quantity': operation.product_uom_qty,
'invoice_line_tax_id': [(6,0,[x.id for x in operation.tax_id])],
'uos_id': operation.product_uom.id,
'price_unit': operation.price_unit,
'price_subtotal': operation.product_uom_qty*operation.price_unit,
'product_id': operation.product_id and operation.product_id.id or False
})
repair_line_obj.write(cr, uid, [operation.id], {'invoiced': True, 'invoice_line_id': invoice_line_id})
for fee in repair.fees_lines:
if fee.to_invoice == True:
if group:
name = repair.name + '-' + fee.name
else:
name = fee.name
if not fee.product_id:
raise osv.except_osv(_('Warning !'), _('No product defined on Fees!'))
if fee.product_id.property_account_income:
account_id = fee.product_id.property_account_income.id
elif fee.product_id.categ_id.property_account_income_categ:
account_id = fee.product_id.categ_id.property_account_income_categ.id
else:
raise osv.except_osv(_('Error !'), _('No account defined for product "%s".') % fee.product_id.name)
invoice_fee_id = inv_line_obj.create(cr, uid, {
'invoice_id': inv_id,
'name': name,
'origin': repair.name,
'account_id': account_id,
'quantity': fee.product_uom_qty,
'invoice_line_tax_id': [(6,0,[x.id for x in fee.tax_id])],
'uos_id': fee.product_uom.id,
'product_id': fee.product_id and fee.product_id.id or False,
'price_unit': fee.price_unit,
'price_subtotal': fee.product_uom_qty*fee.price_unit
})
repair_fee_obj.write(cr, uid, [fee.id], {'invoiced': True, 'invoice_line_id': invoice_fee_id})
res[repair.id] = inv_id
return res
def action_repair_ready(self, cr, uid, ids, context=None):
""" Writes repair order state to 'Ready'
@return: True
"""
for repair in self.browse(cr, uid, ids, context=context):
self.pool.get('mrp.repair.line').write(cr, uid, [l.id for
l in repair.operations], {'state': 'confirmed'}, context=context)
self.write(cr, uid, [repair.id], {'state': 'ready'})
return True
def action_repair_start(self, cr, uid, ids, context=None):
""" Writes repair order state to 'Under Repair'
@return: True
"""
repair_line = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids, context=context):
repair_line.write(cr, uid, [l.id for
l in repair.operations], {'state': 'confirmed'}, context=context)
repair.write({'state': 'under_repair'})
return True
def action_repair_end(self, cr, uid, ids, context=None):
""" Writes repair order state to 'To be invoiced' if invoice method is
After repair else state is set to 'Ready'.
@return: True
"""
for order in self.browse(cr, uid, ids, context=context):
val = {}
val['repaired'] = True
if (not order.invoiced and order.invoice_method=='after_repair'):
val['state'] = '2binvoiced'
elif (not order.invoiced and order.invoice_method=='b4repair'):
val['state'] = 'ready'
else:
pass
self.write(cr, uid, [order.id], val)
return True
def wkf_repair_done(self, cr, uid, ids, *args):
self.action_repair_done(cr, uid, ids)
return True
def action_repair_done(self, cr, uid, ids, context=None):
""" Creates stock move and picking for repair order.
@return: Picking ids.
"""
res = {}
move_obj = self.pool.get('stock.move')
wf_service = netsvc.LocalService("workflow")
repair_line_obj = self.pool.get('mrp.repair.line')
seq_obj = self.pool.get('ir.sequence')
pick_obj = self.pool.get('stock.picking')
for repair in self.browse(cr, uid, ids, context=context):
for move in repair.operations:
move_id = move_obj.create(cr, uid, {
'name': move.name,
'product_id': move.product_id.id,
'product_qty': move.product_uom_qty,
'product_uom': move.product_uom.id,
'address_id': repair.address_id and repair.address_id.id or False,
'location_id': move.location_id.id,
'location_dest_id': move.location_dest_id.id,
'tracking_id': False,
'prodlot_id': move.prodlot_id and move.prodlot_id.id or False,
'state': 'done',
})
repair_line_obj.write(cr, uid, [move.id], {'move_id': move_id, 'state': 'done'}, context=context)
if repair.deliver_bool:
pick_name = seq_obj.get(cr, uid, 'stock.picking.out')
picking = pick_obj.create(cr, uid, {
'name': pick_name,
'origin': repair.name,
'state': 'draft',
'move_type': 'one',
'address_id': repair.address_id and repair.address_id.id or False,
'note': repair.internal_notes,
'invoice_state': 'none',
'type': 'out',
})
move_id = move_obj.create(cr, uid, {
'name': repair.name,
'picking_id': picking,
'product_id': repair.product_id.id,
'product_qty': move.product_uom_qty or 1.0,
'product_uom': repair.product_id.uom_id.id,
'prodlot_id': repair.prodlot_id and repair.prodlot_id.id or False,
'address_id': repair.address_id and repair.address_id.id or False,
'location_id': repair.location_id.id,
'location_dest_id': repair.location_dest_id.id,
'tracking_id': False,
'state': 'assigned',
})
wf_service.trg_validate(uid, 'stock.picking', picking, 'button_confirm', cr)
self.write(cr, uid, [repair.id], {'state': 'done', 'picking_id': picking})
res[repair.id] = picking
else:
self.write(cr, uid, [repair.id], {'state': 'done'})
return res
mrp_repair()
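# Note (added for clarity): in OpenERP 6.1, instantiating the osv class at
# module level, as done above, registers the model in the ORM pool; the same
# pattern is repeated for mrp_repair_line and mrp_repair_fee below.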
class ProductChangeMixin(object):
def product_id_change(self, cr, uid, ids, pricelist, product, uom=False,
product_uom_qty=0, partner_id=False, guarantee_limit=False):
""" On change of product it sets product quantity, tax account, name,
uom of product, unit price and price subtotal.
@param pricelist: Pricelist of current record.
@param product: Changed id of product.
@param uom: UoM of current record.
@param product_uom_qty: Quantity of current record.
@param partner_id: Partner of current record.
@param guarantee_limit: Guarantee limit of current record.
@return: Dictionary of values and warning message.
"""
result = {}
warning = {}
if not product_uom_qty:
product_uom_qty = 1
result['product_uom_qty'] = product_uom_qty
if product:
product_obj = self.pool.get('product.product').browse(cr, uid, product)
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id)
result['tax_id'] = self.pool.get('account.fiscal.position').map_tax(cr, uid, partner.property_account_position, product_obj.taxes_id)
result['name'] = product_obj.partner_ref
result['product_uom'] = product_obj.uom_id and product_obj.uom_id.id or False
if not pricelist:
warning = {
'title':'No Pricelist !',
'message':
'You have to select a pricelist in the Repair form !\n'
'Please set one before choosing a product.'
}
else:
price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist],
product, product_uom_qty, partner_id, {'uom': uom,})[pricelist]
if price is False:
warning = {
'title':'No valid pricelist line found !',
'message':
"Couldn't find a pricelist line matching this product and quantity.\n"
"You have to change either the product, the quantity or the pricelist."
}
else:
result.update({'price_unit': price, 'price_subtotal': price*product_uom_qty})
return {'value': result, 'warning': warning}
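# Note (added for clarity): ProductChangeMixin is inherited by both
# mrp_repair_line and mrp_repair_fee below, so operation lines and fee lines
# share the same product on_change behaviour (description, UoM, taxes and a
# pricelist-based unit price / subtotal).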
class mrp_repair_line(osv.osv, ProductChangeMixin):
_name = 'mrp.repair.line'
_description = 'Repair Line'
def copy_data(self, cr, uid, id, default=None, context=None):
if not default: default = {}
default.update( {'invoice_line_id': False, 'move_id': False, 'invoiced': False, 'state': 'draft'})
return super(mrp_repair_line, self).copy_data(cr, uid, id, default, context)
def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
cur_obj=self.pool.get('res.currency')
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = line.to_invoice and line.price_unit * line.product_uom_qty or 0
cur = line.repair_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, res[line.id])
return res
_columns = {
'name' : fields.char('Description',size=64,required=True),
'repair_id': fields.many2one('mrp.repair', 'Repair Order Reference',ondelete='cascade', select=True),
'type': fields.selection([('add','Add'),('remove','Remove')],'Type', required=True),
'to_invoice': fields.boolean('To Invoice'),
'product_id': fields.many2one('product.product', 'Product', domain=[('sale_ok','=',True)], required=True),
'invoiced': fields.boolean('Invoiced',readonly=True),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Sale Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal',digits_compute= dp.get_precision('Sale Price')),
'tax_id': fields.many2many('account.tax', 'repair_operation_line_tax', 'repair_operation_line_id', 'tax_id', 'Taxes'),
'product_uom_qty': fields.float('Quantity (UoM)', digits=(16,2), required=True),
'product_uom': fields.many2one('product.uom', 'Product UoM', required=True),
'prodlot_id': fields.many2one('stock.production.lot', 'Lot Number',domain="[('product_id','=',product_id)]"),
'invoice_line_id': fields.many2one('account.invoice.line', 'Invoice Line', readonly=True),
'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True),
'location_dest_id': fields.many2one('stock.location', 'Dest. Location', required=True, select=True),
'move_id': fields.many2one('stock.move', 'Inventory Move', readonly=True),
'state': fields.selection([
('draft','Draft'),
('confirmed','Confirmed'),
('done','Done'),
('cancel','Canceled')], 'State', required=True, readonly=True,
help=' * The \'Draft\' state is set automatically when the repair order is in draft state. \
\n* The \'Confirmed\' state is set automatically when the repair order is confirmed. \
\n* The \'Done\' state is set automatically when the repair order is completed.\
\n* The \'Cancelled\' state is set automatically when the user cancels the repair order.'),
}
_defaults = {
'state': lambda *a: 'draft',
'product_uom_qty': lambda *a: 1,
}
def onchange_operation_type(self, cr, uid, ids, type, guarantee_limit, company_id=False, context=None):
""" On change of operation type it sets source location, destination location
and to invoice field.
@param product: Changed operation type.
@param guarantee_limit: Guarantee limit of current record.
@return: Dictionary of values.
"""
if not type:
return {'value': {
'location_id': False,
'location_dest_id': False
}}
warehouse_obj = self.pool.get('stock.warehouse')
location_id = self.pool.get('stock.location').search(cr, uid, [('usage','=','production')], context=context)
location_id = location_id and location_id[0] or False
if type == 'add':
# TOCHECK: Find stock location for user's company warehouse or
# repair order's company's warehouse (company_id field is added in fix of lp:831583)
args = company_id and [('company_id', '=', company_id)] or []
warehouse_ids = warehouse_obj.search(cr, uid, args, context=context)
stock_id = False
if warehouse_ids:
stock_id = warehouse_obj.browse(cr, uid, warehouse_ids[0], context=context).lot_stock_id.id
to_invoice = (guarantee_limit and datetime.strptime(guarantee_limit, '%Y-%m-%d') < datetime.now())
return {'value': {
'to_invoice': to_invoice,
'location_id': stock_id,
'location_dest_id': location_id
}}
return {'value': {
'to_invoice': False,
'location_id': location_id,
'location_dest_id': False
}}
mrp_repair_line()
class mrp_repair_fee(osv.osv, ProductChangeMixin):
_name = 'mrp.repair.fee'
_description = 'Repair Fees Line'
def copy_data(self, cr, uid, id, default=None, context=None):
if not default: default = {}
default.update({'invoice_line_id': False, 'invoiced': False})
return super(mrp_repair_fee, self).copy_data(cr, uid, id, default, context)
def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
cur_obj = self.pool.get('res.currency')
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = line.to_invoice and line.price_unit * line.product_uom_qty or 0
cur = line.repair_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, res[line.id])
return res
_columns = {
'repair_id': fields.many2one('mrp.repair', 'Repair Order Reference', required=True, ondelete='cascade', select=True),
'name': fields.char('Description', size=64, select=True,required=True),
'product_id': fields.many2one('product.product', 'Product'),
'product_uom_qty': fields.float('Quantity', digits=(16,2), required=True),
'price_unit': fields.float('Unit Price', required=True),
'product_uom': fields.many2one('product.uom', 'Product UoM', required=True),
'price_subtotal': fields.function(_amount_line, string='Subtotal',digits_compute= dp.get_precision('Sale Price')),
'tax_id': fields.many2many('account.tax', 'repair_fee_line_tax', 'repair_fee_line_id', 'tax_id', 'Taxes'),
'invoice_line_id': fields.many2one('account.invoice.line', 'Invoice Line', readonly=True),
'to_invoice': fields.boolean('To Invoice'),
'invoiced': fields.boolean('Invoiced',readonly=True),
}
_defaults = {
'to_invoice': lambda *a: True,
}
mrp_repair_fee()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
saydulk/horizon
|
refs/heads/master
|
horizon/test/test_dashboards/cats/tigers/urls.py
|
67
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from horizon.test.test_dashboards.cats.tigers.views import IndexView # noqa
urlpatterns = patterns(
'',
url(r'^$', IndexView.as_view(), name='index'),
)
|
grandmasterchef/WhatManager2
|
refs/heads/master
|
bibliotik/trans_sync.py
|
3
|
import os
import os.path
from django.db import transaction
from bibliotik import manage_bibliotik
from bibliotik.models import BibliotikTransTorrent, BibliotikTorrent
from home.models import LogEntry, DownloadLocation
def sync_instance_db(instance):
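    """Reconcile one instance's Bibliotik DB records with its torrent client:
    DB records whose torrent is gone from the client are deleted (and, on the
    master replica, their now-empty download directories removed), client
    torrents missing from the DB are re-created, and the remaining ones are
    refreshed via sync_t_torrent()."""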
b_torrents = instance.get_b_torrents_by_hash()
t_torrents = instance.get_t_torrents_by_hash(BibliotikTransTorrent.sync_t_arguments)
for c_hash, b_torrent in b_torrents.items():
if c_hash not in t_torrents:
b_torrent_path = b_torrent.path.encode('utf-8')
messages = []
with transaction.atomic():
b_torrent.delete()
del b_torrents[c_hash]
if instance.replica_set.is_master:
if os.path.exists(b_torrent_path):
files = os.listdir(b_torrent_path)
if len(files):
messages.append(u'There are other files so leaving in place.')
else:
messages.append(u'No other files. Deleting directory.')
os.rmdir(b_torrent_path)
else:
messages.append(u'Path does not exist.')
LogEntry.add(None, u'action',
u'Bibliotik torrent {0} deleted from instance {1}. {2}'
.format(b_torrent, instance, ' '.join(messages)))
with transaction.atomic():
for c_hash, t_torrent in t_torrents.items():
if c_hash not in b_torrents:
torrent_id = int(os.path.basename(t_torrent.downloadDir))
w_torrent = BibliotikTorrent.get_or_create(None, torrent_id)
d_location = DownloadLocation.get_by_full_path(t_torrent.downloadDir)
m_torrent = manage_bibliotik.add_bibliotik_torrent(w_torrent.id, instance,
d_location, None, False)
b_torrents[m_torrent.info_hash] = m_torrent
LogEntry.add(None, u'action',
u'Bibliotik torrent {0} appeared in instance {1}.'
.format(t_torrent.name, instance))
else:
b_torrent = b_torrents[c_hash]
b_torrent.sync_t_torrent(t_torrent)
def sync_all_instances_db(replica_set):
for instance in replica_set.transinstance_set.all():
sync_instance_db(instance)
def init_sync_instance_db(instance):
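    """Initial sync of one instance: for every torrent in the client, look up the
    matching BibliotikTorrent by info hash (raising if it is unknown), create the
    transmission-side record if needed, then refresh it from the client."""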
b_torrents = instance.get_b_torrents_by_hash()
t_torrents = instance.get_t_torrents_by_hash(BibliotikTransTorrent.sync_t_arguments)
with transaction.atomic():
for c_hash, t_torrent in t_torrents.items():
if c_hash not in b_torrents:
try:
bibliotik_torrent = BibliotikTorrent.objects.get(info_hash=c_hash)
d_location = DownloadLocation.get_by_full_path(t_torrent.downloadDir)
b_torrent = manage_bibliotik.add_bibliotik_torrent(
bibliotik_torrent.id,
instance,
d_location,
add_to_client=False
)
b_torrents[b_torrent.info_hash] = b_torrent
except BibliotikTorrent.DoesNotExist:
raise Exception(u'Could not find hash {0} for name {1} in '
u'DB during initial sync.'
.format(c_hash, t_torrent.name))
b_torrent = b_torrents[c_hash]
b_torrent.sync_t_torrent(t_torrent)
def init_sync_all_instances_db(replica_set):
for instance in replica_set.transinstance_set.all():
init_sync_instance_db(instance)
|
TathagataChakraborti/resource-conflicts
|
refs/heads/master
|
PLANROB-2015/seq-sat-lama/py2.5/lib/python2.5/pprint.py
|
92
|
# Author: Fred L. Drake, Jr.
# fdrake@acm.org
#
# This is a simple little module I wrote to make life easier. I didn't
# see anything quite like it in the library, though I may have overlooked
# something. I wrote this when I was trying to read some heavily nested
# tuples with fairly non-descriptive content. This is modeled very much
# after Lisp/Scheme - style pretty-printing of lists. If you find it
# useful, thank small children who sleep at night.
"""Support to pretty-print lists, tuples, & dictionaries recursively.
Very simple, but useful, especially in debugging data structures.
Classes
-------
PrettyPrinter()
Handle pretty-printing operations onto a stream using a configured
set of formatting parameters.
Functions
---------
pformat()
Format a Python object into a pretty-printed representation.
pprint()
Pretty-print a Python object to a stream [default is sys.stdout].
saferepr()
Generate a 'standard' repr()-like value, but protect against recursive
data structures.
"""
import sys as _sys
from cStringIO import StringIO as _StringIO
__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
"PrettyPrinter"]
# cache these for faster access:
_commajoin = ", ".join
_id = id
_len = len
_type = type
def pprint(object, stream=None, indent=1, width=80, depth=None):
"""Pretty-print a Python object to a stream [default is sys.stdout]."""
printer = PrettyPrinter(
stream=stream, indent=indent, width=width, depth=depth)
printer.pprint(object)
def pformat(object, indent=1, width=80, depth=None):
"""Format a Python object into a pretty-printed representation."""
return PrettyPrinter(indent=indent, width=width, depth=depth).pformat(object)
def saferepr(object):
"""Version of repr() which can handle recursive data structures."""
return _safe_repr(object, {}, None, 0)[0]
def isreadable(object):
"""Determine if saferepr(object) is readable by eval()."""
return _safe_repr(object, {}, None, 0)[1]
def isrecursive(object):
"""Determine if object requires a recursive representation."""
return _safe_repr(object, {}, None, 0)[2]
class PrettyPrinter:
def __init__(self, indent=1, width=80, depth=None, stream=None):
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
indent
Number of spaces to indent for each level of nesting.
width
Attempted maximum number of columns in the output.
depth
The maximum depth to print out nested structures.
stream
The desired output stream. If omitted (or false), the standard
output stream available at construction will be used.
"""
indent = int(indent)
width = int(width)
assert indent >= 0, "indent must be >= 0"
assert depth is None or depth > 0, "depth must be > 0"
assert width, "width must be != 0"
self._depth = depth
self._indent_per_level = indent
self._width = width
if stream is not None:
self._stream = stream
else:
self._stream = _sys.stdout
def pprint(self, object):
self._format(object, self._stream, 0, 0, {}, 0)
self._stream.write("\n")
def pformat(self, object):
sio = _StringIO()
self._format(object, sio, 0, 0, {}, 0)
return sio.getvalue()
def isrecursive(self, object):
return self.format(object, {}, 0, 0)[2]
def isreadable(self, object):
s, readable, recursive = self.format(object, {}, 0, 0)
return readable and not recursive
def _format(self, object, stream, indent, allowance, context, level):
level = level + 1
objid = _id(object)
if objid in context:
stream.write(_recursion(object))
self._recursive = True
self._readable = False
return
rep = self._repr(object, context, level - 1)
typ = _type(object)
sepLines = _len(rep) > (self._width - 1 - indent - allowance)
write = stream.write
if sepLines:
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
write('{')
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
length = _len(object)
if length:
context[objid] = 1
indent = indent + self._indent_per_level
items = object.items()
items.sort()
key, ent = items[0]
rep = self._repr(key, context, level)
write(rep)
write(': ')
self._format(ent, stream, indent + _len(rep) + 2,
allowance + 1, context, level)
if length > 1:
for key, ent in items[1:]:
rep = self._repr(key, context, level)
write(',\n%s%s: ' % (' '*indent, rep))
self._format(ent, stream, indent + _len(rep) + 2,
allowance + 1, context, level)
indent = indent - self._indent_per_level
del context[objid]
write('}')
return
if (issubclass(typ, list) and r is list.__repr__) or \
(issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
write('[')
endchar = ']'
else:
write('(')
endchar = ')'
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
length = _len(object)
if length:
context[objid] = 1
indent = indent + self._indent_per_level
self._format(object[0], stream, indent, allowance + 1,
context, level)
if length > 1:
for ent in object[1:]:
write(',\n' + ' '*indent)
self._format(ent, stream, indent,
allowance + 1, context, level)
indent = indent - self._indent_per_level
del context[objid]
if issubclass(typ, tuple) and length == 1:
write(',')
write(endchar)
return
write(rep)
def _repr(self, object, context, level):
repr, readable, recursive = self.format(object, context.copy(),
self._depth, level)
if not readable:
self._readable = False
if recursive:
self._recursive = True
return repr
def format(self, object, context, maxlevels, level):
"""Format object for a specific context, returning a string
and flags indicating whether the representation is 'readable'
and whether the object represents a recursive construct.
"""
return _safe_repr(object, context, maxlevels, level)
# Return triple (repr_string, isreadable, isrecursive).
def _safe_repr(object, context, maxlevels, level):
typ = _type(object)
if typ is str:
if 'locale' not in _sys.modules:
return repr(object), True, False
if "'" in object and '"' not in object:
closure = '"'
quotes = {'"': '\\"'}
else:
closure = "'"
quotes = {"'": "\\'"}
qget = quotes.get
sio = _StringIO()
write = sio.write
for char in object:
if char.isalpha():
write(char)
else:
write(qget(char, repr(char)[1:-1]))
return ("%s%s%s" % (closure, sio.getvalue(), closure)), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}", True, False
objid = _id(object)
if maxlevels and level > maxlevels:
return "{...}", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
for k, v in sorted(object.items()):
krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % _commajoin(components), readable, recursive
if (issubclass(typ, list) and r is list.__repr__) or \
(issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif _len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = _id(object)
if maxlevels and level > maxlevels:
return format % "...", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % _commajoin(components), readable, recursive
rep = repr(object)
return rep, (rep and not rep.startswith('<')), False
def _recursion(object):
return ("<Recursion on %s with id=%s>"
% (_type(object).__name__, _id(object)))
def _perfcheck(object=None):
import time
if object is None:
object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
p = PrettyPrinter()
t1 = time.time()
_safe_repr(object, {}, None, 0)
t2 = time.time()
p.pformat(object)
t3 = time.time()
print "_safe_repr:", t2 - t1
print "pformat:", t3 - t2
if __name__ == "__main__":
_perfcheck()
|
ajdawson/iris
|
refs/heads/master
|
lib/iris/tests/test_intersect.py
|
17
|
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test the intersection of Coords
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import numpy as np
import iris
import iris.cube
import iris.coord_systems
import iris.coords
import iris.tests.stock
class TestCubeIntersectTheoretical(tests.IrisTest):
def test_simple_intersect(self):
cube = iris.cube.Cube(np.array([[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
[4, 5, 6, 7, 8],
[5, 6, 7, 8, 9]], dtype=np.int32))
lonlat_cs = iris.coord_systems.RotatedGeogCS(10, 20)
cube.add_dim_coord(iris.coords.DimCoord(np.arange(5, dtype=np.float32) * 90 - 180, 'longitude', units='degrees', coord_system=lonlat_cs), 1)
cube.add_dim_coord(iris.coords.DimCoord(np.arange(5, dtype=np.float32) * 45 - 90, 'latitude', units='degrees', coord_system=lonlat_cs), 0)
cube.add_aux_coord(iris.coords.DimCoord(points=np.int32(11), long_name='pressure', units='Pa'))
cube.rename("temperature")
cube.units = "K"
cube2 = iris.cube.Cube(np.array([[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
[4, 5, 6, 7, 8],
[5, 6, 7, 8, 50]], dtype=np.int32))
lonlat_cs = iris.coord_systems.RotatedGeogCS(10, 20)
cube2.add_dim_coord(iris.coords.DimCoord(np.arange(5, dtype=np.float32) * 90, 'longitude', units='degrees', coord_system=lonlat_cs), 1)
cube2.add_dim_coord(iris.coords.DimCoord(np.arange(5, dtype=np.float32) * 45 - 90, 'latitude', units='degrees', coord_system=lonlat_cs), 0)
cube2.add_aux_coord(iris.coords.DimCoord(points=np.int32(11), long_name='pressure', units='Pa'))
cube2.rename("")
r = iris.analysis.maths.intersection_of_cubes(cube, cube2)
self.assertCML(r, ('cdm', 'test_simple_cube_intersection.cml'))
class TestCoordIntersect(tests.IrisTest):
def test_commutative(self):
step = 4.0
c1 = iris.coords.DimCoord(np.arange(100) * step)
offset_points = c1.points.copy()
offset_points -= step * 30
c2 = c1.copy(points=offset_points)
i1 = c1.intersect(c2)
i2 = c2.intersect(c1)
self.assertEqual(i1, i2)
if __name__ == "__main__":
tests.main()
|
ojengwa/sympy
|
refs/heads/master
|
sympy/polys/tests/test_rationaltools.py
|
124
|
"""Tests for tools for manipulation of rational expressions. """
from sympy.polys.rationaltools import together
from sympy import S, symbols, Rational, sin, exp, Eq, Integral, Mul
from sympy.abc import x, y, z
A, B = symbols('A,B', commutative=False)
def test_together():
assert together(0) == 0
assert together(1) == 1
assert together(x*y*z) == x*y*z
assert together(x + y) == x + y
assert together(1/x) == 1/x
assert together(1/x + 1) == (x + 1)/x
assert together(1/x + 3) == (3*x + 1)/x
assert together(1/x + x) == (x**2 + 1)/x
assert together(1/x + Rational(1, 2)) == (x + 2)/(2*x)
assert together(Rational(1, 2) + x/2) == Mul(S.Half, x + 1, evaluate=False)
assert together(1/x + 2/y) == (2*x + y)/(y*x)
assert together(1/(1 + 1/x)) == x/(1 + x)
assert together(x/(1 + 1/x)) == x**2/(1 + x)
assert together(1/x + 1/y + 1/z) == (x*y + x*z + y*z)/(x*y*z)
assert together(1/(1 + x + 1/y + 1/z)) == y*z/(y + z + y*z + x*y*z)
assert together(1/(x*y) + 1/(x*y)**2) == y**(-2)*x**(-2)*(1 + x*y)
assert together(1/(x*y) + 1/(x*y)**4) == y**(-4)*x**(-4)*(1 + x**3*y**3)
assert together(1/(x**7*y) + 1/(x*y)**4) == y**(-4)*x**(-7)*(x**3 + y**3)
assert together(5/(2 + 6/(3 + 7/(4 + 8/(5 + 9/x))))) == \
(S(5)/2)*((171 + 119*x)/(279 + 203*x))
assert together(1 + 1/(x + 1)**2) == (1 + (x + 1)**2)/(x + 1)**2
assert together(1 + 1/(x*(1 + x))) == (1 + x*(1 + x))/(x*(1 + x))
assert together(
1/(x*(x + 1)) + 1/(x*(x + 2))) == (3 + 2*x)/(x*(1 + x)*(2 + x))
assert together(1 + 1/(2*x + 2)**2) == (4*(x + 1)**2 + 1)/(4*(x + 1)**2)
assert together(sin(1/x + 1/y)) == sin(1/x + 1/y)
assert together(sin(1/x + 1/y), deep=True) == sin((x + y)/(x*y))
assert together(1/exp(x) + 1/(x*exp(x))) == (1 + x)/(x*exp(x))
assert together(1/exp(2*x) + 1/(x*exp(3*x))) == (1 + exp(x)*x)/(x*exp(3*x))
assert together(Integral(1/x + 1/y, x)) == Integral((x + y)/(x*y), x)
assert together(Eq(1/x + 1/y, 1 + 1/z)) == Eq((x + y)/(x*y), (z + 1)/z)
assert together((A*B)**-1 + (B*A)**-1) == (A*B)**-1 + (B*A)**-1
|
hynekcer/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/lookuperror_a/migrations/__init__.py
|
12133432
| |
zacps/zulip
|
refs/heads/master
|
zerver/lib/__init__.py
|
12133432
| |
edx/edx-enterprise
|
refs/heads/master
|
consent/migrations/__init__.py
|
12133432
| |
ixc/django-reversion
|
refs/heads/master
|
src/tests/test_reversion/migrations/__init__.py
|
12133432
| |
francois-contat/scapy
|
refs/heads/master
|
scapy/layers/gprs.py
|
6
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
GPRS (General Packet Radio Service) for mobile data communication.
"""
from scapy.fields import StrStopField
from scapy.packet import Packet, bind_layers
from scapy.layers.inet import IP
class GPRS(Packet):
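    """Dummy GPRS header: a single StrStopField consumes the raw bytes up to the
    stop pattern b"\\x65\\x00\\x00", after which the remaining payload is dissected
    as IP via the bind_layers() call below."""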
name = "GPRSdummy"
fields_desc = [
StrStopField("dummy", "", b"\x65\x00\x00", 1)
]
bind_layers(GPRS, IP,)
|
ImaginaryLandscape/djangocms-blog
|
refs/heads/develop
|
djangocms_blog/migrations/0001_initial.py
|
12
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import cms.models.fields
import django.utils.timezone
import djangocms_text_ckeditor.fields
import filer.fields.image
import meta_mixin.models
import taggit_autosuggest.managers
from django.conf import settings
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('cms', '__latest__'),
('taggit', '__latest__'),
('filer', '__latest__'),
('cmsplugin_filer_image', '__latest__'),
]
operations = [
migrations.CreateModel(
name='AuthorEntriesPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
('latest_posts', models.IntegerField(default=5, help_text='The number of author articles to be displayed.', verbose_name='Articles')),
('authors', models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='Authors')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='BlogCategory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('date_modified', models.DateTimeField(auto_now=True, verbose_name='modified at')),
('parent', models.ForeignKey(verbose_name='parent', blank=True, to='djangocms_blog.BlogCategory', null=True)),
],
options={
'verbose_name': 'blog category',
'verbose_name_plural': 'blog categories',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BlogCategoryTranslation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language', choices=settings.LANGUAGES)),
('name', models.CharField(max_length=255, verbose_name='name')),
('slug', models.SlugField(verbose_name='slug', blank=True)),
('master', models.ForeignKey(related_name='translations', editable=False, to='djangocms_blog.BlogCategory', null=True)),
],
options={
'db_table': 'djangocms_blog_blogcategory_translation',
'verbose_name': 'blog category Translation',
'default_permissions': (),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LatestPostsPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
                ('latest_posts', models.IntegerField(default=5, help_text='The number of latest articles to be displayed.', verbose_name='Articles')),
('categories', models.ManyToManyField(help_text='Show only the blog articles tagged with chosen categories.', to='djangocms_blog.BlogCategory', blank=True)),
('tags', models.ManyToManyField(help_text='Show only the blog articles tagged with chosen tags.', to='taggit.Tag', blank=True)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_modified', models.DateTimeField(auto_now=True)),
('date_published', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Published Since')),
('date_published_end', models.DateTimeField(null=True, verbose_name='Published Until', blank=True)),
('publish', models.BooleanField(default=False, verbose_name='Publish')),
('enable_comments', models.BooleanField(default=True, verbose_name='Enable comments on post')),
('author', models.ForeignKey(related_name='djangocms_blog_post_author', verbose_name='Author', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('categories', models.ManyToManyField(related_name='blog_posts', verbose_name='category', to='djangocms_blog.BlogCategory')),
('content', cms.models.fields.PlaceholderField(slotname='post_content', editable=False, to='cms.Placeholder', null=True)),
('main_image', filer.fields.image.FilerImageField(related_name='djangocms_blog_post_image', verbose_name='Main image', blank=True, to='filer.Image', null=True)),
('main_image_full', models.ForeignKey(related_name='djangocms_blog_post_full', verbose_name='Main image full', blank=True, to='cmsplugin_filer_image.ThumbnailOption', null=True)),
('main_image_thumbnail', models.ForeignKey(related_name='djangocms_blog_post_thumbnail', verbose_name='Main image thumbnail', blank=True, to='cmsplugin_filer_image.ThumbnailOption', null=True)),
('tags', taggit_autosuggest.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags')),
],
options={
'ordering': ('-date_published', '-date_created'),
'get_latest_by': 'date_published',
'verbose_name': 'blog article',
'verbose_name_plural': 'blog articles',
},
bases=(meta_mixin.models.ModelMeta, models.Model),
),
migrations.CreateModel(
name='PostTranslation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language', choices=settings.LANGUAGES)),
('title', models.CharField(max_length=255, verbose_name='Title')),
('slug', models.SlugField(verbose_name='slug', blank=True)),
('abstract', djangocms_text_ckeditor.fields.HTMLField(verbose_name='Abstract')),
('meta_description', models.TextField(default='', verbose_name='Post meta description', blank=True)),
('meta_keywords', models.TextField(default='', verbose_name='Post meta keywords', blank=True)),
('meta_title', models.CharField(default='', help_text='used in title tag and social sharing', max_length=255, verbose_name='Post meta title', blank=True)),
('post_text', djangocms_text_ckeditor.fields.HTMLField(default='', verbose_name='Text', blank=True)),
('master', models.ForeignKey(related_name='translations', editable=False, to='djangocms_blog.Post', null=True)),
],
options={
'db_table': 'djangocms_blog_post_translation',
'verbose_name': 'blog article Translation',
'default_permissions': (),
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='posttranslation',
unique_together=set([('language_code', 'master'), ('language_code', 'slug')]),
),
migrations.AlterUniqueTogether(
name='blogcategorytranslation',
unique_together=set([('language_code', 'master'), ('language_code', 'slug')]),
),
]
|
levilucio/SyVOLT
|
refs/heads/master
|
UMLRT2Kiltera_MM/transformation/no_contains/HTransition2QInstOUT.py
|
1
|
from core.himesis import Himesis
import uuid
class HTransition2QInstOUT(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule Transition2QInstOUT.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HTransition2QInstOUT, self).__init__(name='HTransition2QInstOUT', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """Transition2QInstOUT"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Transition2QInstOUT')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
self.vs[2]["attr1"] = """Transition2QInstOUT"""
# match class Transition() node
self.add_node()
self.vs[3]["mm__"] = """Transition"""
self.vs[3]["attr1"] = """+"""
# match class OUT2() node
self.add_node()
self.vs[4]["mm__"] = """OUT2"""
self.vs[4]["attr1"] = """1"""
# match class StateMachine() node
self.add_node()
self.vs[5]["mm__"] = """StateMachine"""
self.vs[5]["attr1"] = """1"""
# match class Vertex() node
self.add_node()
self.vs[6]["mm__"] = """Vertex"""
self.vs[6]["attr1"] = """1"""
# apply class Inst() node
self.add_node()
self.vs[7]["mm__"] = """Inst"""
self.vs[7]["attr1"] = """1"""
# apply class Name() node
self.add_node()
self.vs[8]["mm__"] = """Name"""
self.vs[8]["attr1"] = """1"""
# match association Transition--type-->OUT2 node
self.add_node()
self.vs[9]["attr1"] = """type"""
self.vs[9]["mm__"] = """directLink_S"""
# match association Transition--owningStateMachine-->StateMachine node
self.add_node()
self.vs[10]["attr1"] = """owningStateMachine"""
self.vs[10]["mm__"] = """directLink_S"""
# match association StateMachine--exitPoints-->Vertex node
self.add_node()
self.vs[11]["attr1"] = """exitPoints"""
self.vs[11]["mm__"] = """directLink_S"""
# match association Transition--dest-->Vertex node
self.add_node()
self.vs[12]["attr1"] = """dest"""
self.vs[12]["mm__"] = """directLink_S"""
# apply association Inst--channelNames-->Name node
self.add_node()
self.vs[13]["attr1"] = """channelNames"""
self.vs[13]["mm__"] = """directLink_T"""
# Add the edges
self.add_edges([
(0,3), # matchmodel -> match_class Transition()
(0,4), # matchmodel -> match_class OUT2()
(0,5), # matchmodel -> match_class StateMachine()
(0,6), # matchmodel -> match_class Vertex()
(1,7), # applymodel -> -> apply_class Inst()
(1,8), # applymodel -> -> apply_class Name()
(3,9), # match_class Transition() -> association type
(9,4), # association type -> match_class OUT2()
(3,10), # match_class Transition() -> association owningStateMachine
(10,5), # association owningStateMachine -> match_class StateMachine()
(5,11), # match_class StateMachine() -> association exitPoints
(11,6), # association exitPoints -> match_class Vertex()
(3,12), # match_class Transition() -> association dest
(12,6), # association dest -> match_class Vertex()
(7,13), # apply_class Inst() -> association channelNames
(13,8), # association channelNames -> apply_class Name()
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
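        # Inst.name = 'B' concatenated with the matched Vertex name; Inst.__ApplyAttribute = 'instfortrans'; Name.literal = 'sh'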
self["equations"] = [((7,'name'),('concat',(('constant','B'),(6,'name')))), ((7,'__ApplyAttribute'),('constant','instfortrans')), ((8,'literal'),('constant','sh')), ]
|
ravna-livada/Bricknoid
|
refs/heads/master
|
Game/draw.py
|
1
|
import pygame
from constants import GameConstants
class Draw:
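    """Wraps the pygame display surface and font, and renders the menu,
    instruction screen, bricks, paddle, ball and status messages."""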
def __init__(self):
self.constants = GameConstants()
if pygame.display.get_caption():
self.screen = pygame.display.get_surface()
else:
self.screen = pygame.display.set_mode(self.constants.SCREEN_SIZE)
if pygame.font:
self.font = pygame.font.Font(None, 30)
else:
self.font = None
def print_on_screen(self, text, color, hpos, vpos=50):
font_surface = self.font.render(text, False, color)
self.screen.blit(font_surface, (hpos, vpos))
def menu(self):
if self.font:
self.screen.fill(self.constants.BLACK)
self.print_on_screen(
"New game/ continue(n)",
self.constants.WHITE,
self.constants.CENTER_COORDS[0] - 100,
self.constants.CENTER_COORDS[1] - 150)
self.print_on_screen(
"Instructions(i)",
self.constants.WHITE,
self.constants.CENTER_COORDS[0] - 100,
self.constants.CENTER_COORDS[1] - 100)
self.print_on_screen(
"High scores(h) - not ready",
self.constants.WHITE,
self.constants.CENTER_COORDS[0] - 100,
self.constants.CENTER_COORDS[1] - 50)
self.print_on_screen(
"Quit(q)",
self.constants.WHITE,
self.constants.CENTER_COORDS[0] - 100,
self.constants.CENTER_COORDS[1])
def instructions(self):
if self.font:
self.screen.fill(self.constants.BLACK)
self.print_on_screen(
"Press the left arrow to move the platform to the left.",
self.constants.WHITE,
self.constants.CENTER_COORDS[0] - 250,
self.constants.CENTER_COORDS[1] - 100)
self.print_on_screen(
"Press the right arrow to move the platform to the right.",
self.constants.WHITE,
self.constants.CENTER_COORDS[0] - 250,
self.constants.CENTER_COORDS[1] - 50)
self.print_on_screen(
"Press 'Esc' during play time to go to main menu.",
self.constants.WHITE,
self.constants.CENTER_COORDS[0] - 250,
self.constants.CENTER_COORDS[1])
self.print_on_screen(
"Press 'Esc' to go back.",
self.constants.WHITE,
self.constants.CENTER_COORDS[0] - 250,
self.constants.CENTER_COORDS[1] + 50)
def bricks(self, bricks):
for brick in bricks:
pygame.draw.rect(self.screen, self.constants.BRICK_COLOR, brick)
def show_message(self, msg):
if self.font:
size = self.font.size(msg)
font_surface = self.font.render(msg, False, self.constants.WHITE)
x = self.constants.CENTER_COORDS[0] - size[0] / 2
y = self.constants.CENTER_COORDS[1]
self.screen.blit(font_surface, (x, y))
def paddle(self, paddle):
pygame.draw.rect(
self.screen,
self.constants.BLUE,
paddle)
def ball(self, ball):
pygame.draw.circle(
self.screen,
self.constants.WHITE,
(
int(ball.left + self.constants.BALL_RADIUS),
int(ball.top + self.constants.BALL_RADIUS)
),
int(self.constants.BALL_RADIUS))
# def level(self):
|
pbs/django-hubbubs
|
refs/heads/master
|
hubbubs/tests/tests.py
|
2
|
from django.test import TestCase
class Test(TestCase):
def test_sample(self):
self.assertEqual(1+1, 2)
|
motion2015/a3
|
refs/heads/a3
|
common/djangoapps/terrain/steps.py
|
36
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# Disable the "wildcard import" warning so we can bring in all methods from
# course helpers and ui helpers
# pylint: disable=wildcard-import
# Disable the "Unused import %s from wildcard import" warning
# pylint: disable=unused-wildcard-import
# Disable the "unused argument" warning because lettuce uses "step"
# pylint: disable=unused-argument
# django_url is assigned late in the process of loading lettuce,
# so we import this as a module, and then read django_url from
# it to get the correct value
import lettuce.django
from lettuce import world, step
from .course_helpers import *
from .ui_helpers import *
from nose.tools import assert_equals # pylint: disable=no-name-in-module
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from logging import getLogger
logger = getLogger(__name__)
@step(r'I wait (?:for )?"(\d+\.?\d*)" seconds?$')
def wait_for_seconds(step, seconds):
world.wait(seconds)
@step('I reload the page$')
def reload_the_page(step):
world.wait_for_ajax_complete()
world.browser.reload()
world.wait_for_js_to_load()
@step('I press the browser back button$')
def browser_back(step):
world.browser.driver.back()
@step('I (?:visit|access|open) the homepage$')
def i_visit_the_homepage(step):
world.visit('/')
assert world.is_css_present('header.global')
@step(u'I (?:visit|access|open) the dashboard$')
def i_visit_the_dashboard(step):
world.visit('/dashboard')
assert world.is_css_present('section.container.dashboard')
@step('I should be on the dashboard page$')
def i_should_be_on_the_dashboard(step):
assert world.is_css_present('section.container.dashboard')
assert 'Dashboard' in world.browser.title
@step(u'I (?:visit|access|open) the courses page$')
def i_am_on_the_courses_page(step):
world.visit('/courses')
assert world.is_css_present('section.courses')
@step(u'I press the "([^"]*)" button$')
def and_i_press_the_button(step, value):
button_css = 'input[value="%s"]' % value
world.css_click(button_css)
@step(u'I click the link with the text "([^"]*)"$')
def click_the_link_with_the_text_group1(step, linktext):
world.click_link(linktext)
@step('I should see that the path is "([^"]*)"$')
def i_should_see_that_the_path_is(step, path):
if 'COURSE' in world.scenario_dict:
path = path.format(world.scenario_dict['COURSE'].id)
assert world.url_equals(path), (
"path should be {!r} but is {!r}".format(path, world.browser.url)
)
@step(u'the page title should be "([^"]*)"$')
def the_page_title_should_be(step, title):
assert_equals(world.browser.title, title)
@step(u'the page title should contain "([^"]*)"$')
def the_page_title_should_contain(step, title):
assert(title in world.browser.title)
@step('I log in$')
def i_log_in(step):
world.log_in(username='robot', password='test')
@step('I am a logged in user$')
def i_am_logged_in_user(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
@step('I am not logged in$')
def i_am_not_logged_in(step):
world.visit('logout')
@step('I am staff for course "([^"]*)"$')
def i_am_staff_for_course_by_id(step, course_id):
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
world.register_by_course_key(course_key, True)
@step(r'click (?:the|a) link (?:called|with the text) "([^"]*)"$')
def click_the_link_called(step, text):
world.click_link(text)
@step(r'should see that the url is "([^"]*)"$')
def should_have_the_url(step, url):
assert_equals(world.browser.url, url)
@step(r'should see (?:the|a) link (?:called|with the text) "([^"]*)"$')
def should_see_a_link_called(step, text):
assert len(world.browser.find_link_by_text(text)) > 0
@step(r'should see (?:the|a) link with the id "([^"]*)" called "([^"]*)"$')
def should_have_link_with_id_and_text(step, link_id, text):
link = world.browser.find_by_id(link_id)
assert len(link) > 0
assert_equals(link.text, text)
@step(r'should see a link to "([^"]*)" with the text "([^"]*)"$')
def should_have_link_with_path_and_text(step, path, text):
link = world.browser.find_link_by_text(text)
assert len(link) > 0
assert_equals(link.first["href"], lettuce.django.django_url(path))
@step(r'should( not)? see "(.*)" (?:somewhere|anywhere) (?:in|on) (?:the|this) page')
def should_see_in_the_page(step, doesnt_appear, text):
if world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
multiplier = 2
else:
multiplier = 1
if doesnt_appear:
assert world.browser.is_text_not_present(text, wait_time=5 * multiplier)
else:
assert world.browser.is_text_present(text, wait_time=5 * multiplier)
@step('I am logged in$')
def i_am_logged_in(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
world.browser.visit(lettuce.django.django_url('/'))
dash_css = 'section.container.dashboard'
assert world.is_css_present(dash_css)
@step(u'I am an edX user$')
def i_am_an_edx_user(step):
world.create_user('robot', 'test')
@step(u'User "([^"]*)" is an edX user$')
def registered_edx_user(step, uname):
world.create_user(uname, 'test')
@step(u'All dialogs should be closed$')
def dialogs_are_closed(step):
assert world.dialogs_closed()
@step(u'visit the url "([^"]*)"')
def visit_url(step, url):
if 'COURSE' in world.scenario_dict:
url = url.format(world.scenario_dict['COURSE'].id)
world.browser.visit(lettuce.django.django_url(url))
@step(u'wait for AJAX to (?:finish|complete)')
def wait_ajax(_step):
wait_for_ajax_complete()
@step('I will confirm all alerts')
def i_confirm_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return true;} ; window.alert = function(){return;}')
@step('I will cancel all alerts')
def i_cancel_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return false;} ; window.alert = function(){return;}')
@step('I will answer all prompts with "([^"]*)"')
def i_answer_prompts_with(step, prompt):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
    # The answer must be interpolated into the script as a JS string literal
    # before the script is executed.
    world.browser.execute_script('window.prompt = function(){return "%s";}' % prompt)
@step('I run ipdb')
def run_ipdb(_step):
"""Run ipdb as step for easy debugging"""
import ipdb
ipdb.set_trace()
assert True
@step(u'(I am viewing|s?he views) the course team settings$')
def view_course_team_settings(_step, whom):
""" navigates to course team settings page """
world.click_course_settings()
link_css = 'li.nav-course-settings-team a'
world.css_click(link_css)
|
arborh/tensorflow
|
refs/heads/master
|
tensorflow/python/keras/layers/lstm_test.py
|
4
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LSTM layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
@keras_parameterized.run_all_keras_modes
class LSTMLayerTest(keras_parameterized.TestCase):
def test_return_sequences_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
@tf_test_util.run_v2_only
def test_float64_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'return_sequences': True,
'dtype': 'float64'},
input_shape=(num_samples, timesteps, embedding_dim),
input_dtype='float64')
def test_static_shape_inference_LSTM(self):
# Github issue: 15165
timesteps = 3
embedding_dim = 4
units = 2
model = keras.models.Sequential()
inputs = keras.layers.Dense(embedding_dim,
input_shape=(timesteps, embedding_dim))
model.add(inputs)
layer = keras.layers.LSTM(units, return_sequences=True)
model.add(layer)
outputs = model.layers[-1].output
self.assertEqual(outputs.shape.as_list(), [None, timesteps, units])
def test_dynamic_behavior_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = keras.layers.LSTM(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_dropout_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
def test_recurrent_dropout_with_implementation_restriction(self):
layer = keras.layers.LSTM(2, recurrent_dropout=0.1, implementation=2)
    # The implementation is forced to 1 due to the limit of recurrent_dropout.
self.assertEqual(layer.implementation, 1)
@parameterized.parameters([0, 1, 2])
def test_implementation_mode_LSTM(self, implementation_mode):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'implementation': implementation_mode},
input_shape=(num_samples, timesteps, embedding_dim))
def test_constraints_LSTM(self):
embedding_dim = 4
layer_class = keras.layers.LSTM
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
@parameterized.parameters([True, False])
def test_with_masking_layer_LSTM(self, unroll):
layer_class = keras.layers.LSTM
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=unroll))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
@parameterized.parameters([True, False])
def test_masking_with_stacking_LSTM(self, unroll):
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
lstm_cells = [keras.layers.LSTMCell(10), keras.layers.LSTMCell(5)]
model.add(keras.layers.RNN(
lstm_cells, return_sequences=True, unroll=unroll))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_LSTM(self):
layer_class = keras.layers.LSTM
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
def test_specify_initial_state_keras_tensor(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
# Test with Keras tensor
inputs = keras.Input((timesteps, embedding_dim))
initial_state = [keras.Input((units,)) for _ in range(num_states)]
layer = keras.layers.LSTM(units)
if len(initial_state) == 1:
output = layer(inputs, initial_state=initial_state[0])
else:
output = layer(inputs, initial_state=initial_state)
self.assertTrue(
any(initial_state[0] is t
for t in layer._inbound_nodes[0].input_tensors))
model = keras.models.Model([inputs] + initial_state, output)
model.compile(
loss='categorical_crossentropy',
optimizer=adam.AdamOptimizer(),
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.train_on_batch([inputs] + initial_state, targets)
def test_specify_initial_state_non_keras_tensor(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
# Test with non-Keras tensor
inputs = keras.Input((timesteps, embedding_dim))
initial_state = [keras.backend.random_normal_variable(
(num_samples, units), 0, 1)
for _ in range(num_states)]
layer = keras.layers.LSTM(units)
output = layer(inputs, initial_state=initial_state)
model = keras.models.Model(inputs, output)
model.compile(
loss='categorical_crossentropy',
optimizer=adam.AdamOptimizer(),
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.random.random((num_samples, timesteps, embedding_dim))
targets = np.random.random((num_samples, units))
model.train_on_batch(inputs, targets)
def test_reset_states_with_values(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
layer = keras.layers.LSTM(units, stateful=True)
layer.build((num_samples, timesteps, embedding_dim))
layer.reset_states()
assert len(layer.states) == num_states
assert layer.states[0] is not None
self.assertAllClose(
keras.backend.eval(layer.states[0]),
np.zeros(keras.backend.int_shape(layer.states[0])),
atol=1e-4)
state_shapes = [keras.backend.int_shape(state) for state in layer.states]
values = [np.ones(shape) for shape in state_shapes]
if len(values) == 1:
values = values[0]
layer.reset_states(values)
self.assertAllClose(
keras.backend.eval(layer.states[0]),
np.ones(keras.backend.int_shape(layer.states[0])),
atol=1e-4)
# Test with invalid data
with self.assertRaises(ValueError):
layer.reset_states([1] * (len(layer.states) + 1))
def test_specify_state_with_masking(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
inputs = keras.Input((timesteps, embedding_dim))
_ = keras.layers.Masking()(inputs)
initial_state = [keras.Input((units,)) for _ in range(num_states)]
output = keras.layers.LSTM(units)(inputs, initial_state=initial_state)
model = keras.models.Model([inputs] + initial_state, output)
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.train_on_batch([inputs] + initial_state, targets)
def test_return_state(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
layer = keras.layers.LSTM(units, return_state=True, stateful=True)
outputs = layer(inputs)
state = outputs[1:]
assert len(state) == num_states
model = keras.models.Model(inputs, state[0])
inputs = np.random.random((num_samples, timesteps, embedding_dim))
state = model.predict(inputs)
self.assertAllClose(keras.backend.eval(layer.states[0]), state, atol=1e-4)
def test_state_reuse(self):
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
layer = keras.layers.LSTM(units, return_state=True, return_sequences=True)
outputs = layer(inputs)
output, state = outputs[0], outputs[1:]
output = keras.layers.LSTM(units)(output, initial_state=state)
model = keras.models.Model(inputs, output)
inputs = np.random.random((num_samples, timesteps, embedding_dim))
outputs = model.predict(inputs)
def test_initial_states_as_other_inputs(self):
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
num_states = 2
layer_class = keras.layers.LSTM
# Test with Keras tensor
main_inputs = keras.Input((timesteps, embedding_dim))
initial_state = [keras.Input((units,)) for _ in range(num_states)]
inputs = [main_inputs] + initial_state
layer = layer_class(units)
output = layer(inputs)
self.assertTrue(
any(initial_state[0] is t
for t in layer._inbound_nodes[0].input_tensors))
model = keras.models.Model(inputs, output)
model.compile(
loss='categorical_crossentropy',
optimizer=adam.AdamOptimizer(),
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
main_inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.train_on_batch([main_inputs] + initial_state, targets)
def test_regularizers_LSTM(self):
embedding_dim = 4
layer_class = keras.layers.LSTM
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2',
activity_regularizer='l1')
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
if context.executing_eagerly():
self.assertEqual(len(layer.losses), 4)
else:
self.assertEqual(len(layer.get_losses_for(x)), 1)
def test_statefulness_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.LSTM
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
self.assertAllClose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
self.assertAllClose(out7, out6, atol=1e-5)
if __name__ == '__main__':
test.main()
|
SatoshiNXSimudrone/sl4a-damon-clone
|
refs/heads/master
|
python-build/python-libs/gdata/tests/gdata_tests/spreadsheet_test.py
|
92
|
#!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)'
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import gdata
import gdata.spreadsheet
SPREADSHEETS_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gs="http://schemas.google.com/spreadsheets/2006">
<id>http://spreadsheets.google.com/feeds/spreadsheets/private/full</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<title type="text">Available Spreadsheets</title>
<link rel="alternate" type="text/html"
href="http://spreadsheets.google.com/ccc?key=key"/>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<author>
<name>Fitzwilliam Darcy</name>
<email>fitz@gmail.com</email>
</author>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>1</openSearch:itemsPerPage>
<entry>
<id>http://spreadsheets.google.com/feeds/spreadsheets/private/full/key</id>
<updated>2006-11-17T18:24:18.231Z</updated>
<title type="text">Groceries R Us</title>
<content type="text">Groceries R Us</content>
<link rel="http://schemas.google.com/spreadsheets/2006#worksheetsfeed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<link rel="alternate" type="text/html"
href="http://spreadsheets.google.com/ccc?key=key"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/spreadsheets/private/full/key"/>
<author>
<name>Fitzwilliam Darcy</name>
<email>fitz@gmail.com</email>
</author>
</entry>
</feed>
"""
WORKSHEETS_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gs="http://schemas.google.com/spreadsheets/2006">
<id>http://spreadsheets.google.com/feeds/worksheets/key/private/full</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<title type="text">Groceries R Us</title>
<link rel="alternate" type="text/html"
href="http://spreadsheets.google.com/ccc?key=key"/>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<author>
<name>Fitzwilliam Darcy</name>
<email>fitz@gmail.com</email>
</author>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>1</openSearch:itemsPerPage>
<entry>
<id>http://spreadsheets.google.com/feeds/worksheets/key/private/full/od6</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<title type="text">Sheet1</title>
<content type="text">Sheet1</content>
<link rel="http://schemas.google.com/spreadsheets/2006#listfeed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/key/od6/private/full"/>
<link rel="http://schemas.google.com/spreadsheets/2006#cellsfeed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/key/od6/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full/od6"/>
<gs:rowCount>100</gs:rowCount>
<gs:colCount>20</gs:colCount>
</entry>
</feed>
"""
CELLS_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gs="http://schemas.google.com/spreadsheets/2006">
<id>http://spreadsheets.google.com/feeds/cells/key/od6/private/full</id>
<updated>2006-11-17T18:27:32.543Z</updated>
<title type="text">Sheet1</title>
<link rel="alternate" type="text/html"
href="http://spreadsheets.google.com/ccc?key=key"/>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/key/od6/private/full"/>
<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/key/od6/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/key/od6/private/full"/>
<author>
<name>Fitzwilliam Darcy</name>
<email>fitz@gmail.com</email>
</author>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>1</openSearch:itemsPerPage>
<gs:rowCount>100</gs:rowCount>
<gs:colCount>20</gs:colCount>
<entry>
<id>http://spreadsheets.google.com/feeds/cells/key/od6/private/full/R1C1</id>
<updated>2006-11-17T18:27:32.543Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#cell"/>
<title type="text">A1</title>
<content type="text">Name</content>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/key/od6/private/full/R1C1"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/key/od6/private/full/R1C1/bgvjf"/>
<gs:cell row="1" col="1" inputValue="Name">Name</gs:cell>
</entry>
<entry>
<id>http://spreadsheets.google.com/feeds/cells/key/od6/private/full/R1C2</id>
<updated>2006-11-17T18:27:32.543Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#cell"/>
<title type="text">B1</title>
<content type="text">Hours</content>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/key/od6/private/full/R1C2"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/key/od6/private/full/R1C2/1pn567"/>
<gs:cell row="1" col="2" inputValue="Hours">Hours</gs:cell>
</entry>
</feed>
"""
LIST_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gsx="http://schemas.google.com/spreadsheets/2006/extended">
<id>http://spreadsheets.google.com/feeds/list/key/od6/private/full</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<title type="text">Sheet1</title>
<link rel="alternate" type="text/html"
href="http://spreadsheets.google.com/ccc?key=key"/>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/key/od6/private/full"/>
<link rel="http://schemas.google.com/g/2005#post"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/key/od6/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/key/od6/private/full"/>
<author>
<name>Fitzwilliam Darcy</name>
<email>fitz@gmail.com</email>
</author>
<openSearch:totalResults>2</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>2</openSearch:itemsPerPage>
<entry>
<id>http://spreadsheets.google.com/feeds/list/key/od6/private/full/cokwr</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#list"/>
<title type="text">Bingley</title>
<content type="text">Hours: 10, Items: 2, IPM: 0.0033</content>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/key/od6/private/full/cokwr"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/key/od6/private/full/cokwr/2ehkc2oh7d"/>
<gsx:name>Bingley</gsx:name>
<gsx:hours>10</gsx:hours>
<gsx:items>2</gsx:items>
<gsx:ipm>0.0033</gsx:ipm>
</entry>
<entry>
<id>http://spreadsheets.google.com/feeds/list/key/od6/private/full/cyevm</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#list"/>
<title type="text">Charlotte</title>
<content type="text">Hours: 60, Items: 18000, IPM: 5</content>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/key/od6/private/full/cyevm"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/key/od6/private/full/cyevm/64rl27px3zyn"/>
<gsx:name>Charlotte</gsx:name>
<gsx:hours>60</gsx:hours>
<gsx:items>18000</gsx:items>
<gsx:ipm>5</gsx:ipm>
</entry>
</feed>
"""
class ColCountTest(unittest.TestCase):
def setUp(self):
self.col_count = gdata.spreadsheet.ColCount()
def testToAndFromString(self):
self.col_count.text = '20'
self.assert_(self.col_count.text == '20')
new_col_count = gdata.spreadsheet.ColCountFromString(self.col_count.ToString())
self.assert_(self.col_count.text == new_col_count.text)
class RowCountTest(unittest.TestCase):
def setUp(self):
self.row_count = gdata.spreadsheet.RowCount()
def testToAndFromString(self):
self.row_count.text = '100'
self.assert_(self.row_count.text == '100')
new_row_count = gdata.spreadsheet.RowCountFromString(self.row_count.ToString())
self.assert_(self.row_count.text == new_row_count.text)
class CellTest(unittest.TestCase):
def setUp(self):
self.cell = gdata.spreadsheet.Cell()
def testToAndFromString(self):
self.cell.text = 'test cell'
self.assert_(self.cell.text == 'test cell')
self.cell.row = '1'
self.assert_(self.cell.row == '1')
self.cell.col = '2'
self.assert_(self.cell.col == '2')
self.cell.inputValue = 'test input value'
self.assert_(self.cell.inputValue == 'test input value')
self.cell.numericValue = 'test numeric value'
self.assert_(self.cell.numericValue == 'test numeric value')
new_cell = gdata.spreadsheet.CellFromString(self.cell.ToString())
self.assert_(self.cell.text == new_cell.text)
self.assert_(self.cell.row == new_cell.row)
self.assert_(self.cell.col == new_cell.col)
self.assert_(self.cell.inputValue == new_cell.inputValue)
self.assert_(self.cell.numericValue == new_cell.numericValue)
class CustomTest(unittest.TestCase):
def setUp(self):
self.custom = gdata.spreadsheet.Custom()
def testToAndFromString(self):
self.custom.text = 'value'
self.custom.column = 'column_name'
self.assert_(self.custom.text == 'value')
self.assert_(self.custom.column == 'column_name')
new_custom = gdata.spreadsheet.CustomFromString(self.custom.ToString())
self.assert_(self.custom.text == new_custom.text)
self.assert_(self.custom.column == new_custom.column)
class SpreadsheetsWorksheetTest(unittest.TestCase):
def setUp(self):
self.worksheet = gdata.spreadsheet.SpreadsheetsWorksheet()
def testToAndFromString(self):
self.worksheet.row_count = gdata.spreadsheet.RowCount(text='100')
self.assert_(self.worksheet.row_count.text == '100')
self.worksheet.col_count = gdata.spreadsheet.ColCount(text='20')
self.assert_(self.worksheet.col_count.text == '20')
new_worksheet = gdata.spreadsheet.SpreadsheetsWorksheetFromString(
self.worksheet.ToString())
self.assert_(self.worksheet.row_count.text == new_worksheet.row_count.text)
self.assert_(self.worksheet.col_count.text == new_worksheet.col_count.text)
class SpreadsheetsCellTest(unittest.TestCase):
def setUp(self):
self.entry = gdata.spreadsheet.SpreadsheetsCell()
def testToAndFromString(self):
self.entry.cell = gdata.spreadsheet.Cell(text='my cell', row='1', col='2',
inputValue='my input value', numericValue='my numeric value')
self.assert_(self.entry.cell.text == 'my cell')
self.assert_(self.entry.cell.row == '1')
self.assert_(self.entry.cell.col == '2')
self.assert_(self.entry.cell.inputValue == 'my input value')
self.assert_(self.entry.cell.numericValue == 'my numeric value')
new_cell = gdata.spreadsheet.SpreadsheetsCellFromString(self.entry.ToString())
self.assert_(self.entry.cell.text == new_cell.cell.text)
self.assert_(self.entry.cell.row == new_cell.cell.row)
self.assert_(self.entry.cell.col == new_cell.cell.col)
self.assert_(self.entry.cell.inputValue == new_cell.cell.inputValue)
self.assert_(self.entry.cell.numericValue == new_cell.cell.numericValue)
class SpreadsheetsListTest(unittest.TestCase):
def setUp(self):
self.row = gdata.spreadsheet.SpreadsheetsList()
def testToAndFromString(self):
self.row.custom['column_1'] = gdata.spreadsheet.Custom(column='column_1',
text='my first column')
self.row.custom['column_2'] = gdata.spreadsheet.Custom(column='column_2',
text='my second column')
self.assert_(self.row.custom['column_1'].column == 'column_1')
self.assert_(self.row.custom['column_1'].text == 'my first column')
self.assert_(self.row.custom['column_2'].column == 'column_2')
self.assert_(self.row.custom['column_2'].text == 'my second column')
new_row = gdata.spreadsheet.SpreadsheetsListFromString(self.row.ToString())
self.assert_(self.row.custom['column_1'].column == new_row.custom['column_1'].column)
self.assert_(self.row.custom['column_1'].text == new_row.custom['column_1'].text)
self.assert_(self.row.custom['column_2'].column == new_row.custom['column_2'].column)
self.assert_(self.row.custom['column_2'].text == new_row.custom['column_2'].text)
class SpreadsheetsSpreadsheetsFeedTest(unittest.TestCase):
def setUp(self):
#self.item_feed = gdata.spreadsheet.SpreadsheetSpreadsheetsFeed()
self.feed = gdata.spreadsheet.SpreadsheetsSpreadsheetsFeedFromString(
SPREADSHEETS_FEED)
def testToAndFromString(self):
self.assert_(len(self.feed.entry) == 1)
for an_entry in self.feed.entry:
self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsSpreadsheet))
new_feed = gdata.spreadsheet.SpreadsheetsSpreadsheetsFeedFromString(
str(self.feed))
for an_entry in new_feed.entry:
self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsSpreadsheet))
class SpreadsheetsWorksheetsFeedTest(unittest.TestCase):
def setUp(self):
#self.item_feed = gdata.spreadsheet.SpreadsheetWorksheetsFeed()
self.feed = gdata.spreadsheet.SpreadsheetsWorksheetsFeedFromString(
WORKSHEETS_FEED)
def testToAndFromString(self):
self.assert_(len(self.feed.entry) == 1)
for an_entry in self.feed.entry:
self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsWorksheet))
new_feed = gdata.spreadsheet.SpreadsheetsWorksheetsFeedFromString(
str(self.feed))
for an_entry in new_feed.entry:
self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsWorksheet))
class SpreadsheetsCellsFeedTest(unittest.TestCase):
def setUp(self):
#self.item_feed = gdata.spreadsheet.SpreadsheetCellsFeed()
self.feed = gdata.spreadsheet.SpreadsheetsCellsFeedFromString(
CELLS_FEED)
def testToAndFromString(self):
self.assert_(len(self.feed.entry) == 2)
for an_entry in self.feed.entry:
self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsCell))
new_feed = gdata.spreadsheet.SpreadsheetsCellsFeedFromString(str(self.feed))
self.assert_(isinstance(new_feed.row_count,
gdata.spreadsheet.RowCount))
self.assert_(new_feed.row_count.text == '100')
self.assert_(isinstance(new_feed.col_count,
gdata.spreadsheet.ColCount))
self.assert_(new_feed.col_count.text == '20')
for an_entry in new_feed.entry:
self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsCell))
class SpreadsheetsListFeedTest(unittest.TestCase):
def setUp(self):
#self.item_feed = gdata.spreadsheet.SpreadsheetListFeed()
self.feed = gdata.spreadsheet.SpreadsheetsListFeedFromString(
LIST_FEED)
def testToAndFromString(self):
self.assert_(len(self.feed.entry) == 2)
for an_entry in self.feed.entry:
self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsList))
new_feed = gdata.spreadsheet.SpreadsheetsListFeedFromString(str(self.feed))
for an_entry in new_feed.entry:
self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsList))
if __name__ == '__main__':
unittest.main()
|
joemathai/problems
|
refs/heads/master
|
Project Euler/81.py
|
1
|
#project euler 81
size=80
data="4445,2697,5115,718,2209,2212,654,4348,3079,6821,7668,3276,8874,4190,3785,2752,9473,7817,9137,496,7338,3434,7152,4355,4552,7917,7827,2460,2350,691,3514,5880,3145,7633,7199,3783,5066,7487,3285,1084,8985,760,872,8609,8051,1134,9536,5750,9716,9371,7619,5617,275,9721,2997,2698,1887,8825,6372,3014,2113,7122,7050,6775,5948,2758,1219,3539,348,7989,2735,9862,1263,8089,6401,9462,3168,2758,3748,5870,1096,20,1318,7586,5167,2642,1443,5741,7621,7030,5526,4244,2348,4641,9827,2448,6918,5883,3737,300,7116,6531,567,5997,3971,6623,820,6148,3287,1874,7981,8424,7672,7575,6797,6717,1078,5008,4051,8795,5820,346,1851,6463,2117,6058,3407,8211,117,4822,1317,4377,4434,5925,8341,4800,1175,4173,690,8978,7470,1295,3799,8724,3509,9849,618,3320,7068,9633,2384,7175,544,6583,1908,9983,481,4187,9353,9377,9607,7385,521,6084,1364,8983,7623,1585,6935,8551,2574,8267,4781,3834,2764,2084,2669,4656,9343,7709,2203,9328,8004,6192,5856,3555,2260,5118,6504,1839,9227,1259,9451,1388,7909,5733,6968,8519,9973,1663,5315,7571,3035,4325,4283,2304,6438,3815,9213,9806,9536,196,5542,6907,2475,1159,5820,9075,9470,2179,9248,1828,4592,9167,3713,4640,47,3637,309,7344,6955,346,378,9044,8635,7466,5036,9515,6385,9230,7206,3114,7760,1094,6150,5182,7358,7387,4497,955,101,1478,7777,6966,7010,8417,6453,4955,3496,107,449,8271,131,2948,6185,784,5937,8001,6104,8282,4165,3642,710,2390,575,715,3089,6964,4217,192,5949,7006,715,3328,1152,66,8044,4319,1735,146,4818,5456,6451,4113,1063,4781,6799,602,1504,6245,6550,1417,1343,2363,3785,5448,4545,9371,5420,5068,4613,4882,4241,5043,7873,8042,8434,3939,9256,2187,3620,8024,577,9997,7377,7682,1314,1158,6282,6310,1896,2509,5436,1732,9480,706,496,101,6232,7375,2207,2306,110,6772,3433,2878,8140,5933,8688,1399,2210,7332,6172,6403,7333,4044,2291,1790,2446,7390,8698,5723,3678,7104,1825,2040,140,3982,4905,4160,2200,5041,2512,1488,2268,1175,7588,8321,8078,7312,977,5257,8465,5068,3453,3096,1651,7906,253,9250,6021,8791,8109,6651,3412,345,4778,5152,4883,7505,1074,5438,9008,2679,5397,5429,2652,3403,770,9188,4248,2493,4361,8327,9587,707,9525,5913,93,1899,328,2876,3604,673,8576,6908,7659,2544,3359,3883,5273,6587,3065,1749,3223,604,9925,6941,2823,8767,7039,3290,3214,1787,7904,3421,7137,9560,8451,2669,9219,6332,1576,5477,6755,8348,4164,4307,2984,4012,6629,1044,2874,6541,4942,903,1404,9125,5160,8836,4345,2581,460,8438,1538,5507,668,3352,2678,6942,4295,1176,5596,1521,3061,9868,7037,7129,8933,6659,5947,5063,3653,9447,9245,2679,767,714,116,8558,163,3927,8779,158,5093,2447,5782,3967,1716,931,7772,8164,1117,9244,5783,7776,3846,8862,6014,2330,6947,1777,3112,6008,3491,1906,5952,314,4602,8994,5919,9214,3995,5026,7688,6809,5003,3128,2509,7477,110,8971,3982,8539,2980,4689,6343,5411,2992,5270,5247,9260,2269,7474,1042,7162,5206,1232,4556,4757,510,3556,5377,1406,5721,4946,2635,7847,4251,8293,8281,6351,4912,287,2870,3380,3948,5322,3840,4738,9563,1906,6298,3234,8959,1562,6297,8835,7861,239,6618,1322,2553,2213,5053,5446,4402,6500,5182,8585,6900,5756,9661,903,5186,7687,5998,7997,8081,8955,4835,6069,2621,1581,732,9564,1082,1853,5442,1342,520,1737,3703,5321,4793,2776,1508,1647,9101,2499,6891,4336,7012,3329,3212,1442,9993,3988,4930,7706,9444,3401,5891,9716,1228,7107,109,3563,2700,6161,5039,4992,2242,8541,7372,2067,1294,3058,1306,320,8881,5756,9326,411,8650,8824,5495,8282,8397,2000,1228,7817,2099,6473,3571,5994,4447,1299,5991,543,7874,2297,1651,101,2093,3463,9189,6872,6118,872,1008,1779,2805,9084,4048,2123,5877,55,3075,1737,9459,4535,6453,3644,108,5982,4437,5213,1340,6967,9943,5815,669,8074,1838,6979,9132,9315,715,5048,3327,4030,7177,6336,9933,5296,2621
,4785,2755,4832,2512,2118,2244,4407,2170,499,7532,9742,5051,7687,970,6924,3527,4694,5145,1306,2165,5940,2425,8910,3513,1909,6983,346,6377,4304,9330,7203,6605,3709,3346,970,369,9737,5811,4427,9939,3693,8436,5566,1977,3728,2399,3985,8303,2492,5366,9802,9193,7296,1033,5060,9144,2766,1151,7629,5169,5995,58,7619,7565,4208,1713,6279,3209,4908,9224,7409,1325,8540,6882,1265,1775,3648,4690,959,5837,4520,5394,1378,9485,1360,4018,578,9174,2932,9890,3696,116,1723,1178,9355,7063,1594,1918,8574,7594,7942,1547,6166,7888,354,6932,4651,1010,7759,6905,661,7689,6092,9292,3845,9605,8443,443,8275,5163,7720,7265,6356,7779,1798,1754,5225,6661,1180,8024,5666,88,9153,1840,3508,1193,4445,2648,3538,6243,6375,8107,5902,5423,2520,1122,5015,6113,8859,9370,966,8673,2442,7338,3423,4723,6533,848,8041,7921,8277,4094,5368,7252,8852,9166,2250,2801,6125,8093,5738,4038,9808,7359,9494,601,9116,4946,2702,5573,2921,9862,1462,1269,2410,4171,2709,7508,6241,7522,615,2407,8200,4189,5492,5649,7353,2590,5203,4274,710,7329,9063,956,8371,3722,4253,4785,1194,4828,4717,4548,940,983,2575,4511,2938,1827,2027,2700,1236,841,5760,1680,6260,2373,3851,1841,4968,1172,5179,7175,3509,4420,1327,3560,2376,6260,2988,9537,4064,4829,8872,9598,3228,1792,7118,9962,9336,4368,9189,6857,1829,9863,6287,7303,7769,2707,8257,2391,2009,3975,4993,3068,9835,3427,341,8412,2134,4034,8511,6421,3041,9012,2983,7289,100,1355,7904,9186,6920,5856,2008,6545,8331,3655,5011,839,8041,9255,6524,3862,8788,62,7455,3513,5003,8413,3918,2076,7960,6108,3638,6999,3436,1441,4858,4181,1866,8731,7745,3744,1000,356,8296,8325,1058,1277,4743,3850,2388,6079,6462,2815,5620,8495,5378,75,4324,3441,9870,1113,165,1544,1179,2834,562,6176,2313,6836,8839,2986,9454,5199,6888,1927,5866,8760,320,1792,8296,7898,6121,7241,5886,5814,2815,8336,1576,4314,3109,2572,6011,2086,9061,9403,3947,5487,9731,7281,3159,1819,1334,3181,5844,5114,9898,4634,2531,4412,6430,4262,8482,4546,4555,6804,2607,9421,686,8649,8860,7794,6672,9870,152,1558,4963,8750,4754,6521,6256,8818,5208,5691,9659,8377,9725,5050,5343,2539,6101,1844,9700,7750,8114,5357,3001,8830,4438,199,9545,8496,43,2078,327,9397,106,6090,8181,8646,6414,7499,5450,4850,6273,5014,4131,7639,3913,6571,8534,9703,4391,7618,445,1320,5,1894,6771,7383,9191,4708,9706,6939,7937,8726,9382,5216,3685,2247,9029,8154,1738,9984,2626,9438,4167,6351,5060,29,1218,1239,4785,192,5213,8297,8974,4032,6966,5717,1179,6523,4679,9513,1481,3041,5355,9303,9154,1389,8702,6589,7818,6336,3539,5538,3094,6646,6702,6266,2759,4608,4452,617,9406,8064,6379,444,5602,4950,1810,8391,1536,316,8714,1178,5182,5863,5110,5372,4954,1978,2971,5680,4863,2255,4630,5723,2168,538,1692,1319,7540,440,6430,6266,7712,7385,5702,620,641,3136,7350,1478,3155,2820,9109,6261,1122,4470,14,8493,2095,1046,4301,6082,474,4974,7822,2102,5161,5172,6946,8074,9716,6586,9962,9749,5015,2217,995,5388,4402,7652,6399,6539,1349,8101,3677,1328,9612,7922,2879,231,5887,2655,508,4357,4964,3554,5930,6236,7384,4614,280,3093,9600,2110,7863,2631,6626,6620,68,1311,7198,7561,1768,5139,1431,221,230,2940,968,5283,6517,2146,1646,869,9402,7068,8645,7058,1765,9690,4152,2926,9504,2939,7504,6074,2944,6470,7859,4659,736,4951,9344,1927,6271,8837,8711,3241,6579,7660,5499,5616,3743,5801,4682,9748,8796,779,1833,4549,8138,4026,775,4170,2432,4174,3741,7540,8017,2833,4027,396,811,2871,1150,9809,2719,9199,8504,1224,540,2051,3519,7982,7367,2761,308,3358,6505,2050,4836,5090,7864,805,2566,2409,6876,3361,8622,5572,5895,3280,441,7893,8105,1634,2929,274,3926,7786,6123,8233,9921,2674,5340,1445,203,4585,3837,5759,338,7444,7968,7742,3755,1591,4839,1705,650,7061,2461,9230,9391,9
373,2413,1213,431,7801,4994,2380,2703,6161,6878,8331,2538,6093,1275,5065,5062,2839,582,1014,8109,3525,1544,1569,8622,7944,2905,6120,1564,1839,5570,7579,1318,2677,5257,4418,5601,7935,7656,5192,1864,5886,6083,5580,6202,8869,1636,7907,4759,9082,5854,3185,7631,6854,5872,5632,5280,1431,2077,9717,7431,4256,8261,9680,4487,4752,4286,1571,1428,8599,1230,7772,4221,8523,9049,4042,8726,7567,6736,9033,2104,4879,4967,6334,6716,3994,1269,8995,6539,3610,7667,6560,6065,874,848,4597,1711,7161,4811,6734,5723,6356,6026,9183,2586,5636,1092,7779,7923,8747,6887,7505,9909,1792,3233,4526,3176,1508,8043,720,5212,6046,4988,709,5277,8256,3642,1391,5803,1468,2145,3970,6301,7767,2359,8487,9771,8785,7520,856,1605,8972,2402,2386,991,1383,5963,1822,4824,5957,6511,9868,4113,301,9353,6228,2881,2966,6956,9124,9574,9233,1601,7340,973,9396,540,4747,8590,9535,3650,7333,7583,4806,3593,2738,8157,5215,8472,2284,9473,3906,6982,5505,6053,7936,6074,7179,6688,1564,1103,6860,5839,2022,8490,910,7551,7805,881,7024,1855,9448,4790,1274,3672,2810,774,7623,4223,4850,6071,9975,4935,1915,9771,6690,3846,517,463,7624,4511,614,6394,3661,7409,1395,8127,8738,3850,9555,3695,4383,2378,87,6256,6740,7682,9546,4255,6105,2000,1851,4073,8957,9022,6547,5189,2487,303,9602,7833,1628,4163,6678,3144,8589,7096,8913,5823,4890,7679,1212,9294,5884,2972,3012,3359,7794,7428,1579,4350,7246,4301,7779,7790,3294,9547,4367,3549,1958,8237,6758,3497,3250,3456,6318,1663,708,7714,6143,6890,3428,6853,9334,7992,591,6449,9786,1412,8500,722,5468,1371,108,3939,4199,2535,7047,4323,1934,5163,4166,461,3544,2767,6554,203,6098,2265,9078,2075,4644,6641,8412,9183,487,101,7566,5622,1975,5726,2920,5374,7779,5631,3753,3725,2672,3621,4280,1162,5812,345,8173,9785,1525,955,5603,2215,2580,5261,2765,2990,5979,389,3907,2484,1232,5933,5871,3304,1138,1616,5114,9199,5072,7442,7245,6472,4760,6359,9053,7876,2564,9404,3043,9026,2261,3374,4460,7306,2326,966,828,3274,1712,3446,3975,4565,8131,5800,4570,2306,8838,4392,9147,11,3911,7118,9645,4994,2028,6062,5431,2279,8752,2658,7836,994,7316,5336,7185,3289,1898,9689,2331,5737,3403,1124,2679,3241,7748,16,2724,5441,6640,9368,9081,5618,858,4969,17,2103,6035,8043,7475,2181,939,415,1617,8500,8253,2155,7843,7974,7859,1746,6336,3193,2617,8736,4079,6324,6645,8891,9396,5522,6103,1857,8979,3835,2475,1310,7422,610,8345,7615,9248,5397,5686,2988,3446,4359,6634,9141,497,9176,6773,7448,1907,8454,916,1596,2241,1626,1384,2741,3649,5362,8791,7170,2903,2475,5325,6451,924,3328,522,90,4813,9737,9557,691,2388,1383,4021,1609,9206,4707,5200,7107,8104,4333,9860,5013,1224,6959,8527,1877,4545,7772,6268,621,4915,9349,5970,706,9583,3071,4127,780,8231,3017,9114,3836,7503,2383,1977,4870,8035,2379,9704,1037,3992,3642,1016,4303,5093,138,4639,6609,1146,5565,95,7521,9077,2272,974,4388,2465,2650,722,4998,3567,3047,921,2736,7855,173,2065,4238,1048,5,6847,9548,8632,9194,5942,4777,7910,8971,6279,7253,2516,1555,1833,3184,9453,9053,6897,7808,8629,4877,1871,8055,4881,7639,1537,7701,2508,7564,5845,5023,2304,5396,3193,2955,1088,3801,6203,1748,3737,1276,13,4120,7715,8552,3047,2921,106,7508,304,1280,7140,2567,9135,5266,6237,4607,7527,9047,522,7371,4883,2540,5867,6366,5301,1570,421,276,3361,527,6637,4861,2401,7522,5808,9371,5298,2045,5096,5447,7755,5115,7060,8529,4078,1943,1697,1764,5453,7085,960,2405,739,2100,5800,728,9737,5704,5693,1431,8979,6428,673,7540,6,7773,5857,6823,150,5869,8486,684,5816,9626,7451,5579,8260,3397,5322,6920,1879,2127,2884,5478,4977,9016,6165,6292,3062,5671,5968,78,4619,4763,9905,7127,9390,5185,6923,3721,9164,9705,4341,1031,1046,5127,7376,6528,3248,4941,1178,7889,3364,4486,5358,9402
,9158,8600,1025,874,1839,1783,309,9030,1843,845,8398,1433,7118,70,8071,2877,3904,8866,6722,4299,10,1929,5897,4188,600,1889,3325,2485,6473,4474,7444,6992,4846,6166,4441,2283,2629,4352,7775,1101,2214,9985,215,8270,9750,2740,8361,7103,5930,8664,9690,8302,9267,344,2077,1372,1880,9550,5825,8517,7769,2405,8204,1060,3603,7025,478,8334,1997,3692,7433,9101,7294,7498,9415,5452,3850,3508,6857,9213,6807,4412,7310,854,5384,686,4978,892,8651,3241,2743,3801,3813,8588,6701,4416,6990,6490,3197,6838,6503,114,8343,5844,8646,8694,65,791,5979,2687,2621,2019,8097,1423,3644,9764,4921,3266,3662,5561,2476,8271,8138,6147,1168,3340,1998,9874,6572,9873,6659,5609,2711,3931,9567,4143,7833,8887,6223,2099,2700,589,4716,8333,1362,5007,2753,2848,4441,8397,7192,8191,4916,9955,6076,3370,6396,6971,3156,248,3911,2488,4930,2458,7183,5455,170,6809,6417,3390,1956,7188,577,7526,2203,968,8164,479,8699,7915,507,6393,4632,1597,7534,3604,618,3280,6061,9793,9238,8347,568,9645,2070,5198,6482,5000,9212,6655,5961,7513,1323,3872,6170,3812,4146,2736,67,3151,5548,2781,9679,7564,5043,8587,1893,4531,5826,3690,6724,2121,9308,6986,8106,6659,2142,1642,7170,2877,5757,6494,8026,6571,8387,9961,6043,9758,9607,6450,8631,8334,7359,5256,8523,2225,7487,1977,9555,8048,5763,2414,4948,4265,2427,8978,8088,8841,9208,9601,5810,9398,8866,9138,4176,5875,7212,3272,6759,5678,7649,4922,5422,1343,8197,3154,3600,687,1028,4579,2084,9467,4492,7262,7296,6538,7657,7134,2077,1505,7332,6890,8964,4879,7603,7400,5973,739,1861,1613,4879,1884,7334,966,2000,7489,2123,4287,1472,3263,4726,9203,1040,4103,6075,6049,330,9253,4062,4268,1635,9960,577,1320,3195,9628,1030,4092,4979,6474,6393,2799,6967,8687,7724,7392,9927,2085,3200,6466,8702,265,7646,8665,7986,7266,4574,6587,612,2724,704,3191,8323,9523,3002,704,5064,3960,8209,2027,2758,8393,4875,4641,9584,6401,7883,7014,768,443,5490,7506,1852,2005,8850,5776,4487,4269,4052,6687,4705,7260,6645,6715,3706,5504,8672,2853,1136,8187,8203,4016,871,1809,1366,4952,9294,5339,6872,2645,6083,7874,3056,5218,7485,8796,7401,3348,2103,426,8572,4163,9171,3176,948,7654,9344,3217,1650,5580,7971,2622,76,2874,880,2034,9929,1546,2659,5811,3754,7096,7436,9694,9960,7415,2164,953,2360,4194,2397,1047,2196,6827,575,784,2675,8821,6802,7972,5996,6699,2134,7577,2887,1412,4349,4380,4629,2234,6240,8132,7592,3181,6389,1214,266,1910,2451,8784,2790,1127,6932,1447,8986,2492,5476,397,889,3027,7641,5083,5776,4022,185,3364,5701,2442,2840,4160,9525,4828,6602,2614,7447,3711,4505,7745,8034,6514,4907,2605,7753,6958,7270,6936,3006,8968,439,2326,4652,3085,3425,9863,5049,5361,8688,297,7580,8777,7916,6687,8683,7141,306,9569,2384,1500,3346,4601,7329,9040,6097,2727,6314,4501,4974,2829,8316,4072,2025,6884,3027,1808,5714,7624,7880,8528,4205,8686,7587,3230,1139,7273,6163,6986,3914,9309,1464,9359,4474,7095,2212,7302,2583,9462,7532,6567,1606,4436,8981,5612,6796,4385,5076,2007,6072,3678,8331,1338,3299,8845,4783,8613,4071,1232,6028,2176,3990,2148,3748,103,9453,538,6745,9110,926,3125,473,5970,8728,7072,9062,1404,1317,5139,9862,6496,6062,3338,464,1600,2532,1088,8232,7739,8274,3873,2341,523,7096,8397,8301,6541,9844,244,4993,2280,7689,4025,4196,5522,7904,6048,2623,9258,2149,9461,6448,8087,7245,1917,8340,7127,8466,5725,6996,3421,5313,512,9164,9837,9794,8369,4185,1488,7210,1524,1016,4620,9435,2478,7765,8035,697,6677,3724,6988,5853,7662,3895,9593,1185,4727,6025,5734,7665,3070,138,8469,6748,6459,561,7935,8646,2378,462,7755,3115,9690,8877,3946,2728,8793,244,6323,8666,4271,6430,2406,8994,56,1267,3826,9443,7079,7579,5232,6691,3435,6718,5698,4144,7028,592,2627,217,734,6194,8156,9118,58,2640,8069,4127,3285,
694,3197,3377,4143,4802,3324,8134,6953,7625,3598,3584,4289,7065,3434,2106,7132,5802,7920,9060,7531,3321,1725,1067,3751,444,5503,6785,7937,6365,4803,198,6266,8177,1470,6390,1606,2904,7555,9834,8667,2033,1723,5167,1666,8546,8152,473,4475,6451,7947,3062,3281,2810,3042,7759,1741,2275,2609,7676,8640,4117,1958,7500,8048,1757,3954,9270,1971,4796,2912,660,5511,3553,1012,5757,4525,6084,7198,8352,5775,7726,8591,7710,9589,3122,4392,6856,5016,749,2285,3356,7482,9956,7348,2599,8944,495,3462,3578,551,4543,7207,7169,7796,1247,4278,6916,8176,3742,8385,2310,1345,8692,2667,4568,1770,8319,3585,4920,3890,4928,7343,5385,9772,7947,8786,2056,9266,3454,2807,877,2660,6206,8252,5928,5837,4177,4333,207,7934,5581,9526,8906,1498,8411,2984,5198,5134,2464,8435,8514,8674,3876,599,5327,826,2152,4084,2433,9327,9697,4800,2728,3608,3849,3861,3498,9943,1407,3991,7191,9110,5666,8434,4704,6545,5944,2357,1163,4995,9619,6754,4200,9682,6654,4862,4744,5953,6632,1054,293,9439,8286,2255,696,8709,1533,1844,6441,430,1999,6063,9431,7018,8057,2920,6266,6799,356,3597,4024,6665,3847,6356,8541,7225,2325,2946,5199,469,5450,7508,2197,9915,8284,7983,6341,3276,3321,16,1321,7608,5015,3362,8491,6968,6818,797,156,2575,706,9516,5344,5457,9210,5051,8099,1617,9951,7663,8253,9683,2670,1261,4710,1068,8753,4799,1228,2621,3275,6188,4699,1791,9518,8701,5932,4275,6011,9877,2933,4182,6059,2930,6687,6682,9771,654,9437,3169,8596,1827,5471,8909,2352,123,4394,3208,8756,5513,6917,2056,5458,8173,3138,3290,4570,4892,3317,4251,9699,7973,1163,1935,5477,6648,9614,5655,9592,975,9118,2194,7322,8248,8413,3462,8560,1907,7810,6650,7355,2939,4973,6894,3933,3784,3200,2419,9234,4747,2208,2207,1945,2899,1407,6145,8023,3484,5688,7686,2737,3828,3704,9004,5190,9740,8643,8650,5358,4426,1522,1707,3613,9887,6956,2447,2762,833,1449,9489,2573,1080,4167,3456,6809,2466,227,7125,2759,6250,6472,8089,3266,7025,9756,3914,1265,9116,7723,9788,6805,5493,2092,8688,6592,9173,4431,4028,6007,7131,4446,4815,3648,6701,759,3312,8355,4485,4187,5188,8746,7759,3528,2177,5243,8379,3838,7233,4607,9187,7216,2190,6967,2920,6082,7910,5354,3609,8958,6949,7731,494,8753,8707,1523,4426,3543,7085,647,6771,9847,646,5049,824,8417,5260,2730,5702,2513,9275,4279,2767,8684,1165,9903,4518,55,9682,8963,6005,2102,6523,1998,8731,936,1479,5259,7064,4085,91,7745,7136,3773,3810,730,8255,2705,2653,9790,6807,2342,355,9344,2668,3690,2028,9679,8102,574,4318,6481,9175,5423,8062,2867,9657,7553,3442,3920,7430,3945,7639,3714,3392,2525,4995,4850,2867,7951,9667,486,9506,9888,781,8866,1702,3795,90,356,1483,4200,2131,6969,5931,486,6880,4404,1084,5169,4910,6567,8335,4686,5043,2614,3352,2667,4513,6472,7471,5720,1616,8878,1613,1716,868,1906,2681,564,665,5995,2474,7496,3432,9491,9087,8850,8287,669,823,347,6194,2264,2592,7871,7616,8508,4827,760,2676,4660,4881,7572,3811,9032,939,4384,929,7525,8419,5556,9063,662,8887,7026,8534,3111,1454,2082,7598,5726,6687,9647,7608,73,3014,5063,670,5461,5631,3367,9796,8475,7908,5073,1565,5008,5295,4457,1274,4788,1728,338,600,8415,8535,9351,7750,6887,5845,1741,125,3637,6489,9634,9464,9055,2413,7824,9517,7532,3577,7050,6186,6980,9365,9782,191,870,2497,8498,2218,2757,5420,6468,586,3320,9230,1034,1393,9886,5072,9391,1178,8464,8042,6869,2075,8275,3601,7715,9470,8786,6475,8373,2159,9237,2066,3264,5000,679,355,3069,4073,494,2308,5512,4334,9438,8786,8637,9774,1169,1949,6594,6072,4270,9158,7916,5752,6794,9391,6301,5842,3285,2141,3898,8027,4310,8821,7079,1307,8497,6681,4732,7151,7060,5204,9030,7157,833,5014,8723,3207,9796,9286,4913,119,5118,7650,9335,809,3675,2597,5144,3945,5090,8384,187,4102,1260,2445,2792,4422,8389,92
90,50,1765,1521,6921,8586,4368,1565,5727,7855,2003,4834,9897,5911,8630,5070,1330,7692,7557,7980,6028,5805,9090,8265,3019,3802,698,9149,5748,1965,9658,4417,5994,5584,8226,2937,272,5743,1278,5698,8736,2595,6475,5342,6596,1149,6920,8188,8009,9546,6310,8772,2500,9846,6592,6872,3857,1307,8125,7042,1544,6159,2330,643,4604,7899,6848,371,8067,2062,3200,7295,1857,9505,6936,384,2193,2190,301,8535,5503,1462,7380,5114,4824,8833,1763,4974,8711,9262,6698,3999,2645,6937,7747,1128,2933,3556,7943,2885,3122,9105,5447,418,2899,5148,3699,9021,9501,597,4084,175,1621,1,1079,6067,5812,4326,9914,6633,5394,4233,6728,9084,1864,5863,1225,9935,8793,9117,1825,9542,8246,8437,3331,9128,9675,6086,7075,319,1334,7932,3583,7167,4178,1726,7720,695,8277,7887,6359,5912,1719,2780,8529,1359,2013,4498,8072,1129,9998,1147,8804,9405,6255,1619,2165,7491,1,8882,7378,3337,503,5758,4109,3577,985,3200,7615,8058,5032,1080,6410,6873,5496,1466,2412,9885,5904,4406,3605,8770,4361,6205,9193,1537,9959,214,7260,9566,1685,100,4920,7138,9819,5637,976,3466,9854,985,1078,7222,8888,5466,5379,3578,4540,6853,8690,3728,6351,7147,3134,6921,9692,857,3307,4998,2172,5783,3931,9417,2541,6299,13,787,2099,9131,9494,896,8600,1643,8419,7248,2660,2609,8579,91,6663,5506,7675,1947,6165,4286,1972,9645,3805,1663,1456,8853,5705,9889,7489,1107,383,4044,2969,3343,152,7805,4980,9929,5033,1737,9953,7197,9158,4071,1324,473,9676,3984,9680,3606,8160,7384,5432,1005,4512,5186,3953,2164,3372,4097,3247,8697,3022,9896,4101,3871,6791,3219,2742,4630,6967,7829,5991,6134,1197,1414,8923,8787,1394,8852,5019,7768,5147,8004,8825,5062,9625,7988,1110,3992,7984,9966,6516,6251,8270,421,3723,1432,4830,6935,8095,9059,2214,6483,6846,3120,1587,6201,6691,9096,9627,6671,4002,3495,9939,7708,7465,5879,6959,6634,3241,3401,2355,9061,2611,7830,3941,2177,2146,5089,7079,519,6351,7280,8586,4261,2831,7217,3141,9994,9940,5462,2189,4005,6942,9848,5350,8060,6665,7519,4324,7684,657,9453,9296,2944,6843,7499,7847,1728,9681,3906,6353,5529,2822,3355,3897,7724,4257,7489,8672,4356,3983,1948,6892,7415,4153,5893,4190,621,1736,4045,9532,7701,3671,1211,1622,3176,4524,9317,7800,5638,6644,6943,5463,3531,2821,1347,5958,3436,1438,2999,994,850,4131,2616,1549,3465,5946,690,9273,6954,7991,9517,399,3249,2596,7736,2142,1322,968,7350,1614,468,3346,3265,7222,6086,1661,5317,2582,7959,4685,2807,2917,1037,5698,1529,3972,8716,2634,3301,3412,8621,743,8001,4734,888,7744,8092,3671,8941,1487,5658,7099,2781,99,1932,4443,4756,4652,9328,1581,7855,4312,5976,7255,6480,3996,2748,1973,9731,4530,2790,9417,7186,5303,3557,351,7182,9428,1342,9020,7599,1392,8304,2070,9138,7215,2008,9937,1106,7110,7444,769,9688,632,1571,6820,8743,4338,337,3366,3073,1946,8219,104,4210,6986,249,5061,8693,7960,6546,1004,8857,5997,9352,4338,6105,5008,2556,6518,6694,4345,3727,7956,20,3954,8652,4424,9387,2035,8358,5962,5304,5194,8650,8282,1256,1103,2138,6679,1985,3653,2770,2433,4278,615,2863,1715,242,3790,2636,6998,3088,1671,2239,957,5411,4595,6282,2881,9974,2401,875,7574,2987,4587,3147,6766,9885,2965,3287,3016,3619,6818,9073,6120,5423,557,2900,2015,8111,3873,1314,4189,1846,4399,7041,7583,2427,2864,3525,5002,2069,748,1948,6015,2684,438,770,8367,1663,7887,7759,1885,157,7770,4520,4878,3857,1137,3525,3050,6276,5569,7649,904,4533,7843,2199,5648,7628,9075,9441,3600,7231,2388,5640,9096,958,3058,584,5899,8150,1181,9616,1098,8162,6819,8171,1519,1140,7665,8801,2632,1299,9192,707,9955,2710,7314,1772,2963,7578,3541,3095,1488,7026,2634,6015,4633,4370,2762,1650,2174,909,8158,2922,8467,4198,4280,9092,8856,8835,5457,2790,8574,9742,5054,9547,4156,7940,8126,9824,7340,8840,6574,3547,1477,301
4,6798,7134,435,9484,9859,3031,4,1502,4133,1738,1807,4825,463,6343,9701,8506,9822,9555,8688,8168,3467,3234,6318,1787,5591,419,6593,7974,8486,9861,6381,6758,194,3061,4315,2863,4665,3789,2201,1492,4416,126,8927,6608,5682,8986,6867,1715,6076,3159,788,3140,4744,830,9253,5812,5021,7616,8534,1546,9590,1101,9012,9821,8132,7857,4086,1069,7491,2988,1579,2442,4321,2149,7642,6108,250,6086,3167,24,9528,7663,2685,1220,9196,1397,5776,1577,1730,5481,977,6115,199,6326,2183,3767,5928,5586,7561,663,8649,9688,949,5913,9160,1870,5764,9887,4477,6703,1413,4995,5494,7131,2192,8969,7138,3997,8697,646,1028,8074,1731,8245,624,4601,8706,155,8891,309,2552,8208,8452,2954,3124,3469,4246,3352,1105,4509,8677,9901,4416,8191,9283,5625,7120,2952,8881,7693,830,4580,8228,9459,8611,4499,1179,4988,1394,550,2336,6089,6872,269,7213,1848,917,6672,4890,656,1478,6536,3165,4743,4990,1176,6211,7207,5284,9730,4738,1549,4986,4942,8645,3698,9429,1439,2175,6549,3058,6513,1574,6988,8333,3406,5245,5431,7140,7085,6407,7845,4694,2530,8249,290,5948,5509,1588,5940,4495,5866,5021,4626,3979,3296,7589,4854,1998,5627,3926,8346,6512,9608,1918,7070,4747,4182,2858,2766,4606,6269,4107,8982,8568,9053,4244,5604,102,2756,727,5887,2566,7922,44,5986,621,1202,374,6988,4130,3627,6744,9443,4568,1398,8679,397,3928,9159,367,2917,6127,5788,3304,8129,911,2669,1463,9749,264,4478,8940,1109,7309,2462,117,4692,7724,225,2312,4164,3637,2000,941,8903,39,3443,7172,1031,3687,4901,8082,4945,4515,7204,9310,9349,9535,9940,218,1788,9245,2237,1541,5670,6538,6047,5553,9807,8101,1925,8714,445,8332,7309,6830,5786,5736,7306,2710,3034,1838,7969,6318,7912,2584,2080,7437,6705,2254,7428,820,782,9861,7596,3842,3631,8063,5240,6666,394,4565,7865,4895,9890,6028,6117,4724,9156,4473,4552,602,470,6191,4927,5387,884,3146,1978,3000,4258,6880,1696,3582,5793,4923,2119,1155,9056,9698,6603,3768,5514,9927,9609,6166,6566,4536,4985,4934,8076,9062,6741,6163,7399,4562,2337,5600,2919,9012,8459,1308,6072,1225,9306,8818,5886,7243,7365,8792,6007,9256,6699,7171,4230,7002,8720,7839,4533,1671,478,7774,1607,2317,5437,4705,7886,4760,6760,7271,3081,2997,3088,7675,6208,3101,6821,6840,122,9633,4900,2067,8546,4549,2091,7188,5605,8599,6758,5229,7854,5243,9155,3556,8812,7047,2202,1541,5993,4600,4760,713,434,7911,7426,7414,8729,322,803,7960,7563,4908,6285,6291,736,3389,9339,4132,8701,7534,5287,3646,592,3065,7582,2592,8755,6068,8597,1982,5782,1894,2900,6236,4039,6569,3037,5837,7698,700,7815,2491,7272,5878,3083,6778,6639,3589,5010,8313,2581,6617,5869,8402,6808,2951,2321,5195,497,2190,6187,1342,1316,4453,7740,4154,2959,1781,1482,8256,7178,2046,4419,744,8312,5356,6855,8839,319,2962,5662,47,6307,8662,68,4813,567,2712,9931,1678,3101,8227,6533,4933,6656,92,5846,4780,6256,6361,4323,9985,1231,2175,7178,3034,9744,6155,9165,7787,5836,9318,7860,9644,8941,6480,9443,8188,5928,161,6979,2352,5628,6991,1198,8067,5867,6620,3778,8426,2994,3122,3124,6335,3918,8897,2655,9670,634,1088,1576,8935,7255,474,8166,7417,9547,2886,5560,3842,6957,3111,26,7530,7143,1295,1744,6057,3009,1854,8098,5405,2234,4874,9447,2620,9303,27,7410,969,40,2966,5648,7596,8637,4238,3143,3679,7187,690,9980,7085,7714,9373,5632,7526,6707,3951,9734,4216,2146,3602,5371,6029,3039,4433,4855,4151,1449,3376,8009,7240,7027,4602,2947,9081,4045,8424,9352,8742,923,2705,4266,3232,2264,6761,363,2651,3383,7770,6730,7856,7340,9679,2158,610,4471,4608,910,6241,4417,6756,1013,8797,658,8809,5032,8703,7541,846,3357,2920,9817,1745,9980,7593,4667,3087,779,3218,6233,5568,4296,2289,2654,7898,5021,9461,5593,8214,9173,4203,2271,7980,2983,5952,9992,8399,3468,1776,3188,9314,1720,6523,2933,621,8685,5
483,8986,6163,3444,9539,4320,155,3992,2828,2150,6071,524,2895,5468,8063,1210,3348,9071,4862,483,9017,4097,6186,9815,3610,5048,1644,1003,9865,9332,2145,1944,2213,9284,3803,4920,1927,6706,4344,7383,4786,9890,2010,5228,1224,3158,6967,8580,8990,8883,5213,76,8306,2031,4980,5639,9519,7184,5645,7769,3259,8077,9130,1317,3096,9624,3818,1770,695,2454,947,6029,3474,9938,3527,5696,4760,7724,7738,2848,6442,5767,6845,8323,4131,2859,7595,2500,4815,3660,9130,8580,7016,8231,4391,8369,3444,4069,4021,556,6154,627,2778,1496,4206,6356,8434,8491,3816,8231,3190,5575,1015,3787,7572,1788,6803,5641,6844,1961,4811,8535,9914,9999,1450,8857,738,4662,8569,6679,2225,7839,8618,286,2648,5342,2294,3205,4546,176,8705,3741,6134,8324,8021,7004,5205,7032,6637,9442,5539,5584,4819,5874,5807,8589,6871,9016,983,1758,3786,1519,6241,185,8398,495,3370,9133,3051,4549,9674,7311,9738,3316,9383,2658,2776,9481,7558,619,3943,3324,6491,4933,153,9738,4623,912,3595,7771,7939,1219,4405,2650,3883,4154,5809,315,7756,4430,1788,4451,1631,6461,7230,6017,5751,138,588,5282,2442,9110,9035,6349,2515,1570,6122,4192,4174,3530,1933,4186,4420,4609,5739,4135,2963,6308,1161,8809,8619,2796,3819,6971,8228,4188,1492,909,8048,2328,6772,8467,7671,9068,2226,7579,6422,7056,8042,3296,2272,3006,2196,7320,3238,3490,3102,37,1293,3212,4767,5041,8773,5794,4456,6174,7279,7054,2835,7053,9088,790,6640,3101,1057,7057,3826,6077,1025,2955,1224,1114,6729,5902,4698,6239,7203,9423,1804,4417,6686,1426,6941,8071,1029,4985,9010,6122,6597,1622,1574,3513,1684,7086,5505,3244,411,9638,4150,907,9135,829,981,1707,5359,8781,9751,5,9131,3973,7159,1340,6955,7514,7993,6964,8198,1933,2797,877,3993,4453,8020,9349,8646,2779,8679,2961,3547,3374,3510,1129,3568,2241,2625,9138,5974,8206,7669,7678,1833,8700,4480,4865,9912,8038,8238,782,3095,8199,1127,4501,7280,2112,2487,3626,2790,9432,1475,6312,8277,4827,2218,5806,7132,8752,1468,7471,6386,739,8762,8323,8120,5169,9078,9058,3370,9560,7987,8585,8531,5347,9312,1058,4271,1159,5286,5404,6925,8606,9204,7361,2415,560,586,4002,2644,1927,2824,768,4409,2942,3345,1002,808,4941,6267,7979,5140,8643,7553,9438,7320,4938,2666,4609,2778,8158,6730,3748,3867,1866,7181,171,3771,7134,8927,4778,2913,3326,2004,3089,7853,1378,1729,4777,2706,9578,1360,5693,3036,1851,7248,2403,2273,8536,6501,9216,613,9671,7131,7719,6425,773,717,8803,160,1114,7554,7197,753,4513,4322,8499,4533,2609,4226,8710,6627,644,9666,6260,4870,5744,7385,6542,6203,7703,6130,8944,5589,2262,6803,6381,7414,6888,5123,7320,9392,9061,6780,322,8975,7050,5089,1061,2260,3199,1150,1865,5386,9699,6501,3744,8454,6885,8277,919,1923,4001,6864,7854,5519,2491,6057,8794,9645,1776,5714,9786,9281,7538,6916,3215,395,2501,9618,4835,8846,9708,2813,3303,1794,8309,7176,2206,1602,1838,236,4593,2245,8993,4017,10,8215,6921,5206,4023,5932,6997,7801,262,7640,3107,8275,4938,7822,2425,3223,3886,2105,8700,9526,2088,8662,8034,7004,5710,2124,7164,3574,6630,9980,4242,2901,9471,1491,2117,4562,1130,9086,4117,6698,2810,2280,2331,1170,4554,4071,8387,1215,2274,9848,6738,1604,7281,8805,439,1298,8318,7834,9426,8603,6092,7944,1309,8828,303,3157,4638,4439,9175,1921,4695,7716,1494,1015,1772,5913,1127,1952,1950,8905,4064,9890,385,9357,7945,5035,7082,5369,4093,6546,5187,5637,2041,8946,1758,7111,6566,1027,1049,5148,7224,7248,296,6169,375,1656,7993,2816,3717,4279,4675,1609,3317,42,6201,3100,3144,163,9530,4531,7096,6070,1009,4988,3538,5801,7149,3063,2324,2912,7911,7002,4338,7880,2481,7368,3516,2016,7556,2193,1388,3865,8125,4637,4096,8114,750,3144,1938,7002,9343,4095,1392,4220,3455,6969,9647,1321,9048,1996,1640,6626,1788,314,9578,6630,2813,6626,4981,9908,7024
,4355,3201,3521,3864,3303,464,1923,595,9801,3391,8366,8084,9374,1041,8807,9085,1892,9431,8317,9016,9221,8574,9981,9240,5395,2009,6310,2854,9255,8830,3145,2960,9615,8220,6061,3452,2918,6481,9278,2297,3385,6565,7066,7316,5682,107,7646,4466,68,1952,9603,8615,54,7191,791,6833,2560,693,9733,4168,570,9127,9537,1925,8287,5508,4297,8452,8795,6213,7994,2420,4208,524,5915,8602,8330,2651,8547,6156,1812,6271,7991,9407,9804,1553,6866,1128,2119,4691,9711,8315,5879,9935,6900,482,682,4126,1041,428,6247,3720,5882,7526,2582,4327,7725,3503,2631,2738,9323,721,7434,1453,6294,2957,3786,5722,6019,8685,4386,3066,9057,6860,499,5315,3045,5194,7111,3137,9104,941,586,3066,755,4177,8819,7040,5309,3583,3897,4428,7788,4721,7249,6559,7324,825,7311,3760,6064,6070,9672,4882,584,1365,9739,9331,5783,2624,7889,1604,1303,1555,7125,8312,425,8936,3233,7724,1480,403,7440,1784,1754,4721,1569,652,3893,4574,5692,9730,4813,9844,8291,9199,7101,3391,8914,6044,2928,9332,3328,8588,447,3830,1176,3523,2705,8365,6136,5442,9049,5526,8575,8869,9031,7280,706,2794,8814,5767,4241,7696,78,6570,556,5083,1426,4502,3336,9518,2292,1885,3740,3153,9348,9331,8051,2759,5407,9028,7840,9255,831,515,2612,9747,7435,8964,4971,2048,4900,5967,8271,1719,9670,2810,6777,1594,6367,6259,8316,3815,1689,6840,9437,4361,822,9619,3065,83,6344,7486,8657,8228,9635,6932,4864,8478,4777,6334,4678,7476,4963,6735,3096,5860,1405,5127,7269,7793,4738,227,9168,2996,8928,765,733,1276,7677,6258,1528,9558,3329,302,8901,1422,8277,6340,645,9125,8869,5952,141,8141,1816,9635,4025,4184,3093,83,2344,2747,9352,7966,1206,1126,1826,218,7939,2957,2729,810,8752,5247,4174,4038,8884,7899,9567,301,5265,5752,7524,4381,1669,3106,8270,6228,6373,754,2547,4240,2313,5514,3022,1040,9738,2265,8192,1763,1369,8469,8789,4836,52,1212,6690,5257,8918,6723,6319,378,4039,2421,8555,8184,9577,1432,7139,8078,5452,9628,7579,4161,7490,5159,8559,1011,81,478,5840,1964,1334,6875,8670,9900,739,1514,8692,522,9316,6955,1345,8132,2277,3193,9773,3923,4177,2183,1236,6747,6575,4874,6003,6409,8187,745,8776,9440,7543,9825,2582,7381,8147,7236,5185,7564,6125,218,7991,6394,391,7659,7456,5128,5294,2132,8992,8160,5782,4420,3371,3798,5054,552,5631,7546,4716,1332,6486,7892,7441,4370,6231,4579,2121,8615,1145,9391,1524,1385,2400,9437,2454,7896,7467,2928,8400,3299,4025,7458,4703,7206,6358,792,6200,725,4275,4136,7390,5984,4502,7929,5085,8176,4600,119,3568,76,9363,6943,2248,9077,9731,6213,5817,6729,4190,3092,6910,759,2682,8380,1254,9604,3011,9291,5329,9453,9746,2739,6522,3765,5634,1113,5789,5304,5499,564,2801,679,2653,1783,3608,7359,7797,3284,796,3222,437,7185,6135,8571,2778,7488,5746,678,6140,861,7750,803,9859,9918,2425,3734,2698,9005,4864,9818,6743,2475,132,9486,3825,5472,919,292,4411,7213,7699,6435,9019,6769,1388,802,2124,1345,8493,9487,8558,7061,8777,8833,2427,2238,5409,4957,8503,3171,7622,5779,6145,2417,5873,5563,5693,9574,9491,1937,7384,4563,6842,5432,2751,3406,7981"
data1=data.split(",")
grid=[[0 for i in range(size)] for j in range(size)]
#input data
for i in range(size):
for j in range(size):
grid[i][j]=int(data1.pop(0))
#set the path sum for the edges
for j in range(1,size):
grid[0][j]+=grid[0][j-1]
grid[j][0]+=grid[j-1][0]
for i in range(1,size):
for j in range(1,size):
grid[i][j]+=min(grid[i-1][j],grid[i][j-1])
print grid[size-1][size-1]
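# A minimal sanity-check sketch of the same dynamic-programming idea, run on
# the 5x5 example grid from the Project Euler 81 problem statement (minimal
# path sum 2427). The helper name min_path_sum is illustrative only.
def min_path_sum(g):
    # g is a square list of lists; only moves to the right and down are allowed
    n = len(g)
    # accumulate path sums along the top row and left column
    for j in range(1, n):
        g[0][j] += g[0][j-1]
        g[j][0] += g[j-1][0]
    # each interior cell takes the cheaper of its top/left neighbours
    for i in range(1, n):
        for j in range(1, n):
            g[i][j] += min(g[i-1][j], g[i][j-1])
    return g[n-1][n-1]
example = [
    [131, 673, 234, 103,  18],
    [201,  96, 342, 965, 150],
    [630, 803, 746, 422, 111],
    [537, 699, 497, 121, 956],
    [805, 732, 524,  37, 331],
]
assert min_path_sum(example) == 2427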
|
dywisor/kernelconfig
|
refs/heads/master
|
kernelconfig/sources/_sources.py
|
1
|
# This file is part of kernelconfig.
# -*- coding: utf-8 -*-
import os.path
import shlex
from ..util import fs
from .abc import sources as _sources_abc
from .abc import exc
from . import sourcedef
from . import sourcetype
from . import sourceenv
__all__ = ["ConfigurationSources"]
class ConfigurationSources(_sources_abc.AbstractConfigurationSources):
"""
A collection of configuration sources.
From the configuration sources container perspective,
a configuration source has two (three) states:
* loaded -- configuration source has been loaded
(or, conf source object has been registered
with register_source())
* available -- configuration source could be loaded
The necessary information (i.e. files) exists, but it
is unknown whether the source would load successfully
and whether it is supported (e.g. target architecture)
* unavailable -- configuration source does not exist
Initially, no conf sources are loaded, but this class
has enough information to create 'available' sources on demand.
See also abc.sources.AbstractConfigurationSources.
@ivar senv: configuration source environment,
shared with all configuration sources
@type senv: L{ConfigurationSourcesEnv}
"""
def __init__(self, install_info, source_info, **kwargs):
super().__init__(**kwargs)
self.senv = sourceenv.ConfigurationSourcesEnv(
logger=self.logger, # ref
install_info=install_info,
source_info=source_info
)
def iter_available_sources_info(self):
# fs lookup is done by the sources env, dispatch to it
return self.senv.iter_available_sources_info()
def _create_curated_source_def_by_name_from_files(
self, source_key, source_def_file, source_script_file
):
"""
This method should only be used by _create_source_by_name_from_files().
It creates the source definition data from def_file/script_file
and returns a 2-tuple (source def data, source type descriptor).
At least one of source_def_file, source_script_file must be set
to a not-None value. All not-None files should exist.
@raises ConfigurationSourceNotFound:
@raises ConfigurationSourceInvalidError:
@raises ConfigurationSourceMissingType:
@raises ConfigurationSourceError:
@param source_key: normalized source name
(get_source_name_key())
@param source_def_file: source definition file or None
@param source_script_file: source script file or None
@return: 2-tuple (def data, source type descriptor)
@rtype: 2-tuple (L{CuratedSourceDef}, L{ConfigurationSourceType})
"""
self.logger.debug("Initializing curated source %s", source_key)
source_def = sourcedef.CuratedSourceDef.new_from_ini(
conf_source_env=self.senv,
name=source_key,
parent_logger=self.logger,
source_def_file=source_def_file,
source_script_file=source_script_file
)
try:
source_type = source_def.get_source_type()
except exc.ConfigurationSourceMissingType:
# failed to detect source type
if source_def_file:
# raising same, but new exception? FIXME
raise exc.ConfigurationSourceMissingType(
"source {} has unknown type".format(source_key)
) from None
else:
raise exc.ConfigurationSourceNotFound(source_key) from None
except KeyError:
# if there is no source type matching source_type_name,
# then the type has to be guessed
# however, this should have already been done
# ('if not source_def.get("type")'),
# so it indicates an error here
#
raise exc.ConfigurationSourceInvalidError(
"{}: could not detect source type".format(source_key)
) from None
# --
if source_type.is_source():
# type of source is source => error
raise exc.ConfigurationSourceInvalidError(
"{}: type of source must not be 'source'".format(source_key)
)
elif source_type.source_subtype:
# couldfix - shouldfix
raise exc.ConfigurationSourceInvalidError(
"{}: subtypes are not supported: {}".format(
source_key, source_type.source_subtype
)
)
elif source_type.source_cls is None:
# if source_cls is None, then the type has no class and
# it cannot be handled here.
# (this should be a redundant case, since source_cls==None
# implies either no source_type or a "source" source_type)
#
raise exc.ConfigurationSourceInvalidError(
"{}: has no source type class".format(source_key)
)
# --
self.logger.debug("%s is a %s source", source_key, source_type)
return (source_def, source_type)
# --- end of _create_curated_source_def_by_name_from_files (...) ---
def _create_source_by_name_from_files(
self, source_key, source_def_file, source_script_file
):
"""
This method should only be used by create_source_by_name().
It loads the source definition data
(see _create_curated_source_def_by_name_from_files()),
and creates a configuration source object.
At least one of source_def_file, source_script_file must be set
to a not-None value. All not-None files should exist.
@raises ConfigurationSourceNotFound:
@raises ConfigurationSourceInvalidError:
@raises ConfigurationSourceMissingType:
@raises ConfigurationSourceError:
@param source_key: normalized source name
(get_source_name_key())
@param source_def_file: source definition file or None
@param source_script_file: source script file or None
@return: configuration source object
@rtype: subclass of L{AbstractConfigurationSource}
"""
source_def, source_type = (
self._create_curated_source_def_by_name_from_files(
source_key, source_def_file, source_script_file
)
)
if not source_def.arch:
# Debatable if this is an init-time or
# a get_configuration_basis()-time error.
#
# It makes no difference where the error is raised, and there is
# simply no point in creating unusable source objects,
# so fail early.
raise exc.ConfigurationSourceArchNotSupported(
source_key,
archs=(
name for _, name
in self.senv.source_info.iter_target_arch_dedup()
),
supported_archs=source_def.get("architectures")
)
# --
return source_type.source_cls.new_from_def(
name=source_key,
conf_source_env=self.senv,
source_def=source_def,
parent_logger=self.logger
)
# --- end of _create_source_by_name_from_files (...) ---
def create_source_by_name(self, source_name):
source_key = self.get_source_name_key(source_name)
self.logger.info("Trying to locate curated source %s", source_key)
sfiles = self.senv.get_source_def_files(source_key)
if any(sfiles):
return self._create_source_by_name_from_files(
source_key, sfiles.def_file, sfiles.script_file
)
else:
self.logger.warning(
"Could not locate curated source %s", source_key
)
# redundant,
# _create_source_by_name_from_files() would handle this, too
raise exc.ConfigurationSourceNotFound(source_key)
# --- end of create_source_by_name (...) ---
def _get_curated_source_from_settings(self, subtype, args, data):
"""
Condition: subtype||args -- subtype and args cannot both be empty
@param subtype: either None or name of the config source
@param args: mixed init_from_def(), get_configuration_basis() args
If subtype is empty, the first arg is used as
name of the curated source
@param data: data for init_from_def()
Note that sources-from-def usually do not accept data
@return: 2-tuple (configuration source object, args remainder)
@rtype: 2-tuple (L{AbstractConfigurationSource}, C{list} of C{str})
"""
if data:
raise exc.ConfigurationSourceInvalidError(
"curated source does not accept data"
)
# --
if subtype is not None:
source_name = subtype
conf_args = args
elif args:
source_name = args[0]
conf_args = args[1:]
else:
raise exc.ConfigurationSourceInvalidError(
"missing curated source name"
)
# --
conf_source = self.get_source(source_name)
return (conf_source, conf_args)
# ---
def get_configuration_source_from_settings(self, settings):
"""Returns a configuration source.
Its name and related information are read from the [source] section
of the given pre-parsed settings file.
Already loaded sources are considered,
and newly created sources may be added to this container.
("curated sources" are added, but "settings-only" sources are not.)
@param settings: pre-parsed settings file
@type settings: L{SettingsFileReader}
or C{dict} :: C{str} => C{list} of C{str}
@return: 2-tuple (configuration source object, args)
or 2-tuple (True, None) if no source configured
"""
def read_settings():
"""
@return: 2-tuple (source definition line, source data)
@rtype: 2-tuple (C{str}, C{list} of C{str})
"""
# format of the settings file, [source] section:
#
# [source]
# [<type>] <arg> [<arg>...]
# [<data line 1>]
# ...
# [<data line N>]
#
# * comment lines are ignored
# * the first line may span multiple lines,
# continued with a backslash char (line cont.)
#
# So basically, the non-comment, non-empty first line is the
# "source definition" line, and all remaining lines are "data".
#
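# A hypothetical illustration (made-up content, not from a real settings
# file) of such a section and what read_settings() returns for it:
#
#   [source]
#   file \
#       /path/to/some.config
#
# -> ("file /path/to/some.config", [])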
source_def = []
data_gen = settings.iter_section("source", skip_comments=True)
# handle backslash line continuation
for line in data_gen:
assert line
if line[-1] == "\\":
source_def.append(line[:-1].strip())
else:
source_def.append(line.strip())
break
# --
source_data = list(data_gen)
return (" ".join(source_def), source_data)
# ---
source_def_str, source_data = read_settings()
if not source_def_str:
assert not source_data
return (True, None)
# alternatively, set source_def_str and continue:
# source_def_str = "file \"{!s}\"".format(
# self.senv.source_info.get_filepath(".config")
# )
#
# or create a "default" source with an arbitrary type.
# Currently, <srctree>/.config is loaded if no source is
# configured, so simply return a dummy value here
# and let get_configuration_basis_from_settings() handle this
# without involving conf source objects.
#
# --
source_def = shlex.split(source_def_str)
source_type = None # set below
source_args = None
# source_data is already set
if os.path.isabs(source_def[0]):
# implicit file
source_type = sourcetype.get_source_type("local_file")
source_args = source_def
else:
source_args = source_def[1:]
try:
source_type = sourcetype.get_source_type(source_def[0])
except KeyError:
source_type = None
if source_type is None:
self.logger.debug(
"Trying to guess type of %s", source_def[0]
)
# then the type is implicit, and we have to "guess":
#
# (a) is source_def[0] the name of a curated source?
source_key = self.get_source_name_key(source_def[0])
if source_key in self:
# (a1) ... that is maybe already loaded?
is_curated_source = True
else:
# (a2) ... for which def/script files can be found?
try:
sfiles = self.senv.get_source_def_files(source_key)
except ValueError:
is_curated_source = False
else:
is_curated_source = any(sfiles)
# --
if is_curated_source:
self.logger.debug("%s is a curated source", source_def[0])
# could return directly from
# _create_source_by_name_from_files(...) here,
# but that requires duplicated checks (source_data)
source_type = sourcetype.get_source_type("source")
source_args = source_def
else:
# (b) file path of any type?
# note that relative paths are not supported here
# unless they begin with file://,
# and absolute paths starting with "/" have already
# been handled
scheme, scheme_sep, rem = source_def[0].partition("://")
if scheme_sep:
self.logger.debug("%s is a file source", source_def[0])
source_type = sourcetype.get_source_type("file")
source_args = source_def
# -- end if try detect
if source_type is None:
# then guessing was not successful
self.logger.warning(
"could not guess source type of %s", source_def[0]
)
raise exc.ConfigurationSourceNotFound(source_def[0])
# -- end if source_type is None
# --
if source_type.is_source():
# calling convention different from source_cls.new_from*
#
# Note that already loaded sources will be re-used here,
# possibly ignoring the file search results from the
# guess-type block above.
#
return self._get_curated_source_from_settings(
source_type.source_subtype, source_args, source_data
)
elif source_type.source_type is None:
raise NotImplementedError(
"missing source cls for type {}".format(source_type)
)
else:
return source_type.source_cls.new_from_settings(
conf_source_env=self.senv,
subtype=source_type.source_subtype,
args=source_args, data=source_data,
parent_logger=self.logger
)
# --- end of get_configuration_source_from_settings (...) ---
def get_configuration_basis_from_settings(self, settings):
conf_source, conf_args = (
self.get_configuration_source_from_settings(settings)
)
if conf_source is True:
assert conf_args is None
default_input_config = (
self.senv.source_info.get_filepath(".config")
)
if not fs.is_readable_file(default_input_config):
raise exc.ConfigurationSourceNotFound(default_input_config)
return [default_input_config]
else:
return conf_source.get_configuration_basis(conf_args)
# --- end of get_configuration_basis_from_settings (...) ---
# --- end of ConfigurationSources ---
|
maestrano/odoo
|
refs/heads/master
|
openerp/models.py
|
3
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Object Relational Mapping module:
* Hierarchical structure
* Constraints consistency and validation
* Object metadata depends on its status
* Optimised processing by complex query (multiple actions at once)
* Default field values
* Permissions optimisation
* Persistent object: DB postgresql
* Data conversion
* Multi-level caching system
* Two different inheritance mechanisms
* Rich set of field types:
- classical (varchar, integer, boolean, ...)
- relational (one2many, many2one, many2many)
- functional
"""
import copy
import datetime
import functools
import itertools
import logging
import operator
import pickle
import pytz
import re
import time
from collections import defaultdict, MutableMapping
from inspect import getmembers
import babel.dates
import dateutil.relativedelta
import psycopg2
from lxml import etree
import openerp
from . import SUPERUSER_ID
from . import api
from . import tools
from .api import Environment
from .exceptions import except_orm, AccessError, MissingError, ValidationError
from .osv import fields
from .osv.query import Query
from .tools import lazy_property, ormcache
from .tools.config import config
from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from .tools.safe_eval import safe_eval as eval
from .tools.translate import _
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')
regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
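# Illustrative behaviour of regex_order (assumed, for documentation only):
# it accepts e.g. 'name desc, id' but rejects 'name; DROP TABLE res_users',
# which keeps user-supplied order clauses out of raw SQL.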
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
def check_object_name(name):
""" Check if the given name is a valid openerp object name.
The _name attribute in osv and osv_memory objects is subject to
some restrictions. This function returns True if the given name is
allowed, and False otherwise.
TODO: this is an approximation. The goal in this approximation
is to disallow uppercase characters (in some places, we quote
table/column names and in others not, which leads to this kind
of errors:
psycopg2.ProgrammingError: relation "xxx" does not exist).
The same restriction should apply to both osv and osv_memory
objects for consistency.
"""
if regex_object_name.match(name) is None:
return False
return True
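# Illustrative examples (assumed behaviour):
#   check_object_name('res.partner') -> True
#   check_object_name('Res.Partner') -> False (uppercase is rejected)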
def raise_on_invalid_object_name(name):
if not check_object_name(name):
msg = "The _name attribute %s is not valid." % name
_logger.error(msg)
raise except_orm('ValueError', msg)
POSTGRES_CONFDELTYPES = {
'RESTRICT': 'r',
'NO ACTION': 'a',
'CASCADE': 'c',
'SET NULL': 'n',
'SET DEFAULT': 'd',
}
def intersect(la, lb):
return filter(lambda x: x in lb, la)
def same_name(f, g):
""" Test whether functions `f` and `g` are identical or have the same name """
return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
def fix_import_export_id_paths(fieldname):
"""
Fixes the id fields in import and exports, and splits field paths
on '/'.
:param str fieldname: name of the field to import/export
:return: split field name
:rtype: list of str
"""
fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
return fixed_external_id.split('/')
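# Illustrative examples (assumed behaviour, for documentation only):
#   fix_import_export_id_paths('product_id:id')   -> ['product_id', 'id']
#   fix_import_export_id_paths('order_line/name') -> ['order_line', 'name']
#   fix_import_export_id_paths('line.id')         -> ['line', '.id']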
def pg_varchar(size=0):
""" Returns the VARCHAR declaration for the provided size:
* If no size (or an empty or negative size is provided) return an
'infinite' VARCHAR
* Otherwise return a VARCHAR(n)
:type int size: varchar size, optional
:rtype: str
"""
if size:
if not isinstance(size, int):
raise TypeError("VARCHAR parameter should be an int, got %s"
% type(size))
if size > 0:
return 'VARCHAR(%d)' % size
return 'VARCHAR'
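# Illustrative examples (assumed behaviour):
#   pg_varchar(16) -> 'VARCHAR(16)'
#   pg_varchar()   -> 'VARCHAR'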
FIELDS_TO_PGTYPES = {
fields.boolean: 'bool',
fields.integer: 'int4',
fields.text: 'text',
fields.html: 'text',
fields.date: 'date',
fields.datetime: 'timestamp',
fields.binary: 'bytea',
fields.many2one: 'int4',
fields.serialized: 'text',
}
def get_pg_type(f, type_override=None):
"""
:param fields._column f: field to get a Postgres type for
:param type type_override: use the provided type for dispatching instead of the field's own type
:returns: (postgres_identification_type, postgres_type_specification)
:rtype: (str, str)
"""
field_type = type_override or type(f)
if field_type in FIELDS_TO_PGTYPES:
pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
elif issubclass(field_type, fields.float):
if f.digits:
pg_type = ('numeric', 'NUMERIC')
else:
pg_type = ('float8', 'DOUBLE PRECISION')
elif issubclass(field_type, (fields.char, fields.reference)):
pg_type = ('varchar', pg_varchar(f.size))
elif issubclass(field_type, fields.selection):
if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
or getattr(f, 'size', None) == -1:
pg_type = ('int4', 'INTEGER')
else:
pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
elif issubclass(field_type, fields.function):
if f._type == 'selection':
pg_type = ('varchar', pg_varchar())
else:
pg_type = get_pg_type(f, getattr(fields, f._type))
else:
_logger.warning('%s type not supported!', field_type)
pg_type = None
return pg_type
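# Illustrative example (hypothetical columns): a fields.char(size=64) column
# would map to ('varchar', 'VARCHAR(64)'), while a fields.float() column
# without digits maps to ('float8', 'DOUBLE PRECISION').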
class MetaModel(api.Meta):
""" Metaclass for the models.
This class is used as the metaclass for the class :class:`BaseModel` to
discover the models defined in a module (without instantiating them).
If the automatic discovery is not needed, it is possible to set the model's
``_register`` attribute to False.
"""
module_to_models = {}
def __init__(self, name, bases, attrs):
if not self._register:
self._register = True
super(MetaModel, self).__init__(name, bases, attrs)
return
if not hasattr(self, '_module'):
# The (OpenERP) module name can be in the `openerp.addons` namespace
# or not. For instance, module `sale` can be imported as
# `openerp.addons.sale` (the right way) or `sale` (for backward
# compatibility).
module_parts = self.__module__.split('.')
if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
module_name = self.__module__.split('.')[2]
else:
module_name = self.__module__.split('.')[0]
self._module = module_name
# Remember which models to instantiate for this module.
if not self._custom:
self.module_to_models.setdefault(self._module, []).append(self)
# transform columns into new-style fields (enables field inheritance)
for name, column in self._columns.iteritems():
if not hasattr(self, name):
setattr(self, name, column.to_field())
class NewId(object):
""" Pseudo-ids for new records. """
def __nonzero__(self):
return False
IdType = (int, long, basestring, NewId)
# maximum number of prefetched records
PREFETCH_MAX = 200
# special columns automatically created by the ORM
LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
class BaseModel(object):
""" Base class for OpenERP models.
OpenERP models are created by inheriting from this class' subclasses:
* :class:`Model` for regular database-persisted models
* :class:`TransientModel` for temporary data, stored in the database but
automatically vacuumed every so often
* :class:`AbstractModel` for abstract super classes meant to be shared by
multiple inheriting models
The system automatically instantiates every model once per database. Those
instances represent the available models on each database, and depend on
which modules are installed on that database. The actual class of each
instance is built from the Python classes that create and inherit from the
corresponding model.
Every model instance is a "recordset", i.e., an ordered collection of
records of the model. Recordsets are returned by methods like
:meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
explicit representation: a record is represented as a recordset of one
record.
To create a class that should not be instantiated, the _register class
attribute may be set to False.
"""
__metaclass__ = MetaModel
_auto = True # create database backend
_register = False # Set to false if the model shouldn't be automatically discovered.
_name = None
_columns = {}
_constraints = []
_custom = False
_defaults = {}
_rec_name = None
_parent_name = 'parent_id'
_parent_store = False
_parent_order = False
_date_name = 'date'
_order = 'id'
_sequence = None
_description = None
_needaction = False
_translate = True # set to False to disable translations export for this model
# dict of {field:method}, with method returning the (name_get of records, {id: fold})
# to include in the _read_group, if grouped on this field
_group_by_full = {}
# Transience
_transient = False # True in a TransientModel
# structure:
# { 'parent_model': 'm2o_field', ... }
_inherits = {}
# Mapping from inherits'd field name to 4-tuple (m, r, f, n) where m is the
# model from which it is inherits'd, r is the (local) field towards m, f
# is the _column object itself, and n is the original (i.e. top-most)
# parent model.
# Example:
# { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
# field_column_obj, original_parent_model), ... }
_inherit_fields = {}
# Mapping field name/column_info object
# This is similar to _inherit_fields but:
# 1. includes self fields,
# 2. uses column_info instead of a 4-tuple.
_all_columns = {}
_table = None
_log_create = False
_sql_constraints = []
# model dependencies, for models backed up by sql views:
# {model_name: field_names, ...}
_depends = {}
CONCURRENCY_CHECK_FIELD = '__last_update'
def log(self, cr, uid, id, message, secondary=False, context=None):
return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
def view_init(self, cr, uid, fields_list, context=None):
"""Override this method to do specific things when a view on the object is opened."""
pass
def _field_create(self, cr, context=None):
""" Create entries in ir_model_fields for all the model's fields.
If necessary, also create an entry in ir_model, and if called from the
modules loading scheme (by receiving 'module' in the context), also
create entries in ir_model_data (for the model and the fields).
- create an entry in ir_model (if there is not already one),
- create an entry in ir_model_data (if there is not already one, and if
'module' is in the context),
- update ir_model_fields with the fields found in _columns
(TODO there is some redundancy as _columns is updated from
ir_model_fields in __init__).
"""
if context is None:
context = {}
cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
if not cr.rowcount:
cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
model_id = cr.fetchone()[0]
cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
else:
model_id = cr.fetchone()[0]
if 'module' in context:
name_id = 'model_'+self._name.replace('.', '_')
cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
if not cr.rowcount:
cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
(name_id, context['module'], 'ir.model', model_id)
)
cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
cols = {}
for rec in cr.dictfetchall():
cols[rec['name']] = rec
ir_model_fields_obj = self.pool.get('ir.model.fields')
# sparse field should be created at the end, as it depends on its serialized field already existing
model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
for (k, f) in model_fields:
vals = {
'model_id': model_id,
'model': self._name,
'name': k,
'field_description': f.string,
'ttype': f._type,
'relation': f._obj or '',
'select_level': tools.ustr(int(f.select)),
'readonly': (f.readonly and 1) or 0,
'required': (f.required and 1) or 0,
'selectable': (f.selectable and 1) or 0,
'translate': (f.translate and 1) or 0,
'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
'serialization_field_id': None,
}
if getattr(f, 'serialization_field', None):
# resolve link to serialization_field if specified by name
serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
if not serialization_field_id:
raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
vals['serialization_field_id'] = serialization_field_id[0]
# When it's a custom field, it does not contain f.select
if context.get('field_state', 'base') == 'manual':
if context.get('field_name', '') == k:
vals['select_level'] = context.get('select', '0')
# set the value so the problem does not occur next time
elif k in cols:
vals['select_level'] = cols[k]['select_level']
if k not in cols:
cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
id = cr.fetchone()[0]
vals['id'] = id
cr.execute("""INSERT INTO ir_model_fields (
id, model_id, model, name, field_description, ttype,
relation,state,select_level,relation_field, translate, serialization_field_id
) VALUES (
%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
)""", (
id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
vals['relation'], 'base',
vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
))
if 'module' in context:
name1 = 'field_' + self._table + '_' + k
cr.execute("select name from ir_model_data where name=%s", (name1,))
if cr.fetchone():
name1 = name1 + "_" + str(id)
cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
(name1, context['module'], 'ir.model.fields', id)
)
else:
for key, val in vals.items():
if cols[k][key] != vals[key]:
cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
cr.execute("""UPDATE ir_model_fields SET
model_id=%s, field_description=%s, ttype=%s, relation=%s,
select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
WHERE
model=%s AND name=%s""", (
vals['model_id'], vals['field_description'], vals['ttype'],
vals['relation'],
vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
))
break
self.invalidate_cache(cr, SUPERUSER_ID)
@classmethod
def _add_field(cls, name, field):
""" Add the given `field` under the given `name` in the class """
field.set_class_name(cls, name)
# add field in _fields (for reflection)
cls._fields[name] = field
# add field as an attribute, unless another kind of value already exists
if isinstance(getattr(cls, name, field), Field):
setattr(cls, name, field)
else:
_logger.warning("In model %r, member %r is not a field", cls._name, name)
if field.store:
cls._columns[name] = field.to_column()
else:
# remove potential column that may be overridden by field
cls._columns.pop(name, None)
@classmethod
def _add_magic_fields(cls):
""" Introduce magic fields on the current class
* id is a "normal" field (with a specific getter)
* create_uid, create_date, write_uid and write_date have become
"normal" fields
* $CONCURRENCY_CHECK_FIELD is a computed field with its computing
method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
to get the same structure as the previous
``(now() at time zone 'UTC')::timestamp``::
# select (now() at time zone 'UTC')::timestamp;
timezone
----------------------------
2013-06-18 08:30:37.292809
>>> str(datetime.datetime.utcnow())
'2013-06-18 08:31:32.821177'
"""
def add(name, field):
""" add `field` with the given `name` if it does not exist yet """
if name not in cls._columns and name not in cls._fields:
cls._add_field(name, field)
# cyclic import
from . import fields
# this field 'id' must override any other column or field
cls._add_field('id', fields.Id(automatic=True))
add('display_name', fields.Char(string='Display Name', automatic=True,
compute='_compute_display_name'))
if cls._log_access:
add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
add('create_date', fields.Datetime(string='Created on', automatic=True))
add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
last_modified_name = 'compute_concurrency_field_with_access'
else:
last_modified_name = 'compute_concurrency_field'
# this field must override any other column or field
cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
string='Last Modified on', compute=last_modified_name, automatic=True))
@api.one
def compute_concurrency_field(self):
self[self.CONCURRENCY_CHECK_FIELD] = \
datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
@api.one
@api.depends('create_date', 'write_date')
def compute_concurrency_field_with_access(self):
self[self.CONCURRENCY_CHECK_FIELD] = \
self.write_date or self.create_date or \
datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
#
# Goal: try to apply inheritance at the instantiation level and
# put objects in the pool var
#
@classmethod
def _build_model(cls, pool, cr):
""" Instanciate a given model.
This class method instanciates the class of some model (i.e. a class
deriving from osv or osv_memory). The class might be the class passed
in argument or, if it inherits from another class, a class constructed
by combining the two classes.
"""
# IMPORTANT: the registry contains an instance for each model. The class
# of each model carries inferred metadata that is shared among the
# model's instances for this registry, but not among registries. Hence
# we cannot use that "registry class" for combining model classes by
# inheritance, since it confuses the metadata inference process.
# Keep links to non-inherited constraints in cls; this is useful for
# instance when exporting translations
cls._local_constraints = cls.__dict__.get('_constraints', [])
cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])
# determine inherited models
parents = getattr(cls, '_inherit', [])
parents = [parents] if isinstance(parents, basestring) else (parents or [])
# determine the model's name
name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__
# determine the module that introduced the model
original_module = pool[name]._original_module if name in parents else cls._module
# build the class hierarchy for the model
for parent in parents:
if parent not in pool:
raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
'You may need to add a dependency on the parent class\' module.' % (name, parent))
parent_model = pool[parent]
# do not use the class of parent_model, since that class contains
# inferred metadata; use its ancestor instead
parent_class = type(parent_model).__base__
# don't inherit custom fields
columns = dict((key, val)
for key, val in parent_class._columns.iteritems()
if not val.manual
)
columns.update(cls._columns)
defaults = dict(parent_class._defaults)
defaults.update(cls._defaults)
inherits = dict(parent_class._inherits)
inherits.update(cls._inherits)
depends = dict(parent_class._depends)
for m, fs in cls._depends.iteritems():
depends[m] = depends.get(m, []) + fs
old_constraints = parent_class._constraints
new_constraints = cls._constraints
# filter out from old_constraints the ones overridden by a
# constraint with the same function name in new_constraints
constraints = new_constraints + [oldc
for oldc in old_constraints
if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
for newc in new_constraints)
]
sql_constraints = cls._sql_constraints + \
parent_class._sql_constraints
attrs = {
'_name': name,
'_register': False,
'_columns': columns,
'_defaults': defaults,
'_inherits': inherits,
'_depends': depends,
'_constraints': constraints,
'_sql_constraints': sql_constraints,
}
cls = type(name, (cls, parent_class), attrs)
# introduce the "registry class" of the model;
# duplicate some attributes so that the ORM can modify them
attrs = {
'_name': name,
'_register': False,
'_columns': dict(cls._columns),
'_defaults': dict(cls._defaults),
'_inherits': dict(cls._inherits),
'_depends': dict(cls._depends),
'_constraints': list(cls._constraints),
'_sql_constraints': list(cls._sql_constraints),
'_original_module': original_module,
}
cls = type(cls._name, (cls,), attrs)
# float fields are registry-dependent (digit attribute); duplicate them
# to avoid issues
for key, col in cls._columns.items():
if col._type == 'float':
cls._columns[key] = copy.copy(col)
# instantiate the model, and initialize it
model = object.__new__(cls)
model.__init__(pool, cr)
return model
@classmethod
def _init_function_fields(cls, pool, cr):
# initialize the list of non-stored function fields for this model
pool._pure_function_fields[cls._name] = []
# process store of low-level function fields
for fname, column in cls._columns.iteritems():
if hasattr(column, 'digits_change'):
column.digits_change(cr)
# filter out existing store about this field
pool._store_function[cls._name] = [
stored
for stored in pool._store_function.get(cls._name, [])
if (stored[0], stored[1]) != (cls._name, fname)
]
if not isinstance(column, fields.function):
continue
if not column.store:
# register it on the pool for invalidation
pool._pure_function_fields[cls._name].append(fname)
continue
# process store parameter
store = column.store
if store is True:
get_ids = lambda self, cr, uid, ids, c={}: ids
store = {cls._name: (get_ids, None, column.priority, None)}
for model, spec in store.iteritems():
if len(spec) == 4:
(fnct, fields2, order, length) = spec
elif len(spec) == 3:
(fnct, fields2, order) = spec
length = None
else:
raise except_orm('Error',
('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
pool._store_function.setdefault(model, [])
t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
if t not in pool._store_function[model]:
pool._store_function[model].append(t)
pool._store_function[model].sort(key=lambda x: x[4])
@classmethod
def _init_manual_fields(cls, pool, cr):
# Check whether the query is already done
if pool.fields_by_model is not None:
manual_fields = pool.fields_by_model.get(cls._name, [])
else:
cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
manual_fields = cr.dictfetchall()
for field in manual_fields:
if field['name'] in cls._columns:
continue
attrs = {
'string': field['field_description'],
'required': bool(field['required']),
'readonly': bool(field['readonly']),
'domain': eval(field['domain']) if field['domain'] else None,
'size': field['size'] or None,
'ondelete': field['on_delete'],
'translate': (field['translate']),
'manual': True,
'_prefetch': False,
#'select': int(field['select_level'])
}
if field['serialization_field_id']:
cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
if field['ttype'] in ['many2one', 'one2many', 'many2many']:
attrs.update({'relation': field['relation']})
cls._columns[field['name']] = fields.sparse(**attrs)
elif field['ttype'] == 'selection':
cls._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
elif field['ttype'] == 'reference':
cls._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
elif field['ttype'] == 'many2one':
cls._columns[field['name']] = fields.many2one(field['relation'], **attrs)
elif field['ttype'] == 'one2many':
cls._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
elif field['ttype'] == 'many2many':
_rel1 = field['relation'].replace('.', '_')
_rel2 = field['model'].replace('.', '_')
_rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
cls._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
else:
cls._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
@classmethod
def _init_constraints_onchanges(cls):
# store sql constraint error messages
for (key, _, msg) in cls._sql_constraints:
cls.pool._sql_error[cls._table + '_' + key] = msg
# collect constraint and onchange methods
cls._constraint_methods = []
cls._onchange_methods = defaultdict(list)
for attr, func in getmembers(cls, callable):
if hasattr(func, '_constrains'):
if not all(name in cls._fields for name in func._constrains):
_logger.warning("@constrains%r parameters must be field names", func._constrains)
cls._constraint_methods.append(func)
if hasattr(func, '_onchange'):
if not all(name in cls._fields for name in func._onchange):
_logger.warning("@onchange%r parameters must be field names", func._onchange)
for name in func._onchange:
cls._onchange_methods[name].append(func)
def __new__(cls):
# In the past, this method was registering the model class in the server.
# This job is now done entirely by the metaclass MetaModel.
#
# Do not create an instance here. Model instances are created by method
# _build_model().
return None
def __init__(self, pool, cr):
""" Initialize a model and make it part of the given registry.
- copy the stored fields' functions in the registry,
- retrieve custom fields and add them in the model,
- ensure there is a many2one for each _inherits'd parent,
- update the children's _columns,
- give a chance to each field to initialize itself.
"""
cls = type(self)
# link the class to the registry, and update the registry
cls.pool = pool
cls._model = self # backward compatibility
pool.add(cls._name, self)
# determine description, table, sequence and log_access
if not cls._description:
cls._description = cls._name
if not cls._table:
cls._table = cls._name.replace('.', '_')
if not cls._sequence:
cls._sequence = cls._table + '_id_seq'
if not hasattr(cls, '_log_access'):
# If _log_access is not specified, it is the same value as _auto.
cls._log_access = cls._auto
# Transience
if cls.is_transient():
cls._transient_check_count = 0
cls._transient_max_count = config.get('osv_memory_count_limit')
cls._transient_max_hours = config.get('osv_memory_age_limit')
assert cls._log_access, \
"TransientModels must have log_access turned on, " \
"in order to implement their access rights policy"
# retrieve new-style fields and duplicate them (to avoid clashes with
# inheritance between different models)
cls._fields = {}
for attr, field in getmembers(cls, Field.__instancecheck__):
if not field._origin:
cls._add_field(attr, field.copy())
# introduce magic fields
cls._add_magic_fields()
# register stuff about low-level function fields and custom fields
cls._init_function_fields(pool, cr)
cls._init_manual_fields(pool, cr)
# process _inherits
cls._inherits_check()
cls._inherits_reload()
# register constraints and onchange methods
cls._init_constraints_onchanges()
# check defaults
for k in cls._defaults:
assert k in cls._fields, \
"Model %s has a default for nonexiting field %s" % (cls._name, k)
# restart columns
for column in cls._columns.itervalues():
column.restart()
# validate rec_name
if cls._rec_name:
assert cls._rec_name in cls._fields, \
"Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
elif 'name' in cls._fields:
cls._rec_name = 'name'
# prepare ormcache, which must be shared by all instances of the model
cls._ormcache = {}
@api.model
@ormcache()
def _is_an_ordinary_table(self):
self.env.cr.execute("""\
SELECT 1
FROM pg_class
WHERE relname = %s
AND relkind = %s""", [self._table, 'r'])
return bool(self.env.cr.fetchone())
def __export_xml_id(self):
""" Return a valid xml_id for the record `self`. """
if not self._is_an_ordinary_table():
raise Exception(
"You can not export the column ID of model %s, because the "
"table %s is not an ordinary table."
% (self._name, self._table))
ir_model_data = self.sudo().env['ir.model.data']
data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
if data:
if data[0].module:
return '%s.%s' % (data[0].module, data[0].name)
else:
return data[0].name
else:
postfix = 0
name = '%s_%s' % (self._table, self.id)
while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
postfix += 1
name = '%s_%s_%s' % (self._table, self.id, postfix)
ir_model_data.create({
'model': self._name,
'res_id': self.id,
'module': '__export__',
'name': name,
})
return '__export__.' + name
@api.multi
def __export_rows(self, fields):
""" Export fields of the records in `self`.
:param fields: list of lists of fields to traverse
:return: list of lists of corresponding values
"""
lines = []
for record in self:
# main line of record, initially empty
current = [''] * len(fields)
lines.append(current)
# list of primary fields followed by secondary field(s)
primary_done = []
# process column by column
for i, path in enumerate(fields):
if not path:
continue
name = path[0]
if name in primary_done:
continue
if name == '.id':
current[i] = str(record.id)
elif name == 'id':
current[i] = record.__export_xml_id()
else:
field = record._fields[name]
value = record[name]
# this part could be simpler, but it has to be done this way
# in order to reproduce the former behavior
if not isinstance(value, BaseModel):
current[i] = field.convert_to_export(value, self.env)
else:
primary_done.append(name)
# This is a special case, its strange behavior is intended!
if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
xml_ids = [r.__export_xml_id() for r in value]
current[i] = ','.join(xml_ids) or False
continue
# recursively export the fields that follow name
fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
lines2 = value.__export_rows(fields2)
if lines2:
# merge first line with record's main line
for j, val in enumerate(lines2[0]):
if val:
current[j] = val
# check value of current field
if not current[i]:
# assign xml_ids, and forget about remaining lines
xml_ids = [item[1] for item in value.name_get()]
current[i] = ','.join(xml_ids)
else:
# append the other lines at the end
lines += lines2[1:]
else:
current[i] = False
return lines
@api.multi
def export_data(self, fields_to_export, raw_data=False):
""" Export fields for selected objects
:param fields_to_export: list of fields
:param raw_data: True to return value in native Python type
:rtype: dictionary with a *datas* matrix
This method is used when exporting data via client menu
"""
fields_to_export = map(fix_import_export_id_paths, fields_to_export)
if raw_data:
self = self.with_context(export_raw_data=True)
return {'datas': self.__export_rows(fields_to_export)}
def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
"""
.. deprecated:: 7.0
Use :meth:`~load` instead
Import given data in given module
This method is used when importing data via client menu.
Example of fields to import for a sale.order::
.id, (=database_id)
partner_id, (=name_search)
order_line/.id, (=database_id)
order_line/name,
order_line/product_id/id, (=xml id)
order_line/price_unit,
order_line/product_uom_qty,
order_line/product_uom/id (=xml_id)
This method returns a 4-tuple with the following structure::
(return_code, errored_resource, error_message, unused)
* The first item is a return code, it is ``-1`` in case of
import error, or the last imported row number in case of success
* The second item contains the record data dict that failed to import
in case of error, otherwise it's 0
* The third item contains an error message string in case of error,
otherwise it's 0
* The last item is currently unused, with no specific semantics
:param fields: list of fields to import
:param datas: data to import
:param mode: 'init' or 'update' for record creation
:param current_module: module name
:param noupdate: flag for record creation
:param filename: optional file to store partial import state for recovery
:returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
:rtype: (int, dict or 0, str or 0, str or 0)
"""
context = dict(context) if context is not None else {}
context['_import_current_module'] = current_module
fields = map(fix_import_export_id_paths, fields)
ir_model_data_obj = self.pool.get('ir.model.data')
def log(m):
if m['type'] == 'error':
raise Exception(m['message'])
# initialise position first so a value restored from the partial
# import state file is not overwritten afterwards
position = 0
if config.get('import_partial') and filename:
with open(config.get('import_partial'), 'rb') as partial_import_file:
data = pickle.load(partial_import_file)
position = data.get(filename, 0)
try:
for res_id, xml_id, res, info in self._convert_records(cr, uid,
self._extract_records(cr, uid, fields, datas,
context=context, log=log),
context=context, log=log):
ir_model_data_obj._update(cr, uid, self._name,
current_module, res, mode=mode, xml_id=xml_id,
noupdate=noupdate, res_id=res_id, context=context)
position = info.get('rows', {}).get('to', 0) + 1
if config.get('import_partial') and filename and (not (position%100)):
with open(config.get('import_partial'), 'rb') as partial_import:
data = pickle.load(partial_import)
data[filename] = position
with open(config.get('import_partial'), 'wb') as partial_import:
pickle.dump(data, partial_import)
if context.get('defer_parent_store_computation'):
self._parent_store_compute(cr)
cr.commit()
except Exception, e:
cr.rollback()
return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
if context.get('defer_parent_store_computation'):
self._parent_store_compute(cr)
return position, 0, 0, 0
def load(self, cr, uid, fields, data, context=None):
"""
Attempts to load the data matrix, and returns a list of ids (or
``False`` if there was an error and no id could be generated) and a
list of messages.
The ids are those of the records created and saved (in database), in
the same order they were extracted from the file. They can be passed
directly to :meth:`~read`
:param fields: list of fields to import, at the same index as the corresponding data
:type fields: list(str)
:param data: row-major matrix of data to import
:type data: list(list(str))
:param dict context:
:returns: {ids: list(int)|False, messages: [Message]}
"""
cr.execute('SAVEPOINT model_load')
messages = []
fields = map(fix_import_export_id_paths, fields)
ModelData = self.pool['ir.model.data']
ModelData.clear_caches()
fg = self.fields_get(cr, uid, context=context)
mode = 'init'
current_module = ''
noupdate = False
ids = []
for id, xid, record, info in self._convert_records(cr, uid,
self._extract_records(cr, uid, fields, data,
context=context, log=messages.append),
context=context, log=messages.append):
try:
cr.execute('SAVEPOINT model_load_save')
except psycopg2.InternalError, e:
# broken transaction, exit and hope the source error was
# already logged
if not any(message['type'] == 'error' for message in messages):
messages.append(dict(info, type='error',message=
u"Unknown database error: '%s'" % e))
break
try:
ids.append(ModelData._update(cr, uid, self._name,
current_module, record, mode=mode, xml_id=xid,
noupdate=noupdate, res_id=id, context=context))
cr.execute('RELEASE SAVEPOINT model_load_save')
except psycopg2.Warning, e:
messages.append(dict(info, type='warning', message=str(e)))
cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
except psycopg2.Error, e:
messages.append(dict(
info, type='error',
**PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
# Failed to write, log to messages, rollback savepoint (to
# avoid broken transaction) and keep going
cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
except Exception, e:
message = (_('Unknown error during import:') +
' %s: %s' % (type(e), unicode(e)))
moreinfo = _('Resolve other errors first')
messages.append(dict(info, type='error',
message=message,
moreinfo=moreinfo))
# Failed for some reason, perhaps due to invalid data supplied,
# rollback savepoint and keep going
cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
if any(message['type'] == 'error' for message in messages):
cr.execute('ROLLBACK TO SAVEPOINT model_load')
ids = False
return {'ids': ids, 'messages': messages}
def _extract_records(self, cr, uid, fields_, data,
context=None, log=lambda a: None):
""" Generates record dicts from the data sequence.
The result is a generator of dicts mapping field names to raw
(unconverted, unvalidated) values.
For relational fields, if sub-fields were provided the value will be
a list of sub-records
The following sub-fields may be set on the record (by key):
* None is the name_get for the record (to use with name_create/name_search)
* "id" is the External ID for the record
* ".id" is the Database ID for the record
"""
columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
# Fake columns to avoid special cases in extractor
columns[None] = fields.char('rec_name')
columns['id'] = fields.char('External ID')
columns['.id'] = fields.integer('Database ID')
# m2o fields can't be on multiple lines so exclude them from the
# is_relational field rows filter, but special-case it later on to
# be handled with relational fields (as it can have subfields)
is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
get_o2m_values = itemgetter_tuple(
[index for index, field in enumerate(fields_)
if columns[field[0]]._type == 'one2many'])
get_nono2m_values = itemgetter_tuple(
[index for index, field in enumerate(fields_)
if columns[field[0]]._type != 'one2many'])
# Checks if the provided row has any non-empty non-relational field
def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
return any(g(row)) and not any(f(row))
index = 0
while True:
if index >= len(data): return
row = data[index]
# copy non-relational fields to record dict
record = dict((field[0], value)
for field, value in itertools.izip(fields_, row)
if not is_relational(field[0]))
# Get all following rows which have relational values attached to
# the current record (no non-relational values)
record_span = itertools.takewhile(
only_o2m_values, itertools.islice(data, index + 1, None))
# stitch record row back on for relational fields
record_span = list(itertools.chain([row], record_span))
for relfield in set(
field[0] for field in fields_
if is_relational(field[0])):
column = columns[relfield]
# FIXME: how to not use _obj without relying on fields_get?
Model = self.pool[column._obj]
# get only cells for this sub-field, should be strictly
# non-empty, field path [None] is for name_get column
indices, subfields = zip(*((index, field[1:] or [None])
for index, field in enumerate(fields_)
if field[0] == relfield))
# return all rows which have at least one value for the
# subfields of relfield
relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
record[relfield] = [subrecord
for subrecord, _subinfo in Model._extract_records(
cr, uid, subfields, relfield_data,
context=context, log=log)]
yield record, {'rows': {
'from': index,
'to': index + len(record_span) - 1
}}
index += len(record_span)
def _convert_records(self, cr, uid, records,
context=None, log=lambda a: None):
""" Converts records from the source iterable (recursive dicts of
strings) into forms which can be written to the database (via
self.create or (ir.model.data)._update)
:returns: a list of triplets of (id, xid, record)
:rtype: list((int|None, str|None, dict))
"""
if context is None: context = {}
Converter = self.pool['ir.fields.converter']
columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
Translation = self.pool['ir.translation']
field_names = dict(
(f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
context.get('lang'))
or column.string))
for f, column in columns.iteritems())
convert = Converter.for_model(cr, uid, self, context=context)
def _log(base, field, exception):
type = 'warning' if isinstance(exception, Warning) else 'error'
# logs the logical (not human-readable) field name for automated
# processing of response, but injects human readable in message
record = dict(base, type=type, field=field,
message=unicode(exception.args[0]) % base)
if len(exception.args) > 1 and exception.args[1]:
record.update(exception.args[1])
log(record)
stream = CountingStream(records)
for record, extras in stream:
dbid = False
xid = False
# name_get/name_create
if None in record: pass
# xid
if 'id' in record:
xid = record['id']
# dbid
if '.id' in record:
try:
dbid = int(record['.id'])
except ValueError:
# in case of overridden id column
dbid = record['.id']
if not self.search(cr, uid, [('id', '=', dbid)], context=context):
log(dict(extras,
type='error',
record=stream.index,
field='.id',
message=_(u"Unknown database identifier '%s'") % dbid))
dbid = False
converted = convert(record, lambda field, err:\
_log(dict(extras, record=stream.index, field=field_names[field]), field, err))
yield dbid, xid, converted, dict(extras, record=stream.index)
@api.multi
def _validate_fields(self, field_names):
field_names = set(field_names)
# old-style constraint methods
trans = self.env['ir.translation']
cr, uid, context = self.env.args
ids = self.ids
errors = []
for fun, msg, names in self._constraints:
try:
# validation must be context-independent; call `fun` without context
valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
extra_error = None
except Exception, e:
_logger.debug('Exception while validating constraint', exc_info=True)
valid = False
extra_error = tools.ustr(e)
if not valid:
if callable(msg):
res_msg = msg(self._model, cr, uid, ids, context=context)
if isinstance(res_msg, tuple):
template, params = res_msg
res_msg = template % params
else:
res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
if extra_error:
res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
errors.append(
_("Field(s) `%s` failed against a constraint: %s") %
(', '.join(names), res_msg)
)
if errors:
raise ValidationError('\n'.join(errors))
# new-style constraint methods
for check in self._constraint_methods:
if set(check._constrains) & field_names:
try:
check(self)
except ValidationError, e:
raise
except Exception, e:
raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))
def default_get(self, cr, uid, fields_list, context=None):
""" default_get(fields) -> default_values
Return default values for the fields in `fields_list`. Default
values are determined by the context, user defaults, and the model
itself.
:param fields_list: a list of field names
:return: a dictionary mapping each field name to its corresponding
default value; the keys of the dictionary are the fields in
`fields_list` that have a default value different from ``False``.
This method should not be overridden. In order to change the
mechanism for determining default values, you should override method
:meth:`add_default_value` instead.
"""
# trigger view init hook
self.view_init(cr, uid, fields_list, context)
# use a new record to determine default values; evaluate fields on the
# new record and put default values in result
record = self.new(cr, uid, {}, context=context)
result = {}
for name in fields_list:
if name in self._fields:
value = record[name]
if name in record._cache:
result[name] = value # it really is a default value
# convert default values to the expected format
result = self._convert_to_write(result)
return result
def add_default_value(self, field):
""" Set the default value of `field` to the new record `self`.
The value must be assigned to `self`.
"""
assert not self.id, "Expected new record: %s" % self
cr, uid, context = self.env.args
name = field.name
# 1. look up context
key = 'default_' + name
if key in context:
self[name] = context[key]
return
# 2. look up ir_values
# Note: performance is good, because get_defaults_dict is cached!
ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
if name in ir_values_dict:
self[name] = ir_values_dict[name]
return
# 3. look up property fields
# TODO: get rid of this one
column = self._columns.get(name)
if isinstance(column, fields.property):
self[name] = self.env['ir.property'].get(name, self._name)
return
# 4. look up _defaults
if name in self._defaults:
value = self._defaults[name]
if callable(value):
value = value(self._model, cr, uid, context)
self[name] = value
return
# 5. delegate to field
field.determine_default(self)
def fields_get_keys(self, cr, user, context=None):
res = self._columns.keys()
# TODO I believe this loop can be replaced by
# res.extend(self._inherit_fields.keys())
for parent in self._inherits:
res.extend(self.pool[parent].fields_get_keys(cr, user, context))
return res
def _rec_name_fallback(self, cr, uid, context=None):
rec_name = self._rec_name
if rec_name not in self._columns:
rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
return rec_name
#
# Overload this method if you need a window title which depends on the context
#
def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
return False
def user_has_groups(self, cr, uid, groups, context=None):
"""Return true if the user is at least member of one of the groups
in groups_str. Typically used to resolve `groups` attribute
in view and model definitions.
:param str groups: comma-separated list of fully-qualified group
external IDs, e.g.: ``base.group_user,base.group_system``
:return: True if the current user is a member of one of the
given groups
"""
return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
for group_ext_id in groups.split(','))
def _get_default_form_view(self, cr, user, context=None):
""" Generates a default single-line form view using all fields
of the current model except the m2m and o2m ones.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a form view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('form', string=self._description)
group = etree.SubElement(view, 'group', col="4")
for fname, field in self._fields.iteritems():
if field.automatic or field.type in ('one2many', 'many2many'):
continue
etree.SubElement(group, 'field', name=fname)
if field.type == 'text':
etree.SubElement(group, 'newline')
return view
def _get_default_search_view(self, cr, user, context=None):
""" Generates a single-field search view, based on _rec_name.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a search view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('search', string=self._description)
etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
return view
def _get_default_tree_view(self, cr, user, context=None):
""" Generates a single-field tree view, based on _rec_name.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a tree view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('tree', string=self._description)
etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
return view
def _get_default_calendar_view(self, cr, user, context=None):
""" Generates a default calendar view by trying to infer
calendar fields from a number of pre-set attribute names
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a calendar view
:rtype: etree._Element
"""
def set_first_of(seq, in_, to):
"""Sets the first value of `seq` also found in `in_` to
the `to` attribute of the view being closed over.
Returns whether it's found a suitable value (and set it on
the attribute) or not
"""
for item in seq:
if item in in_:
view.set(to, item)
return True
return False
view = etree.Element('calendar', string=self._description)
etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
if self._date_name not in self._columns:
date_found = False
for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
if dt in self._columns:
self._date_name = dt
date_found = True
break
if not date_found:
raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
view.set('date_start', self._date_name)
set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
self._columns, 'color')
if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
self._columns, 'date_stop'):
if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
self._columns, 'date_delay'):
raise except_orm(
_('Invalid Object Architecture!'),
_("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
return view
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
""" fields_view_get([view_id | view_type='form'])
Get the detailed composition of the requested view like fields, model, view architecture
:param view_id: id of the view or None
:param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
:param toolbar: true to include contextual actions
:param submenu: deprecated
:return: dictionary describing the composition of the requested view (including inherited views and extensions)
:raise AttributeError:
* if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
* if some tag other than 'position' is found in parent view
:raise Invalid ArchitectureError: if there is view type other than form, tree, calendar, search etc defined on the structure
"""
if context is None:
context = {}
View = self.pool['ir.ui.view']
result = {
'model': self._name,
'field_parent': False,
}
# try to find a view_id if none provided
if not view_id:
# <view_type>_view_ref in context can be used to override the default view
view_ref_key = view_type + '_view_ref'
view_ref = context.get(view_ref_key)
if view_ref:
if '.' in view_ref:
module, view_ref = view_ref.split('.', 1)
cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
view_ref_res = cr.fetchone()
if view_ref_res:
view_id = view_ref_res[0]
else:
_logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
self._name)
if not view_id:
# otherwise try to find the lowest priority matching ir.ui.view
view_id = View.default_view(cr, uid, self._name, view_type, context=context)
# context for post-processing might be overridden
ctx = context
if view_id:
# read the view with inherited views applied
root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
result['arch'] = root_view['arch']
result['name'] = root_view['name']
result['type'] = root_view['type']
result['view_id'] = root_view['id']
result['field_parent'] = root_view['field_parent']
# override context for post-processing
if root_view.get('model') != self._name:
ctx = dict(context, base_model_name=root_view.get('model'))
else:
# fall back on default view methods if no ir.ui.view could be found
try:
get_func = getattr(self, '_get_default_%s_view' % view_type)
arch_etree = get_func(cr, uid, context)
result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
result['type'] = view_type
result['name'] = 'default'
except AttributeError:
raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)
# Apply post processing, groups and modifiers etc...
xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
result['arch'] = xarch
result['fields'] = xfields
# Add related action information if asked
if toolbar:
toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
def clean(x):
x = x[2]
for key in toclean:
x.pop(key, None)
return x
ir_values_obj = self.pool.get('ir.values')
resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
#When multi="True" set it will display only in More of the list view
resrelate = [clean(action) for action in resrelate
if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
for x in itertools.chain(resprint, resaction, resrelate):
x['string'] = x['name']
result['toolbar'] = {
'print': resprint,
'action': resaction,
'relate': resrelate
}
return result
def get_formview_id(self, cr, uid, id, context=None):
""" Return an view id to open the document with. This method is meant to be
overridden in addons that want to give specific view ids for example.
:param int id: id of the document to open
"""
return False
def get_formview_action(self, cr, uid, id, context=None):
""" Return an action to open the document. This method is meant to be
overridden in addons that want to give specific view ids for example.
:param int id: id of the document to open
"""
view_id = self.get_formview_id(cr, uid, id, context=context)
return {
'type': 'ir.actions.act_window',
'res_model': self._name,
'view_type': 'form',
'view_mode': 'form',
'views': [(view_id, 'form')],
'target': 'current',
'res_id': id,
}
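    # Illustrative sketch (assumption, not part of the original code): the dictionary
    # returned above is a plain ``ir.actions.act_window`` description, so addons usually
    # customize it indirectly by overriding get_formview_id(). Assuming a hypothetical
    # XML id 'my_module.my_form_view' for a custom form view, such an override could be:
    #
    #     def get_formview_id(self, cr, uid, id, context=None):
    #         # return the database id of the custom form view instead of the default one
    #         return self.pool['ir.model.data'].xmlid_to_res_id(
    #             cr, uid, 'my_module.my_form_view', raise_if_not_found=False)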
def get_access_action(self, cr, uid, id, context=None):
""" Return an action to open the document. This method is meant to be
overridden in addons that want to give specific access to the document.
By default it opens the formview of the document.
            :param int id: id of the document to open
"""
return self.get_formview_action(cr, uid, id, context=context)
def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
return self.pool['ir.ui.view'].postprocess_and_fields(
cr, uid, self._name, node, view_id, context=context)
def search_count(self, cr, user, args, context=None):
""" search_count(args) -> int
Returns the number of records in the current model matching :ref:`the
provided domain <reference/orm/domains>`.
"""
res = self.search(cr, user, args, context=context, count=True)
if isinstance(res, list):
return len(res)
return res
@api.returns('self')
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
""" search(args[, offset=0][, limit=None][, order=None][, count=False])
Searches for records based on the ``args``
:ref:`search domain <reference/orm/domains>`.
:param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
list to match all records.
:param int offset: number of results to ignore (default: none)
:param int limit: maximum number of records to return (default: all)
:param str order: sort string
:param bool count: if ``True``, the call should return the number of
records matching ``args`` rather than the records
themselves.
:returns: at most ``limit`` records matching the search criteria
:raise AccessError: * if user tries to bypass access rules for read on the requested object.
"""
return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
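    # Illustrative sketch (assumption): with the old-style signature above, a typical
    # call site passes a cursor, a user id and a domain; 'res.partner' is only used as
    # a familiar example model and the domains are arbitrary.
    #
    #     partner_obj = self.pool['res.partner']
    #     ids = partner_obj.search(cr, uid, [('name', 'ilike', 'agro')], limit=5)
    #     nb_customers = partner_obj.search(cr, uid, [('customer', '=', True)], count=True)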
#
# display_name, name_get, name_create, name_search
#
@api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
def _compute_display_name(self):
names = dict(self.name_get())
for record in self:
record.display_name = names.get(record.id, False)
@api.multi
def name_get(self):
""" name_get() -> [(id, name), ...]
Returns a textual representation for the records in ``self``.
By default this is the value of the ``display_name`` field.
:return: list of pairs ``(id, text_repr)`` for each records
:rtype: list(tuple)
"""
result = []
name = self._rec_name
if name in self._fields:
convert = self._fields[name].convert_to_display_name
for record in self:
result.append((record.id, convert(record[name])))
else:
for record in self:
result.append((record.id, "%s,%s" % (record._name, record.id)))
return result
@api.model
def name_create(self, name):
""" name_create(name) -> record
Create a new record by calling :meth:`~.create` with only one value
provided: the display name of the new record.
The new record will be initialized with any default values
applicable to this model, or provided through the context. The usual
behavior of :meth:`~.create` applies.
:param name: display name of the record to create
:rtype: tuple
:return: the :meth:`~.name_get` pair value of the created record
"""
if self._rec_name:
record = self.create({self._rec_name: name})
return record.name_get()[0]
else:
_logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
return False
@api.model
def name_search(self, name='', args=None, operator='ilike', limit=100):
""" name_search(name='', args=None, operator='ilike', limit=100) -> records
Search for records that have a display name matching the given
`name` pattern when compared with the given `operator`, while also
matching the optional search domain (`args`).
This is used for example to provide suggestions based on a partial
        value for a relational field. It may sometimes be seen as the inverse
function of :meth:`~.name_get`, but it is not guaranteed to be.
This method is equivalent to calling :meth:`~.search` with a search
domain based on ``display_name`` and then :meth:`~.name_get` on the
result of the search.
:param str name: the name pattern to match
:param list args: optional search domain (see :meth:`~.search` for
syntax), specifying further restrictions
:param str operator: domain operator for matching `name`, such as
``'like'`` or ``'='``.
:param int limit: optional max number of records to return
:rtype: list
:return: list of pairs ``(id, text_repr)`` for all matching records.
"""
return self._name_search(name, args, operator, limit=limit)
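    # Illustrative sketch (assumption): name_search() is what feeds the suggestion list
    # of many2one widgets; it returns name_get()-style (id, display_name) pairs, e.g.
    #
    #     results = self.pool['res.partner'].name_search(cr, uid, 'agro', limit=8)
    #     # e.g. -> [(7, 'Agrolait'), (18, 'Agrolait, Michel Fletcher')]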
def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
# private implementation of name_search, allows passing a dedicated user
# for the name_get part to solve some access rights issues
args = list(args or [])
# optimize out the default criterion of ``ilike ''`` that matches everything
if not self._rec_name:
_logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
elif not (name == '' and operator == 'ilike'):
args += [(self._rec_name, operator, name)]
access_rights_uid = name_get_uid or user
ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
res = self.name_get(cr, access_rights_uid, ids, context)
return res
def read_string(self, cr, uid, id, langs, fields=None, context=None):
res = {}
res2 = {}
self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
if not fields:
fields = self._columns.keys() + self._inherit_fields.keys()
#FIXME: collect all calls to _get_source into one SQL call.
for lang in langs:
res[lang] = {'code': lang}
for f in fields:
if f in self._columns:
res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
if res_trans:
res[lang][f] = res_trans
else:
res[lang][f] = self._columns[f].string
for table in self._inherits:
cols = intersect(self._inherit_fields.keys(), fields)
res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
for lang in res2:
if lang in res:
res[lang]['code'] = lang
for f in res2[lang]:
res[lang][f] = res2[lang][f]
return res
def write_string(self, cr, uid, id, langs, vals, context=None):
self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
#FIXME: try to only call the translation in one SQL
for lang in langs:
for field in vals:
if field in self._columns:
src = self._columns[field].string
self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
for table in self._inherits:
cols = intersect(self._inherit_fields.keys(), vals)
if cols:
self.pool[table].write_string(cr, uid, id, langs, vals, context)
return True
def _add_missing_default_values(self, cr, uid, values, context=None):
# avoid overriding inherited values when parent is set
avoid_tables = []
for tables, parent_field in self._inherits.items():
if parent_field in values:
avoid_tables.append(tables)
# compute missing fields
missing_defaults = set()
for field in self._columns.keys():
if not field in values:
missing_defaults.add(field)
for field in self._inherit_fields.keys():
if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
missing_defaults.add(field)
# discard magic fields
missing_defaults -= set(MAGIC_COLUMNS)
if missing_defaults:
# override defaults with the provided values, never allow the other way around
defaults = self.default_get(cr, uid, list(missing_defaults), context)
for dv in defaults:
if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
defaults[dv] = [(6, 0, defaults[dv])]
if (dv in self._columns and self._columns[dv]._type == 'one2many' \
or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
defaults[dv] = [(0, 0, x) for x in defaults[dv]]
defaults.update(values)
values = defaults
return values
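    # Note added for clarity (sketch, not in the original code): the (6, 0, ids) and
    # (0, 0, values) wrappers built above are the standard x2many commands expected by
    # create()/write(): a default of [1, 2] for a many2many field becomes
    # [(6, 0, [1, 2])] (replace the whole set), and a list of dicts for a one2many
    # field becomes [(0, 0, {...}), ...] (create new linked records).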
def clear_caches(self):
""" Clear the caches
This clears the caches associated to methods decorated with
``tools.ormcache`` or ``tools.ormcache_multi``.
"""
try:
self._ormcache.clear()
self.pool._any_cache_cleared = True
except AttributeError:
pass
def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
aggregated_fields, count_field,
read_group_result, read_group_order=None, context=None):
"""Helper method for filling in empty groups for all possible values of
the field being grouped by"""
# self._group_by_full should map groupable fields to a method that returns
# a list of all aggregated values that we want to display for this field,
# in the form of a m2o-like pair (key,label).
# This is useful to implement kanban views for instance, where all columns
# should be displayed even if they don't contain any record.
# Grab the list of all groups that should be displayed, including all present groups
present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
read_group_order=read_group_order,
access_rights_uid=openerp.SUPERUSER_ID,
context=context)
result_template = dict.fromkeys(aggregated_fields, False)
result_template[groupby + '_count'] = 0
if remaining_groupbys:
result_template['__context'] = {'group_by': remaining_groupbys}
# Merge the left_side (current results as dicts) with the right_side (all
# possible values as m2o pairs). Both lists are supposed to be using the
# same ordering, and can be merged in one pass.
result = []
known_values = {}
def append_left(left_side):
grouped_value = left_side[groupby] and left_side[groupby][0]
if not grouped_value in known_values:
result.append(left_side)
known_values[grouped_value] = left_side
else:
known_values[grouped_value].update({count_field: left_side[count_field]})
def append_right(right_side):
grouped_value = right_side[0]
if not grouped_value in known_values:
line = dict(result_template)
line[groupby] = right_side
line['__domain'] = [(groupby,'=',grouped_value)] + domain
result.append(line)
known_values[grouped_value] = line
while read_group_result or all_groups:
left_side = read_group_result[0] if read_group_result else None
right_side = all_groups[0] if all_groups else None
assert left_side is None or left_side[groupby] is False \
or isinstance(left_side[groupby], (tuple,list)), \
'M2O-like pair expected, got %r' % left_side[groupby]
assert right_side is None or isinstance(right_side, (tuple,list)), \
'M2O-like pair expected, got %r' % right_side
if left_side is None:
append_right(all_groups.pop(0))
elif right_side is None:
append_left(read_group_result.pop(0))
elif left_side[groupby] == right_side:
append_left(read_group_result.pop(0))
all_groups.pop(0) # discard right_side
elif not left_side[groupby] or not left_side[groupby][0]:
# left side == "Undefined" entry, not present on right_side
append_left(read_group_result.pop(0))
else:
append_right(all_groups.pop(0))
if folded:
for r in result:
r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
return result
def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
"""
        Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
        to the query if the ordering must be computed against a m2o field.
:param orderby: the orderby definition in the form "%(field)s %(order)s"
:param aggregated_fields: list of aggregated fields in the query
:param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
                These dictionaries contain the qualified name of each groupby
(fully qualified SQL name for the corresponding field),
and the (non raw) field name.
:param osv.Query query: the query under construction
:return: (groupby_terms, orderby_terms)
"""
orderby_terms = []
groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
if not orderby:
return groupby_terms, orderby_terms
self._check_qorder(orderby)
for order_part in orderby.split(','):
order_split = order_part.split()
order_field = order_split[0]
if order_field in groupby_fields:
if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
if order_clause:
orderby_terms.append(order_clause)
groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
else:
order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
orderby_terms.append(order)
elif order_field in aggregated_fields:
orderby_terms.append(order_part)
else:
# Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
_logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
self._name, order_part)
return groupby_terms, orderby_terms
def _read_group_process_groupby(self, gb, query, context):
"""
        Helper method to collect important information about groupbys: raw
        field name, type, time information, qualified name, ...
"""
split = gb.split(':')
field_type = self._all_columns[split[0]].column._type
gb_function = split[1] if len(split) == 2 else None
temporal = field_type in ('date', 'datetime')
tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
qualified_field = self._inherits_join_calc(split[0], query)
if temporal:
display_formats = {
'day': 'dd MMM YYYY',
'week': "'W'w YYYY",
'month': 'MMMM YYYY',
'quarter': 'QQQ YYYY',
'year': 'YYYY'
}
time_intervals = {
'day': dateutil.relativedelta.relativedelta(days=1),
'week': datetime.timedelta(days=7),
'month': dateutil.relativedelta.relativedelta(months=1),
'quarter': dateutil.relativedelta.relativedelta(months=3),
'year': dateutil.relativedelta.relativedelta(years=1)
}
if tz_convert:
qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
if field_type == 'boolean':
qualified_field = "coalesce(%s,false)" % qualified_field
return {
'field': split[0],
'groupby': gb,
'type': field_type,
'display_format': display_formats[gb_function or 'month'] if temporal else None,
'interval': time_intervals[gb_function or 'month'] if temporal else None,
'tz_convert': tz_convert,
'qualified_field': qualified_field
}
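    # Illustrative sketch (assumption): for a groupby spec such as 'date_order:week' on
    # a hypothetical datetime column, the dictionary returned above would look roughly like
    #
    #     {'field': 'date_order', 'groupby': 'date_order:week', 'type': 'datetime',
    #      'display_format': "'W'w YYYY", 'interval': datetime.timedelta(days=7),
    #      'tz_convert': True,  # only when a valid timezone is in the context
    #      'qualified_field': "date_trunc('week', ...)"}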
def _read_group_prepare_data(self, key, value, groupby_dict, context):
"""
        Helper method to sanitize the data received by read_group. None
        values are converted to False, and date/datetime values are parsed
        and corrected according to the timezone.
"""
value = False if value is None else value
gb = groupby_dict.get(key)
if gb and gb['type'] in ('date', 'datetime') and value:
if isinstance(value, basestring):
dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
value = datetime.datetime.strptime(value, dt_format)
if gb['tz_convert']:
value = pytz.timezone(context['tz']).localize(value)
return value
def _read_group_get_domain(self, groupby, value):
"""
Helper method to construct the domain corresponding to a groupby and
a given value. This is mostly relevant for date/datetime.
"""
if groupby['type'] in ('date', 'datetime') and value:
dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
domain_dt_begin = value
domain_dt_end = value + groupby['interval']
if groupby['tz_convert']:
domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
domain_dt_end = domain_dt_end.astimezone(pytz.utc)
return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
(groupby['field'], '<', domain_dt_end.strftime(dt_format))]
if groupby['type'] == 'many2one' and value:
value = value[0]
return [(groupby['field'], '=', value)]
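    # Illustrative sketch (assumption): for a 'month' groupby on a date field named
    # 'date_order' whose bucket value is datetime.datetime(2014, 3, 1), the helper above
    # yields a half-open range domain such as
    #
    #     [('date_order', '>=', '2014-03-01'), ('date_order', '<', '2014-04-01')]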
def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
"""
        Helper method to format the data contained in the dictionary ``data`` by
        adding the domain corresponding to its values, adding the remaining
        groupbys to the context and properly formatting the date/datetime values.
"""
domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
for k,v in data.iteritems():
gb = groupby_dict.get(k)
if gb and gb['type'] in ('date', 'datetime') and v:
data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
data['__domain'] = domain_group + domain
if len(groupby) - len(annotated_groupbys) >= 1:
data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
del data['id']
return data
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
"""
Get the list of records in list view grouped by the given ``groupby`` fields
:param cr: database cursor
:param uid: current user id
:param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
:param list fields: list of fields present in the list view specified on the object
:param list groupby: list of groupby descriptions by which the records will be grouped.
A groupby description is either a field (then it will be grouped by that field)
or a string 'field:groupby_function'. Right now, the only functions supported
are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
date/datetime fields.
:param int offset: optional number of records to skip
:param int limit: optional max number of records to return
:param dict context: context arguments, like lang, time zone.
:param list orderby: optional ``order by`` specification, for
overriding the natural sort ordering of the
groups, see also :py:meth:`~osv.osv.osv.search`
(supported only for many2one fields currently)
:param bool lazy: if true, the results are only grouped by the first groupby and the
remaining groupbys are put in the __context key. If false, all the groupbys are
done in one call.
        :return: list of dictionaries (one dictionary for each record) containing:
                    * the values of fields grouped by the fields in ``groupby`` argument
                    * __domain: list of tuples specifying the search criteria
                    * __context: dictionary with argument like ``groupby``
        :rtype: [{'field_name_1': value, ...}, ...]
:raise AccessError: * if user has no read rights on the requested object
* if user tries to bypass access rules for read on the requested object
"""
if context is None:
context = {}
self.check_access_rights(cr, uid, 'read')
query = self._where_calc(cr, uid, domain, context=context)
fields = fields or self._columns.keys()
groupby = [groupby] if isinstance(groupby, basestring) else groupby
groupby_list = groupby[:1] if lazy else groupby
annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
for gb in groupby_list]
groupby_fields = [g['field'] for g in annotated_groupbys]
order = orderby or ','.join([g for g in groupby_list])
groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
self._apply_ir_rules(cr, uid, query, 'read', context=context)
for gb in groupby_fields:
assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
if not (gb in self._all_columns):
# Don't allow arbitrary values, as this would be a SQL injection vector!
raise except_orm(_('Invalid group_by'),
_('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
aggregated_fields = [
f for f in fields
if f not in ('id', 'sequence')
if f not in groupby_fields
if f in self._all_columns
if self._all_columns[f].column._type in ('integer', 'float')
if getattr(self._all_columns[f].column, '_classic_write')]
field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
for gb in annotated_groupbys:
select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
from_clause, where_clause, where_clause_params = query.get_sql()
if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
else:
count_field = '_'
count_field += '_count'
prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
query = """
SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
FROM %(from)s
%(where)s
%(groupby)s
%(orderby)s
%(limit)s
%(offset)s
""" % {
'table': self._table,
'count_field': count_field,
'extra_fields': prefix_terms(',', select_terms),
'from': from_clause,
'where': prefix_term('WHERE', where_clause),
'groupby': prefix_terms('GROUP BY', groupby_terms),
'orderby': prefix_terms('ORDER BY', orderby_terms),
'limit': prefix_term('LIMIT', int(limit) if limit else None),
'offset': prefix_term('OFFSET', int(offset) if limit else None),
}
cr.execute(query, where_clause_params)
fetched_data = cr.dictfetchall()
if not groupby_fields:
return fetched_data
many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
if many2onefields:
data_ids = [r['id'] for r in fetched_data]
many2onefields = list(set(many2onefields))
data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
for d in fetched_data:
d.update(data_dict[d['id']])
data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
if lazy and groupby_fields[0] in self._group_by_full:
            # Right now, read_group only fills results in lazy mode (by default).
            # If you need to have the empty groups in 'eager' mode, then the
            # method _read_group_fill_results needs to be completely reimplemented
            # in a sane way
result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
aggregated_fields, count_field, result, read_group_order=order,
context=context)
return result
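    # Illustrative sketch (assumption): grouping hypothetical sale orders by state and
    # summing their totals could look like this ('sale.order' and 'amount_total' are
    # example names, not guaranteed to exist in a given database):
    #
    #     groups = self.pool['sale.order'].read_group(
    #         cr, uid, [('state', '!=', 'cancel')],
    #         fields=['state', 'amount_total'], groupby=['state'], context=context)
    #     # each group carries the aggregates, a drill-down '__domain' and a count, e.g.
    #     # [{'state': 'draft', 'amount_total': 1200.0, 'state_count': 3,
    #     #   '__domain': [('state', '=', 'draft'), ('state', '!=', 'cancel')], ...}, ...]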
def _inherits_join_add(self, current_model, parent_model_name, query):
"""
Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
:param current_model: current model object
:param parent_model_name: name of the parent model for which the clauses should be added
:param query: query object on which the JOIN should be added
"""
inherits_field = current_model._inherits[parent_model_name]
parent_model = self.pool[parent_model_name]
parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
return parent_alias
def _inherits_join_calc(self, field, query):
"""
Adds missing table select and join clause(s) to ``query`` for reaching
the field coming from an '_inherits' parent table (no duplicates).
:param field: name of inherited field to reach
:param query: query object on which the JOIN should be added
:return: qualified name of field, to be used in SELECT clause
"""
current_table = self
parent_alias = '"%s"' % current_table._table
while field in current_table._inherit_fields and not field in current_table._columns:
parent_model_name = current_table._inherit_fields[field][0]
parent_table = self.pool[parent_model_name]
parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
current_table = parent_table
return '%s."%s"' % (parent_alias, field)
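    # Note added for clarity (assumption): for a model that _inherits from another, e.g.
    # a hypothetical 'product.product' reaching the 'name' column stored on
    # 'product.template', the helper above registers the implicit JOIN on the query and
    # returns a qualified column such as '"product_template"."name"'.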
def _parent_store_compute(self, cr):
if not self._parent_store:
return
_logger.info('Computing parent left and right for table %s...', self._table)
def browse_rec(root, pos=0):
# TODO: set order
where = self._parent_name+'='+str(root)
if not root:
where = self._parent_name+' IS NULL'
if self._parent_order:
where += ' order by '+self._parent_order
cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
pos2 = pos + 1
for id in cr.fetchall():
pos2 = browse_rec(id[0], pos2)
cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
return pos2 + 1
query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
if self._parent_order:
query += ' order by ' + self._parent_order
pos = 0
cr.execute(query)
for (root,) in cr.fetchall():
pos = browse_rec(root, pos)
self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
return True
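    # Note added for clarity: parent_left/parent_right computed above form a nested-set
    # encoding of the hierarchy: roughly, the [parent_left, parent_right] interval of a
    # record strictly contains the intervals of all its descendants, which is what lets
    # 'child_of' domains be evaluated with cheap range comparisons in SQL.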
def _update_store(self, cr, f, k):
_logger.info("storing computed values of fields.function '%s'", k)
ss = self._columns[k]._symbol_set
update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
cr.execute('select id from '+self._table)
ids_lst = map(lambda x: x[0], cr.fetchall())
while ids_lst:
iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
for key, val in res.items():
if f._multi:
val = val[k]
# if val is a many2one, just write the ID
if type(val) == tuple:
val = val[0]
if val is not False:
cr.execute(update_query, (ss[1](val), key))
def _check_selection_field_value(self, cr, uid, field, value, context=None):
"""Raise except_orm if value is not among the valid values for the selection field"""
if self._columns[field]._type == 'reference':
val_model, val_id_str = value.split(',', 1)
val_id = False
try:
val_id = long(val_id_str)
except ValueError:
pass
if not val_id:
raise except_orm(_('ValidateError'),
_('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
val = val_model
else:
val = value
if isinstance(self._columns[field].selection, (tuple, list)):
if val in dict(self._columns[field].selection):
return
elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
return
raise except_orm(_('ValidateError'),
_('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._name, field))
def _check_removed_columns(self, cr, log=False):
# iterate on the database columns to drop the NOT NULL constraints
# of fields which were required but have been removed (or will be added by another module)
columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
columns += MAGIC_COLUMNS
cr.execute("SELECT a.attname, a.attnotnull"
" FROM pg_class c, pg_attribute a"
" WHERE c.relname=%s"
" AND c.oid=a.attrelid"
" AND a.attisdropped=%s"
" AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
" AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
for column in cr.dictfetchall():
if log:
_logger.debug("column %s is in the table %s but not in the corresponding object %s",
column['attname'], self._table, self._name)
if column['attnotnull']:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
_schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
self._table, column['attname'])
def _save_constraint(self, cr, constraint_name, type):
"""
Record the creation of a constraint for this model, to make it possible
to delete it later when the module is uninstalled. Type can be either
'f' or 'u' depending on the constraint being a foreign key or not.
"""
if not self._module:
# no need to save constraints for custom models as they're not part
# of any module
return
assert type in ('f', 'u')
cr.execute("""
SELECT 1 FROM ir_model_constraint, ir_module_module
WHERE ir_model_constraint.module=ir_module_module.id
AND ir_model_constraint.name=%s
AND ir_module_module.name=%s
""", (constraint_name, self._module))
if not cr.rowcount:
cr.execute("""
INSERT INTO ir_model_constraint
(name, date_init, date_update, module, model, type)
VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
(SELECT id FROM ir_module_module WHERE name=%s),
(SELECT id FROM ir_model WHERE model=%s), %s)""",
(constraint_name, self._module, self._name, type))
def _save_relation_table(self, cr, relation_table):
"""
Record the creation of a many2many for this model, to make it possible
to delete it later when the module is uninstalled.
"""
cr.execute("""
SELECT 1 FROM ir_model_relation, ir_module_module
WHERE ir_model_relation.module=ir_module_module.id
AND ir_model_relation.name=%s
AND ir_module_module.name=%s
""", (relation_table, self._module))
if not cr.rowcount:
cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
(SELECT id FROM ir_module_module WHERE name=%s),
(SELECT id FROM ir_model WHERE model=%s))""",
(relation_table, self._module, self._name))
self.invalidate_cache(cr, SUPERUSER_ID)
# checked version: for direct m2o starting from `self`
def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
assert self.is_transient() or not dest_model.is_transient(), \
'Many2One relationships from non-transient Model to TransientModel are forbidden'
if self.is_transient() and not dest_model.is_transient():
# TransientModel relationships to regular Models are annoying
# usually because they could block deletion due to the FKs.
# So unless stated otherwise we default them to ondelete=cascade.
ondelete = ondelete or 'cascade'
fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
self._foreign_keys.add(fk_def)
_schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
# unchecked version: for custom cases, such as m2m relationships
def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
self._foreign_keys.add(fk_def)
_schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
def _drop_constraint(self, cr, source_table, constraint_name):
cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
# Find FK constraint(s) currently established for the m2o field,
# and see whether they are stale or not
cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
cl2.relname as foreign_table
FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
pg_attribute as att1, pg_attribute as att2
WHERE con.conrelid = cl1.oid
AND cl1.relname = %s
AND con.confrelid = cl2.oid
AND array_lower(con.conkey, 1) = 1
AND con.conkey[1] = att1.attnum
AND att1.attrelid = cl1.oid
AND att1.attname = %s
AND array_lower(con.confkey, 1) = 1
AND con.confkey[1] = att2.attnum
AND att2.attrelid = cl2.oid
AND att2.attname = %s
AND con.contype = 'f'""", (source_table, source_field, 'id'))
constraints = cr.dictfetchall()
if constraints:
if len(constraints) == 1:
# Is it the right constraint?
cons, = constraints
if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
or cons['foreign_table'] != dest_model._table:
# Wrong FK: drop it and recreate
_schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
source_table, cons['constraint_name'])
self._drop_constraint(cr, source_table, cons['constraint_name'])
else:
# it's all good, nothing to do!
return
else:
# Multiple FKs found for the same field, drop them all, and re-create
for cons in constraints:
_schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
source_table, cons['constraint_name'])
self._drop_constraint(cr, source_table, cons['constraint_name'])
# (re-)create the FK
self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
def _set_default_value_on_column(self, cr, column_name, context=None):
# ideally should use add_default_value but fails
# due to ir.values not being ready
# get old-style default
default = self._defaults.get(column_name)
if callable(default):
default = default(self, cr, SUPERUSER_ID, context)
# get new_style default if no old-style
if default is None:
record = self.new(cr, SUPERUSER_ID, context=context)
field = self._fields[column_name]
field.determine_default(record)
defaults = dict(record._cache)
if column_name in defaults:
default = field.convert_to_write(defaults[column_name])
column = self._columns[column_name]
ss = column._symbol_set
db_default = ss[1](default)
# Write default if non-NULL, except for booleans for which False means
# the same as NULL - this saves us an expensive query on large tables.
write_default = (db_default is not None if column._type != 'boolean'
else db_default)
if write_default:
_logger.debug("Table '%s': setting default value of new column %s to %r",
self._table, column_name, default)
query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
self._table, column_name, ss[0], column_name)
cr.execute(query, (db_default,))
# this is a disgrace
cr.commit()
def _auto_init(self, cr, context=None):
"""
Call _field_create and, unless _auto is False:
- create the corresponding table in database for the model,
- possibly add the parent columns in database,
- possibly add the columns 'create_uid', 'create_date', 'write_uid',
'write_date' in database if _log_access is True (the default),
        - report on database columns that no longer exist in _columns,
        - remove NOT NULL constraints that no longer apply,
- alter existing database columns to match _columns,
- create database tables to match _columns,
- add database indices to match _columns,
        - save in self._foreign_keys a list of foreign keys to create (see
_auto_end).
"""
self._foreign_keys = set()
raise_on_invalid_object_name(self._name)
if context is None:
context = {}
store_compute = False
stored_fields = [] # new-style stored fields with compute
todo_end = []
update_custom_fields = context.get('update_custom_fields', False)
self._field_create(cr, context=context)
create = not self._table_exist(cr)
if self._auto:
if create:
self._create_table(cr)
has_rows = False
else:
cr.execute('SELECT min(id) FROM "%s"' % (self._table,))
has_rows = cr.fetchone()[0] is not None
cr.commit()
if self._parent_store:
if not self._parent_columns_exist(cr):
self._create_parent_columns(cr)
store_compute = True
self._check_removed_columns(cr, log=False)
# iterate on the "object columns"
column_data = self._select_column_data(cr)
for k, f in self._columns.iteritems():
if k == 'id': # FIXME: maybe id should be a regular column?
continue
# Don't update custom (also called manual) fields
if f.manual and not update_custom_fields:
continue
if isinstance(f, fields.one2many):
self._o2m_raise_on_missing_reference(cr, f)
elif isinstance(f, fields.many2many):
self._m2m_raise_or_create_relation(cr, f)
else:
res = column_data.get(k)
                    # The field is not found as-is in the database; check whether it
                    # exists under an old name.
if not res and hasattr(f, 'oldname'):
res = column_data.get(f.oldname)
if res:
cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
res['attname'] = k
column_data[k] = res
_schema.debug("Table '%s': renamed column '%s' to '%s'",
self._table, f.oldname, k)
# The field already exists in database. Possibly
# change its type, rename it, drop it or change its
# constraints.
if res:
f_pg_type = res['typname']
f_pg_size = res['size']
f_pg_notnull = res['attnotnull']
if isinstance(f, fields.function) and not f.store and\
not getattr(f, 'nodrop', False):
_logger.info('column %s (%s) converted to a function, removed from table %s',
k, f.string, self._table)
cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
cr.commit()
_schema.debug("Table '%s': dropped column '%s' with cascade",
self._table, k)
f_obj_type = None
else:
f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
if f_obj_type:
ok = False
casts = [
('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
('varchar', 'text', 'TEXT', ''),
('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
('timestamp', 'date', 'date', '::date'),
('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
]
if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
try:
with cr.savepoint():
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
except psycopg2.NotSupportedError:
                                    # An in-place ALTER TABLE cannot be done because a view depends on this field.
# Do a manual copy. This will drop the view (that will be recreated later)
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
cr.commit()
_schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
for c in casts:
if (f_pg_type==c[0]) and (f._type==c[1]):
if f_pg_type != f_obj_type:
ok = True
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
cr.commit()
_schema.debug("Table '%s': column '%s' changed type from %s to %s",
self._table, k, c[0], c[1])
break
if f_pg_type != f_obj_type:
if not ok:
i = 0
while True:
newname = k + '_moved' + str(i)
cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
"WHERE c.relname=%s " \
"AND a.attname=%s " \
"AND c.oid=a.attrelid ", (self._table, newname))
if not cr.fetchone()[0]:
break
i += 1
if f_pg_notnull:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
_schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
self._table, k, f_pg_type, f._type, newname)
# if the field is required and hasn't got a NOT NULL constraint
if f.required and f_pg_notnull == 0:
if has_rows:
self._set_default_value_on_column(cr, k, context=context)
# add the NOT NULL constraint
try:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
cr.commit()
_schema.debug("Table '%s': column '%s': added NOT NULL constraint",
self._table, k)
except Exception:
msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
"If you want to have it, you should update the records and execute manually:\n"\
"ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
_schema.warning(msg, self._table, k, self._table, k)
cr.commit()
elif not f.required and f_pg_notnull == 1:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
cr.commit()
_schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
self._table, k)
# Verify index
indexname = '%s_%s_index' % (self._table, k)
cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
res2 = cr.dictfetchall()
if not res2 and f.select:
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
cr.commit()
if f._type == 'text':
# FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
"This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
" because there is a length limit for indexable btree values!\n"\
"Use a search view instead if you simply want to make the field searchable."
_schema.warning(msg, self._table, f._type, k)
if res2 and not f.select:
cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
cr.commit()
msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
_schema.debug(msg, self._table, k, f._type)
if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
dest_model = self.pool[f._obj]
if dest_model._auto and dest_model._table != 'ir_actions':
self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
# The field doesn't exist in database. Create it if necessary.
else:
if not isinstance(f, fields.function) or f.store:
# add the missing field
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, k, get_pg_type(f)[1])
# initialize it
if has_rows:
self._set_default_value_on_column(cr, k, context=context)
# remember the functions to call for the stored fields
if isinstance(f, fields.function):
order = 10
if f.store is not True: # i.e. if f.store is a dict
order = f.store[f.store.keys()[0]][2]
todo_end.append((order, self._update_store, (f, k)))
# remember new-style stored fields with compute method
if k in self._fields and self._fields[k].depends:
stored_fields.append(self._fields[k])
# and add constraints if needed
if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
if f._obj not in self.pool:
raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
dest_model = self.pool[f._obj]
ref = dest_model._table
# ir_actions is inherited so foreign key doesn't work on it
if dest_model._auto and ref != 'ir_actions':
self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
if f.select:
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
if f.required:
try:
cr.commit()
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
_schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
self._table, k)
except Exception:
msg = "WARNING: unable to set column %s of table %s not null !\n"\
"Try to re-run: openerp-server --update=module\n"\
"If it doesn't work, update records and execute manually:\n"\
"ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
_logger.warning(msg, k, self._table, self._table, k, exc_info=True)
cr.commit()
else:
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
create = not bool(cr.fetchone())
cr.commit() # start a new transaction
if self._auto:
self._add_sql_constraints(cr)
if create:
self._execute_sql(cr)
if store_compute:
self._parent_store_compute(cr)
cr.commit()
if stored_fields:
# trigger computation of new-style stored fields with a compute
def func(cr):
_logger.info("Storing computed values of %s fields %s",
self._name, ', '.join(sorted(f.name for f in stored_fields)))
recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
recs = recs.search([])
if recs:
map(recs._recompute_todo, stored_fields)
recs.recompute()
todo_end.append((1000, func, ()))
return todo_end
def _auto_end(self, cr, context=None):
""" Create the foreign keys recorded by _auto_init. """
for t, k, r, d in self._foreign_keys:
cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
cr.commit()
del self._foreign_keys
def _table_exist(self, cr):
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
return cr.rowcount
def _create_table(self, cr):
cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
_schema.debug("Table '%s': created", self._table)
def _parent_columns_exist(self, cr):
cr.execute("""SELECT c.relname
FROM pg_class c, pg_attribute a
WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
""", (self._table, 'parent_left'))
return cr.rowcount
def _create_parent_columns(self, cr):
cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
if 'parent_left' not in self._columns:
_logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
self._table)
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, 'parent_left', 'INTEGER')
elif not self._columns['parent_left'].select:
_logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
self._table)
if 'parent_right' not in self._columns:
_logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
self._table)
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, 'parent_right', 'INTEGER')
elif not self._columns['parent_right'].select:
_logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
self._table)
if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
_logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
self._parent_name, self._name)
cr.commit()
def _select_column_data(self, cr):
# attlen is the number of bytes necessary to represent the type when
# the type has a fixed size. If the type has a varying size attlen is
# -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
"FROM pg_class c,pg_attribute a,pg_type t " \
"WHERE c.relname=%s " \
"AND c.oid=a.attrelid " \
"AND a.atttypid=t.oid", (self._table,))
return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
def _o2m_raise_on_missing_reference(self, cr, f):
# TODO this check should be a method on fields.one2many.
if f._obj in self.pool:
other = self.pool[f._obj]
# TODO the condition could use fields_get_keys().
if f._fields_id not in other._columns.keys():
if f._fields_id not in other._inherit_fields.keys():
raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
def _m2m_raise_or_create_relation(self, cr, f):
m2m_tbl, col1, col2 = f._sql_names(self)
        # do not create relations for custom fields as they do not belong to a module;
        # they will be automatically removed when dropping the corresponding ir.model.field
        # table names for custom relations all start with x_, see __init__
if not m2m_tbl.startswith('x_'):
self._save_relation_table(cr, m2m_tbl)
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
if not cr.dictfetchall():
if f._obj not in self.pool:
raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
dest_model = self.pool[f._obj]
ref = dest_model._table
cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
# create foreign key references with ondelete=cascade, unless the targets are SQL views
cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
if not cr.fetchall():
self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
if not cr.fetchall():
self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
cr.commit()
_schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
def _add_sql_constraints(self, cr):
"""
        Modify this model's database table constraints so they match the ones in
        _sql_constraints.
"""
def unify_cons_text(txt):
return txt.lower().replace(', ',',').replace(' (','(')
for (key, con, _) in self._sql_constraints:
conname = '%s_%s' % (self._table, key)
self._save_constraint(cr, conname, 'u')
cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
existing_constraints = cr.dictfetchall()
sql_actions = {
'drop': {
'execute': False,
'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
self._table, conname, con),
'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
'order': 1,
},
'add': {
'execute': False,
'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
self._table, con),
'order': 2,
},
}
if not existing_constraints:
                # constraint does not exist:
sql_actions['add']['execute'] = True
sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
# constraint exists but its definition has changed:
sql_actions['drop']['execute'] = True
sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
sql_actions['add']['execute'] = True
sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
# we need to add the constraint:
sql_actions = [item for item in sql_actions.values()]
sql_actions.sort(key=lambda x: x['order'])
for sql_action in [action for action in sql_actions if action['execute']]:
try:
cr.execute(sql_action['query'])
cr.commit()
_schema.debug(sql_action['msg_ok'])
except:
_schema.warning(sql_action['msg_err'])
cr.rollback()
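    # Illustrative sketch (assumption): the _sql_constraints processed above are the
    # (key, sql_definition, error_message) triples declared on models, for example
    #
    #     _sql_constraints = [
    #         ('name_uniq', 'unique(name)', 'The name must be unique!'),
    #         ('amount_positive', 'CHECK(amount >= 0)', 'The amount cannot be negative.'),
    #     ]
    #
    # Each entry maps to a database constraint named '<table>_<key>', added when missing
    # or dropped and re-added when its definition differs from the one in the database.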
def _execute_sql(self, cr):
""" Execute the SQL code from the _sql attribute (if any)."""
if hasattr(self, "_sql"):
for line in self._sql.split(';'):
line2 = line.replace('\n', '').strip()
if line2:
cr.execute(line2)
cr.commit()
#
    # Update objects that use this one to update their _inherits fields
#
@classmethod
def _inherits_reload_src(cls):
""" Recompute the _inherit_fields mapping on each _inherits'd child model."""
for model in cls.pool.values():
if cls._name in model._inherits:
model._inherits_reload()
@classmethod
def _inherits_reload(cls):
""" Recompute the _inherit_fields mapping.
This will also call itself on each inherits'd child model.
"""
res = {}
for table in cls._inherits:
other = cls.pool[table]
for col in other._columns.keys():
res[col] = (table, cls._inherits[table], other._columns[col], table)
for col in other._inherit_fields.keys():
res[col] = (table, cls._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
cls._inherit_fields = res
cls._all_columns = cls._get_column_infos()
# interface columns with new-style fields
for attr, column in cls._columns.items():
if attr not in cls._fields:
cls._add_field(attr, column.to_field())
# interface inherited fields with new-style fields (note that the
# reverse order is for being consistent with _all_columns above)
for parent_model, parent_field in reversed(cls._inherits.items()):
for attr, field in cls.pool[parent_model]._fields.iteritems():
if attr not in cls._fields:
cls._add_field(attr, field.copy(
related=(parent_field, attr),
related_sudo=False,
_origin=field,
))
cls._inherits_reload_src()
@classmethod
def _get_column_infos(cls):
"""Returns a dict mapping all fields names (direct fields and
inherited field via _inherits) to a ``column_info`` struct
giving detailed columns """
result = {}
        # do not invert the for loops, since local fields may hide inherited ones!
for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
result[k] = fields.column_info(k, col, parent, m2o, original_parent)
for k, col in cls._columns.iteritems():
result[k] = fields.column_info(k, col)
return result
@classmethod
def _inherits_check(cls):
for table, field_name in cls._inherits.items():
if field_name not in cls._columns:
_logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
required=True, ondelete="cascade")
elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
_logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
cls._columns[field_name].required = True
cls._columns[field_name].ondelete = "cascade"
# reflect fields with delegate=True in dictionary cls._inherits
for field in cls._fields.itervalues():
if field.type == 'many2one' and not field.related and field.delegate:
if not field.required:
_logger.warning("Field %s with delegate=True must be required.", field)
field.required = True
if field.ondelete.lower() not in ('cascade', 'restrict'):
field.ondelete = 'cascade'
cls._inherits[field.comodel_name] = field.name
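    # Illustrative sketch (assumption): the delegation inheritance checked above can be
    # declared in two equivalent ways on a hypothetical model, either explicitly
    #
    #     _inherits = {'res.partner': 'partner_id'}
    #     partner_id = fields.Many2one('res.partner', required=True, ondelete='cascade')
    #
    # or, with the new-style API, by flagging the link field only:
    #
    #     partner_id = fields.Many2one('res.partner', required=True,
    #                                  ondelete='cascade', delegate=True)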
@api.model
def _prepare_setup_fields(self):
""" Prepare the setup of fields once the models have been loaded. """
for field in self._fields.itervalues():
field.reset()
@api.model
def _setup_fields(self, partial=False):
""" Setup the fields (dependency triggers, etc). """
for field in self._fields.itervalues():
if partial and field.manual and \
field.relational and field.comodel_name not in self.pool:
# do not set up manual fields that refer to unknown models
continue
field.setup(self.env)
# group fields by compute to determine field.computed_fields
fields_by_compute = defaultdict(list)
for field in self._fields.itervalues():
if field.compute:
field.computed_fields = fields_by_compute[field.compute]
field.computed_fields.append(field)
else:
field.computed_fields = []
def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
""" fields_get([fields])
Return the definition of each field.
        The returned value is a dictionary (indexed by field name) of
        dictionaries. The _inherits'd fields are included. The string, help,
and selection (if present) attributes are translated.
:param cr: database cursor
:param user: current user id
:param allfields: list of fields
:param context: context arguments, like lang, time zone
:return: dictionary of field dictionaries, each one describing a field of the business object
:raise AccessError: * if user has no create/write rights on the requested object
"""
recs = self.browse(cr, user, [], context)
res = {}
for fname, field in self._fields.iteritems():
if allfields and fname not in allfields:
continue
if field.groups and not recs.user_has_groups(field.groups):
continue
res[fname] = field.get_description(recs.env)
# if user cannot create or modify records, make all fields readonly
has_access = functools.partial(recs.check_access_rights, raise_exception=False)
if not (has_access('write') or has_access('create')):
for description in res.itervalues():
description['readonly'] = True
description['states'] = {}
return res
def get_empty_list_help(self, cr, user, help, context=None):
""" Generic method giving the help message displayed when having
            no result to display in a list or kanban view. By default it returns
            the help given as a parameter, which is generally the help message
            defined in the action.
"""
return help
def check_field_access_rights(self, cr, user, operation, fields, context=None):
"""
Check the user access rights on the given fields. This raises Access
Denied if the user does not have the rights. Otherwise it returns the
fields (as-is if ``fields`` is truthy, or the readable/writable
fields if ``fields`` is falsy).
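For illustration, a minimal sketch; the field names are hypothetical::
    readable = self.check_field_access_rights(cr, user, 'read', ['name', 'email'])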
"""
if user == SUPERUSER_ID:
return fields or list(self._fields)
def valid(fname):
""" determine whether user has access to field `fname` """
field = self._fields.get(fname)
if field and field.groups:
return self.user_has_groups(cr, user, groups=field.groups, context=context)
else:
return True
if not fields:
fields = filter(valid, self._fields)
else:
invalid_fields = set(filter(lambda name: not valid(name), fields))
if invalid_fields:
_logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
operation, user, self._name, ', '.join(invalid_fields))
raise AccessError(
_('The requested operation cannot be completed due to security restrictions. '
'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
(self._description, operation))
return fields
# add explicit old-style implementation to read()
@api.v7
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
records = self.browse(cr, user, ids, context)
result = BaseModel.read(records, fields, load=load)
return result if isinstance(ids, list) else (bool(result) and result[0])
# new-style implementation of read()
@api.v8
def read(self, fields=None, load='_classic_read'):
""" read([fields])
Reads the requested fields for the records in `self`, low-level/RPC
method. In Python code, prefer :meth:`~.browse`.
:param fields: list of field names to return (default is all fields)
:return: a list of dictionaries mapping field names to their values,
with one dictionary per record
:raise AccessError: if user has no read rights on some of the given
records
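For illustration, a minimal sketch of a call on a recordset; the field
names and returned values are hypothetical::
    records.read(['name', 'email'])
    # -> [{'id': 7, 'name': 'ACME', 'email': 'info@acme.example'}, ...]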
"""
# check access rights
self.check_access_rights('read')
fields = self.check_field_access_rights('read', fields)
# split fields into stored and computed fields
stored, computed = [], []
for name in fields:
if name in self._columns:
stored.append(name)
elif name in self._fields:
computed.append(name)
else:
_logger.warning("%s.read() with unknown field '%s'", self._name, name)
# fetch stored fields from the database to the cache
self._read_from_database(stored)
# retrieve results from records; this takes values from the cache and
# computes remaining fields
result = []
name_fields = [(name, self._fields[name]) for name in (stored + computed)]
use_name_get = (load == '_classic_read')
for record in self:
try:
values = {'id': record.id}
for name, field in name_fields:
values[name] = field.convert_to_read(record[name], use_name_get)
result.append(values)
except MissingError:
pass
return result
@api.multi
def _prefetch_field(self, field):
""" Read from the database in order to fetch `field` (:class:`Field`
instance) for `self` in cache.
"""
# fetch the records of this model without field_name in their cache
records = self._in_cache_without(field)
if len(records) > PREFETCH_MAX:
records = records[:PREFETCH_MAX] | self
# by default, simply fetch field
fnames = {field.name}
if self.env.in_draft:
# we may be doing an onchange, do not prefetch other fields
pass
elif self.env.field_todo(field):
# field must be recomputed, do not prefetch records to recompute
records -= self.env.field_todo(field)
elif not self._context.get('prefetch_fields', True):
# do not prefetch other fields
pass
elif self._columns[field.name]._prefetch:
# here we can optimize: prefetch all classic and many2one fields
fnames = set(fname
for fname, fcolumn in self._columns.iteritems()
if fcolumn._prefetch
if not fcolumn.groups or self.user_has_groups(fcolumn.groups)
)
# fetch records with read()
assert self in records and field.name in fnames
result = []
try:
result = records.read(list(fnames), load='_classic_write')
except AccessError:
pass
# check the cache, and update it if necessary
if not self._cache.contains(field):
for values in result:
record = self.browse(values.pop('id'))
record._cache.update(record._convert_to_cache(values, validate=False))
if not self._cache.contains(field):
e = AccessError("No value found for %s.%s" % (self, field.name))
self._cache[field] = FailedValue(e)
@api.multi
def _read_from_database(self, field_names):
""" Read the given fields of the records in `self` from the database,
and store them in cache. Access errors are also stored in cache.
"""
env = self.env
cr, user, context = env.args
# FIXME: The query construction needs to be rewritten using the internal Query
# object, as in search(), to avoid ambiguous column references when
# reading/sorting on a table that is auto_joined to another table with
# common columns (e.g. the magical columns)
# Construct a clause for the security rules.
# 'tables' holds the list of tables necessary for the SELECT, including
# the ir.rule clauses, and contains at least self._table.
rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
# determine the fields that are stored as columns in self._table
fields_pre = [f for f in field_names if self._columns[f]._classic_write]
# we need fully-qualified column names in case len(tables) > 1
def qualify(f):
if isinstance(self._columns.get(f), fields.binary) and \
context.get('bin_size_%s' % f, context.get('bin_size')):
# PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
else:
return '%s."%s"' % (self._table, f)
qual_names = map(qualify, set(fields_pre + ['id']))
query = """ SELECT %(qual_names)s FROM %(tables)s
WHERE %(table)s.id IN %%s AND (%(extra)s)
ORDER BY %(order)s
""" % {
'qual_names': ",".join(qual_names),
'tables': ",".join(tables),
'table': self._table,
'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
'order': self._parent_order or self._order,
}
result = []
for sub_ids in cr.split_for_in_conditions(self.ids):
cr.execute(query, [tuple(sub_ids)] + rule_params)
result.extend(cr.dictfetchall())
ids = [vals['id'] for vals in result]
if ids:
# translate the fields if necessary
if context.get('lang'):
ir_translation = env['ir.translation']
for f in fields_pre:
if self._columns[f].translate:
#TODO: optimize out of this loop
res_trans = ir_translation._get_ids(
'%s,%s' % (self._name, f), 'model', context['lang'], ids)
for vals in result:
vals[f] = res_trans.get(vals['id'], False) or vals[f]
# apply the symbol_get functions of the fields we just read
for f in fields_pre:
symbol_get = self._columns[f]._symbol_get
if symbol_get:
for vals in result:
vals[f] = symbol_get(vals[f])
# store result in cache for POST fields
for vals in result:
record = self.browse(vals['id'])
record._cache.update(record._convert_to_cache(vals, validate=False))
# determine the fields that must be processed now
fields_post = [f for f in field_names if not self._columns[f]._classic_write]
# Compute POST fields, grouped by multi
by_multi = defaultdict(list)
for f in fields_post:
by_multi[self._columns[f]._multi].append(f)
for multi, fs in by_multi.iteritems():
if multi:
res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
assert res2 is not None, \
'The function field "%s" on the "%s" model returned None\n' \
'(a dictionary was expected).' % (fs[0], self._name)
for vals in result:
# TOCHECK: why do we get a string instead of a dict in python2.6
# if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
multi_fields = res2.get(vals['id'], {})
if multi_fields:
for f in fs:
vals[f] = multi_fields.get(f, [])
else:
for f in fs:
res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
for vals in result:
if res2:
vals[f] = res2[vals['id']]
else:
vals[f] = []
# Warn about deprecated fields now that fields_pre and fields_post are computed
for f in field_names:
column = self._columns[f]
if column.deprecated:
_logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
# store result in cache
for vals in result:
record = self.browse(vals.pop('id'))
record._cache.update(record._convert_to_cache(vals, validate=False))
# store failed values in cache for the records that could not be read
fetched = self.browse(ids)
missing = self - fetched
if missing:
extras = fetched - self
if extras:
raise AccessError(
_("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
', '.join(map(repr, missing._ids)),
', '.join(map(repr, extras._ids)),
))
# store an access error exception in existing records
exc = AccessError(
_('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
(self._name, 'read')
)
forbidden = missing.exists()
forbidden._cache.update(FailedValue(exc))
# store a missing error exception in non-existing records
exc = MissingError(
_('One of the documents you are trying to access has been deleted, please try again after refreshing.')
)
(missing - forbidden)._cache.update(FailedValue(exc))
@api.multi
def get_metadata(self):
"""
Returns some metadata about the given records.
:return: list of ownership dictionaries for each requested record
:rtype: list of dictionaries with the following keys:
* id: object id
* create_uid: user who created the record
* create_date: date when the record was created
* write_uid: last user who changed the record
* write_date: date of the last change to the record
* xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
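For illustration, a hypothetical result for a single record (all values
are examples only)::
    [{'id': 42,
      'create_uid': (1, 'Administrator'),
      'create_date': '2014-01-01 10:00:00',
      'write_uid': (1, 'Administrator'),
      'write_date': '2014-01-02 12:30:00',
      'xmlid': 'base.main_company'}]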
"""
fields = ['id']
if self._log_access:
fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
quoted_table = '"%s"' % self._table
fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
query = '''SELECT %s, __imd.module, __imd.name
FROM %s LEFT JOIN ir_model_data __imd
ON (__imd.model = %%s and __imd.res_id = %s.id)
WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
self._cr.execute(query, (self._name, tuple(self.ids)))
res = self._cr.dictfetchall()
uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
names = dict(self.env['res.users'].browse(uids).name_get())
for r in res:
for key in r:
value = r[key] = r[key] or False
if key in ('write_uid', 'create_uid') and value in names:
r[key] = (value, names[value])
r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
del r['name'], r['module']
return res
def _check_concurrency(self, cr, ids, context):
if not context:
return
if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
return
check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
for sub_ids in cr.split_for_in_conditions(ids):
ids_to_check = []
for id in sub_ids:
id_ref = "%s,%s" % (self._name, id)
update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
if update_date:
ids_to_check.extend([id, update_date])
if not ids_to_check:
continue
cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
res = cr.fetchone()
if res:
# mention the first one only to keep the error message readable
raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
"""Verify the returned rows after applying record rules matches
the length of `ids`, and raise an appropriate exception if it does not.
"""
if context is None:
context = {}
ids, result_ids = set(ids), set(result_ids)
missing_ids = ids - result_ids
if missing_ids:
# Attempt to distinguish record rule restriction vs deleted records,
# to provide a more specific error message - check if the missing ids
# are still present in the database
cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
forbidden_ids = [x[0] for x in cr.fetchall()]
if forbidden_ids:
# the missing ids are (at least partially) hidden by access rules
if uid == SUPERUSER_ID:
return
_logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
raise except_orm(_('Access Denied'),
_('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
(self._description, operation))
else:
# If we get here, the missing_ids are not in the database
if operation in ('read','unlink'):
# No need to warn about deleting an already deleted record.
# And no error when reading a record that was deleted, to prevent spurious
# errors for non-transactional search/read sequences coming from clients
return
_logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
raise except_orm(_('Missing document(s)'),
_('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
"""Verifies that the operation given by ``operation`` is allowed for the user
according to the access rights."""
return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
def check_access_rule(self, cr, uid, ids, operation, context=None):
"""Verifies that the operation given by ``operation`` is allowed for the user
according to ir.rules.
:param operation: one of ``write``, ``unlink``
:raise except_orm: * if current ir.rules do not permit this operation.
:return: None if the operation is allowed
"""
if uid == SUPERUSER_ID:
return
if self.is_transient():
# Only one single implicit access rule for transient models: owner only!
# This is ok to hardcode because we assert that TransientModels always
# have log_access enabled so that the create_uid column is always there.
# And even with _inherits, these fields are always present in the local
# table too, so no need for JOINs.
cr.execute("""SELECT distinct create_uid
FROM %s
WHERE id IN %%s""" % self._table, (tuple(ids),))
uids = [x[0] for x in cr.fetchall()]
if len(uids) != 1 or uids[0] != uid:
raise except_orm(_('Access Denied'),
_('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
else:
where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
if where_clause:
where_clause = ' and ' + ' and '.join(where_clause)
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
' WHERE ' + self._table + '.id IN %s' + where_clause,
[sub_ids] + where_params)
returned_ids = [x['id'] for x in cr.dictfetchall()]
self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
def create_workflow(self, cr, uid, ids, context=None):
"""Create a workflow instance for each given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_create(uid, self._name, res_id, cr)
# self.invalidate_cache(cr, uid, context=context) ?
return True
def delete_workflow(self, cr, uid, ids, context=None):
"""Delete the workflow instances bound to the given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_delete(uid, self._name, res_id, cr)
self.invalidate_cache(cr, uid, context=context)
return True
def step_workflow(self, cr, uid, ids, context=None):
"""Reevaluate the workflow instances of the given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_write(uid, self._name, res_id, cr)
# self.invalidate_cache(cr, uid, context=context) ?
return True
def signal_workflow(self, cr, uid, ids, signal, context=None):
"""Send given workflow signal and return a dict mapping ids to workflow results"""
from openerp import workflow
result = {}
for res_id in ids:
result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
# self.invalidate_cache(cr, uid, context=context) ?
return result
def redirect_workflow(self, cr, uid, old_new_ids, context=None):
""" Rebind the workflow instance bound to the given 'old' record IDs to
the given 'new' IDs (``old_new_ids`` is a list of pairs ``(old, new)``).
"""
from openerp import workflow
for old_id, new_id in old_new_ids:
workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
self.invalidate_cache(cr, uid, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
""" unlink()
Deletes the records of the current set
:raise AccessError: * if user has no unlink rights on the requested object
* if user tries to bypass access rules for unlink on the requested object
:raise UserError: if the record is default property for other records
"""
if not ids:
return True
if isinstance(ids, (int, long)):
ids = [ids]
result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
# for recomputing new-style fields
recs = self.browse(cr, uid, ids, context)
recs.modified(self._fields)
self._check_concurrency(cr, ids, context)
self.check_access_rights(cr, uid, 'unlink')
ir_property = self.pool.get('ir.property')
# Check if the records are used as default properties.
domain = [('res_id', '=', False),
('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
]
if ir_property.search(cr, uid, domain, context=context):
raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
# Delete the records' properties.
property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
ir_property.unlink(cr, uid, property_ids, context=context)
self.delete_workflow(cr, uid, ids, context=context)
self.check_access_rule(cr, uid, ids, 'unlink', context=context)
pool_model_data = self.pool.get('ir.model.data')
ir_values_obj = self.pool.get('ir.values')
ir_attachment_obj = self.pool.get('ir.attachment')
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('delete from ' + self._table + ' ' \
'where id IN %s', (sub_ids,))
# Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
# as these are not connected with real database foreign keys, and would be dangling references.
# Note: following steps performed as admin to avoid access rights restrictions, and with no context
# to avoid possible side-effects during admin calls.
# Step 1. Calling unlink of ir_model_data only for the affected IDS
reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
# Step 2. Perform the actual deletion of those ir.model.data records
if reference_ids:
pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
# For the same reason, removing the record relevant to ir_values
ir_value_ids = ir_values_obj.search(cr, uid,
['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
context=context)
if ir_value_ids:
ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
# For the same reason, removing the record relevant to ir_attachment
# The search is performed with sql as the search method of ir_attachment is overridden to hide attachments of deleted records
cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
if ir_attachment_ids:
ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
# invalidate the *whole* cache, since the orm does not handle all
# changes made in the database, like cascading delete!
recs.invalidate_cache()
for order, obj_name, store_ids, fields in result_store:
if obj_name == self._name:
effective_store_ids = set(store_ids) - set(ids)
else:
effective_store_ids = store_ids
if effective_store_ids:
obj = self.pool[obj_name]
cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
rids = map(lambda x: x[0], cr.fetchall())
if rids:
obj._store_set_values(cr, uid, rids, fields, context)
# recompute new-style fields
recs.recompute()
return True
#
# TODO: Validate
#
@api.multi
def write(self, vals):
""" write(vals)
Updates all records in the current set with the provided values.
:param dict vals: fields to update and the value to set on them e.g::
{'foo': 1, 'bar': "Qux"}
will set the field ``foo`` to ``1`` and the field ``bar`` to
``"Qux"`` if those are valid (otherwise it will trigger an error).
:raise AccessError: * if user has no write rights on the requested object
* if user tries to bypass access rules for write on the requested object
:raise ValidateError: if user tries to enter invalid value for a field that is not in selection
:raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
.. _openerp/models/relationals/format:
.. note:: Relational fields use a special "commands" format to manipulate their values
This format is a list of command triplets executed sequentially,
possible command triplets are:
``(0, _, values: dict)``
links to a new record created from the provided values
``(1, id, values: dict)``
updates the already-linked record of id ``id`` with the
provided ``values``
``(2, id, _)``
unlinks and deletes the linked record of id ``id``
``(3, id, _)``
unlinks the linked record of id ``id`` without deleting it
``(4, id, _)``
links to an existing record of id ``id``
``(5, _, _)``
unlinks all records in the relation, equivalent to using
the command ``3`` on every linked record
``(6, _, ids)``
replaces the existing list of linked records with the provided
ones, equivalent to using ``5`` then ``4`` for each id in
``ids``)
(in command triplets, ``_`` values are ignored and can be
anything, generally ``0`` or ``False``)
Any command can be used on :class:`~openerp.fields.Many2many`,
only ``0``, ``1`` and ``2`` can be used on
:class:`~openerp.fields.One2many`.
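For illustration, a minimal sketch mixing a scalar value with
relational commands; the field and variable names are hypothetical::
    records.write({
        'name': "New name",
        'tag_ids': [(4, tag_to_add.id, 0), (3, tag_to_unlink.id, 0)],
    })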
"""
if not self:
return True
self._check_concurrency(self._ids)
self.check_access_rights('write')
# No user-driven update of these columns
for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
vals.pop(field, None)
# split up fields into old-style and pure new-style ones
old_vals, new_vals, unknown = {}, {}, []
for key, val in vals.iteritems():
if key in self._columns:
old_vals[key] = val
elif key in self._fields:
new_vals[key] = val
else:
unknown.append(key)
if unknown:
_logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
# write old-style fields with (low-level) method _write
if old_vals:
self._write(old_vals)
# put the values of pure new-style fields into cache, and inverse them
if new_vals:
for record in self:
record._cache.update(record._convert_to_cache(new_vals, update=True))
for key in new_vals:
self._fields[key].determine_inverse(self)
return True
def _write(self, cr, user, ids, vals, context=None):
# low-level implementation of write()
if not context:
context = {}
readonly = None
self.check_field_access_rights(cr, user, 'write', vals.keys())
deleted_related = defaultdict(list)
for field in vals.keys():
fobj = None
if field in self._columns:
fobj = self._columns[field]
elif field in self._inherit_fields:
fobj = self._inherit_fields[field][2]
if not fobj:
continue
if fobj._type in ['one2many', 'many2many'] and vals[field]:
for wtuple in vals[field]:
if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
deleted_related[fobj._obj].append(wtuple[1])
groups = fobj.write
if groups:
edit = False
for group in groups:
module = group.split(".")[0]
grp = group.split(".")[1]
cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
(grp, module, 'res.groups', user))
readonly = cr.fetchall()
if readonly[0][0] >= 1:
edit = True
break
if not edit:
vals.pop(field)
result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
# for recomputing new-style fields
recs = self.browse(cr, user, ids, context)
modified_fields = list(vals)
if self._log_access:
modified_fields += ['write_date', 'write_uid']
recs.modified(modified_fields)
parents_changed = []
parent_order = self._parent_order or self._order
if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
# The parent_left/right computation may take up to
# 5 seconds. No need to recompute the values if the
# parent is the same.
# Note: to respect parent_order, nodes must be processed in
# order, so ``parents_changed`` must be ordered properly.
parent_val = vals[self._parent_name]
if parent_val:
query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
(self._table, self._parent_name, self._parent_name, parent_order)
cr.execute(query, (tuple(ids), parent_val))
else:
query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
(self._table, self._parent_name, parent_order)
cr.execute(query, (tuple(ids),))
parents_changed = map(operator.itemgetter(0), cr.fetchall())
upd0 = []
upd1 = []
upd_todo = []
updend = []
direct = []
totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
for field in vals:
field_column = self._all_columns.get(field) and self._all_columns.get(field).column
if field_column and field_column.deprecated:
_logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
if field in self._columns:
if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
if (not totranslate) or not self._columns[field].translate:
upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
upd1.append(self._columns[field]._symbol_set[1](vals[field]))
direct.append(field)
else:
upd_todo.append(field)
else:
updend.append(field)
if field in self._columns \
and hasattr(self._columns[field], 'selection') \
and vals[field]:
self._check_selection_field_value(cr, user, field, vals[field], context=context)
if self._log_access:
upd0.append('write_uid=%s')
upd0.append("write_date=(now() at time zone 'UTC')")
upd1.append(user)
if len(upd0):
self.check_access_rule(cr, user, ids, 'write', context=context)
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
'where id IN %s', upd1 + [sub_ids])
if cr.rowcount != len(sub_ids):
raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
if totranslate:
# TODO: optimize
for f in direct:
if self._columns[f].translate:
src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
if not src_trans:
src_trans = vals[f]
# Inserting value to DB
context_wo_lang = dict(context, lang=None)
self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
# call the 'set' method of fields which are not classic_write
upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
# default elements in context must be removed when calling a one2many or many2many
rel_context = context.copy()
for c in context.items():
if c[0].startswith('default_'):
del rel_context[c[0]]
for field in upd_todo:
for id in ids:
result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
unknown_fields = updend[:]
for table in self._inherits:
col = self._inherits[table]
nids = []
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
'where id IN %s', (sub_ids,))
nids.extend([x[0] for x in cr.fetchall()])
v = {}
for val in updend:
if self._inherit_fields[val][0] == table:
v[val] = vals[val]
unknown_fields.remove(val)
if v:
self.pool[table].write(cr, user, nids, v, context)
if unknown_fields:
_logger.warning(
'No such field(s) in model %s: %s.',
self._name, ', '.join(unknown_fields))
# check Python constraints
recs._validate_fields(vals)
# TODO: use _order to set dest at the right position and not first node of parent
# We can't defer parent_store computation because the stored function
# fields that are computed may refer (directly or indirectly) to
# parent_left/right (via a child_of domain)
if parents_changed:
if self.pool._init:
self.pool._init_parent[self._name] = True
else:
order = self._parent_order or self._order
parent_val = vals[self._parent_name]
if parent_val:
clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
else:
clause, params = '%s IS NULL' % (self._parent_name,), ()
for id in parents_changed:
cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
pleft, pright = cr.fetchone()
distance = pright - pleft + 1
# Positions of current siblings, to locate proper insertion point;
# this can _not_ be fetched outside the loop, as it needs to be refreshed
# after each update, in case several nodes are sequentially inserted one
# next to the other (i.e. computed incrementally)
cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
parents = cr.fetchall()
# Find Position of the element
position = None
for (parent_pright, parent_id) in parents:
if parent_id == id:
break
position = parent_pright and parent_pright + 1 or 1
# It's the first node of the parent
if not position:
if not parent_val:
position = 1
else:
cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
position = cr.fetchone()[0] + 1
if pleft < position <= pright:
raise except_orm(_('UserError'), _('Recursivity Detected.'))
if pleft < position:
cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
else:
cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
recs.invalidate_cache(['parent_left', 'parent_right'])
result += self._store_get_values(cr, user, ids, vals.keys(), context)
result.sort()
# for recomputing new-style fields
recs.modified(modified_fields)
done = {}
for order, model_name, ids_to_update, fields_to_recompute in result:
key = (model_name, tuple(fields_to_recompute))
done.setdefault(key, {})
# avoid doing the same computation several times
todo = []
for id in ids_to_update:
if id not in done[key]:
done[key][id] = True
if id not in deleted_related[model_name]:
todo.append(id)
self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
# recompute new-style fields
if context.get('recompute', True):
recs.recompute()
self.step_workflow(cr, user, ids, context=context)
return True
#
# TODO: Should set perm to user.xxx
#
@api.model
@api.returns('self', lambda value: value.id)
def create(self, vals):
""" create(vals) -> record
Creates a new record for the model.
The new record is initialized using the values from ``vals`` and
if necessary those from :meth:`~.default_get`.
:param dict vals:
values for the model's fields, as a dictionary::
{'field_name': field_value, ...}
see :meth:`~.write` for details
:return: new record created
:raise AccessError: * if user has no create rights on the requested object
* if user tries to bypass access rules for create on the requested object
:raise ValidateError: if user tries to enter invalid value for a field that is not in selection
:raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
"""
self.check_access_rights('create')
# add missing defaults, and drop fields that may not be set by user
vals = self._add_missing_default_values(vals)
for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
vals.pop(field, None)
# split up fields into old-style and pure new-style ones
old_vals, new_vals, unknown = {}, {}, []
for key, val in vals.iteritems():
if key in self._all_columns:
old_vals[key] = val
elif key in self._fields:
new_vals[key] = val
else:
unknown.append(key)
if unknown:
_logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
# create record with old-style fields
record = self.browse(self._create(old_vals))
# put the values of pure new-style fields into cache, and inverse them
record._cache.update(record._convert_to_cache(new_vals))
for key in new_vals:
self._fields[key].determine_inverse(record)
return record
def _create(self, cr, user, vals, context=None):
# low-level implementation of create()
if not context:
context = {}
if self.is_transient():
self._transient_vacuum(cr, user)
tocreate = {}
for v in self._inherits:
if self._inherits[v] not in vals:
tocreate[v] = {}
else:
tocreate[v] = {'id': vals[self._inherits[v]]}
updates = [
# list of column assignments defined as tuples like:
# (column_name, format_string, column_value)
# (column_name, sql_formula)
# Those tuples will be used by the string formatting for the INSERT
# statement below.
('id', "nextval('%s')" % self._sequence),
]
upd_todo = []
unknown_fields = []
for v in vals.keys():
if v in self._inherit_fields and v not in self._columns:
(table, col, col_detail, original_parent) = self._inherit_fields[v]
tocreate[table][v] = vals[v]
del vals[v]
else:
if (v not in self._inherit_fields) and (v not in self._columns):
del vals[v]
unknown_fields.append(v)
if unknown_fields:
_logger.warning(
'No such field(s) in model %s: %s.',
self._name, ', '.join(unknown_fields))
for table in tocreate:
if self._inherits[table] in vals:
del vals[self._inherits[table]]
record_id = tocreate[table].pop('id', None)
if record_id is None or not record_id:
record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
else:
self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)
updates.append((self._inherits[table], '%s', record_id))
# Start: set boolean fields to False if they are not touched (to make search more powerful)
bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
for bool_field in bool_fields:
if bool_field not in vals:
vals[bool_field] = False
#End
for field in vals.keys():
fobj = None
if field in self._columns:
fobj = self._columns[field]
else:
fobj = self._inherit_fields[field][2]
if not fobj:
continue
groups = fobj.write
if groups:
edit = False
for group in groups:
module = group.split(".")[0]
grp = group.split(".")[1]
cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
(grp, module, 'res.groups', user))
readonly = cr.fetchall()
if readonly[0][0] >= 1:
edit = True
break
elif readonly[0][0] == 0:
edit = False
else:
edit = False
if not edit:
vals.pop(field)
for field in vals:
current_field = self._columns[field]
if current_field._classic_write:
updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
#for the function fields that receive a value, we set them directly in the database
#(they may be required), but we also need to trigger the _fnct_inv()
if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
#TODO: this way of special-casing the related fields is really creepy, but it shouldn't be changed
#one week before the release candidate. The only clean way to handle this correctly seems to be to add an
#attribute making a field "really readonly", and thus totally ignored by create()... otherwise,
#if for example the related field has a default value (for usability), then _fnct_inv is called and it
#may raise an access rights error. Changing this is too big a change for now, and is thus postponed
#until after the release; but the behavior definitely shouldn't differ between related and function
#fields.
upd_todo.append(field)
else:
#TODO: this `if` statement should be removed because there is no good reason to special-case the related
#fields. See the above TODO comment for further explanations.
if not isinstance(current_field, fields.related):
upd_todo.append(field)
if field in self._columns \
and hasattr(current_field, 'selection') \
and vals[field]:
self._check_selection_field_value(cr, user, field, vals[field], context=context)
if self._log_access:
updates.append(('create_uid', '%s', user))
updates.append(('write_uid', '%s', user))
updates.append(('create_date', "(now() at time zone 'UTC')"))
updates.append(('write_date', "(now() at time zone 'UTC')"))
# the list of tuples used in this formatting corresponds to
# tuple(field_name, format, value)
# In some cases, for example (id, create_date, write_date), we do not
# need to read the third value of the tuple, because the real value is
# encoded in the second value (the format).
cr.execute(
"""INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
self._table,
', '.join('"%s"' % u[0] for u in updates),
', '.join(u[1] for u in updates)
),
tuple([u[2] for u in updates if len(u) > 2])
)
id_new, = cr.fetchone()
recs = self.browse(cr, user, id_new, context)
upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
if self._parent_store and not context.get('defer_parent_store_computation'):
if self.pool._init:
self.pool._init_parent[self._name] = True
else:
parent = vals.get(self._parent_name, False)
if parent:
cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
pleft_old = None
result_p = cr.fetchall()
for (pleft,) in result_p:
if not pleft:
break
pleft_old = pleft
if not pleft_old:
cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
pleft_old = cr.fetchone()[0]
pleft = pleft_old
else:
cr.execute('select max(parent_right) from '+self._table)
pleft = cr.fetchone()[0] or 0
cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
recs.invalidate_cache(['parent_left', 'parent_right'])
# default elements in context must be removed when calling a one2many or many2many
rel_context = context.copy()
for c in context.items():
if c[0].startswith('default_'):
del rel_context[c[0]]
result = []
for field in upd_todo:
result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
# check Python constraints
recs._validate_fields(vals)
# invalidate and mark new-style fields to recompute
modified_fields = list(vals)
if self._log_access:
modified_fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
recs.modified(modified_fields)
if context.get('recompute', True):
result += self._store_get_values(cr, user, [id_new],
list(set(vals.keys() + self._inherits.values())),
context)
result.sort()
done = []
for order, model_name, ids, fields2 in result:
if not (model_name, ids, fields2) in done:
self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
done.append((model_name, ids, fields2))
# recompute new-style fields
recs.recompute()
if self._log_create and context.get('recompute', True):
message = self._description + \
" '" + \
self.name_get(cr, user, [id_new], context=context)[0][1] + \
"' " + _("created.")
self.log(cr, user, id_new, message, True, context=context)
self.check_access_rule(cr, user, [id_new], 'create', context=context)
self.create_workflow(cr, user, [id_new], context=context)
return id_new
def _store_get_values(self, cr, uid, ids, fields, context):
"""Returns an ordered list of fields.function to call due to
an update operation on ``fields`` of records with ``ids``,
obtained by calling the 'store' triggers of these fields,
as set up by their 'store' attribute.
:return: [(priority, model_name, [record_ids,], [function_fields,])]
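For illustration, a hypothetical return value (model and field names
are examples only)::
    [(10, 'account.move', [1, 2], ['amount_total'])]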
"""
if fields is None: fields = []
stored_functions = self.pool._store_function.get(self._name, [])
# use indexed names for the details of the stored_functions:
model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
# only keep store triggers that should be triggered for the ``fields``
# being written to.
triggers_to_compute = (
f for f in stored_functions
if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
)
to_compute_map = {}
target_id_results = {}
for store_trigger in triggers_to_compute:
target_func_id_ = id(store_trigger[target_ids_func_])
if target_func_id_ not in target_id_results:
# use admin user for accessing objects having rules defined on store fields
target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
target_ids = target_id_results[target_func_id_]
# the compound key must consider the priority and model name
key = (store_trigger[priority_], store_trigger[model_name_])
for target_id in target_ids:
to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
# Here to_compute_map looks like:
# { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
# (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
# (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
# }
# Now we need to generate the batch function calls list
# call_map =
# { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
call_map = {}
for ((priority,model), id_map) in to_compute_map.iteritems():
trigger_ids_maps = {}
# function_ids_maps =
# { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
for target_id, triggers in id_map.iteritems():
trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
for triggers, target_ids in trigger_ids_maps.iteritems():
call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
[t[func_field_to_compute_] for t in triggers]))
result = []
if call_map:
result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
return result
def _store_set_values(self, cr, uid, ids, fields, context):
"""Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
respecting ``multi`` attributes), and stores the resulting values in the database directly."""
if not ids:
return True
field_flag = False
field_dict = {}
if self._log_access:
cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
res = cr.fetchall()
for r in res:
if r[1]:
field_dict.setdefault(r[0], [])
res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
for i in self.pool._store_function.get(self._name, []):
if i[5]:
up_write_date = write_date + datetime.timedelta(hours=i[5])
if datetime.datetime.now() < up_write_date:
if i[1] in fields:
field_dict[r[0]].append(i[1])
if not field_flag:
field_flag = True
todo = {}
keys = []
for f in fields:
if self._columns[f]._multi not in keys:
keys.append(self._columns[f]._multi)
todo.setdefault(self._columns[f]._multi, [])
todo[self._columns[f]._multi].append(f)
for key in keys:
val = todo[key]
if key:
# use admin user for accessing objects having rules defined on store fields
result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
for id, value in result.items():
if field_flag:
for f in value.keys():
if f in field_dict[id]:
value.pop(f)
upd0 = []
upd1 = []
for v in value:
if v not in val:
continue
if self._columns[v]._type == 'many2one':
try:
value[v] = value[v][0]
except:
pass
upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
upd1.append(self._columns[v]._symbol_set[1](value[v]))
upd1.append(id)
if upd0 and upd1:
cr.execute('update "' + self._table + '" set ' + \
','.join(upd0) + ' where id = %s', upd1)
else:
for f in val:
# use admin user for accessing objects having rules defined on store fields
result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
for r in result.keys():
if field_flag:
if r in field_dict.keys():
if f in field_dict[r]:
result.pop(r)
for id, value in result.items():
if self._columns[f]._type == 'many2one':
try:
value = value[0]
except:
pass
cr.execute('update "' + self._table + '" set ' + \
'"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
# invalidate and mark new-style fields to recompute
self.browse(cr, uid, ids, context).modified(fields)
return True
# TODO: improve handling of NULL values
def _where_calc(self, cr, user, domain, active_test=True, context=None):
"""Computes the WHERE clause needed to implement an OpenERP domain.
:param domain: the domain to compute
:type domain: list
:param active_test: whether the default filtering of records with ``active``
field set to ``False`` should be applied.
:return: the query expressing the given domain as provided in domain
:rtype: osv.query.Query
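For illustration, a minimal sketch; the domain is hypothetical::
    query = self._where_calc(cr, user, [('name', 'ilike', 'agrolait')])
    from_clause, where_clause, params = query.get_sql()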
"""
if not context:
context = {}
domain = domain[:]
# if the object has a field named 'active', filter out all inactive
# records unless they were explicitly asked for
if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
if domain:
# the item[0] trick below works for domain items and '&'/'|'/'!'
# operators too
if not any(item[0] == 'active' for item in domain):
domain.insert(0, ('active', '=', 1))
else:
domain = [('active', '=', 1)]
if domain:
e = expression.expression(cr, user, domain, self, context)
tables = e.get_tables()
where_clause, where_params = e.to_sql()
where_clause = where_clause and [where_clause] or []
else:
where_clause, where_params, tables = [], [], ['"%s"' % self._table]
return Query(tables, where_clause, where_params)
def _check_qorder(self, word):
if not regex_order.match(word):
raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
return True
def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
"""Add what's missing in ``query`` to implement all appropriate ir.rules
(using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
:param query: the current query object
"""
if uid == SUPERUSER_ID:
return
def apply_rule(added_clause, added_params, added_tables, parent_model=None):
""" :param parent_model: name of the parent model, if the added
clause comes from a parent model
"""
if added_clause:
if parent_model:
# as inherited rules are being applied, we need to add the missing JOIN
# to reach the parent table (if it was not JOINed yet in the query)
parent_alias = self._inherits_join_add(self, parent_model, query)
# inherited rules are applied on the external table -> need to get the alias and replace
parent_table = self.pool[parent_model]._table
added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
# change references to parent_table to parent_alias, because we now use the alias to refer to the table
new_tables = []
for table in added_tables:
# table is just a table name -> switch to the full alias
if table == '"%s"' % parent_table:
new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
# table is already a full statement -> replace the reference to the table with its alias; this is correct given the way aliases are generated
else:
new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
added_tables = new_tables
query.where_clause += added_clause
query.where_clause_params += added_params
for table in added_tables:
if table not in query.tables:
query.tables.append(table)
return True
return False
# apply main rules on the object
rule_obj = self.pool.get('ir.rule')
rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
# apply ir.rules from the parents (through _inherits)
for inherited_model in self._inherits:
rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
parent_model=inherited_model)
def _generate_m2o_order_by(self, order_field, query):
"""
Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
either native m2o fields or function/related fields that are stored, including
intermediate JOINs for inheritance if required.
:return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
"""
if order_field not in self._columns and order_field in self._inherit_fields:
# also add missing joins for reaching the table containing the m2o field
qualified_field = self._inherits_join_calc(order_field, query)
order_field_column = self._inherit_fields[order_field][2]
else:
qualified_field = '"%s"."%s"' % (self._table, order_field)
order_field_column = self._columns[order_field]
assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
_logger.debug("Many2one function/related fields must be stored " \
"to be used as ordering fields! Ignoring sorting for %s.%s",
self._name, order_field)
return
# figure out the applicable order_by for the m2o
dest_model = self.pool[order_field_column._obj]
m2o_order = dest_model._order
if not regex_order.match(m2o_order):
# _order is complex, can't use it here, so we default to _rec_name
m2o_order = dest_model._rec_name
else:
# extract the field names, to be able to qualify them and add desc/asc
m2o_order_list = []
for order_part in m2o_order.split(","):
m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
m2o_order = m2o_order_list
# Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
# as we don't want to exclude results that have NULL values for the m2o
src_table, src_field = qualified_field.replace('"', '').split('.', 1)
dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
def _generate_order_by(self, order_spec, query):
"""
Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
:raise" except_orm in case order_spec is malformed
"""
order_by_clause = ''
order_spec = order_spec or self._order
if order_spec:
order_by_elements = []
self._check_qorder(order_spec)
for order_part in order_spec.split(','):
order_split = order_part.strip().split(' ')
order_field = order_split[0].strip()
order_direction = order_split[1].strip() if len(order_split) == 2 else ''
order_column = None
inner_clause = None
if order_field == 'id':
order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
elif order_field in self._columns:
order_column = self._columns[order_field]
if order_column._classic_read:
inner_clause = '"%s"."%s"' % (self._table, order_field)
elif order_column._type == 'many2one':
inner_clause = self._generate_m2o_order_by(order_field, query)
else:
continue # ignore non-readable or "non-joinable" fields
elif order_field in self._inherit_fields:
parent_obj = self.pool[self._inherit_fields[order_field][3]]
order_column = parent_obj._columns[order_field]
if order_column._classic_read:
inner_clause = self._inherits_join_calc(order_field, query)
elif order_column._type == 'many2one':
inner_clause = self._generate_m2o_order_by(order_field, query)
else:
continue # ignore non-readable or "non-joinable" fields
else:
raise ValueError( _("Sorting field %s not found on model %s") %( order_field, self._name))
if order_column and order_column._type == 'boolean':
inner_clause = "COALESCE(%s, false)" % inner_clause
if inner_clause:
if isinstance(inner_clause, list):
for clause in inner_clause:
order_by_elements.append("%s %s" % (clause, order_direction))
else:
order_by_elements.append("%s %s" % (inner_clause, order_direction))
if order_by_elements:
order_by_clause = ",".join(order_by_elements)
return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
"""
Private implementation of search() method, allowing specifying the uid to use for the access right check.
This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
This is ok at the security level because this method is private and not callable through XML-RPC.
:param access_rights_uid: optional user ID to use when checking access rights
(not for ir.rules, this is only for ir.model.access)
"""
if context is None:
context = {}
self.check_access_rights(cr, access_rights_uid or user, 'read')
# For transient models, restrict access to the current user, except for the super-user
if self.is_transient() and self._log_access and user != SUPERUSER_ID:
args = expression.AND(([('create_uid', '=', user)], args or []))
query = self._where_calc(cr, user, args, context=context)
self._apply_ir_rules(cr, user, query, 'read', context=context)
order_by = self._generate_order_by(order, query)
from_clause, where_clause, where_clause_params = query.get_sql()
where_str = where_clause and (" WHERE %s" % where_clause) or ''
if count:
# Ignore order, limit and offset when just counting, they don't make sense and could
# hurt performance
query_str = 'SELECT count(1) FROM ' + from_clause + where_str
cr.execute(query_str, where_clause_params)
res = cr.fetchone()
return res[0]
limit_str = limit and ' limit %d' % limit or ''
offset_str = offset and ' offset %d' % offset or ''
query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
cr.execute(query_str, where_clause_params)
res = cr.fetchall()
# TDE note: with auto_join, we could have several lines about the same result
# i.e. a lead with several unread messages; we uniquify the result using
# a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
def _uniquify_list(seq):
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
return _uniquify_list([x[0] for x in res])
# returns the different values ever entered for one field
# this is used, for example, in the client when the user hits enter on
# a char field
def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
if not args:
args = []
if field in self._inherit_fields:
return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
else:
return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
def copy_data(self, cr, uid, id, default=None, context=None):
"""
Copy given record's data with all its fields values
:param cr: database cursor
:param uid: current user id
:param id: id of the record to copy
:param default: field values to override in the original values of the copied record
:type default: dictionary
:param context: context arguments, like lang, time zone
:type context: dictionary
:return: dictionary containing all the field values
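For illustration, a minimal sketch; the id and the override are hypothetical::
    values = self.copy_data(cr, uid, 42, default={'state': 'draft'})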
"""
if context is None:
context = {}
# avoid recursion through already copied records in case of circular relationship
seen_map = context.setdefault('__copy_data_seen', {})
if id in seen_map.setdefault(self._name, []):
return
seen_map[self._name].append(id)
if default is None:
default = {}
if 'state' not in default:
if 'state' in self._defaults:
if callable(self._defaults['state']):
default['state'] = self._defaults['state'](self, cr, uid, context)
else:
default['state'] = self._defaults['state']
# build a black list of fields that should not be copied
blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
def blacklist_given_fields(obj):
# blacklist the fields that are given by inheritance
for other, field_to_other in obj._inherits.items():
blacklist.add(field_to_other)
if field_to_other in default:
# all the fields of 'other' are given by the record: default[field_to_other],
# except the ones redefined in self
blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
else:
blacklist_given_fields(self.pool[other])
# blacklist deprecated fields
for name, field in obj._columns.items():
if field.deprecated:
blacklist.add(name)
blacklist_given_fields(self)
fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
if fi.column.copy
if f not in default
if f not in blacklist)
data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
if data:
data = data[0]
else:
raise IndexError(_("Record #%d of %s not found, cannot copy!") % (id, self._name))
res = dict(default)
for f, colinfo in fields_to_copy.iteritems():
field = colinfo.column
if field._type == 'many2one':
res[f] = data[f] and data[f][0]
elif field._type == 'one2many':
other = self.pool[field._obj]
# duplicate following the order of the ids because we'll rely on
# it later for copying translations in copy_translation()!
lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
# the lines are duplicated using the wrong (old) parent, but then
# are reassigned to the correct one thanks to the (0, 0, ...)
res[f] = [(0, 0, line) for line in lines if line]
elif field._type == 'many2many':
res[f] = [(6, 0, data[f])]
else:
res[f] = data[f]
return res
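# Illustrative sketch of the dictionary shape produced by copy_data() for the
# different column types handled above; the field names are placeholders only.
def _copy_data_shape_example():
    return {
        'name': 'Copied name',                    # ordinary column: plain value
        'partner_id': 7,                          # many2one: bare id of the target record
        'line_ids': [(0, 0, {'name': 'line'})],   # one2many: (0, 0, vals) create commands
        'tag_ids': [(6, 0, [1, 2, 3])],           # many2many: (6, 0, ids) replace command
    }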
def copy_translations(self, cr, uid, old_id, new_id, context=None):
if context is None:
context = {}
# avoid recursion through already copied records in case of circular relationship
seen_map = context.setdefault('__copy_translations_seen',{})
if old_id in seen_map.setdefault(self._name,[]):
return
seen_map[self._name].append(old_id)
trans_obj = self.pool.get('ir.translation')
# TODO it seems fields_get can be replaced by _all_columns (no need for translation)
fields = self.fields_get(cr, uid, context=context)
for field_name, field_def in fields.items():
# removing the lang to compare untranslated values
context_wo_lang = dict(context, lang=None)
old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
# we must recursively copy the translations for o2o and o2m
if field_def['type'] == 'one2many':
target_obj = self.pool[field_def['relation']]
# here we rely on the order of the ids to match the translations
# as foreseen in copy_data()
old_children = sorted(r.id for r in old_record[field_name])
new_children = sorted(r.id for r in new_record[field_name])
for (old_child, new_child) in zip(old_children, new_children):
target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
# and for translatable fields we keep them for copy
elif field_def.get('translate'):
if field_name in self._columns:
trans_name = self._name + "," + field_name
target_id = new_id
source_id = old_id
elif field_name in self._inherit_fields:
trans_name = self._inherit_fields[field_name][0] + "," + field_name
# get the id of the parent record to set the translation
inherit_field_name = self._inherit_fields[field_name][1]
target_id = new_record[inherit_field_name].id
source_id = old_record[inherit_field_name].id
else:
continue
trans_ids = trans_obj.search(cr, uid, [
('name', '=', trans_name),
('res_id', '=', source_id)
])
user_lang = context.get('lang')
for record in trans_obj.read(cr, uid, trans_ids, context=context):
del record['id']
# remove source to avoid triggering _set_src
del record['source']
record.update({'res_id': target_id})
if user_lang and user_lang == record['lang']:
# 'source' to force the call to _set_src
# 'value' needed if value is changed in copy(), want to see the new_value
record['source'] = old_record[field_name]
record['value'] = new_record[field_name]
trans_obj.create(cr, uid, record, context=context)
@api.returns('self', lambda value: value.id)
def copy(self, cr, uid, id, default=None, context=None):
""" copy(default=None)
Duplicate record with given id updating it with default values
:param dict default: dictionary of field values to override in the
original values of the copied record, e.g: ``{'field_name': overriden_value, ...}``
:returns: new record
"""
if context is None:
context = {}
context = context.copy()
data = self.copy_data(cr, uid, id, default, context)
new_id = self.create(cr, uid, data, context)
self.copy_translations(cr, uid, id, new_id, context)
return new_id
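# Illustrative usage sketch for the old-style API; 'res.partner', the record id
# and the overridden field are placeholders, not taken from this module.
def _copy_usage_sketch(registry, cr, uid, partner_id):
    partners = registry['res.partner']
    # copy() chains copy_data() -> create() -> copy_translations(), as above
    return partners.copy(cr, uid, partner_id, default={'name': 'Copy'}, context={})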
@api.multi
@api.returns('self')
def exists(self):
""" exists() -> records
Returns the subset of records in `self` that exist, and marks deleted
records as such in cache. It can be used as a test on records::
if record.exists():
...
By convention, new records are returned as existing.
"""
ids = filter(None, self._ids) # ids to check in database
if not ids:
return self
query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
self._cr.execute(query, (ids,))
ids = ([r[0] for r in self._cr.fetchall()] + # ids in database
[id for id in self._ids if not id]) # new ids
existing = self.browse(ids)
if len(existing) < len(self):
# mark missing records in cache with a failed value
exc = MissingError(_("Record does not exist or has been deleted."))
(self - existing)._cache.update(FailedValue(exc))
return existing
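# Illustrative usage sketch for the new-style API: keep only the records still
# present in the database before acting on them.
def _exists_usage_sketch(records):
    alive = records.exists()      # subset still present (plus new records)
    missing = records - alive     # recordset difference, see __sub__ below
    return alive, missing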
def check_recursion(self, cr, uid, ids, context=None, parent=None):
_logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
self._name)
assert parent is None or parent in self._columns or parent in self._inherit_fields,\
"The 'parent' parameter passed to check_recursion() must be None or a valid field name"
return self._check_recursion(cr, uid, ids, context, parent)
def _check_recursion(self, cr, uid, ids, context=None, parent=None):
"""
Verifies that there is no loop in a hierarchical structure of records,
by following the parent relationship using the **parent** field until a loop
is detected or until a top-level record is found.
:param cr: database cursor
:param uid: current user id
:param ids: list of ids of records to check
:param parent: optional parent field name (default: ``self._parent_name = parent_id``)
:return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
"""
if not parent:
parent = self._parent_name
# must ignore 'active' flag, ir.rules, etc. => direct SQL query
query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
for id in ids:
current_id = id
while current_id is not None:
cr.execute(query, (current_id,))
result = cr.fetchone()
current_id = result[0] if result else None
if current_id == id:
return False
return True
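# Illustrative wiring sketch (kept as a comment; the model, message and field
# name are placeholders): _check_recursion() is typically registered as an
# old-style constraint on the parent field of a hierarchical model, e.g.
#     _constraints = [
#         (BaseModel._check_recursion,
#          'Error! You cannot create recursive records.', ['parent_id']),
#     ]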
def _check_m2m_recursion(self, cr, uid, ids, field_name):
"""
Verifies that there is no loop in a hierarchical structure of records,
by following the parent relationship using the **parent** field until a loop
is detected or until a top-level record is found.
:param cr: database cursor
:param uid: current user id
:param ids: list of ids of records to check
:param field_name: field to check
:return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
"""
field = self._all_columns.get(field_name)
field = field.column if field else None
if not field or field._type != 'many2many' or field._obj != self._name:
# field must be a many2many on itself
raise ValueError('invalid field_name: %r' % (field_name,))
query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
ids_parent = ids[:]
while ids_parent:
ids_parent2 = []
for i in range(0, len(ids_parent), cr.IN_MAX):
j = i + cr.IN_MAX
sub_ids_parent = ids_parent[i:j]
cr.execute(query, (tuple(sub_ids_parent),))
ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
ids_parent = ids_parent2
for i in ids_parent:
if i in ids:
return False
return True
def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
"""Retrieve the External ID(s) of any database record.
**Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
:return: map of ids to the list of their fully qualified External IDs
in the form ``module.key``, or an empty list when there's no External
ID for a record, e.g.::
{ 'id': ['module.ext_id', 'module.ext_id_bis'],
'id2': [] }
"""
ir_model_data = self.pool.get('ir.model.data')
data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
result = {}
for id in ids:
# can't use dict.fromkeys() as the list would be shared!
result[id] = []
for record in data_results:
result[record['res_id']].append('%(module)s.%(name)s' % record)
return result
def get_external_id(self, cr, uid, ids, *args, **kwargs):
"""Retrieve the External ID of any database record, if there
is one. This method works as a possible implementation
for a function field, to be able to add it to any
model object easily, referencing it as ``Model.get_external_id``.
When multiple External IDs exist for a record, only one
of them is returned (randomly).
:return: map of ids to their fully qualified XML ID,
defaulting to an empty string when there's none
(to be usable as a function field),
e.g.::
{ 'id': 'module.ext_id',
'id2': '' }
"""
results = self._get_xml_ids(cr, uid, ids)
for k, v in results.iteritems():
if results[k]:
results[k] = v[0]
else:
results[k] = ''
return results
# backwards compatibility
get_xml_id = get_external_id
_get_xml_ids = _get_external_ids
def print_report(self, cr, uid, ids, name, data, context=None):
"""
Render the report `name` for the given IDs. The report must be defined
for this model, not another.
"""
report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
assert self._name == report.table
return report.create(cr, uid, ids, data, context)
# Transience
@classmethod
def is_transient(cls):
""" Return whether the model is transient.
See :class:`TransientModel`.
"""
return cls._transient
def _transient_clean_rows_older_than(self, cr, seconds):
assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
# Never delete rows used in last 5 minutes
seconds = max(seconds, 300)
query = ("SELECT id FROM " + self._table + " WHERE"
" COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
" < ((now() at time zone 'UTC') - interval %s)")
cr.execute(query, ("%s seconds" % seconds,))
ids = [x[0] for x in cr.fetchall()]
self.unlink(cr, SUPERUSER_ID, ids)
def _transient_clean_old_rows(self, cr, max_count):
# Check how many rows we have in the table
cr.execute("SELECT count(*) AS row_count FROM " + self._table)
res = cr.fetchall()
if res[0][0] <= max_count:
return # max not reached, nothing to do
self._transient_clean_rows_older_than(cr, 300)
def _transient_vacuum(self, cr, uid, force=False):
"""Clean the transient records.
This unlinks old records from the transient model tables whenever the
"_transient_max_count" or "_max_age" conditions (if any) are reached.
Actual cleaning will happen only once every "_transient_check_time" calls.
This means this method can be called frequently (e.g. whenever
a new record is created).
Example with both max_hours and max_count active:
Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
- age based vacuum will leave the 22 rows created/changed in the last 12 minutes
- count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
would immediately cause the maximum to be reached again.
- the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
"""
assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
_transient_check_time = 20 # arbitrary limit on vacuum executions
self._transient_check_count += 1
if not force and (self._transient_check_count < _transient_check_time):
return True # no vacuum cleaning this time
self._transient_check_count = 0
# Age-based expiration
if self._transient_max_hours:
self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
# Count-based expiration
if self._transient_max_count:
self._transient_clean_old_rows(cr, self._transient_max_count)
return True
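# Illustrative sketch (comment only; the wizard model is a placeholder): the
# two limits consulted by _transient_vacuum() are plain class attributes on
# TransientModel subclasses, e.g.
#     class example_wizard(TransientModel):
#         _name = 'example.wizard'
#         _transient_max_count = 200   # count-based expiration threshold
#         _transient_max_hours = 1.0   # age-based expiration, in hours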
def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
""" Serializes one2many and many2many commands into record dictionaries
(as if all the records came from the database via a read()). This
method is aimed at onchange methods on one2many and many2many fields.
Because commands might be creation commands, not all record dicts
will contain an ``id`` field. Commands matching an existing record
will have an ``id``.
:param field_name: name of the one2many or many2many field matching the commands
:type field_name: str
:param commands: one2many or many2many commands to execute on ``field_name``
:type commands: list((int|False, int|False, dict|False))
:param fields: list of fields to read from the database, when applicable
:type fields: list(str)
:returns: records in a shape similar to that returned by ``read()``
(except records may be missing the ``id`` field if they don't exist in db)
:rtype: list(dict)
"""
result = [] # result (list of dict)
record_ids = [] # ids of records to read
updates = {} # {id: dict} of updates on particular records
for command in commands or []:
if not isinstance(command, (list, tuple)):
record_ids.append(command)
elif command[0] == 0:
result.append(command[2])
elif command[0] == 1:
record_ids.append(command[1])
updates.setdefault(command[1], {}).update(command[2])
elif command[0] in (2, 3):
record_ids = [id for id in record_ids if id != command[1]]
elif command[0] == 4:
record_ids.append(command[1])
elif command[0] == 5:
result, record_ids = [], []
elif command[0] == 6:
result, record_ids = [], list(command[2])
# read the records and apply the updates
other_model = self.pool[self._all_columns[field_name].column._obj]
for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
record.update(updates.get(record['id'], {}))
result.append(record)
return result
# for backward compatibility
resolve_o2m_commands_to_record_dicts = resolve_2many_commands
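# Illustrative sketch of the command list this method consumes and the shape of
# what it returns; the ids and values below are placeholders.
def _resolve_2many_commands_sketch():
    commands = [
        (0, 0, {'name': 'new line'}),  # create: the dict is returned as-is (no 'id')
        (1, 42, {'name': 'renamed'}),  # update: record 42 is read, then overridden
        (4, 43, False),                # link: record 43 is read unchanged
    ]
    # resolve_2many_commands(cr, uid, 'line_ids', commands, ['name']) would
    # return something shaped like this (creation dicts first, read records after):
    expected_shape = [
        {'name': 'new line'},
        {'id': 42, 'name': 'renamed'},
        {'id': 43, 'name': 'whatever is stored in the database'},
    ]
    return commands, expected_shape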
def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
"""
Performs a ``search()`` followed by a ``read()``.
:param cr: database cursor
:param user: current user id
:param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
:param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
:param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
:param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
:param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
:param context: context arguments.
:return: List of dictionaries containing the asked fields.
:rtype: List of dictionaries.
"""
record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
if not record_ids:
return []
if fields and fields == ['id']:
# shortcut read if we only want the ids
return [{'id': id} for id in record_ids]
# read() ignores active_test, but it would forward it to any downstream search call
# (e.g. for x2m or function fields), and this is not the desired behavior, the flag
# was presumably only meant for the main search().
# TODO: Move this to read() directly?
read_ctx = dict(context or {})
read_ctx.pop('active_test', None)
result = self.read(cr, uid, record_ids, fields, context=read_ctx)
if len(result) <= 1:
return result
# reorder read
index = dict((r['id'], r) for r in result)
return [index[x] for x in record_ids if x in index]
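# Illustrative usage sketch for the old-style API; the model, domain and field
# names are placeholders.
def _search_read_usage_sketch(registry, cr, uid):
    partners = registry['res.partner']
    # one call returning at most 10 dicts, ordered by name
    return partners.search_read(cr, uid, domain=[('active', '=', True)],
                                fields=['name', 'email'], limit=10, order='name')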
def _register_hook(self, cr):
""" stuff to do right after the registry is built """
pass
@classmethod
def _patch_method(cls, name, method):
""" Monkey-patch a method for all instances of this model. This replaces
the method called `name` by `method` in the given class.
The original method is then accessible via ``method.origin``, and it
can be restored with :meth:`~._revert_method`.
Example::
@api.multi
def do_write(self, values):
# do stuff, and call the original method
return do_write.origin(self, values)
# patch method write of model
model._patch_method('write', do_write)
# this will call do_write
records = model.search([...])
records.write(...)
# restore the original method
model._revert_method('write')
"""
origin = getattr(cls, name)
method.origin = origin
# propagate decorators from origin to method, and apply api decorator
wrapped = api.guess(api.propagate(origin, method))
wrapped.origin = origin
setattr(cls, name, wrapped)
@classmethod
def _revert_method(cls, name):
""" Revert the original method called `name` in the given class.
See :meth:`~._patch_method`.
"""
method = getattr(cls, name)
setattr(cls, name, method.origin)
#
# Instance creation
#
# An instance represents an ordered collection of records in a given
# execution environment. The instance object refers to the environment, and
# the records themselves are represented by their cache dictionary. The 'id'
# of each record is found in its corresponding cache dictionary.
#
# This design has the following advantages:
# - cache access is direct and thus fast;
# - one can consider records without an 'id' (see new records);
# - the global cache is only an index to "resolve" a record 'id'.
#
@classmethod
def _browse(cls, env, ids):
""" Create an instance attached to `env`; `ids` is a tuple of record
ids.
"""
records = object.__new__(cls)
records.env = env
records._ids = ids
env.prefetch[cls._name].update(ids)
return records
@api.v7
def browse(self, cr, uid, arg=None, context=None):
ids = _normalize_ids(arg)
#assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
return self._browse(Environment(cr, uid, context or {}), ids)
@api.v8
def browse(self, arg=None):
""" browse([ids]) -> records
Returns a recordset for the ids provided as parameter in the current
environment.
Can take no ids, a single id or a sequence of ids.
"""
ids = _normalize_ids(arg)
#assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
return self._browse(self.env, ids)
#
# Internal properties, for manipulating the instance's implementation
#
@property
def ids(self):
""" List of actual record ids in this recordset (ignores placeholder
ids for records to create)
"""
return filter(None, list(self._ids))
# backward-compatibility with former browse records
_cr = property(lambda self: self.env.cr)
_uid = property(lambda self: self.env.uid)
_context = property(lambda self: self.env.context)
#
# Conversion methods
#
def ensure_one(self):
""" Verifies that the current recorset holds a single record. Raises
an exception otherwise.
"""
if len(self) == 1:
return self
raise except_orm("ValueError", "Expected singleton: %s" % self)
def with_env(self, env):
""" Returns a new version of this recordset attached to the provided
environment
:type env: :class:`~openerp.api.Environment`
"""
return self._browse(env, self._ids)
def sudo(self, user=SUPERUSER_ID):
""" sudo([user=SUPERUSER])
Returns a new version of this recordset attached to the provided
user.
"""
return self.with_env(self.env(user=user))
def with_context(self, *args, **kwargs):
""" with_context([context][, **overrides]) -> records
Returns a new version of this recordset attached to an extended
context.
The extended context is either the provided ``context`` in which
``overrides`` are merged or the *current* context in which
``overrides`` are merged e.g.::
# current context is {'key1': True}
r2 = records.with_context({}, key2=True)
# -> r2._context is {'key2': True}
r2 = records.with_context(key2=True)
# -> r2._context is {'key1': True, 'key2': True}
"""
context = dict(args[0] if args else self._context, **kwargs)
return self.with_env(self.env(context=context))
def _convert_to_cache(self, values, update=False, validate=True):
""" Convert the `values` dictionary into cached values.
:param update: whether the conversion is made for updating `self`;
this is necessary for interpreting the commands of *2many fields
:param validate: whether values must be checked
"""
fields = self._fields
target = self if update else self.browse()
return {
name: fields[name].convert_to_cache(value, target, validate=validate)
for name, value in values.iteritems()
if name in fields
}
def _convert_to_write(self, values):
""" Convert the `values` dictionary into the format of :meth:`write`. """
fields = self._fields
result = {}
for name, value in values.iteritems():
if name in fields:
value = fields[name].convert_to_write(value)
if not isinstance(value, NewId):
result[name] = value
return result
#
# Record traversal and update
#
def _mapped_func(self, func):
""" Apply function `func` on all records in `self`, and return the
result as a list or a recordset (if `func` returns recordsets).
"""
vals = [func(rec) for rec in self]
val0 = vals[0] if vals else func(self)
if isinstance(val0, BaseModel):
return reduce(operator.or_, vals, val0)
return vals
def mapped(self, func):
""" Apply `func` on all records in `self`, and return the result as a
list or a recordset (if `func` returns recordsets). In the latter
case, the order of the returned recordset is arbitrary.
:param func: a function or a dot-separated sequence of field names
"""
if isinstance(func, basestring):
recs = self
for name in func.split('.'):
recs = recs._mapped_func(operator.itemgetter(name))
return recs
else:
return self._mapped_func(func)
def _mapped_cache(self, name_seq):
""" Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
field names, and only cached values are used.
"""
recs = self
for name in name_seq.split('.'):
field = recs._fields[name]
null = field.null(self.env)
recs = recs.mapped(lambda rec: rec._cache.get(field, null))
return recs
def filtered(self, func):
""" Select the records in `self` such that `func(rec)` is true, and
return them as a recordset.
:param func: a function or a dot-separated sequence of field names
"""
if isinstance(func, basestring):
name = func
func = lambda rec: filter(None, rec.mapped(name))
return self.browse([rec.id for rec in self if func(rec)])
def sorted(self, key=None):
""" Return the recordset `self` ordered by `key` """
if key is None:
return self.search([('id', 'in', self.ids)])
else:
return self.browse(map(int, sorted(self, key=key)))
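# Illustrative usage sketch chaining the traversal helpers defined above;
# 'orders', 'partner_id.name' and 'amount' are placeholder names.
def _traversal_usage_sketch(orders):
    names = orders.mapped('partner_id.name')            # list of related values
    big = orders.filtered(lambda o: o.amount > 100.0)   # sub-recordset
    by_amount = big.sorted(key=lambda o: o.amount)      # reordered by the key
    return names, by_amount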
def update(self, values):
""" Update record `self[0]` with `values`. """
for name, value in values.iteritems():
self[name] = value
#
# New records - represent records that do not exist in the database yet;
# they are used to compute default values and perform onchanges.
#
@api.model
def new(self, values={}):
""" new([values]) -> record
Return a new record instance attached to the current environment and
initialized with the provided ``values``. The record is *not* created
in database, it only exists in memory.
"""
record = self.browse([NewId()])
record._cache.update(record._convert_to_cache(values, update=True))
if record.env.in_onchange:
# The cache update does not set inverse fields, so do it manually.
# This is useful for computing a function field on secondary
# records, if that field depends on the main record.
for name in values:
field = self._fields.get(name)
if field:
for invf in field.inverse_fields:
invf._update(record[name], record)
return record
#
# Dirty flag, to mark records modified (in draft mode)
#
@property
def _dirty(self):
""" Return whether any record in `self` is dirty. """
dirty = self.env.dirty
return any(record in dirty for record in self)
@_dirty.setter
def _dirty(self, value):
""" Mark the records in `self` as dirty. """
if value:
map(self.env.dirty.add, self)
else:
map(self.env.dirty.discard, self)
#
# "Dunder" methods
#
def __nonzero__(self):
""" Test whether `self` is nonempty. """
return bool(getattr(self, '_ids', True))
def __len__(self):
""" Return the size of `self`. """
return len(self._ids)
def __iter__(self):
""" Return an iterator over `self`. """
for id in self._ids:
yield self._browse(self.env, (id,))
def __contains__(self, item):
""" Test whether `item` (record or field name) is an element of `self`.
In the first case, the test is fully equivalent to::
any(item == record for record in self)
"""
if isinstance(item, BaseModel) and self._name == item._name:
return len(item) == 1 and item.id in self._ids
elif isinstance(item, basestring):
return item in self._fields
else:
raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
def __add__(self, other):
""" Return the concatenation of two recordsets. """
if not isinstance(other, BaseModel) or self._name != other._name:
raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
return self.browse(self._ids + other._ids)
def __sub__(self, other):
""" Return the recordset of all the records in `self` that are not in `other`. """
if not isinstance(other, BaseModel) or self._name != other._name:
raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
other_ids = set(other._ids)
return self.browse([id for id in self._ids if id not in other_ids])
def __and__(self, other):
""" Return the intersection of two recordsets.
Note that recordset order is not preserved.
"""
if not isinstance(other, BaseModel) or self._name != other._name:
raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
return self.browse(set(self._ids) & set(other._ids))
def __or__(self, other):
""" Return the union of two recordsets.
Note that recordset order is not preserved.
"""
if not isinstance(other, BaseModel) or self._name != other._name:
raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
return self.browse(set(self._ids) | set(other._ids))
def __eq__(self, other):
""" Test whether two recordsets are equivalent (up to reordering). """
if not isinstance(other, BaseModel):
if other:
_logger.warning("Comparing apples and oranges: %s == %s", self, other)
return False
return self._name == other._name and set(self._ids) == set(other._ids)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
if not isinstance(other, BaseModel) or self._name != other._name:
raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
return set(self._ids) < set(other._ids)
def __le__(self, other):
if not isinstance(other, BaseModel) or self._name != other._name:
raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
return set(self._ids) <= set(other._ids)
def __gt__(self, other):
if not isinstance(other, BaseModel) or self._name != other._name:
raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
return set(self._ids) > set(other._ids)
def __ge__(self, other):
if not isinstance(other, BaseModel) or self._name != other._name:
raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
return set(self._ids) >= set(other._ids)
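# Illustrative sketch of the set-like operators defined above; 'a' and 'b' are
# any two recordsets of the same model.
def _recordset_algebra_sketch(a, b):
    both = a & b          # intersection (order not preserved)
    either = a | b        # union (order not preserved)
    only_a = a - b        # difference (order of 'a' preserved)
    contained = a <= b    # True when every record of 'a' is also in 'b'
    return both, either, only_a, contained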
def __int__(self):
return self.id
def __str__(self):
return "%s%s" % (self._name, getattr(self, '_ids', ""))
def __unicode__(self):
return unicode(str(self))
__repr__ = __str__
def __hash__(self):
if hasattr(self, '_ids'):
return hash((self._name, frozenset(self._ids)))
else:
return hash(self._name)
def __getitem__(self, key):
""" If `key` is an integer or a slice, return the corresponding record
selection as an instance (attached to `self.env`).
Otherwise read the field `key` of the first record in `self`.
Examples::
inst = model.search(dom) # inst is a recordset
r4 = inst[3] # fourth record in inst
rs = inst[10:20] # subset of inst
nm = rs['name'] # name of first record in inst
"""
if isinstance(key, basestring):
# important: one must call the field's getter
return self._fields[key].__get__(self, type(self))
elif isinstance(key, slice):
return self._browse(self.env, self._ids[key])
else:
return self._browse(self.env, (self._ids[key],))
def __setitem__(self, key, value):
""" Assign the field `key` to `value` in record `self`. """
# important: one must call the field's setter
return self._fields[key].__set__(self, value)
#
# Cache and recomputation management
#
@lazy_property
def _cache(self):
""" Return the cache of `self`, mapping field names to values. """
return RecordCache(self)
@api.model
def _in_cache_without(self, field):
""" Make sure `self` is present in cache (for prefetching), and return
the records of model `self` in cache that have no value for `field`
(:class:`Field` instance).
"""
env = self.env
prefetch_ids = env.prefetch[self._name]
prefetch_ids.update(self._ids)
ids = filter(None, prefetch_ids - set(env.cache[field]))
return self.browse(ids)
@api.model
def refresh(self):
""" Clear the records cache.
.. deprecated:: 8.0
The record cache is automatically invalidated.
"""
self.invalidate_cache()
@api.model
def invalidate_cache(self, fnames=None, ids=None):
""" Invalidate the record caches after some records have been modified.
If both `fnames` and `ids` are ``None``, the whole cache is cleared.
:param fnames: the list of modified fields, or ``None`` for all fields
:param ids: the list of modified record ids, or ``None`` for all
"""
if fnames is None:
if ids is None:
return self.env.invalidate_all()
fields = self._fields.values()
else:
fields = map(self._fields.__getitem__, fnames)
# invalidate fields and inverse fields, too
spec = [(f, ids) for f in fields] + \
[(invf, None) for f in fields for invf in f.inverse_fields]
self.env.invalidate(spec)
@api.multi
def modified(self, fnames):
""" Notify that fields have been modified on `self`. This invalidates
the cache, and prepares the recomputation of stored function fields
(new-style fields only).
:param fnames: iterable of field names that have been modified on
records `self`
"""
# each field knows what to invalidate and recompute
spec = []
for fname in fnames:
spec += self._fields[fname].modified(self)
cached_fields = {
field
for env in self.env.all
for field in env.cache
}
# invalidate non-stored fields.function which are currently cached
spec += [(f, None) for f in self.pool.pure_function_fields
if f in cached_fields]
self.env.invalidate(spec)
def _recompute_check(self, field):
""" If `field` must be recomputed on some record in `self`, return the
corresponding records that must be recomputed.
"""
return self.env.check_todo(field, self)
def _recompute_todo(self, field):
""" Mark `field` to be recomputed. """
self.env.add_todo(field, self)
def _recompute_done(self, field):
""" Mark `field` as recomputed. """
self.env.remove_todo(field, self)
@api.model
def recompute(self):
""" Recompute stored function fields. The fields and records to
recompute have been determined by method :meth:`modified`.
"""
while self.env.has_todo():
field, recs = self.env.get_todo()
# evaluate the fields to recompute, and save them to database
for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
try:
values = rec._convert_to_write({
f.name: rec[f.name] for f in field.computed_fields
})
rec1._write(values)
except MissingError:
pass
# mark the computed fields as done
map(recs._recompute_done, field.computed_fields)
#
# Generic onchange method
#
def _has_onchange(self, field, other_fields):
""" Return whether `field` should trigger an onchange event in the
presence of `other_fields`.
"""
# test whether self has an onchange method for field, or field is a
# dependency of any field in other_fields
return field.name in self._onchange_methods or \
any(dep in other_fields for dep in field.dependents)
@api.model
def _onchange_spec(self, view_info=None):
""" Return the onchange spec from a view description; if not given, the
result of ``self.fields_view_get()`` is used.
"""
result = {}
# for traversing the XML arch and populating result
def process(node, info, prefix):
if node.tag == 'field':
name = node.attrib['name']
names = "%s.%s" % (prefix, name) if prefix else name
if not result.get(names):
result[names] = node.attrib.get('on_change')
# traverse the subviews included in relational fields
for subinfo in info['fields'][name].get('views', {}).itervalues():
process(etree.fromstring(subinfo['arch']), subinfo, names)
else:
for child in node:
process(child, info, prefix)
if view_info is None:
view_info = self.fields_view_get()
process(etree.fromstring(view_info['arch']), view_info, '')
return result
def _onchange_eval(self, field_name, onchange, result):
""" Apply onchange method(s) for field `field_name` with spec `onchange`
on record `self`. Value assignments are applied on `self`, while
domain and warning messages are put in dictionary `result`.
"""
onchange = onchange.strip()
# onchange V8
if onchange in ("1", "true"):
for method in self._onchange_methods.get(field_name, ()):
method_res = method(self)
if not method_res:
continue
if 'domain' in method_res:
result.setdefault('domain', {}).update(method_res['domain'])
if 'warning' in method_res:
result['warning'] = method_res['warning']
return
# onchange V7
match = onchange_v7.match(onchange)
if match:
method, params = match.groups()
# evaluate params -> tuple
global_vars = {'context': self._context, 'uid': self._uid}
if self._context.get('field_parent'):
class RawRecord(object):
def __init__(self, record):
self._record = record
def __getattr__(self, name):
field = self._record._fields[name]
value = self._record[name]
return field.convert_to_onchange(value)
record = self[self._context['field_parent']]
global_vars['parent'] = RawRecord(record)
field_vars = {
key: self._fields[key].convert_to_onchange(val)
for key, val in self._cache.iteritems()
}
params = eval("[%s]" % params, global_vars, field_vars)
# call onchange method
args = (self._cr, self._uid, self._origin.ids) + tuple(params)
method_res = getattr(self._model, method)(*args)
if not isinstance(method_res, dict):
return
if 'value' in method_res:
method_res['value'].pop('id', None)
self.update(self._convert_to_cache(method_res['value'], validate=False))
if 'domain' in method_res:
result.setdefault('domain', {}).update(method_res['domain'])
if 'warning' in method_res:
result['warning'] = method_res['warning']
@api.multi
def onchange(self, values, field_name, field_onchange):
""" Perform an onchange on the given field.
:param values: dictionary mapping field names to values, giving the
current state of modification
:param field_name: name of the modified field_name
:param field_onchange: dictionary mapping field names to their
on_change attribute
"""
env = self.env
if field_name and field_name not in self._fields:
return {}
# determine subfields for field.convert_to_write() below
secondary = []
subfields = defaultdict(set)
for dotname in field_onchange:
if '.' in dotname:
secondary.append(dotname)
name, subname = dotname.split('.')
subfields[name].add(subname)
# create a new record with values, and attach `self` to it
with env.do_in_onchange():
record = self.new(values)
values = dict(record._cache)
# attach `self` with a different context (for cache consistency)
record._origin = self.with_context(__onchange=True)
# determine which fields should have an onchange triggered
todo = set([field_name]) if field_name else set(values)
done = set()
# dummy assignment: trigger invalidations on the record
for name in todo:
value = record[name]
field = self._fields[name]
if not field_name and field.type == 'many2one' and field.delegate and not value:
# do not nullify all fields of parent record for new records
continue
record[name] = value
result = {'value': {}}
while todo:
name = todo.pop()
if name in done:
continue
done.add(name)
with env.do_in_onchange():
# apply field-specific onchange methods
if field_onchange.get(name):
record._onchange_eval(name, field_onchange[name], result)
# force re-evaluation of function fields on secondary records
for field_seq in secondary:
record.mapped(field_seq)
# determine which fields have been modified
for name, oldval in values.iteritems():
field = self._fields[name]
newval = record[name]
if field.type in ('one2many', 'many2many'):
if newval != oldval or newval._dirty:
# put new value in result
result['value'][name] = field.convert_to_write(
newval, record._origin, subfields.get(name),
)
todo.add(name)
else:
# keep result: newval may have been dirty before
pass
else:
if newval != oldval:
# put new value in result
result['value'][name] = field.convert_to_write(
newval, record._origin, subfields.get(name),
)
todo.add(name)
else:
# clean up result to not return another value
result['value'].pop(name, None)
# At the moment, the client does not support updates on a *2many field
# while this one is modified by the user.
if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
result['value'].pop(field_name, None)
return result
class RecordCache(MutableMapping):
""" Implements a proxy dictionary to read/update the cache of a record.
Upon iteration, it looks like a dictionary mapping field names to
values. However, fields may be used as keys as well.
"""
def __init__(self, records):
self._recs = records
def contains(self, field):
""" Return whether `records[0]` has a value for `field` in cache. """
if isinstance(field, basestring):
field = self._recs._fields[field]
return self._recs.id in self._recs.env.cache[field]
def __contains__(self, field):
""" Return whether `records[0]` has a regular value for `field` in cache. """
if isinstance(field, basestring):
field = self._recs._fields[field]
dummy = SpecialValue(None)
value = self._recs.env.cache[field].get(self._recs.id, dummy)
return not isinstance(value, SpecialValue)
def __getitem__(self, field):
""" Return the cached value of `field` for `records[0]`. """
if isinstance(field, basestring):
field = self._recs._fields[field]
value = self._recs.env.cache[field][self._recs.id]
return value.get() if isinstance(value, SpecialValue) else value
def __setitem__(self, field, value):
""" Assign the cached value of `field` for all records in `records`. """
if isinstance(field, basestring):
field = self._recs._fields[field]
values = dict.fromkeys(self._recs._ids, value)
self._recs.env.cache[field].update(values)
def update(self, *args, **kwargs):
""" Update the cache of all records in `records`. If the argument is a
`SpecialValue`, update all fields (except "magic" columns).
"""
if args and isinstance(args[0], SpecialValue):
values = dict.fromkeys(self._recs._ids, args[0])
for name, field in self._recs._fields.iteritems():
if name != 'id':
self._recs.env.cache[field].update(values)
else:
return super(RecordCache, self).update(*args, **kwargs)
def __delitem__(self, field):
""" Remove the cached value of `field` for all `records`. """
if isinstance(field, basestring):
field = self._recs._fields[field]
field_cache = self._recs.env.cache[field]
for id in self._recs._ids:
field_cache.pop(id, None)
def __iter__(self):
""" Iterate over the field names with a regular value in cache. """
cache, id = self._recs.env.cache, self._recs.id
dummy = SpecialValue(None)
for name, field in self._recs._fields.iteritems():
if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
yield name
def __len__(self):
""" Return the number of fields with a regular value in cache. """
return sum(1 for name in self)
class Model(BaseModel):
"""Main super-class for regular database-persisted OpenERP models.
OpenERP models are created by inheriting from this class::
class user(Model):
...
The system will later instantiate the class once per database (on
which the class' module is installed).
"""
_auto = True
_register = False # not visible in ORM registry, meant to be python-inherited only
_transient = False # True in a TransientModel
class TransientModel(BaseModel):
"""Model super-class for transient records, meant to be temporarily
persisted, and regularly vacuum-cleaned.
A TransientModel has a simplified access rights management:
all users can create new records, and may only access the
records they created. The super-user has unrestricted access
to all TransientModel records.
"""
_auto = True
_register = False # not visible in ORM registry, meant to be python-inherited only
_transient = True
class AbstractModel(BaseModel):
"""Abstract Model super-class for creating an abstract class meant to be
inherited by regular models (Models or TransientModels) but not meant to
be usable on its own, or persisted.
Technical note: we don't want to make AbstractModel the super-class of
Model or BaseModel because it would not make sense to put the main
definition of persistence methods such as create() in it, and still we
should be able to override them within an AbstractModel.
"""
_auto = False # don't create any database backend for AbstractModels
_register = False # not visible in ORM registry, meant to be python-inherited only
_transient = False
def itemgetter_tuple(items):
""" Fixes itemgetter inconsistency (useful in some cases) of not returning
a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
"""
if len(items) == 0:
return lambda a: ()
if len(items) == 1:
return lambda gettable: (gettable[items[0]],)
return operator.itemgetter(*items)
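# Self-contained sketch of the inconsistency being smoothed over: plain
# operator.itemgetter() returns a bare value for a single key, while
# itemgetter_tuple() always returns a tuple.
def _itemgetter_tuple_demo():
    row = {'a': 1, 'b': 2}
    assert operator.itemgetter('a')(row) == 1
    assert itemgetter_tuple(['a'])(row) == (1,)
    assert itemgetter_tuple(['a', 'b'])(row) == (1, 2)
    assert itemgetter_tuple([])(row) == ()
    return True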
def convert_pgerror_23502(model, fields, info, e):
m = re.match(r'^null value in column "(?P<field>\w+)" violates '
r'not-null constraint\n',
str(e))
field_name = m and m.group('field')
if not m or field_name not in fields:
return {'message': unicode(e)}
message = _(u"Missing required value for the field '%s'.") % field_name
field = fields.get(field_name)
if field:
message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
return {
'message': message,
'field': field_name,
}
def convert_pgerror_23505(model, fields, info, e):
m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
str(e))
field_name = m and m.group('field')
if not m or field_name not in fields:
return {'message': unicode(e)}
message = _(u"The value for the field '%s' already exists.") % field_name
field = fields.get(field_name)
if field:
message = _(u"%s This might be '%s' in the current model, or a field "
u"of the same name in an o2m.") % (message, field['string'])
return {
'message': message,
'field': field_name,
}
PGERROR_TO_OE = defaultdict(
# shape of mapped converters
lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
# not_null_violation
'23502': convert_pgerror_23502,
# unique constraint error
'23505': convert_pgerror_23505,
})
def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
""" Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.
Various implementations were tested on the corpus of all browse() calls
performed during a full crawler run (after having installed all website_*
modules) and this one was the most efficient overall.
A possible bit of correctness was sacrificed by not doing any test on
Iterable and just assuming that any non-atomic type was an iterable of
some kind.
:rtype: tuple
"""
# much of the corpus is falsy objects (empty list, tuple or set, None)
if not arg:
return ()
# `type in set` is significantly faster (because more restrictive) than
# isinstance(arg, set) or issubclass(type, set); and for new-style classes
# obj.__class__ is equivalent to but faster than type(obj). Not relevant
# (and looks much worse) in most cases, but over millions of calls it
# does have a very minor effect.
if arg.__class__ in atoms:
return arg,
return tuple(arg)
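# Self-contained sketch of what the normalization yields for the common
# argument shapes handled above.
def _normalize_ids_demo():
    assert _normalize_ids(None) == ()          # falsy -> empty tuple
    assert _normalize_ids([]) == ()
    assert _normalize_ids(7) == (7,)           # atomic type -> 1-tuple
    assert _normalize_ids([7, 8]) == (7, 8)    # any other iterable -> tuple()
    assert _normalize_ids(iter([7, 8])) == (7, 8)
    return True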
# keep those imports here to avoid dependency cycle errors
from .osv import expression
from .fields import Field, SpecialValue, FailedValue
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
yippeecw/sfa
|
refs/heads/geni-v3
|
sfa/rspecs/elements/link.py
|
2
|
from sfa.rspecs.elements.element import Element
class Link(Element):
fields = [
'client_id',
'component_id',
'component_name',
'component_manager',
'type',
'interface1',
'interface2',
'capacity',
'latency',
'packet_loss',
'description',
]
|
zak-k/iris
|
refs/heads/master
|
lib/iris/tests/unit/fileformats/ff/test_FF2PP.py
|
11
|
# (C) British Crown Copyright 2013 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the :class:`iris.fileformat.ff.FF2PP` class."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import collections
import contextlib
import numpy as np
import warnings
from iris.exceptions import NotYetImplementedError
import iris.fileformats._ff as ff
import iris.fileformats.pp as pp
from iris.fileformats._ff import FF2PP
from iris.tests import mock
# PP-field: LBPACK N1 values.
_UNPACKED = 0
_WGDOS = 1
_CRAY = 2
# PP-field: LBUSER(1) values.
_REAL = 1
_INTEGER = 2
_DummyField = collections.namedtuple('_DummyField',
'lbext lblrec lbnrec raw_lbpack '
'lbuser boundary_packing')
_DummyFieldWithSize = collections.namedtuple('_DummyFieldWithSize',
'lbext lblrec lbnrec raw_lbpack '
'lbuser boundary_packing '
'lbnpt lbrow')
_DummyBoundaryPacking = collections.namedtuple('_DummyBoundaryPacking',
'x_halo y_halo rim_width')
class Test____iter__(tests.IrisTest):
@mock.patch('iris.fileformats._ff.FFHeader')
def test_call_structure(self, _FFHeader):
# Check that the iter method calls the two necessary utility
# functions
extract_result = mock.Mock()
interpret_patch = mock.patch('iris.fileformats.pp._interpret_fields',
autospec=True, return_value=iter([]))
extract_patch = mock.patch('iris.fileformats._ff.FF2PP._extract_field',
autospec=True, return_value=extract_result)
FF2PP_instance = ff.FF2PP('mock')
with interpret_patch as interpret, extract_patch as extract:
list(iter(FF2PP_instance))
interpret.assert_called_once_with(extract_result)
extract.assert_called_once_with(FF2PP_instance)
class Test__extract_field__LBC_format(tests.IrisTest):
@contextlib.contextmanager
def mock_for_extract_field(self, fields, x=None, y=None):
"""
A context manager to ensure FF2PP._extract_field gets a field
instance looking like the next one in the "fields" iterable from
the "make_pp_field" call.
"""
with mock.patch('iris.fileformats._ff.FFHeader'):
ff2pp = ff.FF2PP('mock')
ff2pp._ff_header.lookup_table = [0, 0, len(fields)]
# Fake level constants, with shape specifying just one model-level.
ff2pp._ff_header.level_dependent_constants = np.zeros(1)
grid = mock.Mock()
grid.vectors = mock.Mock(return_value=(x, y))
ff2pp._ff_header.grid = mock.Mock(return_value=grid)
if six.PY3:
open_func = 'builtins.open'
else:
open_func = '__builtin__.open'
with mock.patch('numpy.fromfile', return_value=[0]), \
mock.patch(open_func), \
mock.patch('struct.unpack_from', return_value=[4]), \
mock.patch('iris.fileformats.pp.make_pp_field',
side_effect=fields), \
mock.patch('iris.fileformats._ff.FF2PP._payload',
return_value=(0, 0)):
yield ff2pp
def _mock_lbc(self, **kwargs):
"""Return a Mock object representing an LBC field."""
# Default kwargs for a valid LBC field mapping just 1 model-level.
field_kwargs = dict(lbtim=0, lblev=7777, lbvc=0, lbhem=101)
# Apply provided args (replacing any defaults if specified).
field_kwargs.update(kwargs)
# Return a mock with just those properties pre-defined.
return mock.Mock(**field_kwargs)
def test_LBC_header(self):
bzx, bzy = -10, 15
# stash m01s00i001
lbuser = [None, None, 121416, 1, None, None, 1]
field = self._mock_lbc(lbegin=0,
lbrow=10, lbnpt=12,
bdx=1, bdy=1, bzx=bzx, bzy=bzy,
lbuser=lbuser)
with self.mock_for_extract_field([field]) as ff2pp:
ff2pp._ff_header.dataset_type = 5
result = list(ff2pp._extract_field())
self.assertEqual([field], result)
self.assertEqual(field.lbrow, 10 + 14 * 2)
self.assertEqual(field.lbnpt, 12 + 16 * 2)
name_mapping_dict = dict(rim_width=slice(4, 6), y_halo=slice(2, 4),
x_halo=slice(0, 2))
boundary_packing = pp.SplittableInt(121416, name_mapping_dict)
self.assertEqual(field.boundary_packing, boundary_packing)
self.assertEqual(field.bzy, bzy - boundary_packing.y_halo * field.bdy)
self.assertEqual(field.bzx, bzx - boundary_packing.x_halo * field.bdx)
def check_non_trivial_coordinate_warning(self, field):
field.lbegin = 0
field.lbrow = 10
field.lbnpt = 12
# stash m01s31i020
field.lbuser = [None, None, 121416, 20, None, None, 1]
orig_bdx, orig_bdy = field.bdx, field.bdy
x = np.array([1, 2, 6])
y = np.array([1, 2, 6])
with self.mock_for_extract_field([field], x, y) as ff2pp:
ff2pp._ff_header.dataset_type = 5
with mock.patch('warnings.warn') as warn:
list(ff2pp._extract_field())
# Check the values are unchanged.
self.assertEqual(field.bdy, orig_bdy)
self.assertEqual(field.bdx, orig_bdx)
# Check a warning was raised with a suitable message.
warn_error_tmplt = 'Unexpected warning message: {}'
non_trivial_coord_warn_msg = warn.call_args[0][0]
msg = ('The x or y coordinates of your boundary condition field may '
'be incorrect, not having taken into account the boundary '
'size.')
self.assertTrue(non_trivial_coord_warn_msg.startswith(msg),
warn_error_tmplt.format(non_trivial_coord_warn_msg))
def test_LBC_header_non_trivial_coords_both(self):
# Check a warning is raised when both bdx and bdy are bad.
field = self._mock_lbc(bdx=0, bdy=0, bzx=10, bzy=10)
self.check_non_trivial_coordinate_warning(field)
field.bdy = field.bdx = field.bmdi
self.check_non_trivial_coordinate_warning(field)
def test_LBC_header_non_trivial_coords_x(self):
# Check a warning is raised when bdx is bad.
field = self._mock_lbc(bdx=0, bdy=10, bzx=10, bzy=10)
self.check_non_trivial_coordinate_warning(field)
field.bdx = field.bmdi
self.check_non_trivial_coordinate_warning(field)
def test_LBC_header_non_trivial_coords_y(self):
# Check a warning is raised when bdy is bad.
field = self._mock_lbc(bdx=10, bdy=0, bzx=10, bzy=10)
self.check_non_trivial_coordinate_warning(field)
field.bdy = field.bmdi
self.check_non_trivial_coordinate_warning(field)
def test_negative_bdy(self):
# Check a warning is raised when bdy is negative;
# we don't yet know what "north" means in this case.
field = self._mock_lbc(bdx=10, bdy=-10, bzx=10, bzy=10, lbegin=0,
lbuser=[0, 0, 121416, 0, None, None, 0],
lbrow=10, lbnpt=12)
with self.mock_for_extract_field([field]) as ff2pp:
ff2pp._ff_header.dataset_type = 5
with mock.patch('warnings.warn') as warn:
list(ff2pp._extract_field())
msg = 'The LBC has a bdy less than 0.'
self.assertTrue(warn.call_args[0][0].startswith(msg),
'Northwards bdy warning not correctly raised.')
class Test__payload(tests.IrisTest):
def setUp(self):
# Create a mock LBC type PPField.
self.mock_field = mock.Mock()
field = self.mock_field
field.raw_lbpack = _UNPACKED
field.lbuser = [_REAL]
field.lblrec = 777
field.lbext = 222
field.lbnrec = 50
field.boundary_packing = None
def _test(self, mock_field, expected_depth, expected_dtype,
word_depth=None):
with mock.patch('iris.fileformats._ff.FFHeader', return_value=None):
kwargs = {}
if word_depth is not None:
kwargs['word_depth'] = word_depth
ff2pp = FF2PP('dummy_filename', **kwargs)
data_depth, data_dtype = ff2pp._payload(mock_field)
self.assertEqual(data_depth, expected_depth)
self.assertEqual(data_dtype, expected_dtype)
def test_unpacked_real(self):
mock_field = _DummyField(lbext=0, lblrec=100, lbnrec=-1,
raw_lbpack=_UNPACKED,
lbuser=[_REAL], boundary_packing=None)
self._test(mock_field, 800, '>f8')
def test_unpacked_real_ext(self):
mock_field = _DummyField(lbext=5, lblrec=100, lbnrec=-1,
raw_lbpack=_UNPACKED,
lbuser=[_REAL], boundary_packing=None)
self._test(mock_field, 760, '>f8')
def test_unpacked_integer(self):
mock_field = _DummyField(lbext=0, lblrec=200, lbnrec=-1,
raw_lbpack=_UNPACKED,
lbuser=[_INTEGER], boundary_packing=None)
self._test(mock_field, 1600, '>i8')
def test_unpacked_integer_ext(self):
mock_field = _DummyField(lbext=10, lblrec=200, lbnrec=-1,
raw_lbpack=_UNPACKED,
lbuser=[_INTEGER], boundary_packing=None)
self._test(mock_field, 1520, '>i8')
def test_unpacked_real_ext_different_word_depth(self):
mock_field = _DummyField(lbext=5, lblrec=100, lbnrec=-1,
raw_lbpack=_UNPACKED,
lbuser=[_REAL], boundary_packing=None)
self._test(mock_field, 380, '>f4', word_depth=4)
def test_wgdos_real(self):
mock_field = _DummyField(lbext=0, lblrec=-1, lbnrec=100,
raw_lbpack=_WGDOS,
lbuser=[_REAL], boundary_packing=None)
self._test(mock_field, 800, '>f4')
def test_wgdos_real_ext(self):
mock_field = _DummyField(lbext=5, lblrec=-1, lbnrec=100,
raw_lbpack=_WGDOS,
lbuser=[_REAL], boundary_packing=None)
self._test(mock_field, 800, '>f4')
def test_wgdos_integer(self):
mock_field = _DummyField(lbext=0, lblrec=-1, lbnrec=200,
raw_lbpack=_WGDOS,
lbuser=[_INTEGER], boundary_packing=None)
self._test(mock_field, 1600, '>i4')
def test_wgdos_integer_ext(self):
mock_field = _DummyField(lbext=10, lblrec=-1, lbnrec=200,
raw_lbpack=_WGDOS,
lbuser=[_INTEGER], boundary_packing=None)
self._test(mock_field, 1600, '>i4')
def test_cray_real(self):
mock_field = _DummyField(lbext=0, lblrec=100, lbnrec=-1,
raw_lbpack=_CRAY,
lbuser=[_REAL], boundary_packing=None)
self._test(mock_field, 400, '>f4')
def test_cray_real_ext(self):
mock_field = _DummyField(lbext=5, lblrec=100, lbnrec=-1,
raw_lbpack=_CRAY,
lbuser=[_REAL], boundary_packing=None)
self._test(mock_field, 380, '>f4')
def test_cray_integer(self):
mock_field = _DummyField(lbext=0, lblrec=200, lbnrec=-1,
raw_lbpack=_CRAY,
lbuser=[_INTEGER], boundary_packing=None)
self._test(mock_field, 800, '>i4')
def test_cray_integer_ext(self):
mock_field = _DummyField(lbext=10, lblrec=200, lbnrec=-1,
raw_lbpack=_CRAY,
lbuser=[_INTEGER], boundary_packing=None)
self._test(mock_field, 760, '>i4')
def test_lbpack_unsupported(self):
mock_field = _DummyField(lbext=10, lblrec=200, lbnrec=-1,
raw_lbpack=1239,
lbuser=[_INTEGER], boundary_packing=None)
with self.assertRaisesRegexp(
NotYetImplementedError,
'PP fields with LBPACK of 1239 are not supported.'):
self._test(mock_field, None, None)
def test_lbc_unpacked(self):
boundary_packing = _DummyBoundaryPacking(x_halo=11, y_halo=7,
rim_width=3)
mock_field = _DummyFieldWithSize(lbext=10, lblrec=200, lbnrec=-1,
raw_lbpack=_UNPACKED,
lbuser=[_REAL],
boundary_packing=boundary_packing,
lbnpt=47, lbrow=34)
self._test(mock_field, ((47 * 34) - (19 * 14)) * 8, '>f8')
def test_lbc_wgdos_unsupported(self):
mock_field = _DummyField(lbext=5, lblrec=-1, lbnrec=100,
raw_lbpack=_WGDOS,
lbuser=[_REAL],
# Anything not None will do here.
boundary_packing=0)
with self.assertRaisesRegexp(ValueError,
'packed LBC data is not supported'):
self._test(mock_field, None, None)
def test_lbc_cray(self):
boundary_packing = _DummyBoundaryPacking(x_halo=11, y_halo=7,
rim_width=3)
mock_field = _DummyFieldWithSize(lbext=10, lblrec=200, lbnrec=-1,
raw_lbpack=_CRAY,
lbuser=[_REAL],
boundary_packing=boundary_packing,
lbnpt=47, lbrow=34)
self._test(mock_field, ((47 * 34) - (19 * 14)) * 4, '>f4')
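# For reference on the expected sizes above: with x_halo=11, y_halo=7 and
# rim_width=3, the interior excluded from an LBC record spans
# (47 - 2 * (11 + 3)) x (34 - 2 * (7 + 3)) = 19 x 14 points, so the payload is
# (47 * 34 - 19 * 14) boundary points times the word size: 8 bytes in the
# unpacked case, 4 bytes in the Cray-packed case.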
class Test__det_border(tests.IrisTest):
def setUp(self):
_FFH_patch = mock.patch('iris.fileformats._ff.FFHeader')
_FFH_patch.start()
self.addCleanup(_FFH_patch.stop)
def test_unequal_spacing_eitherside(self):
# Ensure that we do not interpret the case where the spacing on the
# lower edge differs from that on the upper edge.
ff2pp = FF2PP('dummy')
field_x = np.array([1, 2, 10])
msg = ('The x or y coordinates of your boundary condition field may '
'be incorrect, not having taken into account the boundary '
'size.')
with mock.patch('warnings.warn') as warn:
result = ff2pp._det_border(field_x, None)
warn.assert_called_with(msg)
self.assertIs(result, field_x)
def test_increasing_field_values(self):
# Field whose values are increasing.
ff2pp = FF2PP('dummy')
field_x = np.array([1, 2, 3])
com = np.array([0, 1, 2, 3, 4])
result = ff2pp._det_border(field_x, 1)
self.assertArrayEqual(result, com)
def test_decreasing_field_values(self):
# Field whose values are decreasing.
ff2pp = FF2PP('dummy')
field_x = np.array([3, 2, 1])
com = np.array([4, 3, 2, 1, 0])
result = ff2pp._det_border(field_x, 1)
self.assertArrayEqual(result, com)
class Test__adjust_field_for_lbc(tests.IrisTest):
def setUp(self):
# Patch FFHeader to produce a mock header instead of opening a file.
self.mock_ff_header = mock.Mock()
self.mock_ff_header.dataset_type = 5
self.mock_ff = self.patch('iris.fileformats._ff.FFHeader',
return_value=self.mock_ff_header)
# Create a mock LBC type PPField.
self.mock_field = mock.Mock()
field = self.mock_field
field.lbtim = 0
field.lblev = 7777
field.lbvc = 0
field.lbnpt = 1001
field.lbrow = 2001
field.lbuser = (None, None, 80504)
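        # lbuser[2] = 80504 encodes the boundary packing expected by the
        # tests below: rim_width=8, y_halo=5, x_halo=4 (see test__basic).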
field.lbpack = pp.SplittableInt(0)
field.boundary_packing = None
field.bdx = 1.0
field.bzx = 0.0
field.bdy = 1.0
field.bzy = 0.0
def test__basic(self):
ff2pp = FF2PP('dummy_filename')
field = self.mock_field
ff2pp._adjust_field_for_lbc(field)
self.assertEqual(field.lbtim, 11)
self.assertEqual(field.lbvc, 65)
self.assertEqual(field.boundary_packing.rim_width, 8)
self.assertEqual(field.boundary_packing.y_halo, 5)
self.assertEqual(field.boundary_packing.x_halo, 4)
self.assertEqual(field.lbnpt, 1009)
self.assertEqual(field.lbrow, 2011)
def test__bad_lbtim(self):
self.mock_field.lbtim = 717
ff2pp = FF2PP('dummy_filename')
with self.assertRaisesRegexp(ValueError,
'LBTIM of 717, expected only 0 or 11'):
ff2pp._adjust_field_for_lbc(self.mock_field)
def test__bad_lbvc(self):
self.mock_field.lbvc = 312
ff2pp = FF2PP('dummy_filename')
with self.assertRaisesRegexp(ValueError,
'LBVC of 312, expected only 0 or 65'):
ff2pp._adjust_field_for_lbc(self.mock_field)
class Test__fields_over_all_levels(tests.IrisTest):
def setUp(self):
# Patch FFHeader to produce a mock header instead of opening a file.
self.mock_ff_header = mock.Mock()
self.mock_ff_header.dataset_type = 5
# Fake the level constants to look like 3 model levels.
self.n_all_levels = 3
self.mock_ff_header.level_dependent_constants = \
np.zeros((self.n_all_levels))
self.mock_ff = self.patch('iris.fileformats._ff.FFHeader',
return_value=self.mock_ff_header)
# Create a simple mock for a test field.
self.mock_field = mock.Mock()
field = self.mock_field
field.lbhem = 103
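        # lbhem values of 101 or more mark an LBC field spanning all model
        # levels; the tests below expect 103 to yield the 3 levels faked in
        # level_dependent_constants.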
self.original_lblev = mock.sentinel.untouched_lbev
field.lblev = self.original_lblev
def _check_expected_levels(self, results, n_levels):
        if n_levels == 0:
self.assertEqual(len(results), 1)
self.assertEqual(results[0].lblev, self.original_lblev)
else:
self.assertEqual(len(results), n_levels)
self.assertEqual([fld.lblev for fld in results],
list(range(n_levels)))
def test__is_lbc(self):
ff2pp = FF2PP('dummy_filename')
field = self.mock_field
results = list(ff2pp._fields_over_all_levels(field))
self._check_expected_levels(results, 3)
def test__lbhem_too_small(self):
ff2pp = FF2PP('dummy_filename')
field = self.mock_field
field.lbhem = 100
with self.assertRaisesRegexp(
ValueError,
'hence >= 101'):
_ = list(ff2pp._fields_over_all_levels(field))
def test__lbhem_too_large(self):
ff2pp = FF2PP('dummy_filename')
field = self.mock_field
field.lbhem = 105
with self.assertRaisesRegexp(
ValueError,
'more than the total number of levels in the file = 3'):
_ = list(ff2pp._fields_over_all_levels(field))
if __name__ == "__main__":
tests.main()
|
aristotle-tek/cuny-bdif
|
refs/heads/master
|
AWS/ec2/lib/boto-2.34.0/boto/s3/deletemarker.py
|
170
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.s3.user import User
class DeleteMarker(object):
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
self.version_id = None
self.is_latest = False
self.last_modified = None
self.owner = None
def startElement(self, name, attrs, connection):
if name == 'Owner':
self.owner = User(self)
return self.owner
else:
return None
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
else:
setattr(self, name, value)
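
# Minimal usage sketch: DeleteMarker objects are normally yielded while
# iterating a versioned bucket listing, not constructed directly.  The
# bucket name below is hypothetical.
#
#     import boto
#     conn = boto.connect_s3()
#     bucket = conn.get_bucket('my-versioned-bucket')
#     for item in bucket.list_versions():
#         if isinstance(item, DeleteMarker):
#             print(item.name, item.version_id, item.is_latest)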
|
mforets/polyhedron_tools
|
refs/heads/master
|
docs/source/conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# sample documentation build configuration file,
# inspired by the slabbe configuration file created by sphinx-quickstart
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# General information about the project.
project = u"Tools for working with polytopes, with a focus on computational geometry"
copyright = u'2017, Marcelo Forets'
package_name = 'polyhedron_tools'
package_folder = "../../polyhedron_tools"
authors = u"Marcelo Forets"
import sys
import os
from sage.env import SAGE_DOC_SRC, SAGE_DOC, SAGE_SRC
try:
import sage.all
except ImportError:
    raise RuntimeError("to build the documentation you need to be inside a Sage shell (run the command 'sage -sh' in a shell first)")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath(package_folder))
sys.path.append(os.path.join(SAGE_SRC, "sage_setup", "docbuild", "ext"))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
#'sphinx.ext.autodoc',
'sage_autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.extlinks',
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
templates_path = [os.path.join(SAGE_DOC_SRC, 'common', 'templates'), '_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = open("../../VERSION").read().strip()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'math'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
pythonversion = sys.version.split(' ')[0]
# Python and Sage trac ticket shortcuts. For example, :trac:`7549` .
extlinks = {
'python': ('https://docs.python.org/release/'+pythonversion+'/%s', ''),
'trac': ('http://trac.sagemath.org/%s', 'trac ticket #'),
'wikipedia': ('https://en.wikipedia.org/wiki/%s', 'Wikipedia article '),
'arxiv': ('http://arxiv.org/abs/%s', 'Arxiv '),
'oeis': ('https://oeis.org/%s', 'OEIS sequence '),
'doi': ('https://dx.doi.org/%s', 'doi:'),
'mathscinet': ('http://www.ams.org/mathscinet-getitem?mr=%s', 'MathSciNet ')
}
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
#html_theme_path = [os.path.join(SAGE_DOC_SRC, 'common', 'themes')]
html_theme_path = [os.path.join(SAGE_DOC_SRC, 'common', 'themes', 'sage')]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = package_name + "doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', package_name + '.tex', u'Documentation of ' + unicode(package_name),
authors, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', package_name, unicode(package_name) + u" documentation",
[authors], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', package_name, unicode(package_name) + u" documentation",
authors, package_name, project,
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options copied from Sagemath conf.py file -------------------------------
# We use MathJax to build the documentation unless the environment
# variable SAGE_DOC_MATHJAX is set to "no" or "False". (Note that if
# the user does not set this variable, then the script sage-env sets
# it to "True".)
if (os.environ.get('SAGE_DOC_MATHJAX', 'no') != 'no'
and os.environ.get('SAGE_DOC_MATHJAX', 'no') != 'False'):
extensions.append('sphinx.ext.mathjax')
mathjax_path = 'MathJax.js?config=TeX-AMS_HTML-full,../mathjax_sage.js'
from sage.misc.latex_macros import sage_mathjax_macros
# this is broken for now
# html_theme_options['mathjax_macros'] = sage_mathjax_macros()
from pkg_resources import Requirement, working_set
sagenb_path = working_set.find(Requirement.parse('sagenb')).location
mathjax_relative = os.path.join('sagenb','data','mathjax')
# It would be really nice if sphinx would copy the entire mathjax directory,
# (so we could have a _static/mathjax directory), rather than the contents of the directory
mathjax_static = os.path.join(sagenb_path, mathjax_relative)
html_static_path.append(mathjax_static)
exclude_patterns=['**/'+os.path.join(mathjax_relative, i) for i in ('docs', 'README*', 'test',
'unpacked', 'LICENSE')]
else:
extensions.append('sphinx.ext.pngmath')
# This is to make the verbatim font smaller;
# Verbatim environment is not breaking long lines
from sphinx.highlighting import PygmentsBridge
from pygments.formatters.latex import LatexFormatter
class CustomLatexFormatter(LatexFormatter):
def __init__(self, **options):
super(CustomLatexFormatter, self).__init__(**options)
self.verboptions = r"formatcom=\footnotesize"
PygmentsBridge.latex_formatter = CustomLatexFormatter
latex_elements['preamble'] += r'''
% One-column index
\makeatletter
\renewenvironment{theindex}{
\chapter*{\indexname}
\markboth{\MakeUppercase\indexname}{\MakeUppercase\indexname}
\setlength{\parskip}{0.1em}
\relax
\let\item\@idxitem
}{}
\makeatother
\renewcommand{\ttdefault}{txtt}
'''
|