| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable) |
|---|---|---|---|---|
LucasRoesler/django-encrypted-json
|
refs/heads/master
|
tests/test_app/migrations/0003_testmodel_partial_encrypt_w_default.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_encrypted_json.fields
class Migration(migrations.Migration):
dependencies = [
('test_app', '0002_testmodel_partial_encrypt'),
]
operations = [
migrations.AddField(
model_name='testmodel',
name='partial_encrypt_w_default',
field=django_encrypted_json.fields.EncryptedValueJsonField(default=[], blank=True),
),
]
|
kimimj/scrapy
|
refs/heads/master
|
scrapy/contrib/debug.py
|
144
|
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
warnings.warn("Module `scrapy.contrib.debug` is deprecated, "
"use `scrapy.extensions.debug` instead",
ScrapyDeprecationWarning, stacklevel=2)
from scrapy.extensions.debug import *
|
iuliat/nova
|
refs/heads/master
|
nova/tests/unit/network/test_api.py
|
37
|
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for network API."""
import contextlib
import itertools
import uuid
import mock
from mox3 import mox
from nova.compute import flavors
from nova import context
from nova import exception
from nova import network
from nova.network import api
from nova.network import base_api
from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import fields
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_fixed_ip
from nova.tests.unit.objects import test_virtual_interface
FAKE_UUID = 'a47ae74e-ab08-547f-9eee-ffd23fc46c16'
fake_info_cache = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'instance_uuid': 'fake-uuid',
'network_info': '[]',
}
class NetworkPolicyTestCase(test.TestCase):
def setUp(self):
super(NetworkPolicyTestCase, self).setUp()
policy.reset()
policy.init()
self.context = context.get_admin_context()
def tearDown(self):
super(NetworkPolicyTestCase, self).tearDown()
policy.reset()
def test_check_policy(self):
self.mox.StubOutWithMock(policy, 'enforce')
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
}
policy.enforce(self.context, 'network:get_all', target)
self.mox.ReplayAll()
api.check_policy(self.context, 'get_all')
def test_skip_policy(self):
policy.reset()
rules = {'network:get_all': common_policy.parse_rule('!')}
policy.set_rules(common_policy.Rules(rules))
api = network.API()
self.assertRaises(exception.PolicyNotAuthorized,
api.get_all, self.context)
api = network.API(skip_policy_check=True)
api.get_all(self.context)
class ApiTestCase(test.TestCase):
def setUp(self):
super(ApiTestCase, self).setUp()
self.network_api = network.API()
self.context = context.RequestContext('fake-user',
'fake-project')
@mock.patch('nova.objects.NetworkList.get_all')
def test_get_all(self, mock_get_all):
mock_get_all.return_value = mock.sentinel.get_all
self.assertEqual(mock.sentinel.get_all,
self.network_api.get_all(self.context))
mock_get_all.assert_called_once_with(self.context,
project_only=True)
@mock.patch('nova.objects.NetworkList.get_all')
def test_get_all_liberal(self, mock_get_all):
self.flags(network_manager='nova.network.manager.FlatDHCPManager')
mock_get_all.return_value = mock.sentinel.get_all
self.assertEqual(mock.sentinel.get_all,
self.network_api.get_all(self.context))
mock_get_all.assert_called_once_with(self.context,
project_only="allow_none")
@mock.patch('nova.objects.NetworkList.get_all')
def test_get_all_no_networks(self, mock_get_all):
mock_get_all.side_effect = exception.NoNetworksFound
self.assertEqual([], self.network_api.get_all(self.context))
mock_get_all.assert_called_once_with(self.context,
project_only=True)
@mock.patch('nova.objects.Network.get_by_uuid')
def test_get(self, mock_get):
mock_get.return_value = mock.sentinel.get_by_uuid
self.assertEqual(mock.sentinel.get_by_uuid,
self.network_api.get(self.context, 'fake-uuid'))
@mock.patch('nova.objects.Network.get_by_id')
@mock.patch('nova.db.virtual_interface_get_by_instance')
def test_get_vifs_by_instance(self, mock_get_by_instance,
mock_get_by_id):
mock_get_by_instance.return_value = [
dict(test_virtual_interface.fake_vif,
network_id=123)]
mock_get_by_id.return_value = objects.Network()
mock_get_by_id.return_value.uuid = mock.sentinel.network_uuid
instance = objects.Instance(uuid=mock.sentinel.inst_uuid)
vifs = self.network_api.get_vifs_by_instance(self.context,
instance)
self.assertEqual(1, len(vifs))
self.assertEqual(123, vifs[0].network_id)
self.assertEqual(str(mock.sentinel.network_uuid), vifs[0].net_uuid)
mock_get_by_instance.assert_called_once_with(
self.context, str(mock.sentinel.inst_uuid), use_slave=False)
mock_get_by_id.assert_called_once_with(self.context, 123,
project_only='allow_none')
@mock.patch('nova.objects.Network.get_by_id')
@mock.patch('nova.db.virtual_interface_get_by_address')
def test_get_vif_by_mac_address(self, mock_get_by_address,
mock_get_by_id):
mock_get_by_address.return_value = dict(
test_virtual_interface.fake_vif, network_id=123)
mock_get_by_id.return_value = objects.Network(
uuid=mock.sentinel.network_uuid)
vif = self.network_api.get_vif_by_mac_address(self.context,
mock.sentinel.mac)
self.assertEqual(123, vif.network_id)
self.assertEqual(str(mock.sentinel.network_uuid), vif.net_uuid)
mock_get_by_address.assert_called_once_with(self.context,
mock.sentinel.mac)
mock_get_by_id.assert_called_once_with(self.context, 123,
project_only='allow_none')
def test_allocate_for_instance_handles_macs_passed(self):
# If a macs argument is supplied to the 'nova-network' API, it is just
# ignored. This test checks that the call down to the rpcapi layer
# doesn't pass macs down: nova-network doesn't support hypervisor
# mac address limits (today anyhow).
macs = set(['ab:cd:ef:01:23:34'])
self.mox.StubOutWithMock(
self.network_api.network_rpcapi, "allocate_for_instance")
kwargs = dict(zip(['host', 'instance_id', 'project_id',
'requested_networks', 'rxtx_factor', 'vpn', 'macs',
'dhcp_options'],
itertools.repeat(mox.IgnoreArg())))
self.network_api.network_rpcapi.allocate_for_instance(
mox.IgnoreArg(), **kwargs).AndReturn([])
self.mox.ReplayAll()
flavor = flavors.get_default_flavor()
flavor['rxtx_factor'] = 0
instance = objects.Instance(id=1, uuid='uuid', project_id='project_id',
host='host', system_metadata={},
flavor=flavor)
self.network_api.allocate_for_instance(
self.context, instance, 'vpn', 'requested_networks', macs=macs)
def _do_test_associate_floating_ip(self, orig_instance_uuid):
"""Test post-association logic."""
new_instance = objects.Instance(uuid=FAKE_UUID)
def fake_associate(*args, **kwargs):
return orig_instance_uuid
self.stubs.Set(floating_ips.FloatingIP, 'associate_floating_ip',
fake_associate)
def fake_instance_get_by_uuid(context, instance_uuid,
columns_to_join=None,
use_slave=None):
return fake_instance.fake_db_instance(uuid=instance_uuid)
self.stubs.Set(self.network_api.db, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
def fake_get_nw_info(ctxt, instance):
class FakeNWInfo(object):
def json(self):
pass
return FakeNWInfo()
self.stubs.Set(self.network_api, '_get_instance_nw_info',
fake_get_nw_info)
if orig_instance_uuid:
expected_updated_instances = [new_instance.uuid,
orig_instance_uuid]
else:
expected_updated_instances = [new_instance.uuid]
def fake_instance_info_cache_update(context, instance_uuid, cache):
self.assertEqual(instance_uuid,
expected_updated_instances.pop())
return fake_info_cache
self.stubs.Set(self.network_api.db, 'instance_info_cache_update',
fake_instance_info_cache_update)
def fake_update_instance_cache_with_nw_info(api, context, instance,
nw_info=None,
update_cells=True):
return
self.stubs.Set(base_api, "update_instance_cache_with_nw_info",
fake_update_instance_cache_with_nw_info)
self.network_api.associate_floating_ip(self.context,
new_instance,
'172.24.4.225',
'10.0.0.2')
def test_associate_preassociated_floating_ip(self):
self._do_test_associate_floating_ip('orig-uuid')
def test_associate_unassociated_floating_ip(self):
self._do_test_associate_floating_ip(None)
def test_get_floating_ip_invalid_id(self):
self.assertRaises(exception.InvalidID,
self.network_api.get_floating_ip,
self.context, '123zzz')
@mock.patch('nova.objects.FloatingIP.get_by_id')
def test_get_floating_ip(self, mock_get):
floating = mock.sentinel.floating
mock_get.return_value = floating
self.assertEqual(floating,
self.network_api.get_floating_ip(self.context, 123))
mock_get.assert_called_once_with(self.context, 123)
@mock.patch('nova.objects.FloatingIP.get_pool_names')
def test_get_floating_ip_pools(self, mock_get):
pools = ['foo', 'bar']
mock_get.return_value = pools
self.assertEqual(pools,
self.network_api.get_floating_ip_pools(
self.context))
@mock.patch('nova.objects.FloatingIP.get_by_address')
def test_get_floating_ip_by_address(self, mock_get):
floating = mock.sentinel.floating
mock_get.return_value = floating
self.assertEqual(floating,
self.network_api.get_floating_ip_by_address(
self.context, mock.sentinel.address))
mock_get.assert_called_once_with(self.context,
mock.sentinel.address)
@mock.patch('nova.objects.FloatingIPList.get_by_project')
def test_get_floating_ips_by_project(self, mock_get):
floatings = mock.sentinel.floating_ips
mock_get.return_value = floatings
self.assertEqual(floatings,
self.network_api.get_floating_ips_by_project(
self.context))
mock_get.assert_called_once_with(self.context,
self.context.project_id)
def _stub_migrate_instance_calls(self, method, multi_host, info):
fake_flavor = flavors.get_default_flavor()
fake_flavor['rxtx_factor'] = 1.21
fake_instance = objects.Instance(
uuid=uuid.uuid4().hex,
project_id='fake_project_id',
instance_type_id=fake_flavor['id'],
flavor=fake_flavor,
system_metadata={})
fake_migration = {'source_compute': 'fake_compute_source',
'dest_compute': 'fake_compute_dest'}
def fake_mig_inst_method(*args, **kwargs):
info['kwargs'] = kwargs
def fake_get_multi_addresses(*args, **kwargs):
return multi_host, ['fake_float1', 'fake_float2']
self.stubs.Set(network_rpcapi.NetworkAPI, method,
fake_mig_inst_method)
self.stubs.Set(self.network_api, '_get_multi_addresses',
fake_get_multi_addresses)
expected = {'instance_uuid': fake_instance.uuid,
'source_compute': 'fake_compute_source',
'dest_compute': 'fake_compute_dest',
'rxtx_factor': 1.21,
'project_id': 'fake_project_id',
'floating_addresses': None}
if multi_host:
expected['floating_addresses'] = ['fake_float1', 'fake_float2']
return fake_instance, fake_migration, expected
def test_migrate_instance_start_with_multi_host(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_start', True, info)
expected['host'] = 'fake_compute_source'
self.network_api.migrate_instance_start(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_migrate_instance_start_without_multi_host(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_start', False, info)
self.network_api.migrate_instance_start(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_migrate_instance_finish_with_multi_host(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_finish', True, info)
expected['host'] = 'fake_compute_dest'
self.network_api.migrate_instance_finish(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_migrate_instance_finish_without_multi_host(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_finish', False, info)
self.network_api.migrate_instance_finish(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_is_multi_host_instance_has_no_fixed_ip(self):
def fake_fixed_ip_get_by_instance(ctxt, uuid):
raise exception.FixedIpNotFoundForInstance(instance_uuid=uuid)
self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance',
fake_fixed_ip_get_by_instance)
instance = objects.Instance(uuid=FAKE_UUID)
result, floats = self.network_api._get_multi_addresses(self.context,
instance)
self.assertFalse(result)
@mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
def _test_is_multi_host_network_has_no_project_id(self, is_multi_host,
fip_get):
network = objects.Network(
id=123, project_id=None,
multi_host=is_multi_host)
fip_get.return_value = [
objects.FixedIP(instance_uuid=FAKE_UUID, network=network,
floating_ips=objects.FloatingIPList())]
instance = objects.Instance(uuid=FAKE_UUID)
result, floats = self.network_api._get_multi_addresses(self.context,
instance)
self.assertEqual(is_multi_host, result)
def test_is_multi_host_network_has_no_project_id_multi(self):
self._test_is_multi_host_network_has_no_project_id(True)
def test_is_multi_host_network_has_no_project_id_non_multi(self):
self._test_is_multi_host_network_has_no_project_id(False)
@mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
def _test_is_multi_host_network_has_project_id(self, is_multi_host,
fip_get):
network = objects.Network(
id=123, project_id=self.context.project_id,
multi_host=is_multi_host)
fip_get.return_value = [
objects.FixedIP(instance_uuid=FAKE_UUID, network=network,
floating_ips=objects.FloatingIPList())]
instance = objects.Instance(uuid=FAKE_UUID)
result, floats = self.network_api._get_multi_addresses(self.context,
instance)
self.assertEqual(is_multi_host, result)
def test_is_multi_host_network_has_project_id_multi(self):
self._test_is_multi_host_network_has_project_id(True)
def test_is_multi_host_network_has_project_id_non_multi(self):
self._test_is_multi_host_network_has_project_id(False)
@mock.patch('nova.objects.Network.get_by_uuid')
@mock.patch('nova.objects.Network.disassociate')
def test_network_disassociate_project(self, mock_disassociate, mock_get):
net_obj = objects.Network(context=self.context, id=1)
mock_get.return_value = net_obj
self.network_api.associate(self.context, FAKE_UUID, project=None)
mock_disassociate.assert_called_once_with(self.context, net_obj.id,
host=False, project=True)
@mock.patch('nova.objects.Network.get_by_uuid')
@mock.patch('nova.objects.Network.disassociate')
def test_network_disassociate_host(self, mock_disassociate, mock_get):
net_obj = objects.Network(context=self.context, id=1)
mock_get.return_value = net_obj
self.network_api.associate(self.context, FAKE_UUID, host=None)
mock_disassociate.assert_called_once_with(self.context, net_obj.id,
host=True, project=False)
@mock.patch('nova.objects.Network.get_by_uuid')
@mock.patch('nova.objects.Network.associate')
def test_network_associate_project(self, mock_associate, mock_get):
net_obj = objects.Network(context=self.context, id=1)
mock_get.return_value = net_obj
project = mock.sentinel.project
self.network_api.associate(self.context, FAKE_UUID, project=project)
mock_associate.assert_called_once_with(self.context, project,
network_id=net_obj.id,
force=True)
@mock.patch('nova.objects.Network.get_by_uuid')
@mock.patch('nova.objects.Network.save')
def test_network_associate_host(self, mock_save, mock_get):
net_obj = objects.Network(context=self.context, id=1)
mock_get.return_value = net_obj
host = str(mock.sentinel.host)
self.network_api.associate(self.context, FAKE_UUID, host=host)
mock_save.assert_called_once_with()
self.assertEqual(host, net_obj.host)
@mock.patch('nova.objects.Network.get_by_uuid')
@mock.patch('nova.objects.Network.disassociate')
def test_network_disassociate(self, mock_disassociate, mock_get):
mock_get.return_value = objects.Network(context=self.context, id=123)
self.network_api.disassociate(self.context, FAKE_UUID)
mock_disassociate.assert_called_once_with(self.context, 123,
project=True, host=True)
def _test_refresh_cache(self, method, *args, **kwargs):
# This test verifies that no call to get_instance_nw_info() is made
# from the @refresh_cache decorator for the tested method.
with contextlib.nested(
mock.patch.object(self.network_api.network_rpcapi, method),
mock.patch.object(self.network_api.network_rpcapi,
'get_instance_nw_info'),
mock.patch.object(network_model.NetworkInfo, 'hydrate'),
mock.patch.object(objects.InstanceInfoCache, 'save'),
) as (
method_mock, nwinfo_mock, hydrate_mock, save_mock
):
nw_info = network_model.NetworkInfo([])
method_mock.return_value = nw_info
hydrate_mock.return_value = nw_info
getattr(self.network_api, method)(*args, **kwargs)
hydrate_mock.assert_called_once_with(nw_info)
self.assertFalse(nwinfo_mock.called)
def test_allocate_for_instance_refresh_cache(self):
instance = fake_instance.fake_instance_obj(self.context)
vpn = 'fake-vpn'
requested_networks = 'fake-networks'
self._test_refresh_cache('allocate_for_instance', self.context,
instance, vpn, requested_networks)
def test_add_fixed_ip_to_instance_refresh_cache(self):
instance = fake_instance.fake_instance_obj(self.context)
network_id = 'fake-network-id'
self._test_refresh_cache('add_fixed_ip_to_instance', self.context,
instance, network_id)
def test_remove_fixed_ip_from_instance_refresh_cache(self):
instance = fake_instance.fake_instance_obj(self.context)
address = 'fake-address'
self._test_refresh_cache('remove_fixed_ip_from_instance', self.context,
instance, address)
@mock.patch('nova.db.fixed_ip_get_by_address')
def test_get_fixed_ip_by_address(self, fip_get):
fip_get.return_value = test_fixed_ip.fake_fixed_ip
fip = self.network_api.get_fixed_ip_by_address(self.context,
'fake-addr')
self.assertIsInstance(fip, objects.FixedIP)
@mock.patch('nova.objects.FixedIP.get_by_id')
def test_get_fixed_ip(self, mock_get_by_id):
mock_get_by_id.return_value = mock.sentinel.fixed_ip
self.assertEqual(mock.sentinel.fixed_ip,
self.network_api.get_fixed_ip(self.context,
mock.sentinel.id))
mock_get_by_id.assert_called_once_with(self.context, mock.sentinel.id)
@mock.patch('nova.objects.FixedIP.get_by_floating_address')
def test_get_instance_by_floating_address(self, mock_get_by_floating):
mock_get_by_floating.return_value = objects.FixedIP(
instance_uuid=mock.sentinel.instance_uuid)
self.assertEqual(str(mock.sentinel.instance_uuid),
self.network_api.get_instance_id_by_floating_address(
self.context, mock.sentinel.floating))
mock_get_by_floating.assert_called_once_with(self.context,
mock.sentinel.floating)
@mock.patch('nova.objects.FixedIP.get_by_floating_address')
def test_get_instance_by_floating_address_none(self, mock_get_by_floating):
mock_get_by_floating.return_value = None
self.assertIsNone(
self.network_api.get_instance_id_by_floating_address(
self.context, mock.sentinel.floating))
mock_get_by_floating.assert_called_once_with(self.context,
mock.sentinel.floating)
@mock.patch('nova.network.api.API.migrate_instance_start')
def test_cleanup_instance_network_on_host(self, fake_migrate_start):
instance = fake_instance.fake_instance_obj(self.context)
self.network_api.cleanup_instance_network_on_host(
self.context, instance, 'fake_compute_source')
fake_migrate_start.assert_called_once_with(
self.context, instance,
{'source_compute': 'fake_compute_source', 'dest_compute': None})
@mock.patch('nova.network.api.API.migrate_instance_finish')
def test_setup_instance_network_on_host(self, fake_migrate_finish):
instance = fake_instance.fake_instance_obj(self.context)
self.network_api.setup_instance_network_on_host(
self.context, instance, 'fake_compute_source')
fake_migrate_finish.assert_called_once_with(
self.context, instance,
{'source_compute': None, 'dest_compute': 'fake_compute_source'})
@mock.patch('oslo_concurrency.lockutils.lock')
@mock.patch.object(api.API, '_get_instance_nw_info')
@mock.patch('nova.network.base_api.update_instance_cache_with_nw_info')
def test_get_instance_nw_info(self, mock_update, mock_get, mock_lock):
fake_result = mock.sentinel.get_nw_info_result
mock_get.return_value = fake_result
instance = fake_instance.fake_instance_obj(self.context)
result = self.network_api.get_instance_nw_info(self.context, instance)
mock_get.assert_called_once_with(self.context, instance)
mock_update.assert_called_once_with(self.network_api, self.context,
instance, nw_info=fake_result,
update_cells=False)
self.assertEqual(fake_result, result)
@mock.patch('nova.network.api.API')
@mock.patch('nova.db.instance_info_cache_update', return_value=fake_info_cache)
class TestUpdateInstanceCache(test.NoDBTestCase):
def setUp(self):
super(TestUpdateInstanceCache, self).setUp()
self.context = context.get_admin_context()
self.instance = objects.Instance(uuid=FAKE_UUID)
vifs = [network_model.VIF(id='super_vif')]
self.nw_info = network_model.NetworkInfo(vifs)
self.nw_json = fields.NetworkModel.to_primitive(self, 'network_info',
self.nw_info)
def test_update_nw_info_none(self, db_mock, api_mock):
api_mock._get_instance_nw_info.return_value = self.nw_info
base_api.update_instance_cache_with_nw_info(api_mock, self.context,
self.instance, None)
api_mock._get_instance_nw_info.assert_called_once_with(self.context,
self.instance)
db_mock.assert_called_once_with(self.context, self.instance.uuid,
{'network_info': self.nw_json})
def test_update_nw_info_one_network(self, db_mock, api_mock):
api_mock._get_instance_nw_info.return_value = self.nw_info
base_api.update_instance_cache_with_nw_info(api_mock, self.context,
self.instance, self.nw_info)
self.assertFalse(api_mock._get_instance_nw_info.called)
db_mock.assert_called_once_with(self.context, self.instance.uuid,
{'network_info': self.nw_json})
def test_update_nw_info_empty_list(self, db_mock, api_mock):
api_mock._get_instance_nw_info.return_value = self.nw_info
base_api.update_instance_cache_with_nw_info(api_mock, self.context,
self.instance,
network_model.NetworkInfo([]))
self.assertFalse(api_mock._get_instance_nw_info.called)
db_mock.assert_called_once_with(self.context, self.instance.uuid,
{'network_info': '[]'})
def test_decorator_return_object(self, db_mock, api_mock):
@base_api.refresh_cache
def func(self, context, instance):
return network_model.NetworkInfo([])
func(api_mock, self.context, self.instance)
self.assertFalse(api_mock._get_instance_nw_info.called)
db_mock.assert_called_once_with(self.context, self.instance.uuid,
{'network_info': '[]'})
def test_decorator_return_none(self, db_mock, api_mock):
@base_api.refresh_cache
def func(self, context, instance):
pass
api_mock._get_instance_nw_info.return_value = self.nw_info
func(api_mock, self.context, self.instance)
api_mock._get_instance_nw_info.assert_called_once_with(self.context,
self.instance)
db_mock.assert_called_once_with(self.context, self.instance.uuid,
{'network_info': self.nw_json})
class NetworkHooksTestCase(test.BaseHookTestCase):
def test_instance_network_info_hook(self):
info_func = base_api.update_instance_cache_with_nw_info
self.assert_has_hook('instance_network_info', info_func)
|
czXiaoxi/oh-my-zsh
|
refs/heads/master
|
plugins/git-prompt/gitstatus.py
|
343
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from subprocess import Popen, PIPE
import re
# change those symbols to whatever you prefer
symbols = {
'ahead of': '↑',
'behind': '↓',
'staged': '♦',
'changed': '‣',
'untracked': '…',
'clean': '⚡',
'unmerged': '≠',
'sha1': ':'
}
output, error = Popen(
['git', 'status'], stdout=PIPE, stderr=PIPE, universal_newlines=True).communicate()
if error:
import sys
sys.exit(0)
lines = output.splitlines()
behead_re = re.compile(
r"^# Your branch is (ahead of|behind) '(.*)' by (\d+) commit")
diverge_re = re.compile(r"^# and have (\d+) and (\d+) different")
status = ''
staged = re.compile(r'^# Changes to be committed:$', re.MULTILINE)
changed = re.compile(r'^# Changed but not updated:$', re.MULTILINE)
untracked = re.compile(r'^# Untracked files:$', re.MULTILINE)
unmerged = re.compile(r'^# Unmerged paths:$', re.MULTILINE)
def execute(*command):
out, err = Popen(stdout=PIPE, stderr=PIPE, *command).communicate()
if not err:
nb = len(out.splitlines())
else:
nb = '?'
return nb
if staged.search(output):
nb = execute(
['git', 'diff', '--staged', '--name-only', '--diff-filter=ACDMRT'])
status += '%s%s' % (symbols['staged'], nb)
if unmerged.search(output):
nb = execute(['git', 'diff', '--staged', '--name-only', '--diff-filter=U'])
status += '%s%s' % (symbols['unmerged'], nb)
if changed.search(output):
nb = execute(['git', 'diff', '--name-only', '--diff-filter=ACDMRT'])
status += '%s%s' % (symbols['changed'], nb)
if untracked.search(output):
status += symbols['untracked']
if status == '':
status = symbols['clean']
remote = ''
bline = lines[0]
if bline.find('Not currently on any branch') != -1:
branch = symbols['sha1'] + Popen([
'git',
'rev-parse',
'--short',
'HEAD'], stdout=PIPE).communicate()[0][:-1]
else:
branch = bline.split(' ')[-1]
bstatusline = lines[1]
match = behead_re.match(bstatusline)
if match:
remote = symbols[match.groups()[0]]
remote += match.groups()[2]
elif lines[2:]:
div_match = diverge_re.match(lines[2])
if div_match:
remote = "{behind}{1}{ahead of}{0}".format(
*div_match.groups(), **symbols)
print('\n'.join([branch, remote, status]))
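# Editor's note (illustrative, not part of the script): given the symbols
# above, a repo on branch "master" that is 2 commits ahead of its remote with
# 1 staged file prints the three lines "master", "↑2" and "♦1"; a repo with a
# clean tree prints "⚡" as its status line.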
|
maxim5/hyper-engine
|
refs/heads/master
|
hyperengine/spec/sugar.py
|
1
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'maxim'
import math
from scipy import stats
from .nodes import *
def wrap(node, transform):
if transform is not None:
return MergeNode(transform, node)
return node
def uniform(start=0.0, end=1.0, transform=None, name=None):
node = UniformNode(start, end).with_name(name)
return wrap(node, transform)
def normal(mean=0.0, stdev=1.0, name=None):
return NonUniformNode(ppf=stats.norm.ppf, loc=mean, scale=stdev).with_name(name)
def choice(array, transform=None, name=None):
if not [item for item in array if isinstance(item, BaseNode)]:
node = ChoiceNode(*array).with_name(name)
else:
node = MergeChoiceNode(*array).with_name(name)
return wrap(node, transform)
def merge(nodes, function, name=None):
if callable(nodes) and not callable(function):
nodes, function = function, nodes
if isinstance(nodes, BaseNode):
nodes = [nodes]
return MergeNode(function, *nodes).with_name(name)
def random_bit():
return choice([0, 1])
def random_bool():
return choice([False, True])
def random_int(n):
return choice(range(n))
def exp(node): return merge([node], math.exp)
def expm1(node): return merge([node], math.expm1)
def frexp(node): return merge([node], math.frexp)
def ldexp(node, i): return merge([node], lambda x: math.ldexp(x, i))
def sqrt(node): return merge([node], math.sqrt)
def pow(a, b): return a ** b
def log(node, base=None): return merge([node], lambda x: math.log(x, base))
def log1p(node): return merge([node], math.log1p)
def log10(node): return merge([node], math.log10)
def sin(node): return merge([node], math.sin)
def cos(node): return merge([node], math.cos)
def tan(node): return merge([node], math.tan)
def sinh(node): return merge([node], math.sinh)
def cosh(node): return merge([node], math.cosh)
def tanh(node): return merge([node], math.tanh)
def asin(node): return merge([node], math.asin)
def acos(node): return merge([node], math.acos)
def atan(node): return merge([node], math.atan)
def atan2(node, x): return merge([node], lambda y: math.atan2(y, x))
def asinh(node): return merge([node], math.asinh)
def acosh(node): return merge([node], math.acosh)
def atanh(node): return merge([node], math.atanh)
def min_(*array):
nodes = [item for item in array if isinstance(item, BaseNode)]
if len(nodes) == 0:
return min(*array) if len(array) > 1 else array[0]
node = merge(nodes, min) if len(nodes) > 1 else nodes[0]
rest = [item for item in array if not isinstance(item, BaseNode)]
if rest:
node = merge([node], lambda x: min(x, *rest))
return node
def max_(*array):
nodes = [item for item in array if isinstance(item, BaseNode)]
if len(nodes) == 0:
return max(*array) if len(array) > 1 else array[0]
node = merge(nodes, max) if len(nodes) > 1 else nodes[0]
rest = [item for item in array if not isinstance(item, BaseNode)]
if rest:
node = merge([node], lambda x: max(x, *rest))
return node
def new(*args, **kwargs):
from ..base import NamedDict
if len(args) == 1 and len(kwargs) == 0:
return NamedDict(args[0])
assert len(args) == 0, 'Failed to create a NamedDict with arguments: %s' % str(args)
return NamedDict(kwargs)
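# Editor's sketch (not part of the original module): typical use of this sugar
# to declare a hyper-parameter search space; the parameter names are illustrative.
if __name__ == '__main__':
    learning_rate = exp(uniform(math.log(1e-4), math.log(1e-1)))  # log-uniform in [1e-4, 1e-1]
    dropout = uniform(0.2, 0.8)
    hidden_size = choice([64, 128, 256])
    workers = min_(random_int(8), 4)  # sampled node capped at the constant 4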
|
rogerscristo/BotFWD
|
refs/heads/master
|
env/lib/python3.6/site-packages/telegram/inline/inputcontactmessagecontent.py
|
1
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the classes that represent Telegram InputContactMessageContent."""
from telegram import InputMessageContent
class InputContactMessageContent(InputMessageContent):
"""Represents the content of a contact message to be sent as the result of an inline query.
Attributes:
phone_number (:obj:`str`): Contact's phone number.
first_name (:obj:`str`): Contact's first name.
last_name (:obj:`str`): Optional. Contact's last name.
Args:
phone_number (:obj:`str`): Contact's phone number.
first_name (:obj:`str`): Contact's first name.
last_name (:obj:`str`, optional): Contact's last name.
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
"""
def __init__(self, phone_number, first_name, last_name=None, **kwargs):
# Required
self.phone_number = phone_number
self.first_name = first_name
# Optionals
self.last_name = last_name
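# Editor's sketch (not part of the library): constructing the content object
# with illustrative values; in practice it would be attached to an
# InlineQueryResult rather than used on its own.
if __name__ == '__main__':
    content = InputContactMessageContent('+1234567890', 'John', last_name='Doe')
    print(content.phone_number, content.first_name, content.last_name)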
|
adlnet/ADL_LRS
|
refs/heads/master
|
lrs/managers/AgentProfileManager.py
|
2
|
import json
import datetime
from django.core.files.base import ContentFile
from django.core.exceptions import ValidationError
from django.utils.timezone import utc
from ..models import AgentProfile
from ..exceptions import IDNotFoundError, ParamError
from ..utils import etag
class AgentProfileManager():
def __init__(self, agent):
self.Agent = agent
def save_non_json_profile(self, p, profile, request_dict):
p.content_type = request_dict['headers']['CONTENT_TYPE']
p.etag = etag.create_tag(profile.read())
if 'updated' in request_dict['headers'] and request_dict['headers']['updated']:
p.updated = request_dict['headers']['updated']
else:
p.updated = datetime.datetime.utcnow().replace(tzinfo=utc)
# Go to beginning of file
profile.seek(0)
fn = "%s_%s" % (p.agent_id, request_dict.get('filename', p.id))
p.profile.save(fn, profile)
p.save()
def post_profile(self, request_dict):
# get/create profile
p, created = AgentProfile.objects.get_or_create(
profile_id=request_dict['params']['profileId'], agent=self.Agent)
post_profile = request_dict['profile']
# If incoming profile is application/json and if a profile didn't
# already exist with the same agent and profileId
if created:
p.json_profile = post_profile
p.content_type = "application/json"
p.etag = etag.create_tag(post_profile)
# If incoming profile is application/json and if a profile already
# existed with the same agent and profileId
else:
orig_prof = json.loads(p.json_profile)
post_profile = json.loads(post_profile)
merged = json.dumps(
dict(orig_prof.items() + post_profile.items()))
p.json_profile = merged
p.etag = etag.create_tag(merged)
# Set updated
if 'updated' in request_dict['headers'] and request_dict['headers']['updated']:
p.updated = request_dict['headers']['updated']
else:
p.updated = datetime.datetime.utcnow().replace(tzinfo=utc)
p.save()
def put_profile(self, request_dict):
# get/create profile
p, created = AgentProfile.objects.get_or_create(
profile_id=request_dict['params']['profileId'], agent=self.Agent)
# Profile being PUT is not json
if "application/json" not in request_dict['headers']['CONTENT_TYPE']:
try:
profile = ContentFile(request_dict['profile'].read())
except:
try:
profile = ContentFile(request_dict['profile'])
except:
profile = ContentFile(str(request_dict['profile']))
etag.check_preconditions(request_dict, p, created)
# If it already exists delete it
if p.profile:
try:
p.profile.delete()
except OSError:
# probably was json before
p.json_profile = {}
self.save_non_json_profile(p, profile, request_dict)
# Profile being PUT is json
else:
# (overwrite existing profile data)
etag.check_preconditions(request_dict, p, created)
the_profile = request_dict['profile']
p.json_profile = the_profile
p.content_type = request_dict['headers']['CONTENT_TYPE']
p.etag = etag.create_tag(the_profile)
# Set updated
if 'updated' in request_dict['headers'] and request_dict['headers']['updated']:
p.updated = request_dict['headers']['updated']
else:
p.updated = datetime.datetime.utcnow().replace(tzinfo=utc)
p.save()
def get_profile(self, profile_id):
try:
return self.Agent.agentprofile_set.get(profile_id=profile_id)
except:
err_msg = 'There is no agent profile associated with the id: %s' % profile_id
raise IDNotFoundError(err_msg)
def get_profile_ids(self, since=None):
ids = []
if since:
try:
# this expects ISO 8601 date/time format
# "2013-02-15T12:00:00+00:00"
profs = self.Agent.agentprofile_set.filter(updated__gt=since)
except ValidationError:
err_msg = 'Since field is not in correct format for retrieval of agent profiles'
raise ParamError(err_msg)
ids = [p.profile_id for p in profs]
else:
ids = self.Agent.agentprofile_set.values_list(
'profile_id', flat=True)
return ids
def delete_profile(self, profile_id):
try:
self.get_profile(profile_id).delete()
# we don't want it anyway
except AgentProfile.DoesNotExist:
pass
except IDNotFoundError:
pass
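# Editor's note on post_profile above: the JSON merge is shallow and
# top-level, with keys from the incoming profile winning on conflict; e.g.
# an existing profile {"a": 1, "b": 2} merged with an incoming {"b": 3} is
# stored as {"a": 1, "b": 3}. (dict(a.items() + b.items()) is Python 2
# syntax, where items() returns concatenable lists.)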
|
DISKonnectd/phantom_apps
|
refs/heads/master
|
sharepoint/dependencies/lxml/html/soupparser.py
|
8
|
"""External interface to the BeautifulSoup HTML parser.
"""
__all__ = ["fromstring", "parse", "convert_tree"]
import re
from lxml import etree, html
try:
from bs4 import (
BeautifulSoup, Tag, Comment, ProcessingInstruction, NavigableString,
Declaration, Doctype)
_DECLARATION_OR_DOCTYPE = (Declaration, Doctype)
except ImportError:
from BeautifulSoup import (
BeautifulSoup, Tag, Comment, ProcessingInstruction, NavigableString,
Declaration)
_DECLARATION_OR_DOCTYPE = Declaration
def fromstring(data, beautifulsoup=None, makeelement=None, **bsargs):
"""Parse a string of HTML data into an Element tree using the
BeautifulSoup parser.
Returns the root ``<html>`` Element of the tree.
You can pass a different BeautifulSoup parser through the
`beautifulsoup` keyword, and a different Element factory function
through the `makeelement` keyword. By default, the standard
``BeautifulSoup`` class and the default factory of `lxml.html` are
used.
"""
return _parse(data, beautifulsoup, makeelement, **bsargs)
def parse(file, beautifulsoup=None, makeelement=None, **bsargs):
"""Parse a file into an ElemenTree using the BeautifulSoup parser.
You can pass a different BeautifulSoup parser through the
`beautifulsoup` keyword, and a different Element factory function
through the `makeelement` keyword. By default, the standard
``BeautifulSoup`` class and the default factory of `lxml.html` are
used.
"""
if not hasattr(file, 'read'):
file = open(file)
root = _parse(file, beautifulsoup, makeelement, **bsargs)
return etree.ElementTree(root)
def convert_tree(beautiful_soup_tree, makeelement=None):
"""Convert a BeautifulSoup tree to a list of Element trees.
Returns a list instead of a single root Element to support
HTML-like soup with more than one root element.
You can pass a different Element factory through the `makeelement`
keyword.
"""
root = _convert_tree(beautiful_soup_tree, makeelement)
children = root.getchildren()
for child in children:
root.remove(child)
return children
# helpers
def _parse(source, beautifulsoup, makeelement, **bsargs):
if beautifulsoup is None:
beautifulsoup = BeautifulSoup
if hasattr(beautifulsoup, "HTML_ENTITIES"): # bs3
if 'convertEntities' not in bsargs:
bsargs['convertEntities'] = 'html'
if hasattr(beautifulsoup, "DEFAULT_BUILDER_FEATURES"): # bs4
if 'features' not in bsargs:
bsargs['features'] = ['html.parser'] # use Python html parser
tree = beautifulsoup(source, **bsargs)
root = _convert_tree(tree, makeelement)
# from ET: wrap the document in a html root element, if necessary
if len(root) == 1 and root[0].tag == "html":
return root[0]
root.tag = "html"
return root
_parse_doctype_declaration = re.compile(
r'(?:\s|[<!])*DOCTYPE\s*HTML'
r'(?:\s+PUBLIC)?(?:\s+(\'[^\']*\'|"[^"]*"))?'
r'(?:\s+(\'[^\']*\'|"[^"]*"))?',
re.IGNORECASE).match
class _PseudoTag:
# Minimal imitation of BeautifulSoup.Tag
def __init__(self, contents):
self.name = 'html'
self.attrs = []
self.contents = contents
def __iter__(self):
return self.contents.__iter__()
def _convert_tree(beautiful_soup_tree, makeelement):
if makeelement is None:
makeelement = html.html_parser.makeelement
# Split the tree into three parts:
# i) everything before the root element: document type
# declaration, comments, processing instructions, whitespace
# ii) the root(s),
# iii) everything after the root: comments, processing
# instructions, whitespace
first_element_idx = last_element_idx = None
html_root = declaration = None
for i, e in enumerate(beautiful_soup_tree):
if isinstance(e, Tag):
if first_element_idx is None:
first_element_idx = i
last_element_idx = i
if html_root is None and e.name and e.name.lower() == 'html':
html_root = e
elif declaration is None and isinstance(e, _DECLARATION_OR_DOCTYPE):
declaration = e
# For a nice, well-formatted document, the variable roots below is
# a list consisting of a single <html> element. However, the document
# may be a soup like '<meta><head><title>Hello</head><body>Hi
# all<\p>'. In this example roots is a list containing meta, head
# and body elements.
pre_root = beautiful_soup_tree.contents[:first_element_idx]
roots = beautiful_soup_tree.contents[first_element_idx:last_element_idx+1]
post_root = beautiful_soup_tree.contents[last_element_idx+1:]
# Reorganize so that there is one <html> root...
if html_root is not None:
# ... use existing one if possible, ...
i = roots.index(html_root)
html_root.contents = roots[:i] + html_root.contents + roots[i+1:]
else:
# ... otherwise create a new one.
html_root = _PseudoTag(roots)
convert_node = _init_node_converters(makeelement)
# Process pre_root
res_root = convert_node(html_root)
prev = res_root
for e in reversed(pre_root):
converted = convert_node(e)
if converted is not None:
prev.addprevious(converted)
prev = converted
# ditto for post_root
prev = res_root
for e in post_root:
converted = convert_node(e)
if converted is not None:
prev.addnext(converted)
prev = converted
if declaration is not None:
try:
# bs4 provides full Doctype string
doctype_string = declaration.output_ready()
except AttributeError:
doctype_string = declaration.string
match = _parse_doctype_declaration(doctype_string)
if not match:
# Something is wrong if we end up in here. Since soupparser should
# tolerate errors, do not raise Exception, just let it pass.
pass
else:
external_id, sys_uri = match.groups()
docinfo = res_root.getroottree().docinfo
# strip quotes and update DOCTYPE values (any of None, '', '...')
docinfo.public_id = external_id and external_id[1:-1]
docinfo.system_url = sys_uri and sys_uri[1:-1]
return res_root
def _init_node_converters(makeelement):
converters = {}
ordered_node_types = []
def converter(*types):
def add(handler):
for t in types:
converters[t] = handler
ordered_node_types.append(t)
return handler
return add
def find_best_converter(node):
for t in ordered_node_types:
if isinstance(node, t):
return converters[t]
return None
def convert_node(bs_node, parent=None):
# duplicated in convert_tag() below
try:
handler = converters[type(bs_node)]
except KeyError:
handler = converters[type(bs_node)] = find_best_converter(bs_node)
if handler is None:
return None
return handler(bs_node, parent)
def map_attrs(bs_attrs):
if isinstance(bs_attrs, dict): # bs4
attribs = {}
for k, v in bs_attrs.items():
if isinstance(v, list):
v = " ".join(v)
attribs[k] = unescape(v)
else:
attribs = dict((k, unescape(v)) for k, v in bs_attrs)
return attribs
def append_text(parent, text):
if len(parent) == 0:
parent.text = (parent.text or '') + text
else:
parent[-1].tail = (parent[-1].tail or '') + text
# converters are tried in order of their definition
@converter(Tag, _PseudoTag)
def convert_tag(bs_node, parent):
attrs = bs_node.attrs
if parent is not None:
attribs = map_attrs(attrs) if attrs else None
res = etree.SubElement(parent, bs_node.name, attrib=attribs)
else:
attribs = map_attrs(attrs) if attrs else {}
res = makeelement(bs_node.name, attrib=attribs)
for child in bs_node:
# avoid double recursion by inlining convert_node(), see above
try:
handler = converters[type(child)]
except KeyError:
pass
else:
if handler is not None:
handler(child, res)
continue
convert_node(child, res)
return res
@converter(Comment)
def convert_comment(bs_node, parent):
res = etree.Comment(bs_node)
if parent is not None:
parent.append(res)
return res
@converter(ProcessingInstruction)
def convert_pi(bs_node, parent):
if bs_node.endswith('?'):
# The PI is of XML style (<?as df?>) but BeautifulSoup
# interpreted it as being SGML style (<?as df>). Fix.
bs_node = bs_node[:-1]
res = etree.ProcessingInstruction(*bs_node.split(' ', 1))
if parent is not None:
parent.append(res)
return res
@converter(NavigableString)
def convert_text(bs_node, parent):
if parent is not None:
append_text(parent, unescape(bs_node))
return None
return convert_node
# copied from ET's ElementSoup
try:
from html.entities import name2codepoint # Python 3
except ImportError:
from htmlentitydefs import name2codepoint
handle_entities = re.compile(r"&(\w+);").sub
try:
unichr
except NameError:
# Python 3
unichr = chr
def unescape(string):
if not string:
return ''
# work around oddities in BeautifulSoup's entity handling
def unescape_entity(m):
try:
return unichr(name2codepoint[m.group(1)])
except KeyError:
return m.group(0) # use as is
return handle_entities(unescape_entity, string)
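# Editor's sketch (not part of lxml): a minimal round-trip through the parser.
if __name__ == '__main__':
    root = fromstring('<p>Hello &amp; goodbye</p>')  # returns the <html> root Element
    print(etree.tostring(root))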
|
puzan/ansible
|
refs/heads/devel
|
lib/ansible/modules/windows/win_tempfile.py
|
15
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = r'''
---
module: win_tempfile
version_added: "2.3"
author: Dag Wieers (@dagwieers)
short_description: Creates temporary files and directories.
description:
- Creates temporary files and directories.
options:
state:
description:
- Whether to create file or directory.
choices: [ file, directory ]
default: file
path:
description:
- Location where temporary file or directory should be created.
- If path is not specified, the default system temporary directory (%TEMP%) will be used.
default: '%TEMP%'
prefix:
description:
- Prefix of file/directory name created by module.
default: ansible.
suffix:
description:
- Suffix of file/directory name created by module.
default: ''
'''
EXAMPLES = r"""
- name: Create temporary build directory
win_tempfile:
state: directory
suffix: build
- name: Create temporary file
win_tempfile:
state: file
suffix: temp
"""
RETURN = r'''
path:
description: Path to created file or directory
returned: success
type: string
sample: C:\Users\Administrator\AppData\Local\Temp\ansible.bMlvdk
'''
|
mluo613/osf.io
|
refs/heads/develop
|
website/conferences/views.py
|
3
|
# -*- coding: utf-8 -*-
import httplib
import logging
from django.db import transaction
from django.utils import timezone
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from framework.auth import get_or_create_user
from framework.exceptions import HTTPError
from framework.flask import redirect
from framework.transactions.handlers import no_auto_transaction
from website import settings
from website.models import Node, Tag
from website.util import web_url_for
from website.mails import send_mail
from website.files.models import StoredFileNode
from website.mails import CONFERENCE_SUBMITTED, CONFERENCE_INACTIVE, CONFERENCE_FAILED
from website.conferences import utils, signals
from website.conferences.message import ConferenceMessage, ConferenceError
from website.conferences.model import Conference
logger = logging.getLogger(__name__)
@no_auto_transaction
def meeting_hook():
"""View function for email conference submission.
"""
message = ConferenceMessage()
try:
message.verify()
except ConferenceError as error:
logger.error(error)
raise HTTPError(httplib.NOT_ACCEPTABLE)
try:
conference = Conference.get_by_endpoint(message.conference_name, active=False)
except ConferenceError as error:
logger.error(error)
raise HTTPError(httplib.NOT_ACCEPTABLE)
if not conference.active:
send_mail(
message.sender_email,
CONFERENCE_INACTIVE,
fullname=message.sender_display,
presentations_url=web_url_for('conference_view', _absolute=True),
)
raise HTTPError(httplib.NOT_ACCEPTABLE)
add_poster_by_email(conference=conference, message=message)
def add_poster_by_email(conference, message):
"""
:param Conference conference:
:param ConferenceMessage message:
"""
# Fail if no attachments
if not message.attachments:
return send_mail(
message.sender_email,
CONFERENCE_FAILED,
fullname=message.sender_display,
)
nodes_created = []
users_created = []
with transaction.atomic():
user, user_created = get_or_create_user(
message.sender_display,
message.sender_email,
is_spam=message.is_spam,
)
if user_created:
user.save() # need to save in order to access m2m fields (e.g. tags)
users_created.append(user)
user.add_system_tag('osf4m')
user.date_last_login = timezone.now()
user.save()
# must save the user first before accessing user._id
set_password_url = web_url_for(
'reset_password_get',
uid=user._id,
token=user.verification_key_v2['token'],
_absolute=True,
)
else:
set_password_url = None
node, node_created = utils.get_or_create_node(message.subject, user)
if node_created:
nodes_created.append(node)
node.add_system_tag('osf4m')
node.save()
utils.provision_node(conference, message, node, user)
utils.record_message(message, nodes_created, users_created)
# Prevent circular import error
from framework.auth import signals as auth_signals
if user_created:
auth_signals.user_confirmed.send(user)
utils.upload_attachments(user, node, message.attachments)
download_url = node.web_url_for(
'addon_view_or_download_file',
path=message.attachments[0].filename,
provider='osfstorage',
action='download',
_absolute=True,
)
# Send confirmation email
send_mail(
message.sender_email,
CONFERENCE_SUBMITTED,
conf_full_name=conference.name,
conf_view_url=web_url_for(
'conference_results',
meeting=message.conference_name,
_absolute=True,
),
fullname=message.sender_display,
user_created=user_created,
set_password_url=set_password_url,
profile_url=user.absolute_url,
node_url=node.absolute_url,
file_url=download_url,
presentation_type=message.conference_category.lower(),
is_spam=message.is_spam,
)
if node_created and user_created:
signals.osf4m_user_created.send(user, conference=conference, node=node)
def _render_conference_node(node, idx, conf):
try:
record = next(
x for x in
StoredFileNode.find(
Q('node', 'eq', node) &
Q('is_file', 'eq', True)
).limit(1)
).wrapped()
download_count = record.get_download_count()
download_url = node.web_url_for(
'addon_view_or_download_file',
path=record.path.strip('/'),
provider='osfstorage',
action='download',
_absolute=True,
)
except StopIteration:
download_url = ''
download_count = 0
author = node.visible_contributors[0]
tags = list(node.tags.values_list('name', flat=True))
return {
'id': idx,
'title': node.title,
'nodeUrl': node.url,
'author': author.family_name if author.family_name else author.fullname,
'authorUrl': node.creator.url,
'category': conf.field_names['submission1'] if conf.field_names['submission1'] in node.system_tags else conf.field_names['submission2'],
'download': download_count,
'downloadUrl': download_url,
'dateCreated': node.date_created.isoformat(),
'confName': conf.name,
'confUrl': web_url_for('conference_results', meeting=conf.endpoint),
'tags': ' '.join(tags)
}
def conference_data(meeting):
try:
conf = Conference.find_one(Q('endpoint', 'iexact', meeting))
except ModularOdmException:
raise HTTPError(httplib.NOT_FOUND)
nodes = Node.find(
Q('tags__name', 'iexact', meeting) &
Q('is_public', 'eq', True) &
Q('is_deleted', 'eq', False)
)
ret = [
_render_conference_node(each, idx, conf)
for idx, each in enumerate(nodes)
]
return ret
def redirect_to_meetings(**kwargs):
return redirect('/meetings/')
def serialize_conference(conf):
return {
'active': conf.active,
'admins': list(conf.admins.all().values_list('guids___id', flat=True)),
'end_date': conf.end_date,
'endpoint': conf.endpoint,
'field_names': conf.field_names,
'info_url': conf.info_url,
'is_meeting': conf.is_meeting,
'location': conf.location,
'logo_url': conf.logo_url,
'name': conf.name,
'num_submissions': conf.num_submissions,
'poster': conf.poster,
'public_projects': conf.public_projects,
'start_date': conf.start_date,
'talk': conf.talk,
}
def conference_results(meeting):
"""Return the data for the grid view for a conference.
:param str meeting: Endpoint name for a conference.
"""
try:
conf = Conference.find_one(Q('endpoint', 'iexact', meeting))
except ModularOdmException:
raise HTTPError(httplib.NOT_FOUND)
data = conference_data(meeting)
return {
'data': data,
'label': meeting,
'meeting': serialize_conference(conf),
# Needed in order to use base.mako namespace
'settings': settings,
}
def conference_submissions(**kwargs):
"""Return data for all OSF4M submissions.
The total number of submissions for each meeting is calculated and cached
in the Conference.num_submissions field.
"""
submissions = []
# TODO: Revisit this loop, there has to be a way to optimize it
for conf in Conference.find():
if (hasattr(conf, 'is_meeting') and (conf.is_meeting is False)):
continue
# For efficiency, we filter by tag first, then node
# instead of doing a single Node query
projects = set()
tags = Tag.find(Q('name', 'iexact', conf.endpoint.lower())).values_list('pk', flat=True)
nodes = Node.find(
Q('tags', 'in', tags) &
Q('is_public', 'eq', True) &
Q('is_deleted', 'ne', True)
)
projects.update(list(nodes))
for idx, node in enumerate(projects):
submissions.append(_render_conference_node(node, idx, conf))
num_submissions = len(projects)
# Cache the number of submissions
conf.num_submissions = num_submissions
conf.save()
if num_submissions < settings.CONFERENCE_MIN_COUNT:
continue
submissions.sort(key=lambda submission: submission['dateCreated'], reverse=True)
return {'submissions': submissions}
def conference_view(**kwargs):
meetings = []
for conf in Conference.find():
if conf.num_submissions < settings.CONFERENCE_MIN_COUNT:
continue
if (hasattr(conf, 'is_meeting') and (conf.is_meeting is False)):
continue
meetings.append({
'name': conf.name,
'location': conf.location,
'end_date': conf.end_date.strftime('%b %d, %Y') if conf.end_date else None,
'start_date': conf.start_date.strftime('%b %d, %Y') if conf.start_date else None,
'url': web_url_for('conference_results', meeting=conf.endpoint),
'count': conf.num_submissions,
})
meetings.sort(key=lambda meeting: meeting['count'], reverse=True)
return {'meetings': meetings}
|
humanlongevity/luigi
|
refs/heads/master
|
luigi/mrrunner.py
|
65
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The hadoop runner.
This module contains the main() method which will be used to run the
mapper and reducer on the Hadoop nodes.
"""
from __future__ import print_function
try:
import cPickle as pickle
except ImportError:
import pickle
import logging
import os
import sys
import tarfile
import traceback
class Runner(object):
"""
Run the mapper or reducer on hadoop nodes.
"""
def __init__(self, job=None):
self.extract_packages_archive()
self.job = job or pickle.load(open("job-instance.pickle", "rb"))
self.job._setup_remote()
def run(self, kind, stdin=sys.stdin, stdout=sys.stdout):
if kind == "map":
self.job.run_mapper(stdin, stdout)
elif kind == "combiner":
self.job.run_combiner(stdin, stdout)
elif kind == "reduce":
self.job.run_reducer(stdin, stdout)
else:
raise Exception('weird command: %s' % kind)
def extract_packages_archive(self):
if not os.path.exists("packages.tar"):
return
tar = tarfile.open("packages.tar")
for tarinfo in tar:
tar.extract(tarinfo)
tar.close()
if '' not in sys.path:
sys.path.insert(0, '')
def print_exception(exc):
tb = traceback.format_exc()
print('luigi-exc-hex=%s' % tb.encode('hex'), file=sys.stderr)
def main(args=None, stdin=sys.stdin, stdout=sys.stdout, print_exception=print_exception):
"""
Run either the mapper or the reducer from the class instance in the file "job-instance.pickle".
Arguments:
kind -- is either map or reduce
"""
try:
# Set up logging.
logging.basicConfig(level=logging.WARN)
kind = args is not None and args[1] or sys.argv[1]
Runner().run(kind, stdin=stdin, stdout=stdout)
except Exception as exc:
# Dump encoded data that we will try to fetch using mechanize
print_exception(exc)
raise
if __name__ == '__main__':
main()
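# Editor's note (assumption, not stated in this file): under Hadoop streaming,
# luigi ships this script to each task attempt together with
# "job-instance.pickle" (and, when present, "packages.tar") and invokes it as
# the mapper/combiner/reducer command, e.g.:
#   python mrrunner.py map    < input_split   > map_output
#   python mrrunner.py reduce < sorted_output > reduce_output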
|
schelleg/PYNQ
|
refs/heads/master
|
pynq/lib/logictools/tests/test_waveform.py
|
4
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from copy import deepcopy
import pytest
from pynq.lib.logictools import Waveform
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
correct_data = {'signal': [
['stimulus'],
{},
['analysis']],
'foot': {'tock': 1},
'head': {'text': 'Loopback Test'}}
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
all_pins = [k for k in list(pin_dict.keys())[:interface_width]]
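# Build one stimulus lane and one matching analysis lane per traceable pin.
# The 'wave' strings below describe square waves at four different rates in
# WaveDrom-style notation: 'l' = drive low, 'h' = drive high, '.' = hold the
# previous value for one more cycle.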
for i in range(interface_width):
wavelane1 = dict()
wavelane2 = dict()
wavelane1['name'] = 'clk{}'.format(i)
wavelane2['name'] = 'clk{}'.format(i)
wavelane1['pin'] = all_pins[i]
wavelane2['pin'] = all_pins[i]
correct_data['signal'][-1].append(wavelane2)
if i % 4 == 0:
wavelane1['wave'] = 'lh' * 64
elif i % 4 == 1:
wavelane1['wave'] = 'l.h.' * 32
elif i % 4 == 2:
wavelane1['wave'] = 'l...h...' * 16
else:
wavelane1['wave'] = 'l.......h.......' * 8
correct_data['signal'][0].append(wavelane1)
def test_waveform_correct():
"""Test for the Waveform class.
    Test whether correct waveform data can be displayed without any
    exception.
"""
waveform = Waveform(correct_data,
stimulus_group_name='stimulus',
analysis_group_name='analysis')
waveform.display()
def test_waveform_names():
    """Test for the Waveform class.
    Should raise exception when wavelane names are not unique.
    """
    wrong_data = deepcopy(correct_data)
    wrong_data['signal'][0][2]['name'] = wrong_data['signal'][0][1]['name']
    with pytest.raises(ValueError):
        waveform = Waveform(wrong_data,
                            stimulus_group_name='stimulus',
                            analysis_group_name='analysis')
        waveform.display()
def test_waveform_pin_labels1():
    """Test for the Waveform class.
    Should raise exception when wavelane pin labels are not unique.
    """
    wrong_data = deepcopy(correct_data)
    wrong_data['signal'][0][2]['pin'] = wrong_data['signal'][0][1]['pin']
    with pytest.raises(ValueError):
        waveform = Waveform(wrong_data,
                            stimulus_group_name='stimulus',
                            analysis_group_name='analysis')
        waveform.display()
def test_waveform_pin_labels2():
    """Test for the Waveform class.
    Should raise exception when wavelane pin labels are not valid.
    """
    wrong_data = deepcopy(correct_data)
    wrong_data['signal'][0][1]['pin'] = 'INVALID'
    with pytest.raises(ValueError):
        waveform = Waveform(wrong_data,
                            stimulus_group_name='stimulus',
                            analysis_group_name='analysis')
        waveform.display()
|
fxfitz/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/netscaler/netscaler_cs_action.py
|
72
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_cs_action
short_description: Manage content switching actions
description:
- Manage content switching actions
    - This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance.
version_added: "2.4.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
name:
description:
- >-
Name for the content switching action. Must begin with an ASCII alphanumeric or underscore C(_)
character, and must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon
C(:), at sign C(@), equal sign C(=), and hyphen C(-) characters. Can be changed after the content
switching action is created.
targetlbvserver:
description:
- "Name of the load balancing virtual server to which the content is switched."
targetvserver:
description:
- "Name of the VPN virtual server to which the content is switched."
targetvserverexpr:
description:
- "Information about this content switching action."
comment:
description:
- "Comments associated with this cs action."
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
# lb_vserver_1 must have been already created with the netscaler_lb_vserver module
- name: Configure netscaler content switching action
delegate_to: localhost
netscaler_cs_action:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
validate_certs: no
state: present
name: action-1
targetlbvserver: lb_vserver_1
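# Illustrative removal of the same action (reuses the hypothetical connection
# parameters from the example above)
- name: Remove netscaler content switching action
  delegate_to: localhost
  netscaler_cs_action:
    nsip: 172.18.0.2
    nitro_user: nsroot
    nitro_pass: nsroot
    validate_certs: no
    state: absent
    name: action-1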
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: "['message 1', 'message 2']"
msg:
description: Message detailing the failure reason
returned: failure
type: string
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dictionary
sample: "{ 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }"
'''
import json
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.cs.csaction import csaction
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netscaler.netscaler import (
ConfigProxy,
get_nitro_client,
netscaler_common_arguments,
log, loglines,
ensure_feature_is_enabled,
get_immutables_intersection
)
def action_exists(client, module):
    return csaction.count_filtered(client, 'name:%s' % module.params['name']) > 0
def action_identical(client, module, csaction_proxy):
    return len(diff_list(client, module, csaction_proxy)) == 0
def diff_list(client, module, csaction_proxy):
    action_list = csaction.get_filtered(client, 'name:%s' % module.params['name'])
    diff_list = csaction_proxy.diff_object(action_list[0])
    # Deliberately disabled branch (note the leading False): NITRO returns
    # targetvserverexpr as a JSON-encoded string, and this normalization is
    # kept here for reference only.
    if False and 'targetvserverexpr' in diff_list:
        json_value = json.loads(action_list[0].targetvserverexpr)
        if json_value == module.params['targetvserverexpr']:
            del diff_list['targetvserverexpr']
    return diff_list
def main():
module_specific_arguments = dict(
name=dict(type='str'),
targetlbvserver=dict(type='str'),
targetvserverexpr=dict(type='str'),
comment=dict(type='str'),
)
argument_spec = dict()
argument_spec.update(netscaler_common_arguments)
argument_spec.update(module_specific_arguments)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
module_result = dict(
changed=False,
failed=False,
loglines=loglines
)
# Fail the module if imports failed
if not PYTHON_SDK_IMPORTED:
module.fail_json(msg='Could not load nitro python sdk')
# Fallthrough to rest of execution
client = get_nitro_client(module)
try:
client.login()
except nitro_exception as e:
msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg)
except Exception as e:
if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
module.fail_json(msg='Connection error %s' % str(e))
elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
module.fail_json(msg='SSL Error %s' % str(e))
else:
module.fail_json(msg='Unexpected error during login %s' % str(e))
readwrite_attrs = [
'name',
'targetlbvserver',
'targetvserverexpr',
'comment',
]
readonly_attrs = [
'hits',
'referencecount',
'undefhits',
'builtin',
]
immutable_attrs = [
'name',
'targetvserverexpr',
]
transforms = {
}
# Instantiate config proxy
csaction_proxy = ConfigProxy(
actual=csaction(),
client=client,
attribute_values_dict=module.params,
readwrite_attrs=readwrite_attrs,
readonly_attrs=readonly_attrs,
immutable_attrs=immutable_attrs,
transforms=transforms,
)
try:
ensure_feature_is_enabled(client, 'CS')
# Apply appropriate state
if module.params['state'] == 'present':
log('Applying actions for state present')
if not action_exists(client, module):
if not module.check_mode:
csaction_proxy.add()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
elif not action_identical(client, module, csaction_proxy):
# Check if we try to change value of immutable attributes
immutables_changed = get_immutables_intersection(csaction_proxy, diff_list(client, module, csaction_proxy).keys())
if immutables_changed != []:
module.fail_json(
msg='Cannot update immutable attributes %s' % (immutables_changed,),
diff=diff_list(client, module, csaction_proxy),
**module_result
)
if not module.check_mode:
csaction_proxy.update()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
# Sanity check for state
log('Sanity checks for state present')
if not module.check_mode:
if not action_exists(client, module):
module.fail_json(msg='Content switching action does not exist', **module_result)
if not action_identical(client, module, csaction_proxy):
module.fail_json(
msg='Content switching action differs from configured',
diff=diff_list(client, module, csaction_proxy),
**module_result
)
elif module.params['state'] == 'absent':
log('Applying actions for state absent')
if action_exists(client, module):
if not module.check_mode:
csaction_proxy.delete()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
# Sanity check for state
if not module.check_mode:
log('Sanity checks for state absent')
if action_exists(client, module):
module.fail_json(msg='Content switching action still exists', **module_result)
except nitro_exception as e:
msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg, **module_result)
client.logout()
module.exit_json(**module_result)
if __name__ == "__main__":
main()
|
serviceagility/boto
|
refs/heads/develop
|
boto/glacier/job.py
|
153
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import math
import socket
from boto.glacier.exceptions import TreeHashDoesNotMatchError, \
DownloadArchiveError
from boto.glacier.utils import tree_hash_from_str
class Job(object):
DefaultPartSize = 4 * 1024 * 1024
ResponseDataElements = (('Action', 'action', None),
('ArchiveId', 'archive_id', None),
('ArchiveSizeInBytes', 'archive_size', 0),
('Completed', 'completed', False),
('CompletionDate', 'completion_date', None),
('CreationDate', 'creation_date', None),
('InventorySizeInBytes', 'inventory_size', 0),
('JobDescription', 'description', None),
('JobId', 'id', None),
('SHA256TreeHash', 'sha256_treehash', None),
('SNSTopic', 'sns_topic', None),
('StatusCode', 'status_code', None),
('StatusMessage', 'status_message', None),
('VaultARN', 'arn', None))
def __init__(self, vault, response_data=None):
self.vault = vault
if response_data:
for response_name, attr_name, default in self.ResponseDataElements:
setattr(self, attr_name, response_data[response_name])
else:
for response_name, attr_name, default in self.ResponseDataElements:
setattr(self, attr_name, default)
def __repr__(self):
return 'Job(%s)' % self.arn
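    # Illustrative usage (assumes an existing boto Glacier vault object and a
    # completed retrieval job; the identifiers are hypothetical):
    #   job = vault.get_job('EXAMPLE-JOB-ID')
    #   if job.completed:
    #       job.download_to_file('archive.tar', verify_hashes=True)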
def get_output(self, byte_range=None, validate_checksum=False):
"""
This operation downloads the output of the job. Depending on
the job type you specified when you initiated the job, the
output will be either the content of an archive or a vault
inventory.
You can download all the job output or download a portion of
the output by specifying a byte range. In the case of an
archive retrieval job, depending on the byte range you
specify, Amazon Glacier returns the checksum for the portion
of the data. You can compute the checksum on the client and
verify that the values match to ensure the portion you
downloaded is the correct data.
        :type byte_range: tuple
        :param byte_range: A tuple of integers specifying the slice (in bytes)
            of the archive you want to receive
:type validate_checksum: bool
:param validate_checksum: Specify whether or not to validate
the associate tree hash. If the response does not contain
a TreeHash, then no checksum will be verified.
"""
response = self.vault.layer1.get_job_output(self.vault.name,
self.id,
byte_range)
if validate_checksum and 'TreeHash' in response:
data = response.read()
actual_tree_hash = tree_hash_from_str(data)
if response['TreeHash'] != actual_tree_hash:
raise TreeHashDoesNotMatchError(
"The calculated tree hash %s does not match the "
"expected tree hash %s for the byte range %s" % (
actual_tree_hash, response['TreeHash'], byte_range))
return response
def _calc_num_chunks(self, chunk_size):
return int(math.ceil(self.archive_size / float(chunk_size)))
def download_to_file(self, filename, chunk_size=DefaultPartSize,
verify_hashes=True, retry_exceptions=(socket.error,)):
"""Download an archive to a file by name.
:type filename: str
:param filename: The name of the file where the archive
contents will be saved.
:type chunk_size: int
:param chunk_size: The chunk size to use when downloading
the archive.
:type verify_hashes: bool
:param verify_hashes: Indicates whether or not to verify
the tree hashes for each downloaded chunk.
"""
num_chunks = self._calc_num_chunks(chunk_size)
with open(filename, 'wb') as output_file:
self._download_to_fileob(output_file, num_chunks, chunk_size,
verify_hashes, retry_exceptions)
def download_to_fileobj(self, output_file, chunk_size=DefaultPartSize,
verify_hashes=True,
retry_exceptions=(socket.error,)):
"""Download an archive to a file object.
:type output_file: file
:param output_file: The file object where the archive
contents will be saved.
:type chunk_size: int
:param chunk_size: The chunk size to use when downloading
the archive.
:type verify_hashes: bool
:param verify_hashes: Indicates whether or not to verify
the tree hashes for each downloaded chunk.
"""
num_chunks = self._calc_num_chunks(chunk_size)
self._download_to_fileob(output_file, num_chunks, chunk_size,
verify_hashes, retry_exceptions)
def _download_to_fileob(self, fileobj, num_chunks, chunk_size, verify_hashes,
retry_exceptions):
for i in range(num_chunks):
byte_range = ((i * chunk_size), ((i + 1) * chunk_size) - 1)
data, expected_tree_hash = self._download_byte_range(
byte_range, retry_exceptions)
if verify_hashes:
actual_tree_hash = tree_hash_from_str(data)
if expected_tree_hash != actual_tree_hash:
raise TreeHashDoesNotMatchError(
"The calculated tree hash %s does not match the "
"expected tree hash %s for the byte range %s" % (
actual_tree_hash, expected_tree_hash, byte_range))
fileobj.write(data)
    def _download_byte_range(self, byte_range, retry_exceptions):
        # You can occasionally get socket.errors when downloading
        # chunks from Glacier, so each chunk can be retried up
        # to 5 times.
        last_exception = None
        for _ in range(5):
            try:
                response = self.get_output(byte_range)
                data = response.read()
                expected_tree_hash = response['TreeHash']
                return data, expected_tree_hash
            except retry_exceptions as e:
                # Remember the failure; on Python 3 the exception variable
                # is cleared once the except block exits.
                last_exception = e
        raise DownloadArchiveError("There was an error downloading "
                                   "byte range %s: %s" % (byte_range,
                                                          last_exception))
|
GitHubFriction/pywinrm
|
refs/heads/master
|
winrm/tests/kerberos/test_cffi_full.py
|
4
|
import os
from cffi import FFI
ffi = FFI()
ffi.cdef("""
// Original source could be found here:
// https://github.com/krb5/krb5/blob/master/src/lib/gssapi/generic/gssapi.hin
/* -*- mode: c; indent-tabs-mode: nil -*- */
/*
* Copyright 1993 by OpenVision Technologies, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appears in all copies and
* that both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of OpenVision not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. OpenVision makes no
* representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied warranty.
*
* OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* $Id$
*/
/*
* First, define the three platform-dependent pointer types.
*/
struct gss_name_struct;
typedef struct gss_name_struct * gss_name_t;
struct gss_cred_id_struct;
typedef struct gss_cred_id_struct * gss_cred_id_t;
struct gss_ctx_id_struct;
typedef struct gss_ctx_id_struct * gss_ctx_id_t;
/*
* The following type must be defined as the smallest natural unsigned integer
* supported by the platform that has at least 32 bits of precision.
*/
typedef uint32_t gss_uint32;
typedef int32_t gss_int32;
// TODO Reference implementation defines gss_OID_desc, *gss_OID using
// using the definition for OM_object identifier.
typedef gss_uint32 OM_uint32;
typedef struct gss_OID_desc_struct {
OM_uint32 length;
void *elements;
} gss_OID_desc, *gss_OID;
typedef struct gss_OID_set_desc_struct {
size_t count;
gss_OID elements;
} gss_OID_set_desc, *gss_OID_set;
typedef struct gss_buffer_desc_struct {
size_t length;
void *value;
} gss_buffer_desc, *gss_buffer_t;
typedef struct gss_channel_bindings_struct {
OM_uint32 initiator_addrtype;
gss_buffer_desc initiator_address;
OM_uint32 acceptor_addrtype;
gss_buffer_desc acceptor_address;
gss_buffer_desc application_data;
} *gss_channel_bindings_t;
/*
* For now, define a QOP-type as an OM_uint32 (pending resolution of ongoing
* discussions).
*/
typedef OM_uint32 gss_qop_t;
typedef int gss_cred_usage_t;
/*
* Flag bits for context-level services.
*/
#define GSS_C_DELEG_FLAG ...
#define GSS_C_MUTUAL_FLAG ...
#define GSS_C_REPLAY_FLAG ...
#define GSS_C_SEQUENCE_FLAG ...
#define GSS_C_CONF_FLAG ...
#define GSS_C_INTEG_FLAG ...
#define GSS_C_ANON_FLAG ...
#define GSS_C_PROT_READY_FLAG ...
#define GSS_C_TRANS_FLAG ...
#define GSS_C_DELEG_POLICY_FLAG ...
/*
* Credential usage options
*/
#define GSS_C_BOTH ...
#define GSS_C_INITIATE ...
#define GSS_C_ACCEPT ...
/*
* Status code types for gss_display_status
*/
#define GSS_C_GSS_CODE ...
#define GSS_C_MECH_CODE ...
/*
* The constant definitions for channel-bindings address families
*/
#define GSS_C_AF_UNSPEC ...
#define GSS_C_AF_LOCAL ...
#define GSS_C_AF_INET ...
#define GSS_C_AF_IMPLINK ...
#define GSS_C_AF_PUP ...
#define GSS_C_AF_CHAOS ...
#define GSS_C_AF_NS ...
#define GSS_C_AF_NBS ...
#define GSS_C_AF_ECMA ...
#define GSS_C_AF_DATAKIT ...
#define GSS_C_AF_CCITT ...
#define GSS_C_AF_SNA ...
#define GSS_C_AF_DECnet ...
#define GSS_C_AF_DLI ...
#define GSS_C_AF_LAT ...
#define GSS_C_AF_HYLINK ...
#define GSS_C_AF_APPLETALK ...
#define GSS_C_AF_BSC ...
#define GSS_C_AF_DSS ...
#define GSS_C_AF_OSI ...
#define GSS_C_AF_NETBIOS ...
#define GSS_C_AF_X25 ...
#define GSS_C_AF_NULLADDR ...
/*
* Various Null values.
*/
#define GSS_C_NO_NAME ...
#define GSS_C_NO_BUFFER ...
#define GSS_C_NO_OID ...
#define GSS_C_NO_OID_SET ...
#define GSS_C_NO_CONTEXT ...
#define GSS_C_NO_CREDENTIAL ...
#define GSS_C_NO_CHANNEL_BINDINGS ...
// NOTE: CFFI supports only integer macros, so we declare value as const
// FIXME: Unable to compile declaration below
// static const gss_buffer_t GSS_C_EMPTY_BUFFER;
/*
* Some alternate names for a couple of the above values. These are defined
* for V1 compatibility.
*/
#define GSS_C_NULL_OID ...
#define GSS_C_NULL_OID_SET ...
/*
* Define the default Quality of Protection for per-message services. Note
* that an implementation that offers multiple levels of QOP may either reserve
* a value (for example zero, as assumed here) to mean "default protection", or
* alternatively may simply equate GSS_C_QOP_DEFAULT to a specific explicit
* QOP value. However a value of 0 should always be interpreted by a GSSAPI
* implementation as a request for the default protection level.
*/
#define GSS_C_QOP_DEFAULT ...
/*
* Expiration time of 2^32-1 seconds means infinite lifetime for a
* credential or security context
*/
#define GSS_C_INDEFINITE ...
/* Major status codes */
#define GSS_S_COMPLETE ...
/*
* Some "helper" definitions to make the status code macros obvious.
*/
#define GSS_C_CALLING_ERROR_OFFSET ...
#define GSS_C_ROUTINE_ERROR_OFFSET ...
#define GSS_C_SUPPLEMENTARY_OFFSET ...
#define GSS_C_CALLING_ERROR_MASK ...
#define GSS_C_ROUTINE_ERROR_MASK ...
#define GSS_C_SUPPLEMENTARY_MASK ...
/*
* The macros that test status codes for error conditions. Note that the
* GSS_ERROR() macro has changed slightly from the V1 GSSAPI so that it now
* evaluates its argument only once.
* NOTE: CFFI can not parse calling macros but allows declare them as functions
*/
OM_uint32 GSS_CALLING_ERROR(OM_uint32);
OM_uint32 GSS_ROUTINE_ERROR(OM_uint32);
OM_uint32 GSS_SUPPLEMENTARY_INFO(OM_uint32);
OM_uint32 GSS_ERROR(OM_uint32);
/*
* Now the actual status code definitions
*/
/*
* Calling errors:
*/
#define GSS_S_CALL_INACCESSIBLE_READ ...
#define GSS_S_CALL_INACCESSIBLE_WRITE ...
#define GSS_S_CALL_BAD_STRUCTURE ...
/*
* Routine errors:
*/
#define GSS_S_BAD_MECH ...
#define GSS_S_BAD_NAME ...
#define GSS_S_BAD_NAMETYPE ...
#define GSS_S_BAD_BINDINGS ...
#define GSS_S_BAD_STATUS ...
#define GSS_S_BAD_SIG ...
#define GSS_S_NO_CRED ...
#define GSS_S_NO_CONTEXT ...
#define GSS_S_DEFECTIVE_TOKEN ...
#define GSS_S_DEFECTIVE_CREDENTIAL ...
#define GSS_S_CREDENTIALS_EXPIRED ...
#define GSS_S_CONTEXT_EXPIRED ...
#define GSS_S_FAILURE ...
#define GSS_S_BAD_QOP ...
#define GSS_S_UNAUTHORIZED ...
#define GSS_S_UNAVAILABLE ...
#define GSS_S_DUPLICATE_ELEMENT ...
#define GSS_S_NAME_NOT_MN ...
#define GSS_S_BAD_MECH_ATTR ...
/*
* Supplementary info bits:
*/
#define GSS_S_CONTINUE_NEEDED ...
#define GSS_S_DUPLICATE_TOKEN ...
#define GSS_S_OLD_TOKEN ...
#define GSS_S_UNSEQ_TOKEN ...
#define GSS_S_GAP_TOKEN ...
/*
* Finally, function prototypes for the GSSAPI routines.
*/
/* Reserved static storage for GSS_oids. Comments are quotes from RFC 2744.
*
* The implementation must reserve static storage for a
* gss_OID_desc object containing the value
* {10, (void *)"\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x01"},
* corresponding to an object-identifier value of
* {iso(1) member-body(2) United States(840) mit(113554)
* infosys(1) gssapi(2) generic(1) user_name(1)}. The constant
* GSS_C_NT_USER_NAME should be initialized to point
* to that gss_OID_desc.
*/
extern gss_OID GSS_C_NT_USER_NAME;
/*
* The implementation must reserve static storage for a
* gss_OID_desc object containing the value
* {10, (void *)"\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x02"},
* corresponding to an object-identifier value of
* {iso(1) member-body(2) United States(840) mit(113554)
* infosys(1) gssapi(2) generic(1) machine_uid_name(2)}.
* The constant GSS_C_NT_MACHINE_UID_NAME should be
* initialized to point to that gss_OID_desc.
*/
extern gss_OID GSS_C_NT_MACHINE_UID_NAME;
/*
* The implementation must reserve static storage for a
* gss_OID_desc object containing the value
* {10, (void *)"\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x03"},
* corresponding to an object-identifier value of
* {iso(1) member-body(2) United States(840) mit(113554)
* infosys(1) gssapi(2) generic(1) string_uid_name(3)}.
* The constant GSS_C_NT_STRING_UID_NAME should be
* initialized to point to that gss_OID_desc.
*/
extern gss_OID GSS_C_NT_STRING_UID_NAME;
/*
* The implementation must reserve static storage for a
* gss_OID_desc object containing the value
* {6, (void *)"\x2b\x06\x01\x05\x06\x02"},
* corresponding to an object-identifier value of
* {iso(1) org(3) dod(6) internet(1) security(5)
* nametypes(6) gss-host-based-services(2)). The constant
* GSS_C_NT_HOSTBASED_SERVICE_X should be initialized to point
* to that gss_OID_desc. This is a deprecated OID value, and
* implementations wishing to support hostbased-service names
* should instead use the GSS_C_NT_HOSTBASED_SERVICE OID,
* defined below, to identify such names;
* GSS_C_NT_HOSTBASED_SERVICE_X should be accepted a synonym
* for GSS_C_NT_HOSTBASED_SERVICE when presented as an input
* parameter, but should not be emitted by GSS-API
* implementations
*/
extern gss_OID GSS_C_NT_HOSTBASED_SERVICE_X;
/*
* The implementation must reserve static storage for a
* gss_OID_desc object containing the value
* {10, (void *)"\x2a\x86\x48\x86\xf7\x12"
* "\x01\x02\x01\x04"}, corresponding to an
* object-identifier value of {iso(1) member-body(2)
* Unites States(840) mit(113554) infosys(1) gssapi(2)
* generic(1) service_name(4)}. The constant
* GSS_C_NT_HOSTBASED_SERVICE should be initialized
* to point to that gss_OID_desc.
*/
extern gss_OID GSS_C_NT_HOSTBASED_SERVICE;
/*
* The implementation must reserve static storage for a
* gss_OID_desc object containing the value
* {6, (void *)"\x2b\x06\01\x05\x06\x03"},
* corresponding to an object identifier value of
* {1(iso), 3(org), 6(dod), 1(internet), 5(security),
* 6(nametypes), 3(gss-anonymous-name)}. The constant
* and GSS_C_NT_ANONYMOUS should be initialized to point
* to that gss_OID_desc.
*/
extern gss_OID GSS_C_NT_ANONYMOUS;
/*
* The implementation must reserve static storage for a
* gss_OID_desc object containing the value
* {6, (void *)"\x2b\x06\x01\x05\x06\x04"},
* corresponding to an object-identifier value of
* {1(iso), 3(org), 6(dod), 1(internet), 5(security),
* 6(nametypes), 4(gss-api-exported-name)}. The constant
* GSS_C_NT_EXPORT_NAME should be initialized to point
* to that gss_OID_desc.
*/
extern gss_OID GSS_C_NT_EXPORT_NAME;
/* Function Prototypes */
OM_uint32
gss_acquire_cred(
OM_uint32 *, /* minor_status */
gss_name_t, /* desired_name */
OM_uint32, /* time_req */
gss_OID_set, /* desired_mechs */
gss_cred_usage_t, /* cred_usage */
gss_cred_id_t *, /* output_cred_handle */
gss_OID_set *, /* actual_mechs */
OM_uint32 *); /* time_rec */
OM_uint32
gss_release_cred(
OM_uint32 *, /* minor_status */
gss_cred_id_t *); /* cred_handle */
OM_uint32
gss_init_sec_context(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* claimant_cred_handle */
gss_ctx_id_t *, /* context_handle */
gss_name_t, /* target_name */
gss_OID, /* mech_type (used to be const) */
OM_uint32, /* req_flags */
OM_uint32, /* time_req */
gss_channel_bindings_t, /* input_chan_bindings */
gss_buffer_t, /* input_token */
gss_OID *, /* actual_mech_type */
gss_buffer_t, /* output_token */
OM_uint32 *, /* ret_flags */
OM_uint32 *); /* time_rec */
OM_uint32
gss_accept_sec_context(
OM_uint32 *, /* minor_status */
gss_ctx_id_t *, /* context_handle */
gss_cred_id_t, /* acceptor_cred_handle */
gss_buffer_t, /* input_token_buffer */
gss_channel_bindings_t, /* input_chan_bindings */
gss_name_t *, /* src_name */
gss_OID *, /* mech_type */
gss_buffer_t, /* output_token */
OM_uint32 *, /* ret_flags */
OM_uint32 *, /* time_rec */
gss_cred_id_t *); /* delegated_cred_handle */
OM_uint32
gss_process_context_token(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t); /* token_buffer */
OM_uint32
gss_delete_sec_context(
OM_uint32 *, /* minor_status */
gss_ctx_id_t *, /* context_handle */
gss_buffer_t); /* output_token */
OM_uint32
gss_context_time(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
OM_uint32 *); /* time_rec */
/* New for V2 */
OM_uint32
gss_get_mic(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t, /* qop_req */
gss_buffer_t, /* message_buffer */
gss_buffer_t); /* message_token */
/* New for V2 */
OM_uint32
gss_verify_mic(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t, /* message_buffer */
gss_buffer_t, /* message_token */
gss_qop_t * /* qop_state */
);
/* New for V2 */
OM_uint32
gss_wrap(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
gss_buffer_t, /* input_message_buffer */
int *, /* conf_state */
gss_buffer_t); /* output_message_buffer */
/* New for V2 */
OM_uint32
gss_unwrap(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t, /* input_message_buffer */
gss_buffer_t, /* output_message_buffer */
int *, /* conf_state */
gss_qop_t *); /* qop_state */
OM_uint32
gss_display_status(
OM_uint32 *, /* minor_status */
OM_uint32, /* status_value */
int, /* status_type */
gss_OID, /* mech_type (used to be const) */
OM_uint32 *, /* message_context */
gss_buffer_t); /* status_string */
OM_uint32
gss_indicate_mechs(
OM_uint32 *, /* minor_status */
gss_OID_set *); /* mech_set */
OM_uint32
gss_compare_name(
OM_uint32 *, /* minor_status */
gss_name_t, /* name1 */
gss_name_t, /* name2 */
int *); /* name_equal */
OM_uint32
gss_display_name(
OM_uint32 *, /* minor_status */
gss_name_t, /* input_name */
gss_buffer_t, /* output_name_buffer */
gss_OID *); /* output_name_type */
OM_uint32
gss_import_name(
OM_uint32 *, /* minor_status */
gss_buffer_t, /* input_name_buffer */
gss_OID, /* input_name_type(used to be const) */
gss_name_t *); /* output_name */
OM_uint32
gss_release_name(
OM_uint32 *, /* minor_status */
gss_name_t *); /* input_name */
OM_uint32
gss_release_buffer(
OM_uint32 *, /* minor_status */
gss_buffer_t); /* buffer */
OM_uint32
gss_release_oid_set(
OM_uint32 *, /* minor_status */
gss_OID_set *); /* set */
OM_uint32
gss_inquire_cred(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* cred_handle */
gss_name_t *, /* name */
OM_uint32 *, /* lifetime */
gss_cred_usage_t *, /* cred_usage */
gss_OID_set *); /* mechanisms */
/* Last argument new for V2 */
OM_uint32
gss_inquire_context(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_name_t *, /* src_name */
gss_name_t *, /* targ_name */
OM_uint32 *, /* lifetime_rec */
gss_OID *, /* mech_type */
OM_uint32 *, /* ctx_flags */
int *, /* locally_initiated */
int *); /* open */
/* New for V2 */
OM_uint32
gss_wrap_size_limit(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
OM_uint32, /* req_output_size */
OM_uint32 *); /* max_input_size */
/* FIXME: gss_import_name_object, gss_export_name_object declarations
* was excluded because libgssapi_krb5.so does not export this function
*/
/* New for V2 */
OM_uint32
gss_add_cred(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* input_cred_handle */
gss_name_t, /* desired_name */
gss_OID, /* desired_mech */
gss_cred_usage_t, /* cred_usage */
OM_uint32, /* initiator_time_req */
OM_uint32, /* acceptor_time_req */
gss_cred_id_t *, /* output_cred_handle */
gss_OID_set *, /* actual_mechs */
OM_uint32 *, /* initiator_time_rec */
OM_uint32 *); /* acceptor_time_rec */
/* New for V2 */
OM_uint32
gss_inquire_cred_by_mech(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* cred_handle */
gss_OID, /* mech_type */
gss_name_t *, /* name */
OM_uint32 *, /* initiator_lifetime */
OM_uint32 *, /* acceptor_lifetime */
gss_cred_usage_t *); /* cred_usage */
/* New for V2 */
OM_uint32
gss_export_sec_context(
OM_uint32 *, /* minor_status */
gss_ctx_id_t *, /* context_handle */
gss_buffer_t); /* interprocess_token */
/* New for V2 */
OM_uint32
gss_import_sec_context(
OM_uint32 *, /* minor_status */
gss_buffer_t, /* interprocess_token */
gss_ctx_id_t *); /* context_handle */
/* New for V2 */
OM_uint32
gss_release_oid(
OM_uint32 *, /* minor_status */
gss_OID *); /* oid */
/* New for V2 */
OM_uint32
gss_create_empty_oid_set(
OM_uint32 *, /* minor_status */
gss_OID_set *); /* oid_set */
/* New for V2 */
OM_uint32
gss_add_oid_set_member(
OM_uint32 *, /* minor_status */
gss_OID, /* member_oid */
gss_OID_set *); /* oid_set */
/* New for V2 */
OM_uint32
gss_test_oid_set_member(
OM_uint32 *, /* minor_status */
gss_OID, /* member */
gss_OID_set, /* set */
int *); /* present */
/* New for V2 */
OM_uint32
gss_str_to_oid(
OM_uint32 *, /* minor_status */
gss_buffer_t, /* oid_str */
gss_OID *); /* oid */
/* New for V2 */
OM_uint32
gss_oid_to_str(
OM_uint32 *, /* minor_status */
gss_OID, /* oid */
gss_buffer_t); /* oid_str */
/* New for V2 */
OM_uint32
gss_inquire_names_for_mech(
OM_uint32 *, /* minor_status */
gss_OID, /* mechanism */
gss_OID_set *); /* name_types */
/* New for V2 */
OM_uint32
gss_inquire_mechs_for_name(
OM_uint32 *, /* minor_status */
const gss_name_t, /* input_name */
gss_OID_set *); /* mech_types */
/*
* The following routines are obsolete variants of gss_get_mic, gss_wrap,
* gss_verify_mic and gss_unwrap. They should be provided by GSSAPI V2
* implementations for backwards compatibility with V1 applications. Distinct
* entrypoints (as opposed to #defines) should be provided, to allow GSSAPI
* V1 applications to link against GSSAPI V2 implementations.
*/
OM_uint32
gss_sign(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* qop_req */
gss_buffer_t, /* message_buffer */
gss_buffer_t); /* message_token */
OM_uint32
gss_verify(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t, /* message_buffer */
gss_buffer_t, /* token_buffer */
int *); /* qop_state */
OM_uint32
gss_seal(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
int, /* qop_req */
gss_buffer_t, /* input_message_buffer */
int *, /* conf_state */
gss_buffer_t); /* output_message_buffer */
OM_uint32
gss_unseal(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t, /* input_message_buffer */
gss_buffer_t, /* output_message_buffer */
int *, /* conf_state */
int *); /* qop_state */
/* New for V2 */
OM_uint32
gss_export_name(
OM_uint32 *, /* minor_status */
const gss_name_t, /* input_name */
gss_buffer_t); /* exported_name */
/* New for V2 */
OM_uint32
gss_duplicate_name(
OM_uint32 *, /* minor_status */
const gss_name_t, /* input_name */
gss_name_t *); /* dest_name */
/* New for V2 */
OM_uint32
gss_canonicalize_name(
OM_uint32 *, /* minor_status */
const gss_name_t, /* input_name */
const gss_OID, /* mech_type */
gss_name_t *); /* output_name */
/* RFC 4401 */
#define GSS_C_PRF_KEY_FULL ...
#define GSS_C_PRF_KEY_PARTIAL ...
OM_uint32
gss_pseudo_random(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context */
int, /* prf_key */
const gss_buffer_t, /* prf_in */
ssize_t, /* desired_output_len */
gss_buffer_t); /* prf_out */
OM_uint32
gss_store_cred(
OM_uint32 *, /* minor_status */
const gss_cred_id_t,/* input_cred_handle */
gss_cred_usage_t, /* input_usage */
const gss_OID, /* desired_mech */
OM_uint32, /* overwrite_cred */
OM_uint32, /* default_cred */
gss_OID_set *, /* elements_stored */
gss_cred_usage_t *);/* cred_usage_stored */
OM_uint32
gss_set_neg_mechs(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* cred_handle */
const gss_OID_set); /* mech_set */
/* XXXX these are not part of the GSSAPI C bindings! (but should be)
* NOTE: CFFI can not parse calling macros but allows declare them as functions
*/
OM_uint32 GSS_CALLING_ERROR_FIELD(OM_uint32);
OM_uint32 GSS_ROUTINE_ERROR_FIELD(OM_uint32);
OM_uint32 GSS_SUPPLEMENTARY_INFO_FIELD(OM_uint32);
/* XXXX This is a necessary evil until the spec is fixed */
#define GSS_S_CRED_UNAVAIL ...
/*
* RFC 5587
*/
typedef const gss_buffer_desc *gss_const_buffer_t;
typedef const struct gss_channel_bindings_struct *gss_const_channel_bindings_t;
typedef const struct gss_ctx_id_struct gss_const_ctx_id_t;
typedef const struct gss_cred_id_struct gss_const_cred_id_t;
typedef const struct gss_name_struct gss_const_name_t;
typedef const gss_OID_desc *gss_const_OID;
typedef const gss_OID_set_desc *gss_const_OID_set;
OM_uint32
gss_indicate_mechs_by_attrs(
OM_uint32 *, /* minor_status */
gss_const_OID_set, /* desired_mech_attrs */
gss_const_OID_set, /* except_mech_attrs */
gss_const_OID_set, /* critical_mech_attrs */
gss_OID_set *); /* mechs */
OM_uint32
gss_inquire_attrs_for_mech(
OM_uint32 *, /* minor_status */
gss_const_OID, /* mech */
gss_OID_set *, /* mech_attrs */
gss_OID_set *); /* known_mech_attrs */
OM_uint32
gss_display_mech_attr(
OM_uint32 *, /* minor_status */
gss_const_OID, /* mech_attr */
gss_buffer_t, /* name */
gss_buffer_t, /* short_desc */
gss_buffer_t); /* long_desc */
extern gss_const_OID GSS_C_MA_MECH_CONCRETE;
extern gss_const_OID GSS_C_MA_MECH_PSEUDO;
extern gss_const_OID GSS_C_MA_MECH_COMPOSITE;
extern gss_const_OID GSS_C_MA_MECH_NEGO;
extern gss_const_OID GSS_C_MA_MECH_GLUE;
extern gss_const_OID GSS_C_MA_NOT_MECH;
extern gss_const_OID GSS_C_MA_DEPRECATED;
extern gss_const_OID GSS_C_MA_NOT_DFLT_MECH;
extern gss_const_OID GSS_C_MA_ITOK_FRAMED;
extern gss_const_OID GSS_C_MA_AUTH_INIT;
extern gss_const_OID GSS_C_MA_AUTH_TARG;
extern gss_const_OID GSS_C_MA_AUTH_INIT_INIT;
extern gss_const_OID GSS_C_MA_AUTH_TARG_INIT;
extern gss_const_OID GSS_C_MA_AUTH_INIT_ANON;
extern gss_const_OID GSS_C_MA_AUTH_TARG_ANON;
extern gss_const_OID GSS_C_MA_DELEG_CRED;
extern gss_const_OID GSS_C_MA_INTEG_PROT;
extern gss_const_OID GSS_C_MA_CONF_PROT;
extern gss_const_OID GSS_C_MA_MIC;
extern gss_const_OID GSS_C_MA_WRAP;
extern gss_const_OID GSS_C_MA_PROT_READY;
extern gss_const_OID GSS_C_MA_REPLAY_DET;
extern gss_const_OID GSS_C_MA_OOS_DET;
extern gss_const_OID GSS_C_MA_CBINDINGS;
extern gss_const_OID GSS_C_MA_PFS;
extern gss_const_OID GSS_C_MA_COMPRESS;
extern gss_const_OID GSS_C_MA_CTX_TRANS;
/*
* RFC 5801
*/
OM_uint32
gss_inquire_saslname_for_mech(
OM_uint32 *, /* minor_status */
const gss_OID, /* desired_mech */
gss_buffer_t, /* sasl_mech_name */
gss_buffer_t, /* mech_name */
gss_buffer_t /* mech_description */
);
OM_uint32
gss_inquire_mech_for_saslname(
OM_uint32 *, /* minor_status */
const gss_buffer_t, /* sasl_mech_name */
gss_OID * /* mech_type */
);
""")
C = ffi.verify(
"""
#include <gssapi/gssapi.h>
#include <gssapi/gssapi_generic.h>
#include <gssapi/gssapi_krb5.h>
""",
# include_dirs=['/usr/include/gssapi'], # This is not required
libraries=['gssapi_krb5'])
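# ffi.verify() compiles a small C extension against the headers above; the
# '...' placeholders in the cdef are resolved to their real values at compile
# time, which is why the macros could be declared without explicit constants.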
class GSSInternalError(Exception):
pass
class GSSError(Exception):
pass
class CredentialsCacheNotFound(GSSError):
pass
# TODO find better name
class ServerNotFoundInKerberosDatabase(GSSError):
pass
class KerberosServerNotFound(GSSError):
    """Usually has a message like: Cannot resolve servers for KDC in realm
    'SOME.REALM'"""
    pass
def _gss_buffer_to_str(gss_buffer):
out_str = ffi.string(ffi.cast('char *', gss_buffer.value))
C.gss_release_buffer(ffi.new('OM_uint32 *'), gss_buffer)
return out_str
def _str_to_gss_buffer(in_str):
return ffi.new('gss_buffer_t', [len(in_str), ffi.new('char[]', in_str)])
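# For example, _str_to_gss_buffer('HTTP@server-host') builds the gss_buffer_t
# that gss_import_name() consumes in authenticate_gss_client_init() below.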
def validate_gss_status(major_value, minor_value):
if major_value == 0:
return
minor_status_p = ffi.new('OM_uint32 *')
message_ctx_p = ffi.new('OM_uint32 *')
status_str_buf = ffi.new('gss_buffer_t')
mech_type = ffi.new('gss_OID', [C.GSS_C_NO_OID])
major_status = C.gss_display_status(
minor_status_p, major_value, C.GSS_C_GSS_CODE, mech_type,
message_ctx_p, status_str_buf)
if major_status != 0:
raise GSSInternalError(
'Failed to get GSS major display status for last API call')
major_status_str = _gss_buffer_to_str(status_str_buf)
mech_type = ffi.new('gss_OID', [C.GSS_C_NULL_OID])
major_status = C.gss_display_status(
minor_status_p, minor_value, C.GSS_C_MECH_CODE, mech_type,
message_ctx_p, status_str_buf)
if major_status != 0:
raise GSSInternalError(
'Failed to get GSS minor display status for last API call')
minor_status_str = _gss_buffer_to_str(status_str_buf)
# TODO investigate how to de-allocate memory
assert C.GSS_S_CALL_INACCESSIBLE_READ == 16777216
assert C.GSS_S_CALL_INACCESSIBLE_WRITE == 33554432
assert C.GSS_S_CALL_BAD_STRUCTURE == 50331648
assert C.GSS_S_BAD_MECH == 65536
assert C.GSS_S_BAD_NAME == 131072
assert C.GSS_S_BAD_NAMETYPE == 196608
assert C.GSS_S_BAD_BINDINGS == 262144
assert C.GSS_S_BAD_STATUS == 327680
assert C.GSS_S_BAD_SIG == 393216
assert C.GSS_S_NO_CRED == 458752
assert C.GSS_S_NO_CONTEXT == 524288
assert C.GSS_S_DEFECTIVE_TOKEN == 589824
    # TODO replace the hardcoded integers with constants/flags from cffi
    if major_value == 851968 and minor_value == 2529639107:
        # TODO In addition to the minor_value check, we need to check that
        # the kerberos client is installed.
        raise CredentialsCacheNotFound(
            minor_status_str
            + '. Make sure that the Kerberos Linux client is installed. '
            + 'Run "sudo apt-get install krb5-user" on Debian/Ubuntu.')
elif major_value == 851968 and minor_value == 2529638919:
raise ServerNotFoundInKerberosDatabase(minor_status_str)
    elif major_value == 851968 and minor_value == 2529639132:
        raise KerberosServerNotFound(
            minor_status_str
            + '. Make sure that the Kerberos server is reachable over the '
            + 'network. Try ping or telnet to check that.')
else:
# __main__.GSSError: (('An unsupported mechanism was requested', 65536)
# ,('Unknown error', 0))
# __main__.GSSError: ((
# 'A required output parameter could not be written', 34078720),
# ('Unknown error', 0))
raise GSSError((major_status_str, major_value), (
minor_status_str, minor_value))
def authenticate_gss_client_init(service, principal):
if not service:
raise GSSError('Service was not provided. Please specify '
'service in "service@server-host" format')
if not principal:
raise GSSError('Principal was not provided. Please specify '
'principal in "username@realm" format')
minor_status_p = ffi.new('OM_uint32 *')
service_buf = _str_to_gss_buffer(service)
out_server_name_p = ffi.new('gss_name_t *')
major_status = C.gss_import_name(
minor_status_p, service_buf,
C.GSS_C_NT_HOSTBASED_SERVICE, # ffi.cast('gss_OID', C.GSS_C_NO_OID),
out_server_name_p)
validate_gss_status(major_status, minor_status_p[0])
gss_flags = C.GSS_C_MUTUAL_FLAG | C.GSS_C_SEQUENCE_FLAG | \
C.GSS_C_CONF_FLAG | C.GSS_C_INTEG_FLAG
input_token = ffi.new('gss_buffer_t')
output_token = ffi.new('gss_buffer_t')
ret_flags = ffi.new('OM_uint32 *')
major_status = C.gss_init_sec_context(
minor_status_p, ffi.NULL, ffi.cast(
'gss_ctx_id_t *', C.GSS_C_NO_CONTEXT), out_server_name_p[0],
ffi.cast('gss_OID', C.GSS_C_NO_OID),
gss_flags,
0,
# ffi.cast('gss_channel_bindings_t', C.GSS_C_NO_CHANNEL_BINDINGS),
ffi.NULL,
input_token,
# ffi.cast('gss_OID *', C.GSS_C_NO_OID),
ffi.NULL,
output_token,
ret_flags,
# ffi.cast('OM_uint32 *', C.GSS_C_INDEFINITE))
ffi.NULL)
validate_gss_status(major_status, minor_status_p[0])
if __name__ == '__main__':
krb_service = os.environ.get('WINRM_KRB_SERVICE', 'HTTP@server-host')
krb_principal = os.environ.get('WINRM_KRB_PRINCIPAL', 'username@realm')
# FIXME: Investigate how to pass server name and fix following error
# __main__.GSSError: (('A required output parameter could not be written',
# 34078720), ('Unknown error', 0))
authenticate_gss_client_init(krb_service, krb_principal)
|
fabiopereira/fabiopereirame_jekyll_mickey_blog
|
refs/heads/master
|
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/tools/pretty_sln.py
|
1831
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each project in alphabetical order with its
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
  # If all dependencies are done, we can build this project; otherwise we
  # first build the missing dependencies.
  # This is not infinite-recursion proof.
for dep in deps[project]:
if dep not in built:
BuildProject(dep, built, projects, deps)
print project
built.append(project)
def ParseSolution(solution_file):
# All projects, their clsid and paths.
projects = dict()
# A list of dependencies associated with a project.
dependencies = dict()
# Regular expressions that matches the SLN format.
# The first line of a project definition.
begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
r'}"\) = "(.*)", "(.*)", "(.*)"$')
# The last line of a project definition.
end_project = re.compile('^EndProject$')
# The first line of a dependency list.
begin_dep = re.compile(
r'ProjectSection\(ProjectDependencies\) = postProject$')
# The last line of a dependency list.
end_dep = re.compile('EndProjectSection$')
# A line describing a dependency.
dep_line = re.compile(' *({.*}) = ({.*})$')
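  # A project definition line matched by begin_project looks like (the name,
  # path and trailing GUID are illustrative):
  #   Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "base", "base.vcproj", "{GUID}"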
in_deps = False
solution = open(solution_file)
for line in solution:
results = begin_project.search(line)
if results:
# Hack to remove icu because the diff is too different.
if results.group(1).find('icu') != -1:
continue
# We remove "_gyp" from the names because it helps to diff them.
current_project = results.group(1).replace('_gyp', '')
projects[current_project] = [results.group(2).replace('_gyp', ''),
results.group(3),
results.group(2)]
dependencies[current_project] = []
continue
results = end_project.search(line)
if results:
current_project = None
continue
results = begin_dep.search(line)
if results:
in_deps = True
continue
results = end_dep.search(line)
if results:
in_deps = False
continue
results = dep_line.search(line)
if results and in_deps and current_project:
dependencies[current_project].append(results.group(1))
continue
# Change all dependencies clsid to name instead.
for project in dependencies:
    # For each dependency in this project
new_dep_array = []
for dep in dependencies[project]:
      # Look for the project name matching this clsid
for project_info in projects:
if projects[project_info][1] == dep:
new_dep_array.append(project_info)
dependencies[project] = sorted(new_dep_array)
return (projects, dependencies)
def PrintDependencies(projects, deps):
print "---------------------------------------"
print "Dependencies for all projects"
print "---------------------------------------"
print "-- --"
for (project, dep_list) in sorted(deps.items()):
print "Project : %s" % project
print "Path : %s" % projects[project][0]
if dep_list:
for dep in dep_list:
print " - %s" % dep
print ""
print "-- --"
def PrintBuildOrder(projects, deps):
print "---------------------------------------"
print "Build order "
print "---------------------------------------"
print "-- --"
built = []
for (project, _) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print "-- --"
def PrintVCProj(projects):
for project in projects:
print "-------------------------------------"
print "-------------------------------------"
print project
print project
print project
print "-------------------------------------"
print "-------------------------------------"
project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
projects[project][2]))
pretty = pretty_vcproj
argv = [ '',
project_path,
'$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
]
argv.extend(sys.argv[3:])
pretty.main(argv)
def main():
  # Check that we were given at least one parameter: the .sln file path.
if len(sys.argv) < 2:
print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
return 1
(projects, deps) = ParseSolution(sys.argv[1])
PrintDependencies(projects, deps)
PrintBuildOrder(projects, deps)
if '--recursive' in sys.argv:
PrintVCProj(projects)
return 0
if __name__ == '__main__':
sys.exit(main())
|
glenn124f/treeherder
|
refs/heads/master
|
treeherder/perf/migrations/0009_refactor_perfalerts.py
|
3
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('model', '0008__add_best_fields_to_failureline'),
('perf', '0008_delete_perfalerts'),
]
operations = [
migrations.CreateModel(
name='PerformanceAlert',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('is_regression', models.BooleanField()),
('status', models.IntegerField(default=0, choices=[(0, b'Untriaged'), (1, b'Downstream'), (2, b'Reassigned'), (3, b'Invalid'), (4, b'Acknowledged')])),
('amount_pct', models.FloatField(help_text=b'Amount in percentage that series has changed')),
('amount_abs', models.FloatField(help_text=b'Absolute amount that series has changed')),
('prev_value', models.FloatField(help_text=b'Previous value of series before change')),
('new_value', models.FloatField(help_text=b'New value of series after change')),
('t_value', models.FloatField(help_text=b"t value out of analysis indicating confidence that change is 'real'")),
],
options={
'db_table': 'performance_alert',
},
),
migrations.CreateModel(
name='PerformanceAlertSummary',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('prev_result_set_id', models.PositiveIntegerField(null=True)),
('result_set_id', models.PositiveIntegerField()),
('last_updated', models.DateTimeField(db_index=True)),
('status', models.IntegerField(default=0, choices=[(0, b'Untriaged'), (1, b'Downstream'), (3, b'Invalid'), (4, b'Improvement'), (5, b'Investigating'), (6, b"Won't fix"), (7, b'Resolved')])),
('bug_number', models.PositiveIntegerField(null=True)),
('framework', models.ForeignKey(to='perf.PerformanceFramework', null=True)),
('repository', models.ForeignKey(to='model.Repository')),
],
options={
'db_table': 'performance_alert_summary',
},
),
migrations.AddField(
model_name='performancealert',
name='related_summary',
field=models.ForeignKey(related_name='related_alerts', to='perf.PerformanceAlertSummary', null=True),
),
migrations.AddField(
model_name='performancealert',
name='series_signature',
field=models.ForeignKey(to='perf.PerformanceSignature'),
),
migrations.AddField(
model_name='performancealert',
name='summary',
field=models.ForeignKey(related_name='alerts', to='perf.PerformanceAlertSummary'),
),
migrations.AlterUniqueTogether(
name='performancealertsummary',
unique_together=set([('repository', 'framework', 'prev_result_set_id', 'result_set_id')]),
),
migrations.AlterUniqueTogether(
name='performancealert',
unique_together=set([('summary', 'series_signature')]),
),
]
|
cloudera/hue
|
refs/heads/master
|
desktop/core/ext-py/django-openid-auth-0.14/django_openid_auth/management/commands/__init__.py
|
100
|
# django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2009-2013 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
neharejanjeva/techstitution
|
refs/heads/master
|
app/logs/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/jpcntx.py
|
1776
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .compat import wrap_ord
NUM_OF_CATEGORY = 6
DONT_KNOW = -1
ENOUGH_REL_THRESHOLD = 100
MAX_REL_THRESHOLD = 1000
MINIMUM_DATA_THRESHOLD = 4
# This is the hiragana 2-char sequence table; the number in each cell represents the frequency category of that sequence.
jp2CharContext = (
(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
)
class JapaneseContextAnalysis:
def __init__(self):
self.reset()
def reset(self):
        self._mTotalRel = 0  # total sequences received
        # category counters; each integer counts sequences in its category
self._mRelSample = [0] * NUM_OF_CATEGORY
# if last byte in current buffer is not the last byte of a character,
# we need to know how many bytes to skip in next buffer
self._mNeedToSkipCharNum = 0
self._mLastCharOrder = -1 # The order of previous char
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
def feed(self, aBuf, aLen):
if self._mDone:
return
        # The buffer we got is byte oriented, and a character may span
        # more than one buffer. If the last one or two bytes of the previous
        # buffer did not complete a character, we record how many bytes are
        # needed to finish it and skip those bytes here. We could buffer the
        # partial bytes and analyse the character once it is complete, but a
        # single character makes little difference, so simply skipping it
        # simplifies the logic and improves performance.
i = self._mNeedToSkipCharNum
while i < aLen:
order, charLen = self.get_order(aBuf[i:i + 2])
i += charLen
if i > aLen:
self._mNeedToSkipCharNum = i - aLen
self._mLastCharOrder = -1
else:
if (order != -1) and (self._mLastCharOrder != -1):
self._mTotalRel += 1
if self._mTotalRel > MAX_REL_THRESHOLD:
self._mDone = True
break
self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1
self._mLastCharOrder = order
def got_enough_data(self):
return self._mTotalRel > ENOUGH_REL_THRESHOLD
def get_confidence(self):
# This is just one way to calculate confidence. It works well for me.
if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
            # use float() so the ratio survives Python 2 integer division
            return (self._mTotalRel - self._mRelSample[0]) / float(self._mTotalRel)
else:
return DONT_KNOW
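    # A worked example with hypothetical counts: _mTotalRel == 200 with
    # _mRelSample[0] == 20 (sequences in the lowest frequency category)
    # gives a confidence of (200 - 20) / 200.0 == 0.9.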
def get_order(self, aBuf):
return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
    def __init__(self):
        # initialise the inherited counters before recording the charset name
        JapaneseContextAnalysis.__init__(self)
        self.charset_name = "SHIFT_JIS"
def get_charset_name(self):
return self.charset_name
def get_order(self, aBuf):
if not aBuf:
return -1, 1
# find out current char's byte length
first_char = wrap_ord(aBuf[0])
if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)):
charLen = 2
if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
self.charset_name = "CP932"
else:
charLen = 1
# return its order if it is hiragana
if len(aBuf) > 1:
second_char = wrap_ord(aBuf[1])
            # hiragana lead byte in Shift_JIS is 0x82; the original C source
            # compared against octal '\202', which a port mistook for decimal
            if (first_char == 0x82) and (0x9F <= second_char <= 0xF1):
return second_char - 0x9F, charLen
return -1, charLen
class EUCJPContextAnalysis(JapaneseContextAnalysis):
def get_order(self, aBuf):
if not aBuf:
return -1, 1
# find out current char's byte length
first_char = wrap_ord(aBuf[0])
if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE):
charLen = 2
elif first_char == 0x8F:
charLen = 3
else:
charLen = 1
# return its order if it is hiragana
if len(aBuf) > 1:
second_char = wrap_ord(aBuf[1])
if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3):
return second_char - 0xA1, charLen
return -1, charLen
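# A minimal usage sketch (an editorial illustration, not part of the
# original module; kept in comments because the relative import above
# prevents running this file directly). The sample text is an arbitrary
# assumption chosen to be hiragana-heavy.
#
#     analyser = SJISContextAnalysis()
#     data = u'\u306e\u307f'.encode('shift_jis') * 100
#     analyser.feed(data, len(data))
#     if analyser.got_enough_data():
#         confidence = analyser.get_confidence()  # a float in [0, 1]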
# flake8: noqa
|
wdaher/zulip
|
refs/heads/master
|
confirmation/util.py
|
126
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2008, Jarek Zgoda <jarek.zgoda@gmail.com>
__revision__ = '$Id: util.py 3 2008-11-18 07:33:52Z jarek.zgoda $'
from django.conf import settings
def get_status_field(app_label, model_name):
model = '%s.%s' % (app_label, model_name)
mapping = getattr(settings, 'STATUS_FIELDS', {})
return mapping.get(model, 'status')
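# Illustration with assumed settings (hypothetical, not part of the original
# file): given STATUS_FIELDS = {'confirmation.confirmation': 'state'},
# get_status_field('confirmation', 'confirmation') returns 'state', while any
# unmapped model falls back to the default 'status' column name.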
|
lduarte1991/edx-platform
|
refs/heads/master
|
common/djangoapps/student/tests/test_reset_password.py
|
4
|
"""
Test the various password reset flows
"""
import json
import re
import unittest
import ddt
from django.conf import settings
from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.utils.http import int_to_base36
from edx_oauth2_provider.tests.factories import AccessTokenFactory, ClientFactory, RefreshTokenFactory
from mock import Mock, patch
from oauth2_provider import models as dot_models
from provider.oauth2 import models as dop_models
from openedx.core.djangoapps.oauth_dispatch.tests import factories as dot_factories
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.tests.factories import UserFactory
from student.tests.test_email import mock_render_to_string
from student.views import SETTING_CHANGE_INITIATED, password_reset, password_reset_confirm_wrapper
from util.testing import EventTestMixin
from .test_configuration_overrides import fake_get_value
@unittest.skipUnless(
settings.ROOT_URLCONF == "lms.urls",
"reset password tests should only run in LMS"
)
@ddt.ddt
class ResetPasswordTests(EventTestMixin, CacheIsolationTestCase):
""" Tests that clicking reset password sends email, and doesn't activate the user
"""
request_factory = RequestFactory()
ENABLED_CACHES = ['default']
def setUp(self):
super(ResetPasswordTests, self).setUp('student.views.tracker')
self.user = UserFactory.create()
self.user.is_active = False
self.user.save()
self.token = default_token_generator.make_token(self.user)
self.uidb36 = int_to_base36(self.user.id)
self.user_bad_passwd = UserFactory.create()
self.user_bad_passwd.is_active = False
self.user_bad_passwd.password = UNUSABLE_PASSWORD_PREFIX
self.user_bad_passwd.save()
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_user_bad_password_reset(self):
"""Tests password reset behavior for user with password marked UNUSABLE_PASSWORD_PREFIX"""
bad_pwd_req = self.request_factory.post('/password_reset/', {'email': self.user_bad_passwd.email})
bad_pwd_resp = password_reset(bad_pwd_req)
# If they've got an unusable password, we return a successful response code
self.assertEquals(bad_pwd_resp.status_code, 200)
obj = json.loads(bad_pwd_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
self.assert_no_events_were_emitted()
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_nonexist_email_password_reset(self):
"""Now test the exception cases with of reset_password called with invalid email."""
bad_email_req = self.request_factory.post('/password_reset/', {'email': self.user.email + "makeItFail"})
bad_email_resp = password_reset(bad_email_req)
# Note: even if the email is bad, we return a successful response code
# This prevents someone potentially trying to "brute-force" find out which
# emails are and aren't registered with edX
self.assertEquals(bad_email_resp.status_code, 200)
obj = json.loads(bad_email_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
self.assert_no_events_were_emitted()
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_password_reset_ratelimited(self):
""" Try (and fail) resetting password 30 times in a row on an non-existant email address """
cache.clear()
for i in xrange(30):
good_req = self.request_factory.post('/password_reset/', {
'email': 'thisdoesnotexist{0}@foo.com'.format(i)
})
good_resp = password_reset(good_req)
self.assertEquals(good_resp.status_code, 200)
# then the rate limiter should kick in and give a HttpForbidden response
bad_req = self.request_factory.post('/password_reset/', {'email': 'thisdoesnotexist@foo.com'})
bad_resp = password_reset(bad_req)
self.assertEquals(bad_resp.status_code, 403)
self.assert_no_events_were_emitted()
cache.clear()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_reset_password_email(self, send_email):
"""Tests contents of reset password email, and that user is not active"""
good_req = self.request_factory.post('/password_reset/', {'email': self.user.email})
good_req.user = self.user
dop_client = ClientFactory()
dop_access_token = AccessTokenFactory(user=self.user, client=dop_client)
RefreshTokenFactory(user=self.user, client=dop_client, access_token=dop_access_token)
dot_application = dot_factories.ApplicationFactory(user=self.user)
dot_access_token = dot_factories.AccessTokenFactory(user=self.user, application=dot_application)
dot_factories.RefreshTokenFactory(user=self.user, application=dot_application, access_token=dot_access_token)
good_resp = password_reset(good_req)
self.assertEquals(good_resp.status_code, 200)
self.assertFalse(dop_models.AccessToken.objects.filter(user=self.user).exists())
self.assertFalse(dop_models.RefreshToken.objects.filter(user=self.user).exists())
self.assertFalse(dot_models.AccessToken.objects.filter(user=self.user).exists())
self.assertFalse(dot_models.RefreshToken.objects.filter(user=self.user).exists())
obj = json.loads(good_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
(subject, msg, from_addr, to_addrs) = send_email.call_args[0]
self.assertIn("Password reset", subject)
self.assertIn("You're receiving this e-mail because you requested a password reset", msg)
self.assertEquals(from_addr, configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL))
self.assertEquals(len(to_addrs), 1)
self.assertIn(self.user.email, to_addrs)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None,
)
        # Test that the user is not active
self.user = User.objects.get(pk=self.user.pk)
self.assertFalse(self.user.is_active)
re.search(r'password_reset_confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/', msg).groupdict()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data((False, 'http://'), (True, 'https://'))
@ddt.unpack
def test_reset_password_email_https(self, is_secure, protocol, send_email):
"""
Tests that the right url protocol is included in the reset password link
"""
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.is_secure = Mock(return_value=is_secure)
req.user = self.user
password_reset(req)
_, msg, _, _ = send_email.call_args[0]
expected_msg = "Please go to the following page and choose a new password:\n\n" + protocol
self.assertIn(expected_msg, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data(('Crazy Awesome Site', 'Crazy Awesome Site'), ('edX', 'edX'))
@ddt.unpack
def test_reset_password_email_site(self, site_name, platform_name, send_email):
"""
        Tests that the right URL domain and platform name are included in
        the reset password email
"""
with patch("django.conf.settings.PLATFORM_NAME", platform_name):
with patch("django.conf.settings.SITE_NAME", site_name):
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.user = self.user
password_reset(req)
_, msg, _, _ = send_email.call_args[0]
reset_msg = "you requested a password reset for your user account at {}"
reset_msg = reset_msg.format(site_name)
self.assertIn(reset_msg, msg)
sign_off = "The {} Team".format(platform_name)
self.assertIn(sign_off, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch("openedx.core.djangoapps.site_configuration.helpers.get_value", fake_get_value)
@patch('django.core.mail.send_mail')
def test_reset_password_email_configuration_override(self, send_email):
"""
        Tests that the right URL domain and platform name are included in
        the reset password email
"""
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.get_host = Mock(return_value=None)
req.user = self.user
password_reset(req)
_, msg, from_addr, _ = send_email.call_args[0]
reset_msg = "you requested a password reset for your user account at {}".format(fake_get_value('platform_name'))
self.assertIn(reset_msg, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
self.assertEqual(from_addr, "no-reply@fakeuniversity.com")
@ddt.data(
('invalidUid', 'invalid_token'),
(None, 'invalid_token'),
('invalidUid', None),
)
@ddt.unpack
def test_reset_password_bad_token(self, uidb36, token):
"""Tests bad token and uidb36 in password reset"""
if uidb36 is None:
uidb36 = self.uidb36
if token is None:
token = self.token
bad_request = self.request_factory.get(
reverse(
"password_reset_confirm",
kwargs={"uidb36": uidb36, "token": token}
)
)
password_reset_confirm_wrapper(bad_request, uidb36, token)
self.user = User.objects.get(pk=self.user.pk)
self.assertFalse(self.user.is_active)
def test_reset_password_good_token(self):
"""Tests good token and uidb36 in password reset"""
url = reverse(
"password_reset_confirm",
kwargs={"uidb36": self.uidb36, "token": self.token}
)
good_reset_req = self.request_factory.get(url)
password_reset_confirm_wrapper(good_reset_req, self.uidb36, self.token)
self.user = User.objects.get(pk=self.user.pk)
self.assertTrue(self.user.is_active)
def test_password_reset_fail(self):
"""Tests that if we provide mismatched passwords, user is not marked as active."""
self.assertFalse(self.user.is_active)
url = reverse(
'password_reset_confirm',
kwargs={'uidb36': self.uidb36, 'token': self.token}
)
request_params = {'new_password1': 'password1', 'new_password2': 'password2'}
confirm_request = self.request_factory.post(url, data=request_params)
# Make a password reset request with mismatching passwords.
resp = password_reset_confirm_wrapper(confirm_request, self.uidb36, self.token)
# Verify the response status code is: 200 with password reset fail and also verify that
# the user is not marked as active.
self.assertEqual(resp.status_code, 200)
self.assertFalse(User.objects.get(pk=self.user.pk).is_active)
@override_settings(PASSWORD_MIN_LENGTH=2)
@override_settings(PASSWORD_MAX_LENGTH=10)
@ddt.data(
{
'password': '1',
'error_message': 'Password: Invalid Length (must be 2 characters or more)',
},
{
'password': '01234567891',
'error_message': 'Password: Invalid Length (must be 10 characters or fewer)'
}
)
def test_password_reset_with_invalid_length(self, password_dict):
"""Tests that if we provide password characters less then PASSWORD_MIN_LENGTH,
or more than PASSWORD_MAX_LENGTH, password reset will fail with error message.
"""
url = reverse(
'password_reset_confirm',
kwargs={'uidb36': self.uidb36, 'token': self.token}
)
request_params = {'new_password1': password_dict['password'], 'new_password2': password_dict['password']}
confirm_request = self.request_factory.post(url, data=request_params)
# Make a password reset request with minimum/maximum passwords characters.
response = password_reset_confirm_wrapper(confirm_request, self.uidb36, self.token)
self.assertEqual(response.context_data['err_msg'], password_dict['error_message'])
@patch('student.views.password_reset_confirm')
@patch("openedx.core.djangoapps.site_configuration.helpers.get_value", fake_get_value)
def test_reset_password_good_token_configuration_override(self, reset_confirm):
"""Tests password reset confirmation page for site configuration override."""
url = reverse(
"password_reset_confirm",
kwargs={"uidb36": self.uidb36, "token": self.token}
)
good_reset_req = self.request_factory.get(url)
password_reset_confirm_wrapper(good_reset_req, self.uidb36, self.token)
confirm_kwargs = reset_confirm.call_args[1]
self.assertEquals(confirm_kwargs['extra_context']['platform_name'], 'Fake University')
self.user = User.objects.get(pk=self.user.pk)
self.assertTrue(self.user.is_active)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data('Crazy Awesome Site', 'edX')
def test_reset_password_email_subject(self, platform_name, send_email):
"""
Tests that the right platform name is included in
the reset password email subject
"""
with patch("django.conf.settings.PLATFORM_NAME", platform_name):
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.user = self.user
password_reset(req)
subj, _, _, _ = send_email.call_args[0]
self.assertIn(platform_name, subj)
|
jmartinezchaine/OpenERP
|
refs/heads/master
|
openerp/addons/project/wizard/project_task_reevaluate.py
|
9
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
from osv import fields, osv
from tools.translate import _
class project_task_reevaluate(osv.osv_memory):
_name = 'project.task.reevaluate'
    def _get_remaining(self, cr, uid, context=None):
if context is None:
context = {}
active_id = context.get('active_id', False)
res = False
if active_id:
res = self.pool.get('project.task').browse(cr, uid, active_id, context=context).remaining_hours
return res
_columns = {
'remaining_hours' : fields.float('Remaining Hours', digits=(16,2), help="Put here the remaining hours required to close the task."),
}
_defaults = {
'remaining_hours': _get_remaining,
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(project_task_reevaluate, self).fields_view_get(cr, uid, view_id, view_type, context, toolbar, submenu=submenu)
users_pool = self.pool.get('res.users')
time_mode = users_pool.browse(cr, uid, uid, context).company_id.project_time_mode_id
time_mode_name = time_mode and time_mode.name or 'Hours'
if time_mode_name in ['Hours','Hour']:
return res
eview = etree.fromstring(res['arch'])
def _check_rec(eview):
if eview.attrib.get('widget','') == 'float_time':
eview.set('widget','float')
for child in eview:
_check_rec(child)
return True
_check_rec(eview)
res['arch'] = etree.tostring(eview)
for field in res['fields']:
if 'Hours' in res['fields'][field]['string']:
res['fields'][field]['string'] = res['fields'][field]['string'].replace('Hours',time_mode_name)
return res
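    # Illustration (hypothetical company setting): with project_time_mode_id
    # named 'Days', every float_time widget in the view arch is rewritten to a
    # plain float widget, and a field labelled 'Remaining Hours' is rendered
    # as 'Remaining Days'.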
def compute_hours(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.browse(cr, uid, ids, context=context)[0]
task_id = context.get('active_id')
if task_id:
task_pool = self.pool.get('project.task')
task_pool.write(cr, uid, task_id, {'remaining_hours': data.remaining_hours})
if context.get('button_reactivate'):
task_pool.do_reopen(cr, uid, [task_id], context=context)
return {'type': 'ir.actions.act_window_close'}
project_task_reevaluate()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
egafford/sahara
|
refs/heads/master
|
sahara/plugins/cdh/abstractversionhandler.py
|
2
|
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
from sahara import conductor
from sahara import context
from sahara.plugins.cdh import db_helper as dh
from sahara.plugins.cdh import health
@six.add_metaclass(abc.ABCMeta)
class AbstractVersionHandler(object):
@abc.abstractmethod
def get_node_processes(self):
return
@abc.abstractmethod
def get_plugin_configs(self):
return
@abc.abstractmethod
def configure_cluster(self, cluster):
return
@abc.abstractmethod
def start_cluster(self, cluster):
return
@abc.abstractmethod
def validate(self, cluster):
return
@abc.abstractmethod
def scale_cluster(self, cluster, instances):
return
@abc.abstractmethod
def decommission_nodes(self, cluster, instances):
return
@abc.abstractmethod
def validate_scaling(self, cluster, existing, additional):
return
@abc.abstractmethod
def get_edp_engine(self, cluster, job_type):
return
@abc.abstractmethod
def get_edp_job_types(self):
return []
@abc.abstractmethod
def get_edp_config_hints(self, job_type):
return {}
@abc.abstractmethod
def get_open_ports(self, node_group):
return
def on_terminate_cluster(self, cluster):
dh.delete_passwords_from_keymanager(cluster)
class BaseVersionHandler(AbstractVersionHandler):
def __init__(self):
# Need to be specified in subclass
self.config_helper = None # config helper
self.cloudera_utils = None # ClouderaUtils
self.deploy = None # to deploy
self.edp_engine = None
self.plugin_utils = None # PluginUtils
self.validation = None # to validate
def get_plugin_configs(self):
return self.config_helper.get_plugin_configs()
def get_node_processes(self):
raise NotImplementedError()
def validate(self, cluster):
self.validation.validate_cluster_creating(cluster)
def configure_cluster(self, cluster):
self.deploy.configure_cluster(cluster)
conductor.API.cluster_update(
context.ctx(), cluster, {
'info':
self.cloudera_utils.get_cloudera_manager_info(cluster)})
def start_cluster(self, cluster):
self.deploy.start_cluster(cluster)
self._set_cluster_info(cluster)
def decommission_nodes(self, cluster, instances):
self.deploy.decommission_cluster(cluster, instances)
def validate_scaling(self, cluster, existing, additional):
self.validation.validate_existing_ng_scaling(cluster, existing)
self.validation.validate_additional_ng_scaling(cluster, additional)
def scale_cluster(self, cluster, instances):
self.deploy.scale_cluster(cluster, instances)
def _set_cluster_info(self, cluster):
info = self.cloudera_utils.get_cloudera_manager_info(cluster)
hue = self.cloudera_utils.pu.get_hue(cluster)
if hue:
info['Hue Dashboard'] = {
'Web UI': 'http://%s:8888' % hue.get_ip_or_dns_name()
}
ctx = context.ctx()
conductor.API.cluster_update(ctx, cluster, {'info': info})
def get_edp_engine(self, cluster, job_type):
oozie_type = self.edp_engine.EdpOozieEngine.get_supported_job_types()
if job_type in oozie_type:
return self.edp_engine.EdpOozieEngine(cluster)
return None
def get_edp_job_types(self):
return self.edp_engine.EdpOozieEngine.get_supported_job_types()
def get_edp_config_hints(self, job_type):
return self.edp_engine.EdpOozieEngine.get_possible_job_config(job_type)
def get_open_ports(self, node_group):
return self.deploy.get_open_ports(node_group)
def recommend_configs(self, cluster, scaling):
self.plugin_utils.recommend_configs(
cluster, self.get_plugin_configs(), scaling)
def get_health_checks(self, cluster):
return health.get_health_checks(cluster, self.cloudera_utils)
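# Sketch of how a concrete plugin version is expected to fill in the
# collaborators above (the module and class names below are illustrative
# assumptions, not actual Sahara code):
#
#     class VersionHandler(BaseVersionHandler):
#         def __init__(self):
#             super(VersionHandler, self).__init__()
#             self.config_helper = config_helper.ConfigHelper()
#             self.cloudera_utils = cloudera_utils.ClouderaUtils()
#             self.deploy = deploy
#             self.edp_engine = edp_engine
#             self.plugin_utils = plugin_utils.PluginUtils()
#             self.validation = validation.Validator()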
|
BanBxda/Sense_4.3
|
refs/heads/master
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
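# Examples (illustrative): `perf script -s sctop.py sshd 5` limits the totals
# to syscalls made by comm 'sshd', refreshed every 5 seconds, while
# `perf script -s sctop.py 5` keeps all comms and only changes the interval.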
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
Johnzero/OE7
|
refs/heads/master
|
OE-debug文件/会计科目/l10n_br/__init__.py
|
430
|
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2009 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import account
|
holmes/intellij-community
|
refs/heads/master
|
python/testData/override/py3k_after.py
|
83
|
class A:
def m(self):
pass
class B(A):
def m(self):
<selection>super().m()</selection>
|
alexlo03/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/eos/eos_eapi.py
|
69
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: eos_eapi
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage and configure Arista EOS eAPI.
requirements:
- "EOS v4.12 or greater"
description:
- Use to enable or disable eAPI access, and set the port and state
of http, https, local_http and unix-socket servers.
- When enabling eAPI access the default is to enable HTTP on port
80, enable HTTPS on port 443, disable local HTTP, and disable
Unix socket server. Use the options listed below to override the
default configuration.
- Requires EOS v4.12 or greater.
extends_documentation_fragment: eos
options:
http:
description:
- The C(http) argument controls the operating state of the HTTP
transport protocol when eAPI is present in the running-config.
When the value is set to True, the HTTP protocol is enabled and
when the value is set to False, the HTTP protocol is disabled.
By default, when eAPI is first configured, the HTTP protocol is
disabled.
type: bool
default: 'no'
aliases: ['enable_http']
http_port:
description:
- Configures the HTTP port that will listen for connections when
the HTTP transport protocol is enabled. This argument accepts
integer values in the valid range of 1 to 65535.
default: 80
https:
description:
- The C(https) argument controls the operating state of the HTTPS
transport protocol when eAPI is present in the running-config.
When the value is set to True, the HTTPS protocol is enabled and
when the value is set to False, the HTTPS protocol is disabled.
By default, when eAPI is first configured, the HTTPS protocol is
enabled.
type: bool
default: 'yes'
aliases: ['enable_https']
https_port:
description:
      - Configures the HTTPS port that will listen for connections when
        the HTTPS transport protocol is enabled. This argument accepts
        integer values in the valid range of 1 to 65535.
default: 443
local_http:
description:
- The C(local_http) argument controls the operating state of the
local HTTP transport protocol when eAPI is present in the
running-config. When the value is set to True, the HTTP protocol
is enabled and restricted to connections from localhost only. When
the value is set to False, the HTTP local protocol is disabled.
      - Note this value is independent of the C(http) argument.
type: bool
default: 'no'
aliases: ['enable_local_http']
local_http_port:
description:
      - Configures the local HTTP port that will listen for connections
        when the local HTTP transport protocol is enabled. This argument
        accepts integer values in the valid range of 1 to 65535.
default: 8080
socket:
description:
- The C(socket) argument controls the operating state of the UNIX
Domain Socket used to receive eAPI requests. When the value
of this argument is set to True, the UDS will listen for eAPI
requests. When the value is set to False, the UDS will not be
available to handle requests. By default when eAPI is first
configured, the UDS is disabled.
type: bool
default: 'no'
aliases: ['enable_socket']
vrf:
description:
- The C(vrf) argument will configure eAPI to listen for connections
in the specified VRF. By default, eAPI transports will listen
for connections in the global table. This value requires the
VRF to already be created otherwise the task will fail.
default: default
version_added: "2.2"
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
version_added: "2.2"
state:
description:
- The C(state) argument controls the operational state of eAPI
on the remote device. When this argument is set to C(started),
eAPI is enabled to receive requests and when this argument is
C(stopped), eAPI is disabled and will not receive requests.
default: started
choices: ['started', 'stopped']
"""
EXAMPLES = """
- name: Enable eAPI access with default configuration
eos_eapi:
state: started
- name: Enable eAPI with no HTTP, HTTPS at port 9443, local HTTP at port 80, and socket enabled
eos_eapi:
state: started
http: false
https_port: 9443
local_http: yes
local_http_port: 80
socket: yes
- name: Shutdown eAPI access
eos_eapi:
state: stopped
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- management api http-commands
- protocol http port 81
- no protocol https
urls:
description: Hash of URL endpoints eAPI is listening on per interface
returned: when eAPI is started
type: dict
sample: {'Management1': ['http://172.26.10.1:80']}
session_name:
description: The EOS config session name used to load the configuration
returned: when changed is True
type: str
sample: ansible_1479315771
"""
import re
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.eos.eos import run_commands, load_config
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.eos.eos import eos_argument_spec, check_args
def check_transport(module):
transport = module.params['transport']
provider_transport = (module.params['provider'] or {}).get('transport')
if 'eapi' in (transport, provider_transport):
module.fail_json(msg='eos_eapi module is only supported over cli transport')
def validate_http_port(value, module):
if not 1 <= value <= 65535:
module.fail_json(msg='http_port must be between 1 and 65535')
def validate_https_port(value, module):
if not 1 <= value <= 65535:
        module.fail_json(msg='https_port must be between 1 and 65535')
def validate_local_http_port(value, module):
if not 1 <= value <= 65535:
        module.fail_json(msg='local_http_port must be between 1 and 65535')
def validate_vrf(value, module):
out = run_commands(module, ['show vrf'])
configured_vrfs = []
lines = out[0].strip().splitlines()[3:]
for l in lines:
if not l:
continue
splitted_line = re.split(r'\s{2,}', l.strip())
if len(splitted_line) > 2:
configured_vrfs.append(splitted_line[0])
configured_vrfs.append('default')
if value not in configured_vrfs:
module.fail_json(msg='vrf `%s` is not configured on the system' % value)
def map_obj_to_commands(updates, module, warnings):
commands = list()
want, have = updates
def needs_update(x):
return want.get(x) is not None and (want.get(x) != have.get(x))
def add(cmd):
if 'management api http-commands' not in commands:
commands.insert(0, 'management api http-commands')
commands.append(cmd)
if any((needs_update('http'), needs_update('http_port'))):
if want['http'] is False:
add('no protocol http')
else:
if have['http'] is False and want['http'] in (False, None):
warnings.append('protocol http is not enabled, not configuring http port value')
else:
port = want['http_port'] or 80
add('protocol http port %s' % port)
if any((needs_update('https'), needs_update('https_port'))):
if want['https'] is False:
add('no protocol https')
else:
if have['https'] is False and want['https'] in (False, None):
warnings.append('protocol https is not enabled, not configuring https port value')
else:
port = want['https_port'] or 443
add('protocol https port %s' % port)
if any((needs_update('local_http'), needs_update('local_http_port'))):
if want['local_http'] is False:
add('no protocol http localhost')
else:
if have['local_http'] is False and want['local_http'] in (False, None):
warnings.append('protocol local_http is not enabled, not configuring local_http port value')
else:
port = want['local_http_port'] or 8080
add('protocol http localhost port %s' % port)
    if needs_update('socket'):
if want['socket'] is False:
add('no protocol unix-socket')
else:
add('protocol unix-socket')
if needs_update('state') and not needs_update('vrf'):
if want['state'] == 'stopped':
add('shutdown')
elif want['state'] == 'started':
add('no shutdown')
if needs_update('vrf'):
add('vrf %s' % want['vrf'])
# switching operational vrfs here
# need to add the desired state as well
if want['state'] == 'stopped':
add('shutdown')
elif want['state'] == 'started':
add('no shutdown')
return commands
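# A worked example with illustrative inputs: want = {'http': True,
# 'http_port': 81} (everything else None) against have = {'http': False, ...}
# produces ['management api http-commands', 'protocol http port 81'].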
def parse_state(data):
if data[0]['enabled']:
return 'started'
else:
return 'stopped'
def map_config_to_obj(module):
out = run_commands(module, ['show management api http-commands | json'])
return {
'http': out[0]['httpServer']['configured'],
'http_port': out[0]['httpServer']['port'],
'https': out[0]['httpsServer']['configured'],
'https_port': out[0]['httpsServer']['port'],
'local_http': out[0]['localHttpServer']['configured'],
'local_http_port': out[0]['localHttpServer']['port'],
'socket': out[0]['unixSocketServer']['configured'],
'vrf': out[0]['vrf'],
'state': parse_state(out)
}
def map_params_to_obj(module):
obj = {
'http': module.params['http'],
'http_port': module.params['http_port'],
'https': module.params['https'],
'https_port': module.params['https_port'],
'local_http': module.params['local_http'],
'local_http_port': module.params['local_http_port'],
'socket': module.params['socket'],
'vrf': module.params['vrf'],
'state': module.params['state']
}
for key, value in iteritems(obj):
if value:
validator = globals().get('validate_%s' % key)
if validator:
validator(value, module)
return obj
def verify_state(updates, module):
want, have = updates
invalid_state = [('http', 'httpServer'),
('https', 'httpsServer'),
('local_http', 'localHttpServer'),
('socket', 'unixSocketServer')]
timeout = module.params['timeout'] or 30
state = module.params['state']
while invalid_state:
out = run_commands(module, ['show management api http-commands | json'])
for index, item in enumerate(invalid_state):
want_key, eapi_key = item
if want[want_key] is not None:
if want[want_key] == out[0][eapi_key]['running']:
del invalid_state[index]
elif state == 'stopped':
if not out[0][eapi_key]['running']:
del invalid_state[index]
else:
del invalid_state[index]
time.sleep(1)
timeout -= 1
if timeout == 0:
module.fail_json(msg='timeout expired before eapi running state changed')
def collect_facts(module, result):
out = run_commands(module, ['show management api http-commands | json'])
facts = dict(eos_eapi_urls=dict())
for each in out[0]['urls']:
intf, url = each.split(' : ')
key = str(intf).strip()
if key not in facts['eos_eapi_urls']:
facts['eos_eapi_urls'][key] = list()
facts['eos_eapi_urls'][key].append(str(url).strip())
result['ansible_facts'] = facts
def main():
""" main entry point for module execution
"""
argument_spec = dict(
http=dict(aliases=['enable_http'], type='bool'),
http_port=dict(type='int'),
https=dict(aliases=['enable_https'], type='bool'),
https_port=dict(type='int'),
local_http=dict(aliases=['enable_local_http'], type='bool'),
local_http_port=dict(type='int'),
socket=dict(aliases=['enable_socket'], type='bool'),
vrf=dict(default='default'),
config=dict(),
state=dict(default='started', choices=['stopped', 'started']),
)
argument_spec.update(eos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
check_transport(module)
result = {'changed': False}
warnings = list()
if module.params['config']:
warnings.append('config parameter is no longer necessary and will be ignored')
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module, warnings)
result['commands'] = commands
if commands:
commit = not module.check_mode
response = load_config(module, commands, commit=commit)
if response.get('diff') and module._diff:
result['diff'] = {'prepared': response.get('diff')}
result['session_name'] = response.get('session')
result['changed'] = True
if result['changed']:
verify_state((want, have), module)
collect_facts(module, result)
if warnings:
result['warnings'] = warnings
module.exit_json(**result)
if __name__ == '__main__':
main()
|
rleh/ocyco-server-python
|
refs/heads/develop
|
app/way_type_descriptions/models.py
|
1
|
from app import db
class WayTypeDescriptions(db.Model):
__tablename__ = 'way_type_descriptions'
_local_id = db.Column('local_id', db.BigInteger, db.Sequence('way_type_description_local_id_seq'),
primary_key=True, unique=True, autoincrement=True)
id = db.Column('id', db.BigInteger, db.ForeignKey('way_types.id'), nullable=False, index=True)
language = db.Column(db.Text, nullable=False)
description = db.Column(db.Text, nullable=False)
def __init__(self, id, description, language='de-DE'):
self.id = id
self.language = language
self.description = description
def __repr__(self):
        return '<way_type_description %i %s>' % (self.id, self.language)
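    # Illustration (hypothetical values): WayTypeDescriptions(3, u'Radweg')
    # stores language 'de-DE' by default, and repr() yields
    # '<way_type_description 3 de-DE>'.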
|
JulienMcJay/eclock
|
refs/heads/master
|
windows/Python27/Lib/site-packages/docutils/io.py
|
104
|
# $Id: io.py 7596 2013-01-25 13:42:17Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
I/O classes provide a uniform API for low-level input and output. Subclasses
exist for a variety of input/output mechanisms.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import codecs
from docutils import TransformSpec
from docutils._compat import b
from docutils.utils.error_reporting import locale_encoding, ErrorString, ErrorOutput
class InputError(IOError): pass
class OutputError(IOError): pass
def check_encoding(stream, encoding):
"""Test, whether the encoding of `stream` matches `encoding`.
Returns
:None: if `encoding` or `stream.encoding` are not a valid encoding
           argument (e.g. ``None``) or `stream.encoding` is missing.
:True: if the encoding argument resolves to the same value as `encoding`,
:False: if the encodings differ.
"""
try:
return codecs.lookup(stream.encoding) == codecs.lookup(encoding)
except (LookupError, AttributeError, TypeError):
return None
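# For instance (illustrative): a stream opened with encoding 'utf-8' compared
# against 'UTF8' returns True (both names resolve to the same codec), against
# 'latin-1' returns False, and a StringIO object without an `encoding`
# attribute yields None.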
class Input(TransformSpec):
"""
Abstract base class for input wrappers.
"""
component_type = 'input'
default_source_path = None
def __init__(self, source=None, source_path=None, encoding=None,
error_handler='strict'):
self.encoding = encoding
"""Text encoding for the input source."""
self.error_handler = error_handler
"""Text decoding error handler."""
self.source = source
"""The source of input data."""
self.source_path = source_path
"""A text reference to the source."""
if not source_path:
self.source_path = self.default_source_path
self.successful_encoding = None
"""The encoding that successfully decoded the source data."""
def __repr__(self):
return '%s: source=%r, source_path=%r' % (self.__class__, self.source,
self.source_path)
def read(self):
raise NotImplementedError
def decode(self, data):
"""
Decode a string, `data`, heuristically.
Raise UnicodeError if unsuccessful.
The client application should call ``locale.setlocale`` at the
beginning of processing::
locale.setlocale(locale.LC_ALL, '')
"""
if self.encoding and self.encoding.lower() == 'unicode':
assert isinstance(data, unicode), (
'input encoding is "unicode" '
'but input is not a unicode object')
if isinstance(data, unicode):
# Accept unicode even if self.encoding != 'unicode'.
return data
if self.encoding:
# We believe the user/application when the encoding is
# explicitly given.
encodings = [self.encoding]
else:
data_encoding = self.determine_encoding_from_data(data)
if data_encoding:
# If the data declares its encoding (explicitly or via a BOM),
# we believe it.
encodings = [data_encoding]
else:
# Apply heuristics only if no encoding is explicitly given and
# no BOM found. Start with UTF-8, because that only matches
# data that *IS* UTF-8:
encodings = ['utf-8', 'latin-1']
if locale_encoding:
encodings.insert(1, locale_encoding)
for enc in encodings:
try:
decoded = unicode(data, enc, self.error_handler)
self.successful_encoding = enc
# Return decoded, removing BOMs.
return decoded.replace(u'\ufeff', u'')
except (UnicodeError, LookupError), err:
error = err # in Python 3, the <exception instance> is
# local to the except clause
raise UnicodeError(
'Unable to decode input data. Tried the following encodings: '
'%s.\n(%s)' % (', '.join([repr(enc) for enc in encodings]),
ErrorString(error)))
coding_slug = re.compile(b("coding[:=]\s*([-\w.]+)"))
"""Encoding declaration pattern."""
byte_order_marks = ((codecs.BOM_UTF8, 'utf-8'), # 'utf-8-sig' new in v2.5
(codecs.BOM_UTF16_BE, 'utf-16-be'),
(codecs.BOM_UTF16_LE, 'utf-16-le'),)
"""Sequence of (start_bytes, encoding) tuples for encoding detection.
The first bytes of input data are checked against the start_bytes strings.
A match indicates the given encoding."""
def determine_encoding_from_data(self, data):
"""
Try to determine the encoding of `data` by looking *in* `data`.
Check for a byte order mark (BOM) or an encoding declaration.
"""
# check for a byte order mark:
for start_bytes, encoding in self.byte_order_marks:
if data.startswith(start_bytes):
return encoding
# check for an encoding declaration pattern in first 2 lines of file:
for line in data.splitlines()[:2]:
match = self.coding_slug.search(line)
if match:
return match.group(1).decode('ascii')
return None
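    # Illustration (hypothetical data): input starting with codecs.BOM_UTF8
    # returns 'utf-8' from the BOM table, while a first line of
    # '# -*- coding: latin-1 -*-' matches coding_slug and returns u'latin-1'.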
class Output(TransformSpec):
"""
Abstract base class for output wrappers.
"""
component_type = 'output'
default_destination_path = None
def __init__(self, destination=None, destination_path=None,
encoding=None, error_handler='strict'):
self.encoding = encoding
"""Text encoding for the output destination."""
self.error_handler = error_handler or 'strict'
"""Text encoding error handler."""
self.destination = destination
"""The destination for output data."""
self.destination_path = destination_path
"""A text reference to the destination."""
if not destination_path:
self.destination_path = self.default_destination_path
def __repr__(self):
return ('%s: destination=%r, destination_path=%r'
% (self.__class__, self.destination, self.destination_path))
def write(self, data):
"""`data` is a Unicode string, to be encoded by `self.encode`."""
raise NotImplementedError
def encode(self, data):
if self.encoding and self.encoding.lower() == 'unicode':
assert isinstance(data, unicode), (
'the encoding given is "unicode" but the output is not '
'a Unicode string')
return data
if not isinstance(data, unicode):
# Non-unicode (e.g. bytes) output.
return data
else:
return data.encode(self.encoding, self.error_handler)
class FileInput(Input):
"""
Input for single, simple file-like objects.
"""
def __init__(self, source=None, source_path=None,
encoding=None, error_handler='strict',
autoclose=True, handle_io_errors=None, mode='rU'):
"""
:Parameters:
- `source`: either a file-like object (which is read directly), or
`None` (which implies `sys.stdin` if no `source_path` given).
- `source_path`: a path to a file, which is opened and then read.
- `encoding`: the expected text encoding of the input file.
- `error_handler`: the encoding error handler to use.
- `autoclose`: close automatically after read (except when
`sys.stdin` is the source).
- `handle_io_errors`: ignored, deprecated, will be removed.
- `mode`: how the file is to be opened (see standard function
`open`). The default 'rU' provides universal newline support
for text files.
"""
Input.__init__(self, source, source_path, encoding, error_handler)
self.autoclose = autoclose
self._stderr = ErrorOutput()
if source is None:
if source_path:
# Specify encoding in Python 3
if sys.version_info >= (3,0):
kwargs = {'encoding': self.encoding,
'errors': self.error_handler}
else:
kwargs = {}
try:
self.source = open(source_path, mode, **kwargs)
except IOError, error:
raise InputError(error.errno, error.strerror, source_path)
else:
self.source = sys.stdin
elif (sys.version_info >= (3,0) and
check_encoding(self.source, self.encoding) is False):
# TODO: re-open, warn or raise error?
raise UnicodeError('Encoding clash: encoding given is "%s" '
'but source is opened with encoding "%s".' %
(self.encoding, self.source.encoding))
if not source_path:
try:
self.source_path = self.source.name
except AttributeError:
pass
def read(self):
"""
Read and decode a single file and return the data (Unicode string).
"""
try: # In Python < 2.5, try...except has to be nested in try...finally.
try:
if self.source is sys.stdin and sys.version_info >= (3,0):
# read as binary data to circumvent auto-decoding
data = self.source.buffer.read()
# normalize newlines
data = b('\n').join(data.splitlines()) + b('\n')
else:
data = self.source.read()
except (UnicodeError, LookupError), err: # (in Py3k read() decodes)
if not self.encoding and self.source_path:
# re-read in binary mode and decode with heuristics
b_source = open(self.source_path, 'rb')
data = b_source.read()
b_source.close()
# normalize newlines
data = b('\n').join(data.splitlines()) + b('\n')
else:
raise
finally:
if self.autoclose:
self.close()
return self.decode(data)
def readlines(self):
"""
Return lines of a single file as list of Unicode strings.
"""
return self.read().splitlines(True)
def close(self):
if self.source is not sys.stdin:
self.source.close()
class FileOutput(Output):
"""
Output for single, simple file-like objects.
"""
mode = 'w'
"""The mode argument for `open()`."""
# 'wb' for binary (e.g. OpenOffice) files (see also `BinaryFileOutput`).
# (Do not use binary mode ('wb') for text files, as this prevents the
# conversion of newlines to the system specific default.)
def __init__(self, destination=None, destination_path=None,
encoding=None, error_handler='strict', autoclose=True,
handle_io_errors=None, mode=None):
"""
:Parameters:
- `destination`: either a file-like object (which is written
directly) or `None` (which implies `sys.stdout` if no
`destination_path` given).
- `destination_path`: a path to a file, which is opened and then
written.
- `encoding`: the text encoding of the output file.
- `error_handler`: the encoding error handler to use.
- `autoclose`: close automatically after write (except when
`sys.stdout` or `sys.stderr` is the destination).
- `handle_io_errors`: ignored, deprecated, will be removed.
- `mode`: how the file is to be opened (see standard function
`open`). The default is 'w', providing universal newline
support for text files.
"""
Output.__init__(self, destination, destination_path,
encoding, error_handler)
self.opened = True
self.autoclose = autoclose
if mode is not None:
self.mode = mode
self._stderr = ErrorOutput()
if destination is None:
if destination_path:
self.opened = False
else:
self.destination = sys.stdout
elif (# destination is file-type object -> check mode:
mode and hasattr(self.destination, 'mode')
and mode != self.destination.mode):
print >>self._stderr, ('Warning: Destination mode "%s" '
'differs from specified mode "%s"' %
(self.destination.mode, mode))
if not destination_path:
try:
self.destination_path = self.destination.name
except AttributeError:
pass
def open(self):
# Specify encoding in Python 3.
if sys.version_info >= (3,0) and 'b' not in self.mode:
kwargs = {'encoding': self.encoding,
'errors': self.error_handler}
else:
kwargs = {}
try:
self.destination = open(self.destination_path, self.mode, **kwargs)
except IOError, error:
raise OutputError(error.errno, error.strerror,
self.destination_path)
self.opened = True
def write(self, data):
"""Encode `data`, write it to a single file, and return it.
With Python 3 or binary output mode, `data` is returned unchanged,
except when specified encoding and output encoding differ.
"""
if not self.opened:
self.open()
if ('b' not in self.mode and sys.version_info < (3,0)
or check_encoding(self.destination, self.encoding) is False
):
if sys.version_info >= (3,0) and os.linesep != '\n':
data = data.replace('\n', os.linesep) # fix endings
data = self.encode(data)
try: # In Python < 2.5, try...except has to be nested in try...finally.
try:
self.destination.write(data)
except TypeError, e:
if sys.version_info >= (3,0) and isinstance(data, bytes):
try:
self.destination.buffer.write(data)
except AttributeError:
if check_encoding(self.destination,
self.encoding) is False:
raise ValueError('Encoding of %s (%s) differs \n'
' from specified encoding (%s)' %
(self.destination_path or 'destination',
self.destination.encoding, self.encoding))
else:
raise e
except (UnicodeError, LookupError), err:
raise UnicodeError(
'Unable to encode output data. output-encoding is: '
'%s.\n(%s)' % (self.encoding, ErrorString(err)))
finally:
if self.autoclose:
self.close()
return data
def close(self):
if self.destination not in (sys.stdout, sys.stderr):
self.destination.close()
self.opened = False
class BinaryFileOutput(FileOutput):
"""
A version of docutils.io.FileOutput which writes to a binary file.
"""
# Used by core.publish_cmdline_to_binary() which in turn is used by
# rst2odt (OpenOffice writer)
mode = 'wb'
class StringInput(Input):
"""
Direct string input.
"""
default_source_path = '<string>'
def read(self):
"""Decode and return the source string."""
return self.decode(self.source)
class StringOutput(Output):
"""
Direct string output.
"""
default_destination_path = '<string>'
def write(self, data):
"""Encode `data`, store it in `self.destination`, and return it."""
self.destination = self.encode(data)
return self.destination
class NullInput(Input):
"""
Degenerate input: read nothing.
"""
default_source_path = 'null input'
def read(self):
"""Return a null string."""
return u''
class NullOutput(Output):
"""
Degenerate output: write nothing.
"""
default_destination_path = 'null output'
def write(self, data):
"""Do nothing ([don't even] send data to the bit bucket)."""
pass
class DocTreeInput(Input):
"""
Adapter for document tree input.
The document tree must be passed in the ``source`` parameter.
"""
default_source_path = 'doctree input'
def read(self):
"""Return the document tree."""
return self.source
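# --- Hypothetical usage sketch (added for illustration, not part of the
# docutils API): round-tripping text through the direct string wrappers.
def _example_string_round_trip():
    text = StringInput(source=u'hello world').read()
    # encoding='unicode' makes StringOutput return the unicode string as-is.
    return StringOutput(encoding='unicode').write(text)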
|
mojeto/django
|
refs/heads/master
|
tests/m2m_signals/tests.py
|
84
|
"""
Testing signals emitted on changing m2m relations.
"""
from django.db import models
from django.test import TestCase
from .models import Car, Part, Person, SportsCar
class ManyToManySignalsTest(TestCase):
def m2m_changed_signal_receiver(self, signal, sender, **kwargs):
message = {
'instance': kwargs['instance'],
'action': kwargs['action'],
'reverse': kwargs['reverse'],
'model': kwargs['model'],
}
if kwargs['pk_set']:
message['objects'] = list(
kwargs['model'].objects.filter(pk__in=kwargs['pk_set'])
)
self.m2m_changed_messages.append(message)
def setUp(self):
self.m2m_changed_messages = []
self.vw = Car.objects.create(name='VW')
self.bmw = Car.objects.create(name='BMW')
self.toyota = Car.objects.create(name='Toyota')
self.wheelset = Part.objects.create(name='Wheelset')
self.doors = Part.objects.create(name='Doors')
self.engine = Part.objects.create(name='Engine')
self.airbag = Part.objects.create(name='Airbag')
self.sunroof = Part.objects.create(name='Sunroof')
self.alice = Person.objects.create(name='Alice')
self.bob = Person.objects.create(name='Bob')
self.chuck = Person.objects.create(name='Chuck')
self.daisy = Person.objects.create(name='Daisy')
def tearDown(self):
# disconnect all signal handlers
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Car.default_parts.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Car.optional_parts.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Person.fans.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Person.friends.through
)
def _initialize_signal_car(self, add_default_parts_before_set_signal=False):
""" Install a listener on the two m2m relations. """
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Car.optional_parts.through
)
if add_default_parts_before_set_signal:
# adding a default part to our car - no signal listener installed
self.vw.default_parts.add(self.sunroof)
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Car.default_parts.through
)
def test_m2m_relations_add_remove_clear(self):
expected_messages = []
self._initialize_signal_car(add_default_parts_before_set_signal=True)
self.vw.default_parts.add(self.wheelset, self.doors, self.engine)
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# give the BMW and Toyota some doors as well
self.doors.car_set.add(self.bmw, self.toyota)
expected_messages.append({
'instance': self.doors,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
expected_messages.append({
'instance': self.doors,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_remove_relation(self):
self._initialize_signal_car()
        # remove the engine from self.vw, along with the airbag (which is not
        # set on it but is still reported in the signal payload)
self.vw.default_parts.remove(self.engine, self.airbag)
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.vw,
'action': 'pre_remove',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.engine],
}, {
'instance': self.vw,
'action': 'post_remove',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.engine],
}
])
def test_m2m_relations_signals_give_the_self_vw_some_optional_parts(self):
expected_messages = []
self._initialize_signal_car()
        # give self.vw some optional parts (second relation to the same model)
self.vw.optional_parts.add(self.airbag, self.sunroof)
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.sunroof],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.sunroof],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
        # add the airbag to all the cars (even though self.vw already has one)
self.airbag.cars_optional.add(self.vw, self.bmw, self.toyota)
expected_messages.append({
'instance': self.airbag,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
expected_messages.append({
'instance': self.airbag,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_reverse_relation_with_custom_related_name(self):
self._initialize_signal_car()
        # remove the airbag from self.vw (reverse relation with a custom
        # related_name)
self.airbag.cars_optional.remove(self.vw)
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.airbag,
'action': 'pre_remove',
'reverse': True,
'model': Car,
'objects': [self.vw],
}, {
'instance': self.airbag,
'action': 'post_remove',
'reverse': True,
'model': Car,
'objects': [self.vw],
}
])
def test_m2m_relations_signals_clear_all_parts_of_the_self_vw(self):
self._initialize_signal_car()
        # clear all parts of self.vw
self.vw.default_parts.clear()
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.vw,
'action': 'pre_clear',
'reverse': False,
'model': Part,
}, {
'instance': self.vw,
'action': 'post_clear',
'reverse': False,
'model': Part,
}
])
def test_m2m_relations_signals_all_the_doors_off_of_cars(self):
self._initialize_signal_car()
# take all the doors off of cars
self.doors.car_set.clear()
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.doors,
'action': 'pre_clear',
'reverse': True,
'model': Car,
}, {
'instance': self.doors,
'action': 'post_clear',
'reverse': True,
'model': Car,
}
])
def test_m2m_relations_signals_reverse_relation(self):
self._initialize_signal_car()
# take all the airbags off of cars (clear reverse relation with custom
# related_name)
self.airbag.cars_optional.clear()
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.airbag,
'action': 'pre_clear',
'reverse': True,
'model': Car,
}, {
'instance': self.airbag,
'action': 'post_clear',
'reverse': True,
'model': Car,
}
])
def test_m2m_relations_signals_alternative_ways(self):
expected_messages = []
self._initialize_signal_car()
        # alternative ways of setting the relation:
self.vw.default_parts.create(name='Windows')
p6 = Part.objects.get(name='Windows')
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [p6],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
        # set() removes the stale members first, then adds the new ones
self.vw.default_parts.set([self.wheelset, self.doors, self.engine])
expected_messages.append({
'instance': self.vw,
'action': 'pre_remove',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_remove',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_clearing_removing(self):
expected_messages = []
self._initialize_signal_car(add_default_parts_before_set_signal=True)
# set by clearing.
self.vw.default_parts.set([self.wheelset, self.doors, self.engine], clear=True)
expected_messages.append({
'instance': self.vw,
'action': 'pre_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': self.vw,
'action': 'post_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# set by only removing what's necessary.
self.vw.default_parts.set([self.wheelset, self.doors], clear=False)
expected_messages.append({
'instance': self.vw,
'action': 'pre_remove',
'reverse': False,
'model': Part,
'objects': [self.engine],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_remove',
'reverse': False,
'model': Part,
'objects': [self.engine],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_when_inheritance(self):
expected_messages = []
self._initialize_signal_car(add_default_parts_before_set_signal=True)
# Signals still work when model inheritance is involved
c4 = SportsCar.objects.create(name='Bugatti', price='1000000')
c4b = Car.objects.get(name='Bugatti')
c4.default_parts.set([self.doors])
expected_messages.append({
'instance': c4,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors],
})
expected_messages.append({
'instance': c4,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
self.engine.car_set.add(c4)
expected_messages.append({
'instance': self.engine,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [c4b],
})
expected_messages.append({
'instance': self.engine,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [c4b],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def _initialize_signal_person(self):
# Install a listener on the two m2m relations.
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Person.fans.through
)
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Person.friends.through
)
def test_m2m_relations_with_self_add_friends(self):
self._initialize_signal_person()
self.alice.friends.set([self.bob, self.chuck])
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.alice,
'action': 'pre_add',
'reverse': False,
'model': Person,
'objects': [self.bob, self.chuck],
}, {
'instance': self.alice,
'action': 'post_add',
'reverse': False,
'model': Person,
'objects': [self.bob, self.chuck],
}
])
def test_m2m_relations_with_self_add_fan(self):
self._initialize_signal_person()
self.alice.fans.set([self.daisy])
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.alice,
'action': 'pre_add',
'reverse': False,
'model': Person,
'objects': [self.daisy],
}, {
'instance': self.alice,
'action': 'post_add',
'reverse': False,
'model': Person,
'objects': [self.daisy],
}
])
def test_m2m_relations_with_self_add_idols(self):
self._initialize_signal_person()
self.chuck.idols.set([self.alice, self.bob])
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.chuck,
'action': 'pre_add',
'reverse': True,
'model': Person,
'objects': [self.alice, self.bob],
}, {
'instance': self.chuck,
'action': 'post_add',
'reverse': True,
'model': Person,
'objects': [self.alice, self.bob],
}
])
|
kantlove/flask-simple-page
|
refs/heads/master
|
Lib/site-packages/pip/_vendor/distlib/util.py
|
224
|
#
# Copyright (C) 2012-2014 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
import ssl
import subprocess
import sys
import tarfile
import tempfile
try:
import threading
except ImportError:
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, httplib, xmlrpclib, splittype,
HTTPHandler, HTTPSHandler as BaseHTTPSHandler,
BaseConfigurator, valid_ident, Container, configparser,
URLError, match_hostname, CertificateError, ZipFile)
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional extras + optional constraints
#
# e.g. 'foo [bar, baz] (>= 1.2, < 2.0)'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'
RELOP = '([<>=!~]=)|[<>]'
#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
RELOP + r')\s*(' + VERSPEC + '))*')
DIRECT_REF = r'(from\s+(?P<diref>.*))'
#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
               r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + r'\s*)')
EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)
#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
def get_constraint(m):
d = m.groupdict()
return d['op'], d['vn']
result = None
m = REQUIREMENT_RE.match(s)
if m:
d = m.groupdict()
name = d['dn']
cons = d['c1'] or d['c2']
if not d['diref']:
url = None
else:
# direct reference
cons = None
url = d['diref'].strip()
if not cons:
cons = None
constr = ''
rs = d['dn']
else:
if cons[0] not in '<>!=':
cons = '~=' + cons
iterator = RELOP_IDENT_RE.finditer(cons)
cons = [get_constraint(m) for m in iterator]
rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
if not d['ex']:
extras = None
else:
extras = COMMA_RE.split(d['ex'])
result = Container(name=name, constraints=cons, extras=extras,
requirement=rs, source=s, url=url)
return result
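# --- Hypothetical usage sketch (not part of distlib): per REQUIREMENT_RE,
# extras in brackets must come before the (optionally parenthesised)
# constraints.  The requirement string below is an assumed example.
def _example_parse_requirement():
    r = parse_requirement('foo [bar, baz] (>= 1.2, < 2.0)')
    # r.name == 'foo'; r.extras == ['bar', 'baz']
    # r.constraints == [('>=', '1.2'), ('<', '2.0')]
    return r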
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(base, path):
# normalizes and returns a lstripped-/-separated path
base = base.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(base)
return path[len(base):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_executable():
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on OS X
# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
# in os.environ):
# result = os.environ['__PYVENV_LAUNCHER__']
# else:
# result = sys.executable
# return result
return os.path.normcase(sys.executable)
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
data = json.load(stream)
result = data['extensions']['python.exports']['exports']
for group, entries in result.items():
for k, v in entries.items():
s = '%s = %s' % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
cp = configparser.ConfigParser()
if hasattr(cp, 'read_file'):
cp.read_file(stream)
else:
cp.readfp(stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter('utf-8')(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
#for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
#obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
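# --- Hypothetical usage sketch (not part of distlib): the first attribute
# access runs the wrapped function, and the result then shadows the
# descriptor on the instance, so later accesses are plain lookups.
class _ExampleCachedUser(object):
    @cached_property
    def answer(self):
        return 6 * 7    # computed at most once per instance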
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
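# --- Hypothetical usage sketch (not part of distlib): on Windows this
# returns 'pkg\\data\\file.txt'; on POSIX the path is returned unchanged.
def _example_convert_path():
    return convert_path('pkg/data/file.txt')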
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
        Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raise DistlibException if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data.encode(encoding))
self.record_as_written(path)
def set_mode(self, bits, mask, files):
if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
py_compile.compile(path, dpath, diagpath, True) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ['__pycache__']
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self):
return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
self.suffix, self.flags)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (self.name == other.name and
self.prefix == other.prefix and
self.suffix == other.suffix and
self.flags == other.flags)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
else:
d = m.groupdict()
name = d['name']
path = d['callable']
colons = path.count(':')
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException('Invalid specification '
'%r' % specification)
prefix, suffix = path.split(':')
flags = d['flags']
if flags is None:
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(',')]
result = ExportEntry(name, prefix, suffix, flags)
return result
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.path.expanduser('~') - will be the parent directory
    of the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = '.distlib'
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
result = os.path.expandvars('$localappdata')
else:
# Assume posix, or old Windows
result = os.path.expanduser('~')
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning('Directory exists but is not writable: %s', result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning('Unable to create %s', result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning('Default location unusable, using %s', result)
return os.path.join(result, suffix)
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(':', '---')
p = p.replace(os.sep, '--')
return d + p + '.cache'
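# --- Hypothetical usage sketch (not part of distlib): on POSIX,
# '/srv/resources' maps to '--srv--resources.cache'; on Windows a drive
# letter such as 'C:' becomes 'C---'.
def _example_path_to_cache_dir():
    return path_to_cache_dir('/srv/resources')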
def ensure_slash(s):
if not s.endswith('/'):
return s + '/'
return s
def parse_credentials(netloc):
username = password = None
if '@' in netloc:
prefix, netloc = netloc.split('@', 1)
if ':' not in prefix:
username = prefix
else:
username, password = prefix.split(':', 1)
return username, password, netloc
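# --- Hypothetical usage sketch (not part of distlib; the netloc is made up):
def _example_parse_credentials():
    # Returns ('alice', 'secret', 'pypi.example.com'); the password is None
    # when the part before '@' has no ':' separator.
    return parse_credentials('alice:secret@pypi.example.com')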
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
'([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[:m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r'\b', filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1:], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
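# --- Hypothetical usage sketch (not part of distlib): the '-pyX.Y' marker is
# split off first, then the name/version boundary is located.
def _example_split_filename():
    # Expected result: ('python-dateutil', '2.4.0', '2.7')
    return split_filename('python-dateutil-2.4.0-py2.7')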
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string.
From e.g. a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
d = m.groupdict()
return d['name'].strip().lower(), d['ver']
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if '*' in requested:
requested.remove('*')
result |= available
for r in requested:
if r == '-':
result.add(r)
elif r.startswith('-'):
unwanted = r[1:]
if unwanted not in available:
logger.warning('undeclared extra: %s' % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning('undeclared extra: %s' % r)
result.add(r)
return result
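# --- Hypothetical usage sketch (not part of distlib): '*' expands to every
# available extra, and a '-name' entry subtracts one again.
def _example_get_extras():
    # Expected result: set(['doc'])
    return get_extras(['*', '-test'], ['doc', 'test'])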
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
if headers.get('Content-Type') != 'application/json':
logger.debug('Unexpected response for JSON request')
else:
reader = codecs.getreader('utf-8')(resp)
#data = reader.read().decode('utf-8')
#result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception('Failed to get external data for %s: %s', url, e)
return result
def get_project_data(name):
url = ('https://www.red-dove.com/pypi/projects/'
'%s/%s/project.json' % (name[0].upper(), name))
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = ('https://www.red-dove.com/pypi/projects/'
'%s/%s/package-%s.json' % (name[0].upper(), name, version))
return _get_external_data(url)
class Cache(object):
"""
    A class implementing a cache for resources that need to live in the file
    system, e.g. shared libraries. This class was moved here from resources
    because it can also be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base):
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning('Directory \'%s\' is not private', base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
        Publish an event and return a list of values returned by its
        subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
event, args, kwargs, result)
return result
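# --- Hypothetical usage sketch (not part of distlib): subscribers receive
# the event name first, and publish() collects their return values.
class _ExampleNotifier(EventMixin):
    pass
def _example_publish():
    notifier = _ExampleNotifier()
    notifier.add('ping', lambda event, *args, **kwargs: 'pong')
    return notifier.publish('ping')    # ['pong']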
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError:
raise ValueError('%r not a successor of anything' % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError:
raise ValueError('%r not a successor of %r' % (succ, pred))
def is_step(self, step):
return (step in self._preds or step in self._succs or
step in self._nodes)
def get_steps(self, final):
if not self.is_step(final):
raise ValueError('Unknown: %r' % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
#http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ['digraph G {']
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(' %s -> %s;' % (pred, succ))
for node in self._nodes:
result.append(' %s;' % node)
result.append('}')
return '\n'.join(result)
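# --- Hypothetical usage sketch (not part of distlib): get_steps() yields the
# predecessors of a step before the step itself.
def _example_sequencer():
    seq = Sequencer()
    seq.add('build', 'test')
    seq.add('test', 'release')
    return list(seq.get_steps('release'))    # ['build', 'test', 'release']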
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode('utf-8')
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError('path outside destination: %r' % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
if format is None:
if archive_filename.endswith(('.zip', '.whl')):
format = 'zip'
elif archive_filename.endswith(('.tar.gz', '.tgz')):
format = 'tgz'
mode = 'r:gz'
elif archive_filename.endswith(('.tar.bz2', '.tbz')):
format = 'tbz'
mode = 'r:bz2'
elif archive_filename.endswith('.tar'):
format = 'tar'
mode = 'r'
else:
raise ValueError('Unknown format for %r' % archive_filename)
try:
if format == 'zip':
archive = ZipFile(archive_filename, 'r')
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != 'zip' and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode('utf-8')
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
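# --- Hypothetical usage sketch (not part of distlib; the paths are made up):
def _example_zip_dir(source_dir, archive_path):
    data = zip_dir(source_dir)
    with open(archive_path, 'wb') as f:
        f.write(data.getvalue())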
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G','T','P')
class Progress(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
        if ((duration <= 0) and self.max is None) or self.cur == self.min:
result = '??:??:??'
#elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
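# --- Hypothetical usage sketch (not part of distlib):
def _example_progress():
    p = Progress(maxval=1000).start()
    p.increment(250)
    return p.percentage    # ' 25 %'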
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
# we support both
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
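# --- Hypothetical usage sketch (not part of distlib; the patterns are
# illustrative): '**' recurses into subdirectories, '{a,b}' expands to
# alternatives.
def _example_iglob():
    sources = list(iglob('src/**/*.py'))
    docs = list(iglob('{docs,examples}/*.txt'))
    return sources, docs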
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, 'SSLContext'):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs)
else:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To guard against mixing HTTP traffic with HTTPS (examples: a man-in-the-
# middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing an http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError('Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class HTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None,
**kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
'delimiter': str(','), # The strs are used because we need native
'quotechar': str('"'), # str in the csv API (2.x won't take
'lineterminator': str('\n') # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
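# --- Hypothetical usage sketch (not part of distlib; the file name is made
# up): CSVWriter/CSVReader hide the 2.x/3.x differences in the csv module.
def _example_csv_round_trip(path):
    with CSVWriter(path) as writer:
        writer.writerow([u'name', u'1.0'])
    with CSVReader(path=path) as reader:
        return [row for row in reader]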
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters['inc'] = 'inc_convert'
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if '()' in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
args = config.pop('[]', ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and '()' in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
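
# Illustrative only: given {'logging': 'inc://logging.json'} in a config, the
# converter above loads logging.json (resolved relative to self.base) and
# splices the parsed JSON in place of the string. The file name is hypothetical.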
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
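
# Hypothetical usage sketch for the mixin above: a subclass gains run_command
# and prints dots (or, with verbose=True, the full output) to sys.stderr while
# the subprocess runs.
#
#     class Builder(SubprocessMixin):
#         pass
#     Builder(verbose=True).run_command(['python', '--version'])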
|
frontibit/riestercoin
|
refs/heads/master
|
contrib/spendfrom/spendfrom.py
|
792
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19332 if testnet else 9332
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
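
# Worked example of the greedy selection above (hypothetical values): given
# inputs worth 0.5 and 0.7 BTC and needed == Decimal("1.0"), the loop takes
# both outputs and returns a change amount of Decimal("0.2").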
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        if kb > 1 and (total_in-total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and (total_in-total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
        while not unlock_wallet(bitcoind):
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
lpabon/qemu
|
refs/heads/master
|
scripts/analyse-9p-simpletrace.py
|
333
|
#!/usr/bin/env python
# Pretty print 9p simpletrace log
# Usage: ./analyse-9p-simpletrace <trace-events> <trace-pid>
#
# Author: Harsh Prateek Bora
import os
import simpletrace
symbol_9p = {
6 : 'TLERROR',
7 : 'RLERROR',
8 : 'TSTATFS',
9 : 'RSTATFS',
12 : 'TLOPEN',
13 : 'RLOPEN',
14 : 'TLCREATE',
15 : 'RLCREATE',
16 : 'TSYMLINK',
17 : 'RSYMLINK',
18 : 'TMKNOD',
19 : 'RMKNOD',
20 : 'TRENAME',
21 : 'RRENAME',
22 : 'TREADLINK',
23 : 'RREADLINK',
24 : 'TGETATTR',
25 : 'RGETATTR',
26 : 'TSETATTR',
27 : 'RSETATTR',
30 : 'TXATTRWALK',
31 : 'RXATTRWALK',
32 : 'TXATTRCREATE',
33 : 'RXATTRCREATE',
40 : 'TREADDIR',
41 : 'RREADDIR',
50 : 'TFSYNC',
51 : 'RFSYNC',
52 : 'TLOCK',
53 : 'RLOCK',
54 : 'TGETLOCK',
55 : 'RGETLOCK',
70 : 'TLINK',
71 : 'RLINK',
72 : 'TMKDIR',
73 : 'RMKDIR',
74 : 'TRENAMEAT',
75 : 'RRENAMEAT',
76 : 'TUNLINKAT',
77 : 'RUNLINKAT',
100 : 'TVERSION',
101 : 'RVERSION',
102 : 'TAUTH',
103 : 'RAUTH',
104 : 'TATTACH',
105 : 'RATTACH',
106 : 'TERROR',
107 : 'RERROR',
108 : 'TFLUSH',
109 : 'RFLUSH',
110 : 'TWALK',
111 : 'RWALK',
112 : 'TOPEN',
113 : 'ROPEN',
114 : 'TCREATE',
115 : 'RCREATE',
116 : 'TREAD',
117 : 'RREAD',
118 : 'TWRITE',
119 : 'RWRITE',
120 : 'TCLUNK',
121 : 'RCLUNK',
122 : 'TREMOVE',
123 : 'RREMOVE',
124 : 'TSTAT',
125 : 'RSTAT',
126 : 'TWSTAT',
127 : 'RWSTAT'
}
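
# The table above maps 9P2000.L message type numbers to mnemonic names, so a
# raw id from the trace can be printed symbolically, e.g. symbol_9p[110] is
# 'TWALK' and symbol_9p[111] is 'RWALK'.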
class VirtFSRequestTracker(simpletrace.Analyzer):
def begin(self):
print "Pretty printing 9p simpletrace log ..."
def v9fs_rerror(self, tag, id, err):
print "RERROR (tag =", tag, ", id =", symbol_9p[id], ", err = \"", os.strerror(err), "\")"
def v9fs_version(self, tag, id, msize, version):
print "TVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")"
def v9fs_version_return(self, tag, id, msize, version):
print "RVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")"
def v9fs_attach(self, tag, id, fid, afid, uname, aname):
print "TATTACH (tag =", tag, ", fid =", fid, ", afid =", afid, ", uname =", uname, ", aname =", aname, ")"
def v9fs_attach_return(self, tag, id, type, version, path):
print "RATTACH (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})"
def v9fs_stat(self, tag, id, fid):
print "TSTAT (tag =", tag, ", fid =", fid, ")"
def v9fs_stat_return(self, tag, id, mode, atime, mtime, length):
print "RSTAT (tag =", tag, ", mode =", mode, ", atime =", atime, ", mtime =", mtime, ", length =", length, ")"
def v9fs_getattr(self, tag, id, fid, request_mask):
print "TGETATTR (tag =", tag, ", fid =", fid, ", request_mask =", hex(request_mask), ")"
def v9fs_getattr_return(self, tag, id, result_mask, mode, uid, gid):
print "RGETATTR (tag =", tag, ", result_mask =", hex(result_mask), ", mode =", oct(mode), ", uid =", uid, ", gid =", gid, ")"
def v9fs_walk(self, tag, id, fid, newfid, nwnames):
print "TWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", nwnames =", nwnames, ")"
def v9fs_walk_return(self, tag, id, nwnames, qids):
print "RWALK (tag =", tag, ", nwnames =", nwnames, ", qids =", hex(qids), ")"
def v9fs_open(self, tag, id, fid, mode):
print "TOPEN (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ")"
def v9fs_open_return(self, tag, id, type, version, path, iounit):
print "ROPEN (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"
def v9fs_lcreate(self, tag, id, dfid, flags, mode, gid):
print "TLCREATE (tag =", tag, ", dfid =", dfid, ", flags =", oct(flags), ", mode =", oct(mode), ", gid =", gid, ")"
def v9fs_lcreate_return(self, tag, id, type, version, path, iounit):
print "RLCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"
def v9fs_fsync(self, tag, id, fid, datasync):
print "TFSYNC (tag =", tag, ", fid =", fid, ", datasync =", datasync, ")"
def v9fs_clunk(self, tag, id, fid):
print "TCLUNK (tag =", tag, ", fid =", fid, ")"
def v9fs_read(self, tag, id, fid, off, max_count):
print "TREAD (tag =", tag, ", fid =", fid, ", off =", off, ", max_count =", max_count, ")"
def v9fs_read_return(self, tag, id, count, err):
print "RREAD (tag =", tag, ", count =", count, ", err =", err, ")"
def v9fs_readdir(self, tag, id, fid, offset, max_count):
print "TREADDIR (tag =", tag, ", fid =", fid, ", offset =", offset, ", max_count =", max_count, ")"
def v9fs_readdir_return(self, tag, id, count, retval):
print "RREADDIR (tag =", tag, ", count =", count, ", retval =", retval, ")"
def v9fs_write(self, tag, id, fid, off, count, cnt):
print "TWRITE (tag =", tag, ", fid =", fid, ", off =", off, ", count =", count, ", cnt =", cnt, ")"
def v9fs_write_return(self, tag, id, total, err):
print "RWRITE (tag =", tag, ", total =", total, ", err =", err, ")"
def v9fs_create(self, tag, id, fid, name, perm, mode):
print "TCREATE (tag =", tag, ", fid =", fid, ", perm =", oct(perm), ", name =", name, ", mode =", oct(mode), ")"
def v9fs_create_return(self, tag, id, type, version, path, iounit):
print "RCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"
def v9fs_symlink(self, tag, id, fid, name, symname, gid):
print "TSYMLINK (tag =", tag, ", fid =", fid, ", name =", name, ", symname =", symname, ", gid =", gid, ")"
def v9fs_symlink_return(self, tag, id, type, version, path):
print "RSYMLINK (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})"
def v9fs_flush(self, tag, id, flush_tag):
print "TFLUSH (tag =", tag, ", flush_tag =", flush_tag, ")"
def v9fs_link(self, tag, id, dfid, oldfid, name):
print "TLINK (tag =", tag, ", dfid =", dfid, ", oldfid =", oldfid, ", name =", name, ")"
def v9fs_remove(self, tag, id, fid):
print "TREMOVE (tag =", tag, ", fid =", fid, ")"
def v9fs_wstat(self, tag, id, fid, mode, atime, mtime):
print "TWSTAT (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", atime =", atime, "mtime =", mtime, ")"
def v9fs_mknod(self, tag, id, fid, mode, major, minor):
print "TMKNOD (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", major =", major, ", minor =", minor, ")"
def v9fs_lock(self, tag, id, fid, type, start, length):
print "TLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")"
def v9fs_lock_return(self, tag, id, status):
print "RLOCK (tag =", tag, ", status =", status, ")"
def v9fs_getlock(self, tag, id, fid, type, start, length):
print "TGETLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")"
def v9fs_getlock_return(self, tag, id, type, start, length, proc_id):
print "RGETLOCK (tag =", tag, "type =", type, ", start =", start, ", length =", length, ", proc_id =", proc_id, ")"
def v9fs_mkdir(self, tag, id, fid, name, mode, gid):
print "TMKDIR (tag =", tag, ", fid =", fid, ", name =", name, ", mode =", mode, ", gid =", gid, ")"
def v9fs_mkdir_return(self, tag, id, type, version, path, err):
print "RMKDIR (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, err =", err, ")"
def v9fs_xattrwalk(self, tag, id, fid, newfid, name):
print "TXATTRWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", xattr name =", name, ")"
def v9fs_xattrwalk_return(self, tag, id, size):
print "RXATTRWALK (tag =", tag, ", xattrsize =", size, ")"
def v9fs_xattrcreate(self, tag, id, fid, name, size, flags):
print "TXATTRCREATE (tag =", tag, ", fid =", fid, ", name =", name, ", xattrsize =", size, ", flags =", flags, ")"
def v9fs_readlink(self, tag, id, fid):
print "TREADLINK (tag =", tag, ", fid =", fid, ")"
def v9fs_readlink_return(self, tag, id, target):
print "RREADLINK (tag =", tag, ", target =", target, ")"
simpletrace.run(VirtFSRequestTracker())
|
beardypig/streamlink
|
refs/heads/master
|
tests/plugins/test_bigo.py
|
2
|
import unittest
from streamlink.plugins.bigo import Bigo
class TestPluginBigo(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
"http://bigo.tv/00000000",
"https://bigo.tv/00000000",
"https://www.bigo.tv/00000000",
"http://www.bigo.tv/00000000",
"http://www.bigo.tv/fancy1234",
"http://www.bigo.tv/abc.123",
"http://www.bigo.tv/000000.00"
]
for url in should_match:
self.assertTrue(Bigo.can_handle_url(url), url)
def test_can_handle_url_negative(self):
should_not_match = [
# Old URLs don't work anymore
"http://live.bigo.tv/00000000",
"https://live.bigo.tv/00000000",
"http://www.bigoweb.co/show/00000000",
"https://www.bigoweb.co/show/00000000",
"http://bigoweb.co/show/00000000",
"https://bigoweb.co/show/00000000"
# Wrong URL structure
"https://www.bigo.tv/show/00000000",
"http://www.bigo.tv/show/00000000",
"http://bigo.tv/show/00000000",
"https://bigo.tv/show/00000000"
]
for url in should_not_match:
self.assertFalse(Bigo.can_handle_url(url), url)
|
CloudBoltSoftware/cloudbolt-forge
|
refs/heads/master
|
blueprints/aws_aurora_db_clusters/management/delete_snapshot.py
|
1
|
"""
Delete snapshot action for an AWS Aurora RDS DB cluster snapshot.
"""
from resourcehandlers.aws.models import AWSHandler
from common.methods import set_progress
from infrastructure.models import CustomField, Environment
import boto3
import time
from django.db import IntegrityError
def generate_options_for_snapshot(server=None, **kwargs):
resource = kwargs.get('resource')
snapshots = []
region = resource.attributes.get(field__name='aws_region').value
rh_id = resource.attributes.get(field__name='aws_rh_id').value
db_cluster_identifier = resource.attributes.get(field__name='db_cluster_identifier').value
handler = AWSHandler.objects.get(id=rh_id)
rds = boto3.client('rds',
region_name=region,
aws_access_key_id=handler.serviceaccount,
aws_secret_access_key=handler.servicepasswd
)
response = rds.describe_db_cluster_snapshots(
DBClusterIdentifier=db_cluster_identifier,
)
snapshots.extend([snapshot['DBClusterSnapshotIdentifier'] for snapshot in response['DBClusterSnapshots']])
if len(snapshots) == 0:
return []
return snapshots
def run(job, resource, **kwargs):
region = resource.attributes.get(field__name='aws_region').value
rh_id = resource.attributes.get(field__name='aws_rh_id').value
handler = AWSHandler.objects.get(id=rh_id)
snapshot_identifier = '{{ snapshot }}'
set_progress('Connecting to Amazon RDS')
rds = boto3.client('rds',
region_name=region,
aws_access_key_id=handler.serviceaccount,
aws_secret_access_key=handler.servicepasswd
)
set_progress('Deleting snapshot "{}"'.format(snapshot_identifier))
rds.delete_db_cluster_snapshot(
DBClusterSnapshotIdentifier=snapshot_identifier
)
return "SUCCESS", "Snapshot has succesfully been deleted", ""
|
ArcherSys/ArcherSys
|
refs/heads/master
|
Lib/site-packages/nbconvert/preprocessors/latex.py
|
26
|
"""Module that allows latex output notebooks to be conditioned before
they are converted.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function, absolute_import
from .base import Preprocessor
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class LatexPreprocessor(Preprocessor):
"""Preprocessor for latex destined documents.
Mainly populates the `latex` key in the resources dict,
adding definitions for pygments highlight styles.
"""
def preprocess(self, nb, resources):
"""Preprocessing to apply on each notebook.
Parameters
----------
nb : NotebookNode
Notebook being converted
resources : dictionary
Additional resources used in the conversion process. Allows
preprocessors to pass variables into the Jinja engine.
"""
# Generate Pygments definitions for Latex
from pygments.formatters import LatexFormatter
resources.setdefault("latex", {})
resources["latex"].setdefault("pygments_definitions", LatexFormatter().get_style_defs())
return nb, resources
|
maclandrol/profileNJ
|
refs/heads/master
|
profileNJ/TreeLib/__init__.py
|
1
|
from TreeClass import TreeClass
import TreeUtils
import ClusterUtils
import SimulModel
from memorize import memorize
import params
__all__ = ["TreeUtils", "ClusterUtils",
"TreeClass", "memorize", "params", 'SimulModel']
|
alzeih/python-vodem-vodafone-K4607-Z
|
refs/heads/master
|
test/unit/api/test_ipv6_standby_dns_auto.py
|
1
|
import unittest
from vodem.api import ipv6_standby_dns_auto
class TestIpv6StandbyDnsAuto(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.valid_response = {
'ipv6_standby_dns_auto': '',
}
def test_call(self):
resp = ipv6_standby_dns_auto()
self.assertEqual(self.valid_response, resp)
|
ralph-mikera/Routeflow2
|
refs/heads/master
|
rflib/ipc/MongoIPC.py
|
6
|
import pymongo as mongo
from rflib.defs import MONGO_ADDRESS, MONGO_DB_NAME
import rflib.ipc.IPC as IPC
FROM_FIELD = "from"
TO_FIELD = "to"
TYPE_FIELD = "type"
READ_FIELD = "read"
CONTENT_FIELD = "content"
# 1 MB for the capped collection
CC_SIZE = 1048576
def put_in_envelope(from_, to, msg):
envelope = {}
envelope[FROM_FIELD] = from_
envelope[TO_FIELD] = to
envelope[READ_FIELD] = False
envelope[TYPE_FIELD] = msg.get_type()
envelope[CONTENT_FIELD] = {}
for (k, v) in msg.to_dict().items():
envelope[CONTENT_FIELD][k] = v
return envelope
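
# Shape sketch (illustrative; the ids and type number are hypothetical): for a
# message of type 3 sent from "rfserver" to "rfproxy", put_in_envelope returns
#   {"from": "rfserver", "to": "rfproxy", "read": False, "type": 3,
#    "content": {...fields from msg.to_dict()...}}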
def take_from_envelope(envelope, factory):
    msg = factory.build_for_type(envelope[TYPE_FIELD])
    msg.from_dict(envelope[CONTENT_FIELD])
    return msg
def format_address(address):
try:
tmp = address.split(":")
if len(tmp) == 2:
return (tmp[0], int(tmp[1]))
elif len(tmp) == 1:
return (tmp[0],)
except:
raise ValueError, "Invalid address: " + str(address)
class MongoIPCMessageService(IPC.IPCMessageService):
def __init__(self, address, db, id_, threading_):
"""Construct an IPCMessageService
Args:
address: designates where the MongoDB instance is running.
db: is the database name to connect to on MongoDB.
id_: is an identifier to allow messages to be directed to the
appropriate recipient.
threading_: thread management interface, see IPCService.py
"""
self._db = db
self.address = format_address(address)
self._id = id_
self._producer_connection = mongo.Connection(*self.address)
self._threading = threading_
def listen(self, channel_id, factory, processor, block=True):
worker = self._threading.Thread(target=self._listen_worker,
args=(channel_id, factory, processor))
worker.start()
if block:
worker.join()
def send(self, channel_id, to, msg):
self._create_channel(self._producer_connection, channel_id)
collection = self._producer_connection[self._db][channel_id]
collection.insert(put_in_envelope(self.get_id(), to, msg))
return True
def _listen_worker(self, channel_id, factory, processor):
connection = mongo.Connection(*self.address)
self._create_channel(connection, channel_id)
collection = connection[self._db][channel_id]
cursor = collection.find({TO_FIELD: self.get_id(), READ_FIELD: False}, sort=[("_id", mongo.ASCENDING)])
while True:
for envelope in cursor:
msg = take_from_envelope(envelope, factory)
                processor.process(envelope[FROM_FIELD], envelope[TO_FIELD], channel_id, msg)
collection.update({"_id": envelope["_id"]}, {"$set": {READ_FIELD: True}})
self._threading.sleep(0.05)
cursor = collection.find({TO_FIELD: self.get_id(), READ_FIELD: False}, sort=[("_id", mongo.ASCENDING)])
def _create_channel(self, connection, name):
db = connection[self._db]
try:
collection = mongo.collection.Collection(db, name, None, True, capped=True, size=CC_SIZE)
collection.ensure_index([("_id", mongo.ASCENDING)])
collection.ensure_index([(TO_FIELD, mongo.ASCENDING)])
# TODO: improve this catch. It should be more specific, but pymongo
# behavior doesn't match its documentation, so we are being dirty.
except:
pass
def buildIPC(role, id_, threading_):
return MongoIPCMessageService(MONGO_ADDRESS, MONGO_DB_NAME, id_, threading_)
|
aidan-/ansible-modules-extras
|
refs/heads/devel
|
packaging/os/pkgng.py
|
2
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, bleader
# Written by bleader <bleader@ratonland.org>
# Based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
# that was based on pacman module written by Afterburn <http://github.com/afterburn>
# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: pkgng
short_description: Package manager for FreeBSD >= 9.0
description:
- Manage binary packages for FreeBSD using 'pkgng' which
is available in versions after 9.0.
version_added: "1.2"
options:
name:
description:
- Name of package to install/remove.
required: true
state:
description:
- State of the package.
choices: [ 'present', 'absent' ]
required: false
default: present
cached:
description:
- Use local package base instead of fetching an updated one.
choices: [ 'yes', 'no' ]
required: false
default: no
annotation:
description:
- A comma-separated list of keyvalue-pairs of the form
C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a
C(-) denotes removing an annotation, and C(:) denotes modifying an
annotation.
If setting or modifying annotations, a value must be provided.
required: false
version_added: "1.6"
pkgsite:
description:
- For pkgng versions before 1.1.4, specify packagesite to use
for downloading packages. If not specified, use settings from
C(/usr/local/etc/pkg.conf).
      - For newer pkgng versions, specify the name of a repository
configured in C(/usr/local/etc/pkg/repos).
required: false
rootdir:
description:
- For pkgng versions 1.5 and later, pkg will install all packages
within the specified root directory.
- Can not be used together with I(chroot) option.
required: false
chroot:
version_added: "2.1"
description:
- Pkg will chroot in the specified environment.
- Can not be used together with I(rootdir) option.
required: false
autoremove:
version_added: "2.2"
description:
- Remove automatically installed packages which are no longer needed.
required: false
choices: [ "yes", "no" ]
default: no
author: "bleader (@bleader)"
notes:
  - When using pkgsite, be aware that packages already present in the cache won't be downloaded again.
'''
EXAMPLES = '''
# Install package foo
- pkgng:
name: foo
state: present
# Annotate package foo and bar
- pkgng:
name: foo,bar
annotation: '+test1=baz,-test2,:test3=foobar'
# Remove packages foo and bar
- pkgng:
name: foo,bar
state: absent
'''
import re
from ansible.module_utils.basic import AnsibleModule
def query_package(module, pkgng_path, name, dir_arg):
rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name))
if rc == 0:
return True
return False
def pkgng_older_than(module, pkgng_path, compare_version):
rc, out, err = module.run_command("%s -v" % pkgng_path)
version = [int(x) for x in re.split(r'[\._]', out)]
i = 0
new_pkgng = True
while compare_version[i] == version[i]:
i += 1
if i == min(len(compare_version), len(version)):
break
else:
if compare_version[i] > version[i]:
new_pkgng = False
return not new_pkgng
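
# Sketch of the comparison above (hypothetical output): if `pkg -v` prints
# "1.0.9", pkgng_older_than(module, pkgng_path, [1, 1, 4]) walks the version
# components, finds 1 > 0 at the second position, and returns True.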
def remove_packages(module, pkgng_path, packages, dir_arg):
remove_c = 0
    # Using a for loop so that, in case of an error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, pkgng_path, package, dir_arg):
continue
if not module.check_mode:
rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, package))
if not module.check_mode and query_package(module, pkgng_path, package, dir_arg):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
return (True, "removed %s package(s)" % remove_c)
return (False, "package(s) already absent")
def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg):
install_c = 0
# as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions
# in /usr/local/etc/pkg/repos
old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4])
if pkgsite != "":
if old_pkgng:
pkgsite = "PACKAGESITE=%s" % (pkgsite)
else:
pkgsite = "-r %s" % (pkgsite)
batch_var = 'env BATCH=yes' # This environment variable skips mid-install prompts,
# setting them to their default values.
if not module.check_mode and not cached:
if old_pkgng:
rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
else:
rc, out, err = module.run_command("%s %s update" % (pkgng_path, dir_arg))
if rc != 0:
module.fail_json(msg="Could not update catalogue")
for package in packages:
if query_package(module, pkgng_path, package, dir_arg):
continue
if not module.check_mode:
if old_pkgng:
rc, out, err = module.run_command("%s %s %s install -g -U -y %s" % (batch_var, pkgsite, pkgng_path, package))
else:
rc, out, err = module.run_command("%s %s %s install %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, pkgsite, package))
if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg):
module.fail_json(msg="failed to install %s: %s" % (package, out), stderr=err)
install_c += 1
if install_c > 0:
return (True, "added %s package(s)" % (install_c))
return (False, "package(s) already present")
def annotation_query(module, pkgng_path, package, tag, dir_arg):
rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package))
match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
if match:
return match.group('value')
return False
def annotation_add(module, pkgng_path, package, tag, value, dir_arg):
_value = annotation_query(module, pkgng_path, package, tag, dir_arg)
if not _value:
# Annotation does not exist, add it.
rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
% (pkgng_path, dir_arg, package, tag, value))
if rc != 0:
module.fail_json("could not annotate %s: %s"
% (package, out), stderr=err)
return True
elif _value != value:
# Annotation exists, but value differs
module.fail_json(
mgs="failed to annotate %s, because %s is already set to %s, but should be set to %s"
% (package, tag, _value, value))
return False
else:
# Annotation exists, nothing to do
return False
def annotation_delete(module, pkgng_path, package, tag, value, dir_arg):
_value = annotation_query(module, pkgng_path, package, tag, dir_arg)
if _value:
rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
% (pkgng_path, dir_arg, package, tag))
if rc != 0:
module.fail_json("could not delete annotation to %s: %s"
% (package, out), stderr=err)
return True
return False
def annotation_modify(module, pkgng_path, package, tag, value, dir_arg):
_value = annotation_query(module, pkgng_path, package, tag, dir_arg)
    if not _value:
        # No such tag
        module.fail_json(msg="could not change annotation on %s: tag %s does not exist"
                         % (package, tag))
elif _value == value:
# No change in value
return False
else:
        rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"'
                                          % (pkgng_path, dir_arg, package, tag, value))
        if rc != 0:
            module.fail_json(msg="could not change annotation on %s: %s"
                             % (package, out), stderr=err)
return True
def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
annotate_c = 0
annotations = map(lambda _annotation:
re.match(r'(?P<operation>[\+-:])(?P<tag>\w+)(=(?P<value>\w+))?',
_annotation).groupdict(),
re.split(r',', annotation))
operation = {
'+': annotation_add,
'-': annotation_delete,
':': annotation_modify
}
for package in packages:
for _annotation in annotations:
            if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value'], dir_arg):
annotate_c += 1
if annotate_c > 0:
return (True, "added %s annotations." % annotate_c)
return (False, "changed no annotations")
def autoremove_packages(module, pkgng_path, dir_arg):
rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg))
autoremove_c = 0
match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE)
if match:
autoremove_c = int(match.group(1))
if autoremove_c == 0:
return False, "no package(s) to autoremove"
if not module.check_mode:
rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg))
return True, "autoremoved %d package(s)" % (autoremove_c)
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default="present", choices=["present","absent"], required=False),
name = dict(aliases=["pkg"], required=True, type='list'),
cached = dict(default=False, type='bool'),
annotation = dict(default="", required=False),
pkgsite = dict(default="", required=False),
rootdir = dict(default="", required=False, type='path'),
chroot = dict(default="", required=False, type='path'),
autoremove = dict(default=False, type='bool')),
supports_check_mode = True,
mutually_exclusive =[["rootdir", "chroot"]])
pkgng_path = module.get_bin_path('pkg', True)
p = module.params
pkgs = p["name"]
changed = False
msgs = []
dir_arg = ""
if p["rootdir"] != "":
old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0])
if old_pkgng:
module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
else:
dir_arg = "--rootdir %s" % (p["rootdir"])
if p["chroot"] != "":
dir_arg = '--chroot %s' % (p["chroot"])
if p["state"] == "present":
_changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], dir_arg)
changed = changed or _changed
msgs.append(_msg)
elif p["state"] == "absent":
_changed, _msg = remove_packages(module, pkgng_path, pkgs, dir_arg)
changed = changed or _changed
msgs.append(_msg)
if p["autoremove"]:
_changed, _msg = autoremove_packages(module, pkgng_path, dir_arg)
changed = changed or _changed
msgs.append(_msg)
if p["annotation"]:
_changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg)
changed = changed or _changed
msgs.append(_msg)
module.exit_json(changed=changed, msg=", ".join(msgs))
if __name__ == '__main__':
main()
|
alexdglover/shill-isms
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/constants.py
|
3007
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
_debug = 0
eDetecting = 0
eFoundIt = 1
eNotMe = 2
eStart = 0
eError = 1
eItsMe = 2
SHORTCUT_THRESHOLD = 0.95
|
wwright2/dcim3-angstrom1
|
refs/heads/master
|
sources/openembedded-core/scripts/lib/wic/3rdparty/pykickstart/base.py
|
14
|
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2006, 2007, 2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
"""
Base classes for creating commands and syntax version object.
This module exports several important base classes:
BaseData - The base abstract class for all data objects. Data objects
are contained within a BaseHandler object.
BaseHandler - The base abstract class from which versioned kickstart
handler are derived. Subclasses of BaseHandler hold
BaseData and KickstartCommand objects.
DeprecatedCommand - An abstract subclass of KickstartCommand that should
be further subclassed by users of this module. When
a subclass is used, a warning message will be
printed.
KickstartCommand - The base abstract class for all kickstart commands.
Command objects are contained within a BaseHandler
object.
"""
import gettext
gettext.textdomain("pykickstart")
_ = lambda x: gettext.ldgettext("pykickstart", x)
import types
import warnings
from pykickstart.errors import *
from pykickstart.ko import *
from pykickstart.parser import Packages
from pykickstart.version import versionToString
###
### COMMANDS
###
class KickstartCommand(KickstartObject):
"""The base class for all kickstart commands. This is an abstract class."""
removedKeywords = []
removedAttrs = []
def __init__(self, writePriority=0, *args, **kwargs):
"""Create a new KickstartCommand instance. This method must be
provided by all subclasses, but subclasses must call
KickstartCommand.__init__ first. Instance attributes:
currentCmd -- The name of the command in the input file that
caused this handler to be run.
currentLine -- The current unprocessed line from the input file
that caused this handler to be run.
handler -- A reference to the BaseHandler subclass this
                              command is contained within. This is needed to
allow referencing of Data objects.
lineno -- The current line number in the input file.
writePriority -- An integer specifying when this command should be
printed when iterating over all commands' __str__
methods. The higher the number, the later this
command will be written. All commands with the
same priority will be written alphabetically.
"""
# We don't want people using this class by itself.
if self.__class__ is KickstartCommand:
raise TypeError, "KickstartCommand is an abstract class."
KickstartObject.__init__(self, *args, **kwargs)
self.writePriority = writePriority
# These will be set by the dispatcher.
self.currentCmd = ""
self.currentLine = ""
self.handler = None
self.lineno = 0
# If a subclass provides a removedKeywords list, remove all the
# members from the kwargs list before we start processing it. This
# ensures that subclasses don't continue to recognize arguments that
# were removed.
for arg in filter(kwargs.has_key, self.removedKeywords):
kwargs.pop(arg)
def __call__(self, *args, **kwargs):
"""Set multiple attributes on a subclass of KickstartCommand at once
via keyword arguments. Valid attributes are anything specified in
a subclass, but unknown attributes will be ignored.
"""
for (key, val) in kwargs.items():
# Ignore setting attributes that were removed in a subclass, as
# if they were unknown attributes.
if key in self.removedAttrs:
continue
if hasattr(self, key):
setattr(self, key, val)
def __str__(self):
"""Return a string formatted for output to a kickstart file. This
method must be provided by all subclasses.
"""
return KickstartObject.__str__(self)
def parse(self, args):
"""Parse the list of args and set data on the KickstartCommand object.
This method must be provided by all subclasses.
"""
raise TypeError, "parse() not implemented for KickstartCommand"
def apply(self, instroot="/"):
"""Write out the configuration related to the KickstartCommand object.
Subclasses which do not provide this method will not have their
configuration written out.
"""
return
def dataList(self):
"""For commands that can occur multiple times in a single kickstart
file (like network, part, etc.), return the list that we should
append more data objects to.
"""
return None
def deleteRemovedAttrs(self):
"""Remove all attributes from self that are given in the removedAttrs
list. This method should be called from __init__ in a subclass,
but only after the superclass's __init__ method has been called.
"""
for attr in filter(lambda k: hasattr(self, k), self.removedAttrs):
delattr(self, attr)
# Set the contents of the opts object (an instance of optparse.Values
# returned by parse_args) as attributes on the KickstartCommand object.
# It's useful to call this from KickstartCommand subclasses after parsing
# the arguments.
def _setToSelf(self, optParser, opts):
self._setToObj(optParser, opts, self)
# Sets the contents of the opts object (an instance of optparse.Values
# returned by parse_args) as attributes on the provided object obj. It's
# useful to call this from KickstartCommand subclasses that handle lists
# of objects (like partitions, network devices, etc.) and need to populate
# a Data object.
def _setToObj(self, optParser, opts, obj):
for key in filter (lambda k: getattr(opts, k) != None, optParser.keys()):
setattr(obj, key, getattr(opts, key))
class DeprecatedCommand(KickstartCommand):
"""Specify that a command is deprecated and no longer has any function.
Any command that is deprecated should be subclassed from this class,
only specifying an __init__ method that calls the superclass's __init__.
This is an abstract class.
"""
def __init__(self, writePriority=None, *args, **kwargs):
# We don't want people using this class by itself.
        if self.__class__ is DeprecatedCommand:
raise TypeError, "DeprecatedCommand is an abstract class."
# Create a new DeprecatedCommand instance.
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
def __str__(self):
"""Placeholder since DeprecatedCommands don't work anymore."""
return ""
def parse(self, args):
"""Print a warning message if the command is seen in the input file."""
mapping = {"lineno": self.lineno, "cmd": self.currentCmd}
warnings.warn(_("Ignoring deprecated command on line %(lineno)s: The %(cmd)s command has been deprecated and no longer has any effect. It may be removed from future releases, which will result in a fatal error from kickstart. Please modify your kickstart file to remove this command.") % mapping, DeprecationWarning)
###
### HANDLERS
###
class BaseHandler(KickstartObject):
"""Each version of kickstart syntax is provided by a subclass of this
class. These subclasses are what users will interact with for parsing,
extracting data, and writing out kickstart files. This is an abstract
class.
version -- The version this syntax handler supports. This is set by
a class attribute of a BaseHandler subclass and is used to
set up the command dict. It is for read-only use.
"""
version = None
def __init__(self, mapping=None, dataMapping=None, commandUpdates=None,
dataUpdates=None, *args, **kwargs):
"""Create a new BaseHandler instance. This method must be provided by
all subclasses, but subclasses must call BaseHandler.__init__ first.
mapping -- A custom map from command strings to classes,
useful when creating your own handler with
special command objects. It is otherwise unused
and rarely needed. If you give this argument,
the mapping takes the place of the default one
and so must include all commands you want
recognized.
dataMapping -- This is the same as mapping, but for data
objects. All the same comments apply.
commandUpdates -- This is similar to mapping, but does not take
the place of the defaults entirely. Instead,
this mapping is applied after the defaults and
updates it with just the commands you want to
modify.
dataUpdates -- This is the same as commandUpdates, but for
data objects.
Instance attributes:
commands -- A mapping from a string command to a KickstartCommand
subclass object that handles it. Multiple strings can
map to the same object, but only one instance of the
command object should ever exist. Most users should
never have to deal with this directly, as it is
manipulated internally and called through dispatcher.
currentLine -- The current unprocessed line from the input file
that caused this handler to be run.
packages -- An instance of pykickstart.parser.Packages which
describes the packages section of the input file.
platform -- A string describing the hardware platform, which is
needed only by system-config-kickstart.
scripts -- A list of pykickstart.parser.Script instances, which is
populated by KickstartParser.addScript and describes the
%pre/%post/%traceback script section of the input file.
"""
# We don't want people using this class by itself.
if self.__class__ is BaseHandler:
raise TypeError, "BaseHandler is an abstract class."
KickstartObject.__init__(self, *args, **kwargs)
# This isn't really a good place for these, but it's better than
# everything else I can think of.
self.scripts = []
self.packages = Packages()
self.platform = ""
# These will be set by the dispatcher.
self.commands = {}
self.currentLine = 0
# A dict keyed by an integer priority number, with each value being a
# list of KickstartCommand subclasses. This dict is maintained by
# registerCommand and used in __str__. No one else should be touching
# it.
self._writeOrder = {}
self._registerCommands(mapping, dataMapping, commandUpdates, dataUpdates)
def __str__(self):
"""Return a string formatted for output to a kickstart file."""
retval = ""
if self.platform != "":
retval += "#platform=%s\n" % self.platform
retval += "#version=%s\n" % versionToString(self.version)
lst = self._writeOrder.keys()
lst.sort()
for prio in lst:
for obj in self._writeOrder[prio]:
retval += obj.__str__()
for script in self.scripts:
retval += script.__str__()
retval += self.packages.__str__()
return retval
def _insertSorted(self, lst, obj):
length = len(lst)
i = 0
while i < length:
# If the two classes have the same name, it's because we are
# overriding an existing class with one from a later kickstart
# version, so remove the old one in favor of the new one.
if obj.__class__.__name__ > lst[i].__class__.__name__:
i += 1
elif obj.__class__.__name__ == lst[i].__class__.__name__:
lst[i] = obj
return
elif obj.__class__.__name__ < lst[i].__class__.__name__:
break
if i >= length:
lst.append(obj)
else:
lst.insert(i, obj)
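
    # Illustrative behaviour of the helper above: inserting objects of classes
    # named B, A, B into an empty list yields [A, B], because equal class names
    # replace the earlier entry (later kickstart versions win) and the list
    # stays sorted alphabetically by class name.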
def _setCommand(self, cmdObj):
# Add an attribute on this version object. We need this to provide a
# way for clients to access the command objects. We also need to strip
# off the version part from the front of the name.
if cmdObj.__class__.__name__.find("_") != -1:
name = unicode(cmdObj.__class__.__name__.split("_", 1)[1])
else:
name = unicode(cmdObj.__class__.__name__).lower()
setattr(self, name.lower(), cmdObj)
# Also, add the object into the _writeOrder dict in the right place.
if cmdObj.writePriority is not None:
if self._writeOrder.has_key(cmdObj.writePriority):
self._insertSorted(self._writeOrder[cmdObj.writePriority], cmdObj)
else:
self._writeOrder[cmdObj.writePriority] = [cmdObj]
def _registerCommands(self, mapping=None, dataMapping=None, commandUpdates=None,
dataUpdates=None):
if mapping == {} or mapping == None:
from pykickstart.handlers.control import commandMap
cMap = commandMap[self.version]
else:
cMap = mapping
if dataMapping == {} or dataMapping == None:
from pykickstart.handlers.control import dataMap
dMap = dataMap[self.version]
else:
dMap = dataMapping
if type(commandUpdates) == types.DictType:
cMap.update(commandUpdates)
if type(dataUpdates) == types.DictType:
dMap.update(dataUpdates)
for (cmdName, cmdClass) in cMap.iteritems():
# First make sure we haven't instantiated this command handler
# already. If we have, we just need to make another mapping to
# it in self.commands.
cmdObj = None
for (key, val) in self.commands.iteritems():
if val.__class__.__name__ == cmdClass.__name__:
cmdObj = val
break
# If we didn't find an instance in self.commands, create one now.
if cmdObj == None:
cmdObj = cmdClass()
self._setCommand(cmdObj)
# Finally, add the mapping to the commands dict.
self.commands[cmdName] = cmdObj
self.commands[cmdName].handler = self
# We also need to create attributes for the various data objects.
# No checks here because dMap is a bijection. At least, that's what
# the comment says. Hope no one screws that up.
for (dataName, dataClass) in dMap.iteritems():
setattr(self, dataName, dataClass)
def dispatcher(self, args, lineno):
"""Call the appropriate KickstartCommand handler for the current line
in the kickstart file. A handler for the current command should
be registered, though a handler of None is not an error. Returns
the data object returned by KickstartCommand.parse.
args -- A list of arguments to the current command
lineno -- The line number in the file, for error reporting
"""
cmd = args[0]
if not self.commands.has_key(cmd):
raise KickstartParseError, formatErrorMsg(lineno, msg=_("Unknown command: %s" % cmd))
elif self.commands[cmd] != None:
self.commands[cmd].currentCmd = cmd
self.commands[cmd].currentLine = self.currentLine
self.commands[cmd].lineno = lineno
# The parser returns the data object that was modified. This could
# be a BaseData subclass that should be put into a list, or it
# could be the command handler object itself.
obj = self.commands[cmd].parse(args[1:])
lst = self.commands[cmd].dataList()
if lst is not None:
lst.append(obj)
return obj
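
    # Dispatch sketch (hypothetical input line): for the kickstart line
    # "timezone Europe/Prague", the parser calls
    # handler.dispatcher(["timezone", "Europe/Prague"], lineno), which routes
    # to self.commands["timezone"].parse(["Europe/Prague"]).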
def maskAllExcept(self, lst):
"""Set all entries in the commands dict to None, except the ones in
the lst. All other commands will not be processed.
"""
self._writeOrder = {}
for (key, val) in self.commands.iteritems():
if not key in lst:
self.commands[key] = None
def hasCommand(self, cmd):
"""Return true if there is a handler for the string cmd."""
return hasattr(self, cmd)
###
### DATA
###
class BaseData(KickstartObject):
"""The base class for all data objects. This is an abstract class."""
removedKeywords = []
removedAttrs = []
def __init__(self, *args, **kwargs):
"""Create a new BaseData instance.
lineno -- Line number in the ks-file where this object was defined
"""
# We don't want people using this class by itself.
if self.__class__ is BaseData:
raise TypeError, "BaseData is an abstract class."
KickstartObject.__init__(self, *args, **kwargs)
self.lineno = 0
def __str__(self):
"""Return a string formatted for output to a kickstart file."""
return ""
def __call__(self, *args, **kwargs):
"""Set multiple attributes on a subclass of BaseData at once via
keyword arguments. Valid attributes are anything specified in a
subclass, but unknown attributes will be ignored.
"""
for (key, val) in kwargs.items():
# Ignore setting attributes that were removed in a subclass, as
# if they were unknown attributes.
if key in self.removedAttrs:
continue
if hasattr(self, key):
setattr(self, key, val)
def deleteRemovedAttrs(self):
"""Remove all attributes from self that are given in the removedAttrs
list. This method should be called from __init__ in a subclass,
but only after the superclass's __init__ method has been called.
"""
for attr in filter(lambda k: hasattr(self, k), self.removedAttrs):
delattr(self, attr)
|
stevekuznetsov/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/cloudstack/cs_vmsnapshot.py
|
48
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cs_vmsnapshot
short_description: Manages VM snapshots on Apache CloudStack based clouds.
description:
- Create, remove and revert VM from snapshots.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Unique Name of the snapshot. In CloudStack terms display name.
required: true
aliases: ['display_name']
vm:
description:
- Name of the virtual machine.
required: true
description:
description:
- Description of the snapshot.
required: false
default: null
snapshot_memory:
description:
- Snapshot memory if set to true.
required: false
default: false
zone:
description:
- Name of the zone in which the VM is in. If not set, default zone is used.
required: false
default: null
project:
description:
- Name of the project the VM is assigned to.
required: false
default: null
state:
description:
- State of the snapshot.
required: false
default: 'present'
choices: [ 'present', 'absent', 'revert' ]
domain:
description:
- Domain the VM snapshot is related to.
required: false
default: null
account:
description:
- Account the VM snapshot is related to.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create a VM snapshot of disk and memory before an upgrade
- local_action:
module: cs_vmsnapshot
name: Snapshot before upgrade
vm: web-01
snapshot_memory: yes
# Revert a VM to a snapshot after a failed upgrade
- local_action:
module: cs_vmsnapshot
name: Snapshot before upgrade
vm: web-01
state: revert
# Remove a VM snapshot after successful upgrade
- local_action:
module: cs_vmsnapshot
name: Snapshot before upgrade
vm: web-01
state: absent
'''
RETURN = '''
---
id:
description: UUID of the snapshot.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
description: Name of the snapshot.
returned: success
type: string
sample: snapshot before update
display_name:
description: Display name of the snapshot.
returned: success
type: string
sample: snapshot before update
created:
  description: Date of the snapshot.
  returned: success
  type: string
  sample: 2015-03-29T14:57:06+0200
current:
  description: True if the snapshot is the current one.
  returned: success
  type: boolean
  sample: True
state:
  description: State of the VM snapshot.
  returned: success
  type: string
  sample: Allocated
type:
  description: Type of the VM snapshot.
  returned: success
  type: string
  sample: DiskAndMemory
description:
  description: Description of the VM snapshot.
  returned: success
  type: string
  sample: snapshot brought to you by Ansible
domain:
  description: Domain the VM snapshot is related to.
  returned: success
  type: string
  sample: example domain
account:
  description: Account the VM snapshot is related to.
  returned: success
  type: string
  sample: example account
project:
  description: Name of the project the VM snapshot is related to.
  returned: success
  type: string
  sample: Production
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackVmSnapshot(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackVmSnapshot, self).__init__(module)
self.returns = {
'type': 'type',
'current': 'current',
}
def get_snapshot(self):
args = {}
args['virtualmachineid'] = self.get_vm('id')
args['account'] = self.get_account('name')
args['domainid'] = self.get_domain('id')
args['projectid'] = self.get_project('id')
args['name'] = self.module.params.get('name')
snapshots = self.cs.listVMSnapshot(**args)
if snapshots:
return snapshots['vmSnapshot'][0]
return None
def create_snapshot(self):
snapshot = self.get_snapshot()
if not snapshot:
self.result['changed'] = True
args = {}
args['virtualmachineid'] = self.get_vm('id')
args['name'] = self.module.params.get('name')
args['description'] = self.module.params.get('description')
args['snapshotmemory'] = self.module.params.get('snapshot_memory')
if not self.module.check_mode:
res = self.cs.createVMSnapshot(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
snapshot = self.poll_job(res, 'vmsnapshot')
return snapshot
def remove_snapshot(self):
snapshot = self.get_snapshot()
if snapshot:
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.deleteVMSnapshot(vmsnapshotid=snapshot['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
res = self.poll_job(res, 'vmsnapshot')
return snapshot
def revert_vm_to_snapshot(self):
snapshot = self.get_snapshot()
if snapshot:
self.result['changed'] = True
if snapshot['state'] != "Ready":
self.module.fail_json(msg="snapshot state is '%s', not ready, could not revert VM" % snapshot['state'])
            if not self.module.check_mode:
                res = self.cs.revertToVMSnapshot(vmsnapshotid=snapshot['id'])
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                poll_async = self.module.params.get('poll_async')
                if res and poll_async:
                    res = self.poll_job(res, 'vmsnapshot')
return snapshot
self.module.fail_json(msg="snapshot not found, could not revert VM")
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name = dict(required=True, aliases=['display_name']),
vm = dict(required=True),
description = dict(default=None),
zone = dict(default=None),
snapshot_memory = dict(type='bool', default=False),
state = dict(choices=['present', 'absent', 'revert'], default='present'),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(type='bool', default=True),
))
    required_together = cs_required_together()
module = AnsibleModule(
argument_spec=argument_spec,
required_together=required_together,
supports_check_mode=True
)
try:
acs_vmsnapshot = AnsibleCloudStackVmSnapshot(module)
state = module.params.get('state')
if state in ['revert']:
snapshot = acs_vmsnapshot.revert_vm_to_snapshot()
elif state in ['absent']:
snapshot = acs_vmsnapshot.remove_snapshot()
else:
snapshot = acs_vmsnapshot.create_snapshot()
result = acs_vmsnapshot.get_result(snapshot)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
open-homeautomation/home-assistant
|
refs/heads/dev
|
homeassistant/components/http/ban.py
|
2
|
"""Ban logic for HTTP component."""
import asyncio
from collections import defaultdict
from datetime import datetime
from ipaddress import ip_address
import logging
from aiohttp.web_exceptions import HTTPForbidden, HTTPUnauthorized
import voluptuous as vol
from homeassistant.components import persistent_notification
from homeassistant.config import load_yaml_config_file
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.util.yaml import dump
from .const import (
KEY_BANS_ENABLED, KEY_BANNED_IPS, KEY_LOGIN_THRESHOLD,
KEY_FAILED_LOGIN_ATTEMPTS)
from .util import get_real_ip
NOTIFICATION_ID_BAN = 'ip-ban'
NOTIFICATION_ID_LOGIN = 'http-login'
IP_BANS_FILE = 'ip_bans.yaml'
ATTR_BANNED_AT = "banned_at"
SCHEMA_IP_BAN_ENTRY = vol.Schema({
vol.Optional('banned_at'): vol.Any(None, cv.datetime)
})
_LOGGER = logging.getLogger(__name__)
@asyncio.coroutine
def ban_middleware(app, handler):
"""IP Ban middleware."""
if not app[KEY_BANS_ENABLED]:
return handler
if KEY_BANNED_IPS not in app:
hass = app['hass']
app[KEY_BANNED_IPS] = yield from hass.loop.run_in_executor(
None, load_ip_bans_config, hass.config.path(IP_BANS_FILE))
@asyncio.coroutine
def ban_middleware_handler(request):
"""Verify if IP is not banned."""
ip_address_ = get_real_ip(request)
is_banned = any(ip_ban.ip_address == ip_address_
for ip_ban in request.app[KEY_BANNED_IPS])
if is_banned:
raise HTTPForbidden()
try:
return (yield from handler(request))
except HTTPUnauthorized:
yield from process_wrong_login(request)
raise
return ban_middleware_handler
@asyncio.coroutine
def process_wrong_login(request):
"""Process a wrong login attempt."""
remote_addr = get_real_ip(request)
msg = ('Login attempt or request with invalid authentication '
'from {}'.format(remote_addr))
_LOGGER.warning(msg)
persistent_notification.async_create(
request.app['hass'], msg, 'Login attempt failed',
NOTIFICATION_ID_LOGIN)
if (not request.app[KEY_BANS_ENABLED] or
request.app[KEY_LOGIN_THRESHOLD] < 1):
return
if KEY_FAILED_LOGIN_ATTEMPTS not in request.app:
request.app[KEY_FAILED_LOGIN_ATTEMPTS] = defaultdict(int)
request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] += 1
if (request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] >
request.app[KEY_LOGIN_THRESHOLD]):
new_ban = IpBan(remote_addr)
request.app[KEY_BANNED_IPS].append(new_ban)
hass = request.app['hass']
yield from hass.loop.run_in_executor(
None, update_ip_bans_config, hass.config.path(IP_BANS_FILE),
new_ban)
_LOGGER.warning('Banned IP %s for too many login attempts',
remote_addr)
persistent_notification.async_create(
hass,
'Too many login attempts from {}'.format(remote_addr),
'Banning IP address', NOTIFICATION_ID_BAN)
class IpBan(object):
"""Represents banned IP address."""
def __init__(self, ip_ban: str, banned_at: datetime=None) -> None:
"""Initializing Ip Ban object."""
self.ip_address = ip_address(ip_ban)
self.banned_at = banned_at or datetime.utcnow()
def load_ip_bans_config(path: str):
"""Loading list of banned IPs from config file."""
ip_list = []
try:
list_ = load_yaml_config_file(path)
except FileNotFoundError:
return []
except HomeAssistantError as err:
_LOGGER.error('Unable to load %s: %s', path, str(err))
return []
for ip_ban, ip_info in list_.items():
try:
ip_info = SCHEMA_IP_BAN_ENTRY(ip_info)
ip_list.append(IpBan(ip_ban, ip_info['banned_at']))
except vol.Invalid as err:
_LOGGER.error('Failed to load IP ban %s: %s', ip_info, err)
continue
return ip_list
def update_ip_bans_config(path: str, ip_ban: IpBan):
"""Update config file with new banned IP address."""
with open(path, 'a') as out:
ip_ = {str(ip_ban.ip_address): {
ATTR_BANNED_AT: ip_ban.banned_at.strftime("%Y-%m-%dT%H:%M:%S")
}}
out.write('\n')
out.write(dump(ip_))
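# A sketch of the resulting ip_bans.yaml entries (assumed format, derived
# from the dump() call above and SCHEMA_IP_BAN_ENTRY):
#
#   127.0.0.1:
#     banned_at: '2017-01-01T12:00:00'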
|
jedie/bootstrap_env
|
refs/heads/master
|
bootstrap_env/boot_source/{{cookiecutter.project_name}}/{{cookiecutter.bootstrap_filename}}.py
|
1
|
#!/usr/bin/python3
"""
{{cookiecutter.project_name}} bootstrap
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An interactive shell for booting the '{{cookiecutter.project_name}}' project.
Note:
- This file is "self-contained".
- It uses **only** the Python standard library.
- So it's runnable on a bare Python 3 installation.
- On debian / ubuntu the 'python3-venv' package is needed!
usage, e.g.:
$ wget {{cookiecutter.raw_url}}/{{cookiecutter.package_name}}/boot_{{cookiecutter.package_name}}.py
$ python3 boot_{{cookiecutter.package_name}}.py
{{cookiecutter.bootstrap_filename}}> boot ~/{{cookiecutter.project_name}}-env
NOTE:
* This file is generated via cookiecutter!
* Don't edit it directly!
* The source file can be found here:
https://github.com/jedie/bootstrap_env/blob/master/bootstrap_env/boot_source/
* Create issues about this file here:
https://github.com/jedie/bootstrap_env/issues
* Pull requests are welcome ;)
:created: 11.03.2018 by Jens Diemer, www.jensdiemer.de
:copyleft: 2018-2019 by the bootstrap_env team, see AUTHORS for more details.
:license: GNU General Public License v3 or later (GPLv3+), see LICENSE for more details.
"""
import cmd
import logging
import os
import pathlib
import subprocess
import sys
import time
import traceback
from pathlib import Path
if sys.version_info < (3, 5):
print("\nERROR: Python 3.5 or greater is required!")
print("(Current Python Verison is %s)\n" % sys.version.split(" ",1)[0])
sys.exit(101)
try:
import venv
except ImportError as err:
    # e.g. Debian/Ubuntu don't ship the 'venv' module by default
print("\nERROR: 'venv' not available: %s (Maybe 'python3-venv' package not installed?!?)" % err)
try:
import ensurepip
except ImportError as err:
    # e.g. Debian/Ubuntu don't ship the 'ensurepip' module by default
print("\nERROR: 'ensurepip' not available: %s (Maybe 'python3-venv' package not installed?!?)" % err)
__version__ = "{{cookiecutter._version}}" # Version from used 'bootstrap_env' to generate this file.
log = logging.getLogger(__name__)
PACKAGE_NAME="{{cookiecutter.package_name}}" # PyPi package name
# Admin shell console script entry point name (defined in 'setup.py').
# (used to call 'update_env' after virtualenv creation)
# It's the 'scripts' keyword argument in project 'setup.py'
# see:
# https://python-packaging.readthedocs.io/en/latest/command-line-scripts.html#the-scripts-keyword-argument
#
ADMIN_FILE_NAME="{{cookiecutter.package_name}}_admin.py" # File under .../<project>/foobar_admin.py
# Note:
# on 'master' branch: '--pre' flag must not be set: So the last release on PyPi will be installed.
# on 'develop' branch: set the '--pre' flag and publish 'preview' versions on PyPi.
#
DEVELOPER_INSTALL=["-e", "{{cookiecutter.editable_url}}#egg=%s" % PACKAGE_NAME]
NORMAL_INSTALL=[
{%- if cookiecutter.use_pre_release == "y" %}
"--pre", # https://pip.pypa.io/en/stable/reference/pip_install/#pre-release-versions
{%- endif %}
PACKAGE_NAME
]
SELF_FILE_PATH=Path(__file__).resolve() # .../src/bootstrap-env/bootstrap_env/boot_bootstrap_env.py
ROOT_PATH=Path(SELF_FILE_PATH, "..", "..").resolve() # .../src/bootstrap_env/
OWN_FILE_NAME=SELF_FILE_PATH.name # boot_bootstrap_env.py
# print("SELF_FILE_PATH: %s" % SELF_FILE_PATH)
# print("ROOT_PATH: %s" % ROOT_PATH)
# print("OWN_FILE_NAME: %s" % OWN_FILE_NAME)
def in_virtualenv():
# Maybe this is not the best way?!?
return "VIRTUAL_ENV" in os.environ
if in_virtualenv():
print("Activated virtualenv detected: %r (%s)" % (sys.prefix, sys.executable))
else:
print("We are not in a virtualenv, ok.")
SUBPROCESS_TIMEOUT=60 # default timeout for subprocess calls
class Colorizer:
"""
Borrowed from Django:
https://github.com/django/django/blob/master/django/utils/termcolors.py
>>> c = Colorizer()
>>> c._supports_colors()
True
>>> c.color_support = True
>>> c.colorize('no color')
'no color'
>>> c.colorize('bold', opts=("bold",))
'\\x1b[1mbold\\x1b[0m'
>>> c.colorize("colors!", foreground="red", background="blue", opts=("bold", "blink"))
'\\x1b[31;44;1;5mcolors!\\x1b[0m'
"""
def __init__(self, stdout=sys.stdout, stderr=sys.stderr):
self._stdout = stdout
self._stderr = stderr
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
self._foreground_colors = dict([(color_names[x], '3%s' % x) for x in range(8)])
self._background_colors = dict([(color_names[x], '4%s' % x) for x in range(8)])
self._opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}
self.color_support = self._supports_colors()
def _supports_colors(self):
if sys.platform in ('win32', 'Pocket PC'):
return False
# isatty is not always implemented!
if hasattr(self._stdout, 'isatty') and self._stdout.isatty():
return True
else:
return False
def colorize(self, text, foreground=None, background=None, opts=()):
"""
Returns your text, enclosed in ANSI graphics codes.
"""
if not self.color_support:
return text
code_list = []
if foreground:
code_list.append(self._foreground_colors[foreground])
if background:
code_list.append(self._background_colors[background])
for option in opts:
code_list.append(self._opt_dict[option])
if not code_list:
return text
return "\x1b[%sm%s\x1b[0m" % (';'.join(code_list), text)
def _out_err(self, func, *args, flush=False, **kwargs):
text = self.colorize(*args, **kwargs)
func.write("%s\n" % text)
if flush:
func.flush()
def out(self, *args, flush=False, **kwargs):
""" colorize and print to stdout """
self._out_err(self._stdout, *args, flush=flush, **kwargs)
def err(self, *args, flush=False, **kwargs):
""" colorize and print to stderr """
self._out_err(self._stderr, *args, flush=flush, **kwargs)
def demo(self):
for background_color in sorted(self._background_colors.keys()):
line = ["%10s:" % background_color]
for foreground_color in sorted(self._foreground_colors.keys()):
line.append(
self.colorize(" %s " % foreground_color,
foreground=foreground_color, background=background_color
)
)
for opt in sorted(self._opt_dict.keys()):
line.append(
self.colorize(" %s " % opt,
background=background_color, opts=(opt,)
)
)
self.out("".join(line), background=background_color)
colorizer = Colorizer()
# colorizer.demo()
class VerboseSubprocess:
"""
Verbose Subprocess
"""
def __init__(self, *popenargs, env_updates=None, timeout=SUBPROCESS_TIMEOUT, universal_newlines=True, stderr=subprocess.STDOUT, **kwargs):
"""
:param popenargs: 'args' for subprocess.Popen()
:param env_updates: dict to overwrite os.environ.
:param timeout: pass to subprocess.Popen()
:param kwargs: pass to subprocess.Popen()
"""
# subprocess doesn't accept Path() objects
for arg in popenargs:
assert not isinstance(arg, pathlib.Path), "Arg %r not accepted!" % arg
for key, value in kwargs.items():
assert not isinstance(value, pathlib.Path), "Keyword argument %r: %r not accepted!" % (key, value)
self.popenargs = popenargs
self.kwargs = kwargs
self.kwargs["timeout"] = timeout
self.kwargs["universal_newlines"] = universal_newlines
self.kwargs["stderr"] = stderr
self.kwargs["bufsize"] = -1
self.args_str = " ".join([str(x) for x in self.popenargs])
env = self.kwargs.get("env", os.environ.copy())
env["PYTHONUNBUFFERED"]="1" # If a python script called ;)
self.env_updates = env_updates
if self.env_updates is not None:
env.update(env_updates)
self.kwargs["env"] = env
def print_call_info(self):
print("")
print("_"*79)
kwargs_txt=[]
for key, value in self.kwargs.items():
if key == "env":
continue
key = colorizer.colorize(key, foreground="magenta", opts=("bold",))
value = colorizer.colorize(value, foreground="green", opts=("bold",))
kwargs_txt.append("%s=%s" % (key, value))
txt = "Call: '{args}' with: {kwargs}".format(
args=colorizer.colorize(self.args_str, foreground="cyan", opts=("bold",)),
kwargs=", ".join(kwargs_txt)
)
if self.env_updates is not None:
txt += colorizer.colorize(" env:", foreground="magenta", opts=("bold",))
txt += colorizer.colorize(repr(self.env_updates), opts=("bold",))
print(txt)
print("", flush=True)
def print_exit_code(self, exit_code):
txt = "\nExit code %r from %r\n" % (exit_code, self.args_str)
if exit_code:
colorizer.err(txt, foreground="red", flush=True)
else:
colorizer.out(txt, foreground="green", flush=True)
def verbose_call(self, check=True):
"""
run subprocess.call()
:param check: if True and subprocess exit_code !=0: sys.exit(exit_code) after run.
:return: process exit code
"""
self.print_call_info()
try:
exit_code = subprocess.call(self.popenargs, **self.kwargs)
except KeyboardInterrupt:
print("\nExit %r\n" % self.args_str, flush=True)
exit_code=None # good idea?!?
sys.stderr.flush()
self.print_exit_code(exit_code)
if check and exit_code:
sys.exit(exit_code)
return exit_code
def verbose_output(self, check=True):
"""
run subprocess.check_output()
:param check: if True and subprocess exit_code !=0: sys.exit(exit_code) after run.
:return: process output
"""
self.print_call_info()
try:
return subprocess.check_output(self.popenargs, **self.kwargs)
except subprocess.CalledProcessError as err:
print("\n%s" % err)
if check:
sys.exit(err.returncode)
raise
def iter_output(self, check=True):
"""
A subprocess with tee ;)
"""
self.print_call_info()
orig_timeout = self.kwargs.pop("timeout")
self.kwargs.update({
"stdout":subprocess.PIPE,
"stderr":subprocess.STDOUT,
})
proc=subprocess.Popen(self.popenargs, **self.kwargs)
end_time = time.time() + orig_timeout
for line in iter(proc.stdout.readline, ''):
yield line
if time.time()>end_time:
raise subprocess.TimeoutExpired(self.popenargs, orig_timeout)
if check and proc.returncode:
sys.exit(proc.returncode)
def print_output(self, check=True):
for line in self.iter_output(check=check):
print(line, flush=True)
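# Usage sketch (mirrors the real call in EnvBuilder.post_setup below):
#
#   VerboseSubprocess("ls", "-la", "/tmp").verbose_call()
#   output = VerboseSubprocess("pip3", "--version").verbose_output(check=False)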
def get_pip_file_name():
if sys.platform == 'win32':
return "pip3.exe"
else:
return "pip3"
class DisplayErrors:
"""
Decorator to print traceback on exceptions.
Used in e.g.: Cmd class
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
self.func(*args, **kwargs)
except Exception as err:
traceback.print_exc(file=sys.stderr)
return "%s: %s" % (err.__class__.__name__, err)
class Cmd2(cmd.Cmd):
"""
Enhanced version of 'Cmd' class:
- command alias
- methods can be called directly from commandline: e.g.: ./foobar.py --help
    - displays the first docstring line before a command runs
"""
version = __version__
command_alias = { # used in self.precmd()
"q": "quit", "EOF": "quit", "exit": "quit",
"": "help", # Just hit ENTER -> help
"--help": "help", "-h": "help", "-?": "help",
}
unknown_command="*** Unknown command: %r ***\n"
# Will be append to 'doc_leader' in self.do_help():
complete_hint="\nUse <{key}> to command completion.\n"
missing_complete="\n(Sorry, no command completion available.)\n" # if 'readline' not available
def __init__(self, *args, self_filename=None, **kwargs):
super().__init__(*args, **kwargs)
self.self_filename = self.get_self_filename(self_filename=self_filename)
self.intro = self.get_intro()
self.prompt = self.get_prompt()
self.doc_header = self.get_doc_header()
# e.g.: $ bootstrap_env_admin.py boot /tmp/bootstrap_env-env -> run self.do_boot("/tmp/bootstrap_env-env") on startup
args = sys.argv[1:]
if args:
self.cmdqueue = [" ".join(args)]
def get_self_filename(self, self_filename):
if self_filename is None:
self_filename = SELF_FILE_PATH.name # Path(__file__).name ;)
self_filename = self_filename.split(".")[0] # remove file extension
return self_filename
def get_intro(self):
intro = "{filename} shell v{version}".format(filename=self.self_filename, version=self.version)
intro = "\n{intro}\nType help or ? to list commands.\n".format(
intro=colorizer.colorize(intro, foreground="blue", background="black", opts=("bold",))
)
return intro
def get_prompt(self):
prompt = "%s." % colorizer.colorize(os.uname().nodename, foreground="green")
prompt += colorizer.colorize(self.self_filename, foreground="cyan")
prompt += colorizer.colorize("> ", opts=("bold",))
return prompt
    def get_doc_header(self):
        doc_header = "Available commands (type help <topic>):\n"
        self.doc_leader = (
            "\nHint: All commands can be called directly from commandline.\n"
            "e.g.: $ ./{filename} help\n"
        ).format(filename=self.self_filename)
        return doc_header
def default(self, line):
""" Called on an input line when the command prefix is not recognized. """
colorizer.err(self.unknown_command % line, foreground="red")
@DisplayErrors
def _complete_list(self, items, text, line, begidx, endidx):
if text:
return [x for x in items if x.startswith(text)]
else:
return items
@DisplayErrors
def _complete_path(self, text, line, begidx, endidx):
"""
complete a command argument with a existing path
usage e.g.:
class FooCmd(Cmd2):
def complete_foobar(self, text, line, begidx, endidx):
return self._complete_path(text, line, begidx, endidx)
def do_foobar(self, path): # 'path' is type string!
print("path:", path)
"""
try:
destination = line.split(" ", 1)[1]
except IndexError:
destination = "."
if destination=="~":
return [os.sep]
destination = Path(destination).expanduser().resolve()
if not destination.is_dir():
destination = destination.parent.resolve()
if destination.is_dir():
complete_list = [x.stem + os.sep for x in destination.iterdir() if x.is_dir()]
if text:
if text in complete_list:
return [text + os.sep]
complete_list = [x for x in complete_list if x.startswith(text)]
else:
complete_list = []
return complete_list
def get_doc_line(self, command):
"""
return the first line of the DocString.
If no DocString: return None
"""
assert command.startswith("do_")
doc=getattr(self, command, None).__doc__
if doc is not None:
doc = doc.strip().split("\n",1)[0]
return doc
_complete_hint_added=False
def do_help(self, arg):
"""
List available commands with "help" or detailed help with "help cmd".
"""
if arg:
# Help for one command
return super().do_help(arg)
# List available commands:
self.stdout.write("%s\n" % self.doc_leader)
self.stdout.write("%s\n" % self.doc_header)
commands = [name for name in self.get_names() if name.startswith("do_")]
commands.sort()
max_length = max([len(name) for name in commands])
for command in commands:
doc_line = self.get_doc_line(command) or "(Undocumented command)"
command = command[3:] # remove "do_"
command = "{cmd:{width}}".format(cmd=command, width=max_length)
command = colorizer.colorize(command, opts=("bold",))
self.stdout.write(" {cmd} - {doc}\n".format(
cmd=command,
doc=doc_line
))
self.stdout.write("\n")
def do_quit(self, arg):
"Exit this interactiv shell"
print("\n\nbye")
return True
def precmd(self, line):
"""
1. Apply alias list
        2. print first DocString line (if exists) before starting the command
"""
try:
line=self.command_alias[line]
except KeyError:
pass
cmd = line.split(" ",1)[0]
doc_line = self.get_doc_line("do_%s" % cmd)
if doc_line:
colorizer.out("\n\n *** %s ***\n" % doc_line, background="cyan", opts=("bold",))
return line
def postcmd(self, stop, line):
# stop if we are called with commandline arguments
if len(sys.argv)>1:
stop = True
return stop
class EnvBuilder(venv.EnvBuilder):
"""
* Create new virtualenv
* install and update pip
* install "{{cookiecutter.package_name}}"
* call "{{cookiecutter.package_name}}_admin.py update_env" to install all requirements
"""
verbose = True
def __init__(self, requirements):
super().__init__(with_pip=True)
self.requirements = requirements
def create(self, env_dir):
print(" * Create new {{cookiecutter.project_name}} virtualenv here: %r" % env_dir)
if "VIRTUAL_ENV" in os.environ:
print("\nERROR: Don't call me in a activated virtualenv!")
print("You are in VIRTUAL_ENV: %r" % os.environ["VIRTUAL_ENV"])
return
return super().create(env_dir)
def ensure_directories(self, env_dir):
print(" * Create the directories for the environment.")
return super().ensure_directories(env_dir)
def create_configuration(self, context):
print(" * Create 'pyvenv.cfg' configuration file.")
return super().create_configuration(context)
def setup_python(self, context):
print(" * Set up a Python executable in the environment.")
return super().setup_python(context)
def call_new_python(self, context, *args, check=True, **kwargs):
"""
Do the same as bin/activate so that <args> runs in a "activated" virtualenv.
"""
kwargs.update({
"env_updates": {
"VIRTUAL_ENV": context.env_dir,
"PATH": "%s:%s" % (context.bin_path, os.environ["PATH"]),
}
})
VerboseSubprocess(*args, **kwargs).verbose_call(
check=check # sys.exit(return_code) if return_code != 0
)
def _setup_pip(self, context):
print(" * Install pip in a virtual environment.")
# install pip with ensurepip:
super()._setup_pip(context)
print(" * Upgrades pip in a virtual environment.")
# Upgrade pip first (e.g.: running python 3.5)
context.pip_bin=Path(context.bin_path, get_pip_file_name()) # e.g.: .../bin/pip3
assert context.pip_bin.is_file(), "Pip not found here: %s" % context.pip_bin
if sys.platform == 'win32':
# Note: On windows it will crash with a PermissionError: [WinError 32]
            # because pip can't replace itself while running ;)
# Work-a-round is "python -m pip install --upgrade pip"
# see also: https://github.com/pypa/pip/issues/3804
self.call_new_python(
context,
context.env_exe, "-m", "pip", "install", "--upgrade", "pip",
check=False # Don't exit on errors
)
else:
self.call_new_python(
context,
str(context.pip_bin), "install", "--upgrade", "pip",
check=False # Don't exit on errors
)
def setup_scripts(self, context):
print(" * Set up scripts into the created environment.")
return super().setup_scripts(context)
def post_setup(self, context):
"""
Set up any packages which need to be pre-installed into the
virtual environment being created.
:param context: The information for the virtual environment
creation request being processed.
"""
print(" * post-setup modification")
# Install "{{cookiecutter.package_name}}"
# in normal mode as package from PyPi
# in dev. mode as editable from github
self.call_new_python(
context,
str(context.pip_bin), "install",
# "--verbose",
*self.requirements
)
# Check if ".../bin/{{cookiecutter.package_name}}_admin.py" exists
bootstrap_env_admin_path = Path(context.bin_path, ADMIN_FILE_NAME)
if not bootstrap_env_admin_path.is_file():
print("ERROR: admin script not found here: '%s'" % bootstrap_env_admin_path)
VerboseSubprocess("ls", "-la", str(context.bin_path)).verbose_call()
sys.exit(-1)
# Install all requirements by call: "{{cookiecutter.package_name}}_admin.py update_env"
self.call_new_python(
context,
context.env_exe,
str(bootstrap_env_admin_path),
"update_env",
timeout=4*60
) # extended timeout for slow Travis ;)
class BootBootstrapEnvShell(Cmd2):
"""
The bootstrap shell to start the virtualenv creation.
    It implements only two commands:
* boot
* boot_developer
"""
def _resolve_path(self, path):
return Path(path).expanduser().resolve()
def complete_boot(self, text, line, begidx, endidx):
# print("text: %r" % text)
# print("line: %r" % line)
return self._complete_path(text, line, begidx, endidx)
def _parse_requirements(self, requirement_string):
requirements = []
for line in requirement_string.splitlines():
line = line.strip()
if line and not line.startswith("#"):
line = line.split("# ", 1)[0] # Remove pip-compile comments e.g.: "... # via foo"
line = line.rstrip()
if line.startswith("-e"): # split editables
requirements += line.split(" ")
else:
requirements.append(line)
return requirements
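    # e.g. (a sketch): the requirement string
    #   "foo==1.0    # via bar\n-e git+https://example.tld/repo#egg=baz"
    # is parsed to:
    #   ["foo==1.0", "-e", "git+https://example.tld/repo#egg=baz"]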
def _boot(self, destination, requirements):
"""
Create a {{cookiecutter.project_name}} virtualenv and install requirements.
"""
if not destination:
self.stdout.write("\nERROR: No destination path given!\n")
            self.stdout.write("\n(Hint: call 'boot' with a path as argument, e.g.: '~/foo/bar')\n\n")
sys.exit(1)
destination = Path(destination).expanduser().resolve()
if destination.exists():
self.stdout.write("\nERROR: Path '%s' already exists!\n\n" % destination)
sys.exit(1)
builder = EnvBuilder(requirements)
builder.create(str(destination))
self.stdout.write("\n")
if not destination.is_dir():
self.stdout.write("ERROR: Creating virtualenv!\n")
sys.exit(1)
else:
self.stdout.write("virtualenv created at: '%s'\n" % destination)
def do_boot(self, destination):
"""
Bootstrap {{cookiecutter.project_name}} virtualenv in "normal" mode.
usage:
{{cookiecutter.bootstrap_filename}}> boot [path]
Create a {{cookiecutter.project_name}} virtualenv in the given [path].
Install packages via PyPi and read-only sources from github.
The destination path must not exist yet!
        (uses the requirements/normal_installation.txt)
"""
self._boot(destination, requirements=NORMAL_INSTALL)
complete_boot = complete_boot
def do_boot_developer(self, destination):
"""
Bootstrap {{cookiecutter.project_name}} virtualenv in "developer" mode.
All own projects installed as editables via github HTTPS (readonly)
        **Should only be used for developing/contributing. All others: use the normal 'boot' ;)**
usage:
{{cookiecutter.bootstrap_filename}}> boot_developer [path]
Create a {{cookiecutter.project_name}} virtualenv in the given [path].
Install packages via PyPi and read-only sources from github.
The destination path must not exist yet!
        (uses the requirements/developer_installation.txt)
"""
self._boot(destination, requirements=DEVELOPER_INSTALL)
complete_boot_developer = complete_boot
def main():
"""
Start the shell.
    This may also be used in setup.py, e.g.:
entry_points={'console_scripts': [
"{{cookiecutter.project_name}}_boot = {{cookiecutter.project_name}}.{{cookiecutter.project_name}}_boot:main",
]},
"""
BootBootstrapEnvShell().cmdloop()
if __name__ == '__main__':
main()
|
pentestfail/TA-Github
|
refs/heads/master
|
bin/ta_github/solnlib/packages/requests/packages/urllib3/contrib/ntlmpool.py
|
199
|
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
from __future__ import absolute_import
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
(self.num_connections, self.host, self.authurl))
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % reshdr)
log.debug('Response data: %s [...]' % res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % dict(res.getheaders()))
log.debug('Response data: %s [...]' % res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
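# Usage sketch (assumed values, not part of the original module):
#
#   pool = NTLMConnectionPool('DOMAIN\\user', 'secret', '/any-ntlm-protected-url',
#                             host='example.com', port=443)
#   response = pool.urlopen('GET', '/some/path')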
|
neogi/machine-learning
|
refs/heads/master
|
classification/logistic_regression/logistic_regression_mle_sg.py
|
1
|
# Imports
import string
import numpy as np
# Function to remove punctuations
def remove_punctuation(text):
"""
Purpose: Remove all punctuations from a line of text
Input : A line of text
Output : Text without punctuations
"""
return string.translate(text, None, string.punctuation)
# Function to compute score
def compute_score(feature_matrix, coefficients):
"""
Purpose: Compute the dot product of features and their coefficients
Input : Feature matrix and a coefficient vector
Output : Dot product of feature matrix and coefficient vector
"""
return feature_matrix.dot(coefficients)
# Function to compute probability
def compute_probability(score):
"""
Purpose: Compute sigmoid response (probabilities) from scores
Input : A vector of scores
Output : A vector of probabilities
"""
return 1.0/(1 + np.exp(-score))
# Function to compute feature derivative
def compute_feature_derivative(errors, feature):
"""
    Purpose: Compute derivative of the log likelihood wrt a coefficient
    Input : Errors between true and predicted output values, feature column
    Output : Derivative wrt the feature's coefficient
"""
return feature.T.dot(errors)
# Function to compute log likelihood
def compute_log_likelihood(feature_matrix, sentiment, coefficients):
"""
Purpose: Compute Log-Likelihood
Input : Feature matrix, coefficients, true output values
Output : Log-Likelihood
"""
indicator = (sentiment == +1)
scores = compute_score(feature_matrix, coefficients)
log_likelihood = np.sum((indicator - 1) * scores - np.log(1.0 + np.exp(-scores)))
return log_likelihood
# Function to compute average log likelihood
def compute_avg_log_likelihood(feature_matrix, sentiment, coefficients):
"""
Purpose: Compute average Log-Likelihood
Input : Feature matrix, coefficients, true output values
    Output : Average Log-Likelihood
"""
indicator = (sentiment == +1)
scores = compute_score(feature_matrix, coefficients)
logexp = np.log(1.0 + np.exp(-scores))
mask = np.isinf(logexp)
logexp[mask] = -scores[mask]
log_likelihood = np.sum((indicator - 1) * scores - logexp)/len(feature_matrix)
return log_likelihood
# Function to prepare array from SFrame
def get_data(data_frame, features, label):
"""
Purpose: Extract features and prepare a feature matrix
Set the first feature x0 = 1
Input : Original Dataframe, list of feature variables, output variable
Output : Feature matrix, label array
"""
data_frame['constant'] = 1.0
features = ['constant'] + features
features_matrix = data_frame[features].to_numpy()
    if label is not None:
label_array = data_frame[label].to_numpy()
else:
label_array = []
return(features_matrix, label_array)
# Funtion to perform logistic regression
def logistic_regression(feature_matrix, sentiment, initial_coefficients, step_size, max_iter):
"""
Purpose: Perform logistic regression
Input : Feature matrix, true output values, initial estimate of coefficients, step size
maximum number of iterations
Output : Estimated coefficient vector
"""
coefficients = np.array(initial_coefficients)
for itr in xrange(max_iter):
predictions = compute_probability(compute_score(feature_matrix, coefficients))
indicator = (sentiment == +1)*1.0
errors = indicator - predictions
for j in xrange(len(coefficients)):
derivative = compute_feature_derivative(errors, feature_matrix[:, j])
coefficients[j] = coefficients[j] + step_size * derivative
if itr <= 15 or (itr <= 100 and itr % 10 == 0) or (itr <= 1000 and itr % 100 == 0) \
or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0:
lp = compute_log_likelihood(feature_matrix, sentiment, coefficients)
print 'iteration %*d: log likelihood of observed labels = %.8f' % \
(int(np.ceil(np.log10(max_iter))), itr, lp)
return coefficients
# Function to perform logistic regression (stochastic gradient)
def logistic_regression_SG(feature_matrix, sentiment, initial_coefficients, step_size, batch_size, max_iter):
"""
Purpose: Perform logistic regression using stochastic gradient ascent
Input : Feature matrix, true output values, initial estimate of coefficients, step size
batch size, maximum number of iterations
Output : Estimated coefficient vector, log likelihood
"""
log_likelihood_all = []
coefficients = np.array(initial_coefficients)
np.random.seed(seed=1)
permutation = np.random.permutation(len(feature_matrix))
feature_matrix = feature_matrix[permutation,:]
sentiment = sentiment[permutation]
i = 0
for itr in xrange(max_iter):
predictions = compute_probability(compute_score(feature_matrix[i:i+batch_size,:], coefficients))
indicator = (sentiment[i:i+batch_size] == +1)*1.0
errors = indicator - predictions
for j in xrange(len(coefficients)):
derivative = compute_feature_derivative(errors, feature_matrix[i:i+batch_size,j])
coefficients[j] = coefficients[j] + step_size * derivative/batch_size
lp = compute_avg_log_likelihood(feature_matrix[i:i+batch_size,:], sentiment[i:i+batch_size], coefficients)
log_likelihood_all.append(lp)
if itr <= 15 or (itr <= 1000 and itr % 100 == 0) or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0 or itr == max_iter-1:
data_size = len(feature_matrix)
print 'Iteration %*d: Average log likelihood (of data points [%0*d:%0*d]) = %.8f' % \
(int(np.ceil(np.log10(max_iter))), itr, \
int(np.ceil(np.log10(data_size))), i, \
int(np.ceil(np.log10(data_size))), i+batch_size, lp)
i += batch_size
if i+batch_size > len(feature_matrix):
permutation = np.random.permutation(len(feature_matrix))
feature_matrix = feature_matrix[permutation,:]
sentiment = sentiment[permutation]
i = 0
return coefficients, log_likelihood_all
# Function to predict classes
def predict(feature_matrix, coefficients):
"""
Purpose: Predict output values from feature matrix and estimated coefficients
Input : Feature matrix, coefficient vector
Output : Predicted output vector
"""
scores = compute_score(feature_matrix, coefficients)
classes = (scores > 0.0)*2
classes = classes - 1
return classes
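# A minimal driver sketch (hypothetical toy data; Python 2, matching the
# code above):
#
#   features = np.array([[1.0, 2.0], [1.0, -1.0]])  # first column: constant
#   sentiment = np.array([+1, -1])
#   init = np.zeros(features.shape[1])
#   coefficients = logistic_regression(features, sentiment, init,
#                                      step_size=1e-2, max_iter=100)
#   print predict(features, coefficients)  # -> array([ 1, -1])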
|
Soya93/Extract-Refactoring
|
refs/heads/master
|
python/testData/folding/longStringsFolding.py
|
40
|
print(<fold text='"..."'>"This is "
"really "
"long string. "
"It should be foldable"</fold>)
|
patilsangram/erpnext
|
refs/heads/develop
|
erpnext/patches/v4_0/fix_employee_user_id.py
|
119
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import get_fullname
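# Patch sketch: for every user_id linked to more than one Employee, keep the
# Employee whose employee_name matches the user's full name (if any) and
# clear user_id on the remaining duplicates; otherwise keep one arbitrary
# record and null out the rest.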
def execute():
for user_id in frappe.db.sql_list("""select distinct user_id from `tabEmployee`
where ifnull(user_id, '')!=''
group by user_id having count(name) > 1"""):
fullname = get_fullname(user_id)
employee = frappe.db.get_value("Employee", {"employee_name": fullname, "user_id": user_id})
if employee:
frappe.db.sql("""update `tabEmployee` set user_id=null
where user_id=%s and name!=%s""", (user_id, employee))
else:
count = frappe.db.sql("""select count(*) from `tabEmployee` where user_id=%s""", user_id)[0][0]
frappe.db.sql("""update `tabEmployee` set user_id=null
where user_id=%s limit %s""", (user_id, count - 1))
|
arantebillywilson/python-snippets
|
refs/heads/master
|
microblog/flask/lib/python3.5/site-packages/whoosh/qparser/plugins.py
|
52
|
# Copyright 2011 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import copy
from whoosh import query
from whoosh.compat import u
from whoosh.compat import iteritems, xrange
from whoosh.qparser import syntax
from whoosh.qparser.common import attach
from whoosh.qparser.taggers import RegexTagger, FnTagger
from whoosh.util.text import rcompile
class Plugin(object):
"""Base class for parser plugins.
"""
def taggers(self, parser):
"""Should return a list of ``(Tagger, priority)`` tuples to add to the
syntax the parser understands. Lower priorities run first.
"""
return ()
def filters(self, parser):
"""Should return a list of ``(filter_function, priority)`` tuples to
add to parser. Lower priority numbers run first.
Filter functions will be called with ``(parser, groupnode)`` and should
return a group node.
"""
return ()
class TaggingPlugin(RegexTagger):
"""A plugin that also acts as a Tagger, to avoid having an extra Tagger
class for simple cases.
A TaggingPlugin object should have a ``priority`` attribute and either a
``nodetype`` attribute or a ``create()`` method. If the subclass doesn't
override ``create()``, the base class will call ``self.nodetype`` with the
Match object's named groups as keyword arguments.
"""
priority = 0
def __init__(self, expr=None):
self.expr = rcompile(expr or self.expr)
def taggers(self, parser):
return [(self, self.priority)]
def filters(self, parser):
return ()
def create(self, parser, match):
# Groupdict keys can be unicode sometimes apparently? Convert them to
# str for use as keyword arguments. This should be Py3-safe.
kwargs = dict((str(k), v) for k, v in iteritems(match.groupdict()))
return self.nodetype(**kwargs)
class WhitespacePlugin(TaggingPlugin):
"""Tags whitespace and removes it at priority 500. Depending on whether
your plugin's filter wants to see where whitespace was in the original
query, it should run with priority lower than 500 (before removal of
whitespace) or higher than 500 (after removal of whitespace).
"""
nodetype = syntax.Whitespace
priority = 100
def __init__(self, expr=r"\s+"):
TaggingPlugin.__init__(self, expr)
def filters(self, parser):
return [(self.remove_whitespace, 500)]
def remove_whitespace(self, parser, group):
newgroup = group.empty_copy()
for node in group:
if isinstance(node, syntax.GroupNode):
newgroup.append(self.remove_whitespace(parser, node))
elif not node.is_ws():
newgroup.append(node)
return newgroup
class SingleQuotePlugin(TaggingPlugin):
"""Adds the ability to specify single "terms" containing spaces by
enclosing them in single quotes.
"""
expr = r"(^|(?<=\W))'(?P<text>.*?)'(?=\s|\]|[)}]|$)"
nodetype = syntax.WordNode
class PrefixPlugin(TaggingPlugin):
"""Adds the ability to specify prefix queries by ending a term with an
asterisk.
This plugin is useful if you want the user to be able to create prefix but
not wildcard queries (for performance reasons). If you are including the
wildcard plugin, you should not include this plugin as well.
>>> qp = qparser.QueryParser("content", myschema)
>>> qp.remove_plugin_class(qparser.WildcardPlugin)
>>> qp.add_plugin(qparser.PrefixPlugin())
>>> q = qp.parse("pre*")
"""
class PrefixNode(syntax.TextNode):
qclass = query.Prefix
def r(self):
return "%r*" % self.text
expr = "(?P<text>[^ \t\r\n*]+)[*](?= |$|\\))"
nodetype = PrefixNode
class WildcardPlugin(TaggingPlugin):
# \u055E = Armenian question mark
# \u061F = Arabic question mark
# \u1367 = Ethiopic question mark
qmarks = u("?\u055E\u061F\u1367")
expr = "(?P<text>[*%s])" % qmarks
def filters(self, parser):
# Run early, but definitely before multifield plugin
return [(self.do_wildcards, 50)]
def do_wildcards(self, parser, group):
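        # Merge a WildcardNode with any adjacent text nodes (e.g. "foo*bar"
        # is tagged as [foo, *, bar] and collapses into one node), then
        # downgrade nodes whose only wildcard is a trailing "*" to the more
        # efficient PrefixNode.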
i = 0
while i < len(group):
node = group[i]
if isinstance(node, self.WildcardNode):
if i < len(group) - 1 and group[i + 1].is_text():
nextnode = group.pop(i + 1)
node.text += nextnode.text
if i > 0 and group[i - 1].is_text():
prevnode = group.pop(i - 1)
node.text = prevnode.text + node.text
else:
i += 1
else:
if isinstance(node, syntax.GroupNode):
self.do_wildcards(parser, node)
i += 1
for i in xrange(len(group)):
node = group[i]
if isinstance(node, self.WildcardNode):
text = node.text
if len(text) > 1 and not any(qm in text for qm in self.qmarks):
if text.find("*") == len(text) - 1:
newnode = PrefixPlugin.PrefixNode(text[:-1])
newnode.startchar = node.startchar
newnode.endchar = node.endchar
group[i] = newnode
return group
class WildcardNode(syntax.TextNode):
# Note that this node inherits tokenize = False from TextNode,
# so the text in this node will not be analyzed... just passed
# straight to the query
qclass = query.Wildcard
def r(self):
return "Wild %r" % self.text
nodetype = WildcardNode
class RegexPlugin(TaggingPlugin):
"""Adds the ability to specify regular expression term queries.
The default syntax for a regular expression term is ``r"termexpr"``.
>>> qp = qparser.QueryParser("content", myschema)
>>> qp.add_plugin(qparser.RegexPlugin())
>>> q = qp.parse('foo title:r"bar+"')
"""
class RegexNode(syntax.TextNode):
qclass = query.Regex
def r(self):
return "Regex %r" % self.text
expr = 'r"(?P<text>[^"]*)"'
nodetype = RegexNode
class BoostPlugin(TaggingPlugin):
"""Adds the ability to boost clauses of the query using the circumflex.
>>> qp = qparser.QueryParser("content", myschema)
>>> q = qp.parse("hello there^2")
"""
expr = "\\^(?P<boost>[0-9]*(\\.[0-9]+)?)($|(?=[ \t\r\n)]))"
class BoostNode(syntax.SyntaxNode):
def __init__(self, original, boost):
self.original = original
self.boost = boost
def r(self):
return "^ %s" % self.boost
def create(self, parser, match):
# Override create so we can grab group 0
original = match.group(0)
try:
boost = float(match.group("boost"))
except ValueError:
# The text after the ^ wasn't a valid number, so turn it into a
# word
node = syntax.WordNode(original)
else:
node = self.BoostNode(original, boost)
return node
def filters(self, parser):
return [(self.clean_boost, 0), (self.do_boost, 510)]
def clean_boost(self, parser, group):
"""This filter finds any BoostNodes in positions where they can't boost
the previous node (e.g. at the very beginning, after whitespace, or
after another BoostNode) and turns them into WordNodes.
"""
bnode = self.BoostNode
for i, node in enumerate(group):
if isinstance(node, bnode):
if (not i or not group[i - 1].has_boost):
group[i] = syntax.to_word(node)
return group
def do_boost(self, parser, group):
"""This filter finds BoostNodes and applies the boost to the previous
node.
"""
newgroup = group.empty_copy()
for node in group:
if isinstance(node, syntax.GroupNode):
node = self.do_boost(parser, node)
elif isinstance(node, self.BoostNode):
if (newgroup and newgroup[-1].has_boost):
# Apply the BoostNode's boost to the previous node
newgroup[-1].set_boost(node.boost)
# Skip adding the BoostNode to the new group
continue
else:
node = syntax.to_word(node)
newgroup.append(node)
return newgroup
class GroupPlugin(Plugin):
"""Adds the ability to group clauses using parentheses.
"""
# Marker nodes for open and close bracket
class OpenBracket(syntax.SyntaxNode):
def r(self):
return "("
class CloseBracket(syntax.SyntaxNode):
def r(self):
return ")"
def __init__(self, openexpr="[(]", closeexpr="[)]"):
self.openexpr = openexpr
self.closeexpr = closeexpr
def taggers(self, parser):
return [(FnTagger(self.openexpr, self.OpenBracket, "openB"), 0),
(FnTagger(self.closeexpr, self.CloseBracket, "closeB"), 0)]
def filters(self, parser):
return [(self.do_groups, 0)]
def do_groups(self, parser, group):
"""This filter finds open and close bracket markers in a flat group
and uses them to organize the nodes into a hierarchy.
"""
ob, cb = self.OpenBracket, self.CloseBracket
# Group hierarchy stack
stack = [parser.group()]
for node in group:
if isinstance(node, ob):
# Open bracket: push a new level of hierarchy on the stack
stack.append(parser.group())
elif isinstance(node, cb):
# Close bracket: pop the current level of hierarchy and append
# it to the previous level
if len(stack) > 1:
last = stack.pop()
stack[-1].append(last)
else:
# Anything else: add it to the current level of hierarchy
stack[-1].append(node)
top = stack[0]
# If the parens were unbalanced (more opens than closes), just take
# whatever levels of hierarchy were left on the stack and tack them on
# the end of the top-level
if len(stack) > 1:
for ls in stack[1:]:
top.extend(ls)
if len(top) == 1 and isinstance(top[0], syntax.GroupNode):
boost = top.boost
top = top[0]
top.boost = boost
return top
class EveryPlugin(TaggingPlugin):
expr = "[*]:[*]"
priority = -1
def create(self, parser, match):
return self.EveryNode()
class EveryNode(syntax.SyntaxNode):
def r(self):
return "*:*"
def query(self, parser):
return query.Every()
class FieldsPlugin(TaggingPlugin):
"""Adds the ability to specify the field of a clause.
"""
class FieldnameTagger(RegexTagger):
def create(self, parser, match):
return syntax.FieldnameNode(match.group("text"), match.group(0))
def __init__(self, expr=r"(?P<text>\w+|[*]):", remove_unknown=True):
"""
:param expr: the regular expression to use for tagging fields.
:param remove_unknown: if True, converts field specifications for
fields that aren't in the schema into regular text.
"""
self.expr = expr
self.removeunknown = remove_unknown
def taggers(self, parser):
return [(self.FieldnameTagger(self.expr), 0)]
def filters(self, parser):
return [(self.do_fieldnames, 100)]
def do_fieldnames(self, parser, group):
"""This filter finds FieldnameNodes in the tree and applies their
fieldname to the next node.
"""
fnclass = syntax.FieldnameNode
if self.removeunknown and parser.schema:
# Look for field nodes that aren't in the schema and convert them
# to text
schema = parser.schema
newgroup = group.empty_copy()
prev_field_node = None
for node in group:
if isinstance(node, fnclass) and node.fieldname not in schema:
prev_field_node = node
continue
elif prev_field_node:
# If prev_field_node is not None, it contains a field node
# that appeared before this node but isn't in the schema,
# so we'll convert it to text here
if node.has_text:
node.text = prev_field_node.original + node.text
else:
newgroup.append(syntax.to_word(prev_field_node))
prev_field_node = None
newgroup.append(node)
if prev_field_node:
newgroup.append(syntax.to_word(prev_field_node))
group = newgroup
newgroup = group.empty_copy()
# Iterate backwards through the stream, looking for field-able objects
# with field nodes in front of them
i = len(group)
while i > 0:
i -= 1
node = group[i]
if isinstance(node, fnclass):
# If we see a fieldname node, it must not have been in front
# of something fieldable, since we would have already removed
# it (since we're iterating backwards), so convert it to text
node = syntax.to_word(node)
elif isinstance(node, syntax.GroupNode):
node = self.do_fieldnames(parser, node)
if i > 0 and not node.is_ws() and isinstance(group[i - 1],
fnclass):
node.set_fieldname(group[i - 1].fieldname, override=False)
i -= 1
newgroup.append(node)
newgroup.reverse()
return newgroup
class FuzzyTermPlugin(TaggingPlugin):
"""Adds syntax to the query parser to create "fuzzy" term queries, which
match any term within a certain "edit distance" (number of inserted,
deleted, or transposed characters) by appending a tilde (``~``) and an
optional maximum edit distance to a term. If you don't specify an explicit
maximum edit distance, the default is 1.
>>> qp = qparser.QueryParser("content", myschema)
>>> qp.add_plugin(qparser.FuzzyTermPlugin())
>>> q = qp.parse("Stephen~2 Colbert")
For example, the following query creates a :class:`whoosh.query.FuzzyTerm`
query with a maximum edit distance of 1::
bob~
The following creates a fuzzy term query with a maximum edit distance of
2::
bob~2
The maximum edit distance can only be a single digit. Note that edit
distances greater than 2 can take an extremely long time and are generally
not useful.
You can specify a prefix length using ``~n/m``. For example, to allow a
maximum edit distance of 2 and require a prefix match of 3 characters::
johannson~2/3
To specify a prefix with the default edit distance::
johannson~/3
"""
expr = rcompile("""
(?<=\\S) # Only match right after non-space
~ # Initial tilde
(?P<maxdist>[0-9])? # Optional maxdist
(/ # Optional prefix slash
(?P<prefix>[1-9][0-9]*) # prefix
)? # (end prefix group)
""", verbose=True)
class FuzzinessNode(syntax.SyntaxNode):
def __init__(self, maxdist, prefixlength, original):
self.maxdist = maxdist
self.prefixlength = prefixlength
self.original = original
def __repr__(self):
return "<~%d/%d>" % (self.maxdist, self.prefixlength)
class FuzzyTermNode(syntax.TextNode):
qclass = query.FuzzyTerm
def __init__(self, wordnode, maxdist, prefixlength):
self.fieldname = wordnode.fieldname
self.text = wordnode.text
self.boost = wordnode.boost
self.startchar = wordnode.startchar
self.endchar = wordnode.endchar
self.maxdist = maxdist
self.prefixlength = prefixlength
def r(self):
return "%r ~%d/%d" % (self.text, self.maxdist, self.prefixlength)
def query(self, parser):
# Use the superclass's query() method to create a FuzzyTerm query
# (it looks at self.qclass), just because it takes care of some
# extra checks and attributes
q = syntax.TextNode.query(self, parser)
# Set FuzzyTerm-specific attributes
q.maxdist = self.maxdist
q.prefixlength = self.prefixlength
return q
def create(self, parser, match):
mdstr = match.group("maxdist")
maxdist = int(mdstr) if mdstr else 1
pstr = match.group("prefix")
prefixlength = int(pstr) if pstr else 0
return self.FuzzinessNode(maxdist, prefixlength, match.group(0))
def filters(self, parser):
return [(self.do_fuzzyterms, 0)]
def do_fuzzyterms(self, parser, group):
newgroup = group.empty_copy()
i = 0
while i < len(group):
node = group[i]
if i < len(group) - 1 and isinstance(node, syntax.WordNode):
nextnode = group[i + 1]
if isinstance(nextnode, self.FuzzinessNode):
node = self.FuzzyTermNode(node, nextnode.maxdist,
nextnode.prefixlength)
i += 1
if isinstance(node, self.FuzzinessNode):
node = syntax.to_word(node)
if isinstance(node, syntax.GroupNode):
node = self.do_fuzzyterms(parser, node)
newgroup.append(node)
i += 1
return newgroup
class FunctionPlugin(TaggingPlugin):
"""Adds an abitrary "function call" syntax to the query parser to allow
advanced and extensible query functionality.
This is unfinished and experimental.
"""
expr = rcompile("""
[#](?P<name>[A-Za-z_][A-Za-z0-9._]*) # function name
( # optional args
\\[ # inside square brackets
(?P<args>.*?)
\\]
)?
""", verbose=True)
class FunctionNode(syntax.SyntaxNode):
has_fieldname = False
has_boost = True
merging = False
def __init__(self, name, fn, args, kwargs):
self.name = name
self.fn = fn
self.args = args
self.kwargs = kwargs
self.nodes = []
self.boost = None
def __repr__(self):
return "#%s<%r>(%r)" % (self.name, self.args, self.nodes)
def query(self, parser):
qs = [n.query(parser) for n in self.nodes]
kwargs = self.kwargs
if "boost" not in kwargs and self.boost is not None:
kwargs["boost"] = self.boost
# TODO: If this call raises an exception, return an error query
            # Pass the local ``kwargs`` (which may include the boost added
            # above) rather than self.kwargs
            return self.fn(qs, *self.args, **kwargs)
def __init__(self, fns):
"""
:param fns: a dictionary mapping names to functions that return a
query.
"""
self.fns = fns
def create(self, parser, match):
name = match.group("name")
if name in self.fns:
fn = self.fns[name]
argstring = match.group("args")
if argstring:
args, kwargs = self._parse_args(argstring)
else:
args = ()
kwargs = {}
return self.FunctionNode(name, fn, args, kwargs)
def _parse_args(self, argstring):
args = []
kwargs = {}
parts = argstring.split(",")
for part in parts:
if "=" in part:
name, value = part.split("=", 1)
# Wrap with str() because Python 2.5 can't handle unicode kws
name = str(name.strip())
else:
name = None
value = part
value = value.strip()
if value.startswith("'") and value.endswith("'"):
value = value[1:-1]
if name:
kwargs[name] = value
else:
args.append(value)
return args, kwargs
def filters(self, parser):
return [(self.do_functions, 600)]
def do_functions(self, parser, group):
newgroup = group.empty_copy()
i = 0
while i < len(group):
node = group[i]
if (isinstance(node, self.FunctionNode)
and i < len(group) - 1
and isinstance(group[i + 1], syntax.GroupNode)):
nextnode = group[i + 1]
node.nodes = list(self.do_functions(parser, nextnode))
if nextnode.boost != 1:
node.set_boost(nextnode.boost)
i += 1
elif isinstance(node, syntax.GroupNode):
node = self.do_functions(parser, node)
newgroup.append(node)
i += 1
return newgroup
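# A hedged sketch of FunctionPlugin (the plugin is experimental; ``myschema``
# and the function below are illustrative assumptions):
#
#     from whoosh import query
#
#     def make_prefix(qs, text):
#         # qs is the list of sub-queries from a following group, if any
#         return query.Prefix("content", text)
#
#     qp = qparser.QueryParser("content", myschema)
#     qp.add_plugin(FunctionPlugin({"prefix": make_prefix}))
#     q = qp.parse("#prefix[alf]")   # -> Prefix("content", "alf")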
class PhrasePlugin(Plugin):
"""Adds the ability to specify phrase queries inside double quotes.
"""
# Didn't use TaggingPlugin because I need to add slop parsing at some
# point
# Expression used to find words if a schema isn't available
wordexpr = rcompile(r'\S+')
class PhraseNode(syntax.TextNode):
def __init__(self, text, textstartchar, slop=1):
syntax.TextNode.__init__(self, text)
self.textstartchar = textstartchar
self.slop = slop
def r(self):
return "%s %r~%s" % (self.__class__.__name__, self.text, self.slop)
def apply(self, fn):
            # PhraseNode has no child nodes to map fn over; rebuild the node
            # from its own attributes (the original referenced self.type and
            # self.nodes, which don't exist on this class)
            return self.__class__(self.text, self.textstartchar,
                                  slop=self.slop)
def query(self, parser):
text = self.text
fieldname = self.fieldname or parser.fieldname
# We want to process the text of the phrase into "words" (tokens),
# and also record the startchar and endchar of each word
sc = self.textstartchar
if parser.schema and fieldname in parser.schema:
field = parser.schema[fieldname]
if field.analyzer:
# We have a field with an analyzer, so use it to parse
# the phrase into tokens
tokens = field.tokenize(text, mode="query", chars=True)
words = []
char_ranges = []
for t in tokens:
words.append(t.text)
char_ranges.append((sc + t.startchar, sc + t.endchar))
else:
# We have a field but it doesn't have a format object,
# for some reason (it's self-parsing?), so use process_text
# to get the texts (we won't know the start/end chars)
words = list(field.process_text(text, mode="query"))
char_ranges = [(None, None)] * len(words)
else:
# We're parsing without a schema, so just use the default
# regular expression to break the text into words
words = []
char_ranges = []
for match in PhrasePlugin.wordexpr.finditer(text):
words.append(match.group(0))
char_ranges.append((sc + match.start(), sc + match.end()))
qclass = parser.phraseclass
q = qclass(fieldname, words, slop=self.slop, boost=self.boost,
char_ranges=char_ranges)
return attach(q, self)
class PhraseTagger(RegexTagger):
def create(self, parser, match):
text = match.group("text")
textstartchar = match.start("text")
slopstr = match.group("slop")
slop = int(slopstr) if slopstr else 1
return PhrasePlugin.PhraseNode(text, textstartchar, slop)
def __init__(self, expr='"(?P<text>.*?)"(~(?P<slop>[1-9][0-9]*))?'):
self.expr = expr
def taggers(self, parser):
return [(self.PhraseTagger(self.expr), 0)]
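# A hedged sketch of phrase parsing (PhrasePlugin is installed by default;
# ``myschema`` is an assumed schema with a "content" field):
#
#     qp = qparser.QueryParser("content", myschema)
#     q = qp.parse(u'"hello there"~2')
#     # q is parser.phraseclass (query.Phrase by default) with slop=2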
class SequencePlugin(Plugin):
"""Adds the ability to group arbitrary queries inside double quotes to
produce a query matching the individual sub-queries in sequence.
To enable this plugin, first remove the default PhrasePlugin, then add
this plugin::
qp = qparser.QueryParser("field", my_schema)
qp.remove_plugin_class(qparser.PhrasePlugin)
qp.add_plugin(qparser.SequencePlugin())
This enables parsing "phrases" such as::
"(jon OR john OR jonathan~1) smith*"
"""
def __init__(self, expr='["](~(?P<slop>[1-9][0-9]*))?'):
"""
:param expr: a regular expression for the marker at the start and end
of a phrase. The default is the double-quotes character.
"""
self.expr = expr
class SequenceNode(syntax.GroupNode):
qclass = query.Sequence
class QuoteNode(syntax.MarkerNode):
def __init__(self, slop=None):
self.slop = int(slop) if slop else 1
def taggers(self, parser):
return [(FnTagger(self.expr, self.QuoteNode, "quote"), 0)]
def filters(self, parser):
return [(self.do_quotes, 550)]
def do_quotes(self, parser, group):
# New group to copy nodes into
newgroup = group.empty_copy()
# Buffer for sequence nodes; when it's None, it means we're not in
# a sequence
seq = None
# Start copying nodes from group to newgroup. When we find a quote
# node, start copying nodes into the buffer instead. When we find
# the next (end) quote, put the buffered nodes into a SequenceNode
# and add it to newgroup.
for node in group:
if isinstance(node, syntax.GroupNode):
# Recurse
node = self.do_quotes(parser, node)
if isinstance(node, self.QuoteNode):
if seq is None:
# Start a new sequence
seq = []
else:
# End the current sequence
sn = self.SequenceNode(seq, slop=node.slop)
newgroup.append(sn)
seq = None
elif seq is None:
# Not in a sequence, add directly
newgroup.append(node)
else:
# In a sequence, add it to the buffer
seq.append(node)
# We can end up with buffered nodes if there was an unbalanced quote;
# just add the buffered nodes directly to newgroup
if seq is not None:
newgroup.extend(seq)
return newgroup
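# A hedged note on slop: with the setup shown in the docstring above, the
# closing quote may carry a tilde and a slop value, e.g.:
#
#     q = qp.parse(u'"(jon OR john) smith"~2')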
class RangePlugin(Plugin):
"""Adds the ability to specify term ranges.
"""
expr = rcompile(r"""
(?P<open>\{|\[) # Open paren
(?P<start>
('[^']*?'\s+) # single-quoted
| # or
([^\]}]+?(?=[Tt][Oo])) # everything until "to"
)?
[Tt][Oo] # "to"
(?P<end>
(\s+'[^']*?') # single-quoted
| # or
([^\]}]+?) # everything until "]" or "}"
)?
(?P<close>}|]) # Close paren
""", verbose=True)
class RangeTagger(RegexTagger):
def __init__(self, expr, excl_start, excl_end):
self.expr = expr
self.excl_start = excl_start
self.excl_end = excl_end
def create(self, parser, match):
start = match.group("start")
end = match.group("end")
if start:
# Strip the space before the "to"
start = start.rstrip()
# Strip single quotes
if start.startswith("'") and start.endswith("'"):
start = start[1:-1]
if end:
# Strip the space before the "to"
end = end.lstrip()
# Strip single quotes
if end.startswith("'") and end.endswith("'"):
end = end[1:-1]
# What kind of open and close brackets were used?
startexcl = match.group("open") == self.excl_start
endexcl = match.group("close") == self.excl_end
rn = syntax.RangeNode(start, end, startexcl, endexcl)
return rn
def __init__(self, expr=None, excl_start="{", excl_end="}"):
self.expr = expr or self.expr
self.excl_start = excl_start
self.excl_end = excl_end
def taggers(self, parser):
tagger = self.RangeTagger(self.expr, self.excl_start, self.excl_end)
return [(tagger, 1)]
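# A hedged sketch of the range syntax handled by RangePlugin (installed by
# default in QueryParser; ``myschema`` is an assumed schema with "content"
# and "date" fields):
#
#     qp = qparser.QueryParser("content", myschema)
#     qp.parse(u"[apple TO bear]")      # inclusive on both ends
#     qp.parse(u"{apple TO bear}")      # exclusive on both ends
#     qp.parse(u"date:['1 mar' TO]")    # single quotes protect spaces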
class OperatorsPlugin(Plugin):
"""By default, adds the AND, OR, ANDNOT, ANDMAYBE, and NOT operators to
the parser syntax. This plugin scans the token stream for subclasses of
:class:`Operator` and calls their :meth:`Operator.make_group` methods
to allow them to manipulate the stream.
There are two levels of configuration available.
The first level is to change the regular expressions of the default
operators, using the ``And``, ``Or``, ``AndNot``, ``AndMaybe``, and/or
``Not`` keyword arguments. The keyword value can be a pattern string or
a compiled expression, or None to remove the operator::
qp = qparser.QueryParser("content", schema)
cp = qparser.OperatorsPlugin(And="&", Or="\\|", AndNot="&!",
AndMaybe="&~", Not=None)
qp.replace_plugin(cp)
You can also specify a list of ``(OpTagger, priority)`` pairs as the first
argument to the initializer to use custom operators. See :ref:`custom-op`
for more information on this.
"""
class OpTagger(RegexTagger):
def __init__(self, expr, grouptype, optype=syntax.InfixOperator,
leftassoc=True, memo=""):
RegexTagger.__init__(self, expr)
self.grouptype = grouptype
self.optype = optype
self.leftassoc = leftassoc
self.memo = memo
def __repr__(self):
return "<%s %r (%s)>" % (self.__class__.__name__,
self.expr.pattern, self.memo)
def create(self, parser, match):
return self.optype(match.group(0), self.grouptype, self.leftassoc)
def __init__(self, ops=None, clean=False,
And=r"(?<=\s)AND(?=\s)",
Or=r"(?<=\s)OR(?=\s)",
AndNot=r"(?<=\s)ANDNOT(?=\s)",
AndMaybe=r"(?<=\s)ANDMAYBE(?=\s)",
Not=r"(^|(?<=(\s|[()])))NOT(?=\s)",
Require=r"(^|(?<=\s))REQUIRE(?=\s)"):
if ops:
ops = list(ops)
else:
ops = []
if not clean:
ot = self.OpTagger
if Not:
ops.append((ot(Not, syntax.NotGroup, syntax.PrefixOperator,
memo="not"), 0))
if And:
ops.append((ot(And, syntax.AndGroup, memo="and"), 0))
if Or:
ops.append((ot(Or, syntax.OrGroup, memo="or"), 0))
if AndNot:
ops.append((ot(AndNot, syntax.AndNotGroup,
memo="anot"), -5))
if AndMaybe:
ops.append((ot(AndMaybe, syntax.AndMaybeGroup,
memo="amaybe"), -5))
if Require:
ops.append((ot(Require, syntax.RequireGroup,
memo="req"), 0))
self.ops = ops
def taggers(self, parser):
return self.ops
def filters(self, parser):
return [(self.do_operators, 600)]
def do_operators(self, parser, group):
"""This filter finds PrefixOperator, PostfixOperator, and InfixOperator
nodes in the tree and calls their logic to rearrange the nodes.
"""
for tagger, _ in self.ops:
# Get the operators created by the configured taggers
optype = tagger.optype
gtype = tagger.grouptype
# Left-associative infix operators are replaced left-to-right, and
# right-associative infix operators are replaced right-to-left.
# Most of the work is done in the different implementations of
# Operator.replace_self().
if tagger.leftassoc:
i = 0
while i < len(group):
t = group[i]
if isinstance(t, optype) and t.grouptype is gtype:
i = t.replace_self(parser, group, i)
else:
i += 1
else:
i = len(group) - 1
while i >= 0:
t = group[i]
if isinstance(t, optype):
i = t.replace_self(parser, group, i)
i -= 1
# Descend into the groups and recursively call do_operators
for i, t in enumerate(group):
if isinstance(t, syntax.GroupNode):
group[i] = self.do_operators(parser, t)
return group
#
class PlusMinusPlugin(Plugin):
"""Adds the ability to use + and - in a flat OR query to specify required
and prohibited terms.
This is the basis for the parser configuration returned by
``SimpleParser()``.
"""
# Marker nodes for + and -
class Plus(syntax.MarkerNode):
pass
class Minus(syntax.MarkerNode):
pass
def __init__(self, plusexpr="\\+", minusexpr="-"):
self.plusexpr = plusexpr
self.minusexpr = minusexpr
def taggers(self, parser):
return [(FnTagger(self.plusexpr, self.Plus, "plus"), 0),
(FnTagger(self.minusexpr, self.Minus, "minus"), 0)]
def filters(self, parser):
return [(self.do_plusminus, 510)]
def do_plusminus(self, parser, group):
"""This filter sorts nodes in a flat group into "required", "optional",
and "banned" subgroups based on the presence of plus and minus nodes.
"""
required = syntax.AndGroup()
optional = syntax.OrGroup()
banned = syntax.OrGroup()
# If the top-level group is an AndGroup we make everything "required" by default
if isinstance(group, syntax.AndGroup):
optional = syntax.AndGroup()
# Which group to put the next node we see into
next = optional
for node in group:
if isinstance(node, self.Plus):
# +: put the next node in the required group
next = required
elif isinstance(node, self.Minus):
# -: put the next node in the banned group
next = banned
else:
# Anything else: put it in the appropriate group
next.append(node)
# Reset to putting things in the optional group by default
next = optional
group = optional
if required:
group = syntax.AndMaybeGroup([required, group])
if banned:
group = syntax.AndNotGroup([group, banned])
return group
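# A hedged sketch of PlusMinusPlugin as wired up by SimpleParser (``myschema``
# is an assumed schema):
#
#     p = qparser.SimpleParser("content", myschema)
#     q = p.parse(u"render +shader -bug")
#     # "shader" is required, "bug" is prohibited, "render" stays optional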
class GtLtPlugin(TaggingPlugin):
"""Allows the user to use greater than/less than symbols to create range
queries::
a:>100 b:<=z c:>=-1.4 d:<mz
This is the equivalent of::
a:{100 to] b:[to z] c:[-1.4 to] d:[to mz}
The plugin recognizes ``>``, ``<``, ``>=``, ``<=``, ``=>``, and ``=<``
after a field specifier. The field specifier is required. You cannot do the
following::
>100
This plugin requires the FieldsPlugin and RangePlugin to work.
"""
class GtLtNode(syntax.SyntaxNode):
def __init__(self, rel):
self.rel = rel
def __repr__(self):
return "(%s)" % self.rel
expr = r"(?P<rel>(<=|>=|<|>|=<|=>))"
nodetype = GtLtNode
def filters(self, parser):
        # Run before the fields filter (at priority 100) removes the
        # FieldnameNodes we need to inspect
return [(self.do_gtlt, 99)]
def do_gtlt(self, parser, group):
"""This filter translate FieldnameNode/GtLtNode pairs into RangeNodes.
"""
fname = syntax.FieldnameNode
newgroup = group.empty_copy()
i = 0
lasti = len(group) - 1
while i < len(group):
node = group[i]
# If this is a GtLtNode...
if isinstance(node, self.GtLtNode):
# If it's not the last node in the group...
if i < lasti:
prevnode = newgroup[-1]
nextnode = group[i + 1]
# If previous was a fieldname and next node has text
if isinstance(prevnode, fname) and nextnode.has_text:
# Make the next node into a range based on the symbol
newgroup.append(self.make_range(nextnode, node.rel))
# Skip the next node
i += 1
else:
# If it's not a GtLtNode, add it to the filtered group
newgroup.append(node)
i += 1
return newgroup
def make_range(self, node, rel):
text = node.text
if rel == "<":
n = syntax.RangeNode(None, text, False, True)
elif rel == ">":
n = syntax.RangeNode(text, None, True, False)
elif rel == "<=" or rel == "=<":
n = syntax.RangeNode(None, text, False, False)
elif rel == ">=" or rel == "=>":
n = syntax.RangeNode(text, None, False, False)
return n.set_range(node.startchar, node.endchar)
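# A hedged sketch of GtLtPlugin (needs FieldsPlugin and RangePlugin, both
# installed by default; ``myschema`` is an assumed schema with a "count"
# field):
#
#     qp = qparser.QueryParser("content", myschema)
#     qp.add_plugin(GtLtPlugin())
#     q = qp.parse(u"count:>=10")   # parsed like count:[10 to]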
class MultifieldPlugin(Plugin):
"""Converts any unfielded terms into OR clauses that search for the
term in a specified list of fields.
>>> qp = qparser.QueryParser(None, myschema)
    >>> qp.add_plugin(qparser.MultifieldPlugin(["a", "b"]))
>>> qp.parse("alfa c:bravo")
And([Or([Term("a", "alfa"), Term("b", "alfa")]), Term("c", "bravo")])
This plugin is the basis for the ``MultifieldParser``.
"""
def __init__(self, fieldnames, fieldboosts=None, group=syntax.OrGroup):
"""
:param fieldnames: a list of fields to search.
:param fieldboosts: an optional dictionary mapping field names to
a boost to use for that field.
:param group: the group to use to relate the fielded terms to each
other.
"""
self.fieldnames = fieldnames
self.boosts = fieldboosts or {}
self.group = group
def filters(self, parser):
# Run after the fields filter applies explicit fieldnames (at priority
# 100)
return [(self.do_multifield, 110)]
def do_multifield(self, parser, group):
for i, node in enumerate(group):
if isinstance(node, syntax.GroupNode):
# Recurse inside groups
group[i] = self.do_multifield(parser, node)
elif node.has_fieldname and node.fieldname is None:
# For an unfielded node, create a new group containing fielded
# versions of the node for each configured "multi" field.
newnodes = []
for fname in self.fieldnames:
newnode = copy.copy(node)
newnode.set_fieldname(fname)
newnode.set_boost(self.boosts.get(fname, 1.0))
newnodes.append(newnode)
group[i] = self.group(newnodes)
return group
class FieldAliasPlugin(Plugin):
"""Adds the ability to use "aliases" of fields in the query string.
This plugin is useful for allowing users of languages that can't be
represented in ASCII to use field names in their own language, and
translate them into the "real" field names, which must be valid Python
identifiers.
>>> # Allow users to use 'body' or 'text' to refer to the 'content' field
>>> parser.add_plugin(FieldAliasPlugin({"content": ["body", "text"]}))
>>> parser.parse("text:hello")
Term("content", "hello")
"""
def __init__(self, fieldmap):
self.fieldmap = fieldmap
self.reverse = {}
for key, values in iteritems(fieldmap):
for value in values:
self.reverse[value] = key
def filters(self, parser):
# Run before fields plugin at 100
return [(self.do_aliases, 90)]
def do_aliases(self, parser, group):
for i, node in enumerate(group):
if isinstance(node, syntax.GroupNode):
group[i] = self.do_aliases(parser, node)
elif node.has_fieldname and node.fieldname is not None:
fname = node.fieldname
if fname in self.reverse:
node.set_fieldname(self.reverse[fname], override=True)
return group
class CopyFieldPlugin(Plugin):
"""Looks for basic syntax nodes (terms, prefixes, wildcards, phrases, etc.)
    occurring in a certain field and replaces them with a group (by default OR)
containing the original token and the token copied to a new field.
For example, the query::
hello name:matt
    could be automatically converted by ``CopyFieldPlugin({"name": "author"})``
to::
hello (name:matt OR author:matt)
This is useful where one field was indexed with a differently-analyzed copy
of another, and you want the query to search both fields.
You can specify a different group type with the ``group`` keyword. You can
also specify ``group=None``, in which case the copied node is inserted
"inline" next to the original, instead of in a new group::
hello name:matt author:matt
"""
def __init__(self, map, group=syntax.OrGroup, mirror=False):
"""
:param map: a dictionary mapping names of fields to copy to the
names of the destination fields.
:param group: the type of group to create in place of the original
token. You can specify ``group=None`` to put the copied node
"inline" next to the original node instead of in a new group.
        :param mirror: if True, the plugin copies both ways, so if the user
            specifies a query in the 'toname' field, it will be copied to
            the 'fromname' field.
"""
self.map = map
self.group = group
if mirror:
# Add in reversed mappings
map.update(dict((v, k) for k, v in iteritems(map)))
def filters(self, parser):
# Run after the fieldname filter (100) but before multifield (110)
return [(self.do_copyfield, 109)]
def do_copyfield(self, parser, group):
map = self.map
newgroup = group.empty_copy()
for node in group:
if isinstance(node, syntax.GroupNode):
# Recurse into groups
node = self.do_copyfield(parser, node)
elif node.has_fieldname:
fname = node.fieldname or parser.fieldname
if fname in map:
newnode = copy.copy(node)
newnode.set_fieldname(map[fname], override=True)
if self.group is None:
newgroup.append(node)
newgroup.append(newnode)
else:
newgroup.append(self.group([node, newnode]))
continue
newgroup.append(node)
return newgroup
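# A hedged sketch of CopyFieldPlugin, following the docstring example
# (``myschema`` is an assumed schema with "name" and "author" fields):
#
#     qp = qparser.QueryParser("content", myschema)
#     qp.add_plugin(CopyFieldPlugin({"name": "author"}))
#     q = qp.parse(u"hello name:matt")
#     # -> hello (name:matt OR author:matt)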
class PseudoFieldPlugin(Plugin):
"""This is an advanced plugin that lets you define "pseudo-fields" the user
can use in their queries. When the parser encounters one of these fields,
it runs a given function on the following node in the abstract syntax tree.
Unfortunately writing the transform function(s) requires knowledge of the
parser's abstract syntax tree classes. A transform function takes a
:class:`whoosh.qparser.SyntaxNode` and returns a
:class:`~whoosh.qparser.SyntaxNode` (or None if the node should be removed
instead of transformed).
Some things you can do in the transform function::
from whoosh import qparser
def my_xform_fn(node):
# Is this a text node?
if node.has_text:
# Change the node's text
node.text = node.text + "foo"
# Change the node into a prefix query
node = qparser.PrefixPlugin.PrefixNode(node.text)
# Set the field the node should search in
node.set_fieldname("title")
return node
else:
# If the pseudo-field wasn't applied to a text node (e.g.
# it preceded a group, as in ``pfield:(a OR b)`` ), remove the
# node. Alternatively you could just ``return node`` here to
# leave the non-text node intact.
return None
In the following example, if the user types ``regex:foo.bar``, the function
transforms the text in the pseudo-field "regex" into a regular expression
query in the "content" field::
from whoosh import qparser
def regex_maker(node):
if node.has_text:
node = qparser.RegexPlugin.RegexNode(node.text)
node.set_fieldname("content")
return node
qp = qparser.QueryParser("content", myindex.schema)
qp.add_plugin(qparser.PseudoFieldPlugin({"regex": regex_maker}))
q = qp.parse("alfa regex:br.vo")
The name of the "pseudo" field can be the same as an actual field. Imagine
the schema has a field named ``reverse``, and you want the user to be able
to type ``reverse:foo`` and transform it to ``reverse:(foo OR oof)``::
def rev_text(node):
if node.has_text:
# Create a word node for the reversed text
revtext = node.text[::-1] # Reverse the text
rnode = qparser.WordNode(revtext)
# Put the original node and the reversed node in an OrGroup
group = qparser.OrGroup([node, rnode])
# Need to set the fieldname here because the PseudoFieldPlugin
# removes the field name syntax
group.set_fieldname("reverse")
return group
qp = qparser.QueryParser("content", myindex.schema)
qp.add_plugin(qparser.PseudoFieldPlugin({"reverse": rev_text}))
q = qp.parse("alfa reverse:bravo")
Note that transforming the query like this can potentially really confuse
the spell checker!
This plugin works as a filter, so it can only operate on the query after it
has been parsed into an abstract syntax tree. For parsing control (i.e. to
give a pseudo-field its own special syntax), you would need to write your
own parsing plugin.
"""
def __init__(self, xform_map):
"""
        :param xform_map: a dictionary mapping pseudo-field names to transform
functions. The function should take a
:class:`whoosh.qparser.SyntaxNode` as an argument, and return a
:class:`~whoosh.qparser.SyntaxNode`. If the function returns None,
the node will be removed from the query.
"""
self.xform_map = xform_map
def filters(self, parser):
# Run before the fieldname filter (100)
return [(self.do_pseudofield, 99)]
def do_pseudofield(self, parser, group):
xform_map = self.xform_map
newgroup = group.empty_copy()
xform_next = None
for node in group:
if isinstance(node, syntax.GroupNode):
node = self.do_pseudofield(parser, node)
elif (isinstance(node, syntax.FieldnameNode)
and node.fieldname in xform_map):
xform_next = xform_map[node.fieldname]
continue
if xform_next:
newnode = xform_next(node)
xform_next = None
if newnode is None:
continue
else:
newnode.set_range(node.startchar, node.endchar)
node = newnode
newgroup.append(node)
return newgroup
|
mixxorz/wagtail
|
refs/heads/master
|
wagtail/images/formats.py
|
15
|
from django.utils.html import escape
from django.utils.translation import ugettext_lazy as _
from wagtail.utils.apps import get_app_submodules
from .shortcuts import get_rendition_or_not_found
class Format:
def __init__(self, name, label, classnames, filter_spec):
self.name = name
self.label = label
self.classnames = classnames
self.filter_spec = filter_spec
def editor_attributes(self, image, alt_text):
"""
Return additional attributes to go on the HTML element
when outputting this image within a rich text editor field
"""
return {
'data-embedtype': "image",
'data-id': image.id,
'data-format': self.name,
'data-alt': escape(alt_text),
}
def image_to_editor_html(self, image, alt_text):
return self.image_to_html(
image, alt_text, self.editor_attributes(image, alt_text)
)
def image_to_html(self, image, alt_text, extra_attributes=None):
if extra_attributes is None:
extra_attributes = {}
rendition = get_rendition_or_not_found(image, self.filter_spec)
extra_attributes['alt'] = escape(alt_text)
if self.classnames:
extra_attributes['class'] = "%s" % escape(self.classnames)
return rendition.img_tag(extra_attributes)
FORMATS = []
FORMATS_BY_NAME = {}
def register_image_format(format):
if format.name in FORMATS_BY_NAME:
raise KeyError("Image format '%s' is already registered" % format.name)
FORMATS_BY_NAME[format.name] = format
FORMATS.append(format)
def unregister_image_format(format_name):
global FORMATS
# handle being passed a format object rather than a format name string
try:
format_name = format_name.name
except AttributeError:
pass
try:
del FORMATS_BY_NAME[format_name]
FORMATS = [fmt for fmt in FORMATS if fmt.name != format_name]
except KeyError:
raise KeyError("Image format '%s' is not registered" % format_name)
def get_image_formats():
search_for_image_formats()
return FORMATS
def get_image_format(name):
search_for_image_formats()
return FORMATS_BY_NAME[name]
_searched_for_image_formats = False
def search_for_image_formats():
global _searched_for_image_formats
if not _searched_for_image_formats:
list(get_app_submodules('image_formats'))
_searched_for_image_formats = True
# Define default image formats
register_image_format(Format('fullwidth', _('Full width'), 'richtext-image full-width', 'width-800'))
register_image_format(Format('left', _('Left-aligned'), 'richtext-image left', 'width-500'))
register_image_format(Format('right', _('Right-aligned'), 'richtext-image right', 'width-500'))
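# A hedged sketch of registering a custom format from a project: placing code
# like the following in an app's ``image_formats.py`` module lets
# search_for_image_formats() pick it up automatically (the format name, label,
# classnames and filter spec below are illustrative assumptions):
#
#     from wagtail.images.formats import Format, register_image_format
#
#     register_image_format(
#         Format('thumbnail', 'Thumbnail', 'richtext-image thumbnail',
#                'max-120x120')
#     )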
|
asm666/sympy
|
refs/heads/master
|
examples/beginner/differentiation.py
|
106
|
#!/usr/bin/env python
"""Differentiation example
Demonstrates some differentiation operations.
"""
import sympy
from sympy import pprint
def main():
a = sympy.Symbol('a')
b = sympy.Symbol('b')
e = (a + 2*b)**5
print("\nExpression : ")
print()
pprint(e)
print("\n\nDifferentiating w.r.t. a:")
print()
pprint(e.diff(a))
print("\n\nDifferentiating w.r.t. b:")
print()
pprint(e.diff(b))
print("\n\nSecond derivative of the above result w.r.t. a:")
print()
pprint(e.diff(b).diff(a, 2))
print("\n\nExpanding the above result:")
print()
pprint(e.expand().diff(b).diff(a, 2))
print()
if __name__ == "__main__":
main()
|
str90/RK3188_tablet_kernel_sources
|
refs/heads/master
|
tools/perf/python/twatch.py
|
3213
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, sample_period = 1,
sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
    evsel.open(cpus = cpus, threads = threads)
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
alextruberg/custom_django
|
refs/heads/master
|
django/contrib/sitemaps/tests/test_http.py
|
109
|
from __future__ import unicode_literals
import os
from datetime import date
from django.conf import settings
from django.contrib.sitemaps import Sitemap, GenericSitemap
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test.utils import override_settings
from django.utils.unittest import skipUnless
from django.utils.formats import localize
from django.utils._os import upath
from django.utils.translation import activate, deactivate
from .base import TestModel, SitemapTestsBase
class HTTPSitemapTests(SitemapTestsBase):
def test_simple_sitemap_index(self):
"A simple sitemap index can be rendered"
response = self.client.get('/simple/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@override_settings(
TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'templates'),)
)
def test_simple_sitemap_custom_index(self):
"A simple sitemap index can be rendered with a custom template"
response = self.client.get('/simple/custom-index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_simple_sitemap_section(self):
"A simple sitemap section can be rendered"
response = self.client.get('/simple/sitemap-simple.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_simple_sitemap(self):
"A simple sitemap can be rendered"
response = self.client.get('/simple/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@override_settings(
TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'templates'),)
)
def test_simple_custom_sitemap(self):
"A simple sitemap can be rendered with a custom template"
response = self.client.get('/simple/custom-sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@skipUnless(settings.USE_I18N, "Internationalization is not enabled")
@override_settings(USE_L10N=True)
def test_localized_priority(self):
"The priority value should not be localized (Refs #14164)"
activate('fr')
self.assertEqual('0,3', localize(0.3))
# Retrieve the sitemap. Check that priorities
# haven't been rendered in localized format
response = self.client.get('/simple/sitemap.xml')
self.assertContains(response, '<priority>0.5</priority>')
self.assertContains(response, '<lastmod>%s</lastmod>' % date.today())
deactivate()
def test_requestsite_sitemap(self):
# Make sure hitting the flatpages sitemap without the sites framework
# installed doesn't raise an exception
Site._meta.installed = False
response = self.client.get('/simple/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today()
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@skipUnless("django.contrib.sites" in settings.INSTALLED_APPS,
"django.contrib.sites app not installed.")
def test_sitemap_get_urls_no_site_1(self):
"""
Check we get ImproperlyConfigured if we don't pass a site object to
Sitemap.get_urls and no Site objects exist
"""
Site.objects.all().delete()
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
def test_sitemap_get_urls_no_site_2(self):
"""
Check we get ImproperlyConfigured when we don't pass a site object to
Sitemap.get_urls if Site objects exists, but the sites framework is not
actually installed.
"""
Site._meta.installed = False
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
def test_sitemap_item(self):
"""
Check to make sure that the raw item is included with each
Sitemap.get_url() url result.
"""
test_sitemap = GenericSitemap({'queryset': TestModel.objects.all()})
def is_testmodel(url):
return isinstance(url['item'], TestModel)
item_in_url_info = all(map(is_testmodel, test_sitemap.get_urls()))
self.assertTrue(item_in_url_info)
def test_cached_sitemap_index(self):
"""
Check that a cached sitemap index can be rendered (#2713).
"""
response = self.client.get('/cached/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/cached/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_x_robots_sitemap(self):
response = self.client.get('/simple/index.xml')
self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')
response = self.client.get('/simple/sitemap.xml')
self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')
|
opennode/waldur-mastermind
|
refs/heads/develop
|
src/waldur_slurm/tests/test_association.py
|
1
|
from rest_framework import test
from . import factories, fixtures
class AssociationGetTest(test.APITransactionTestCase):
def setUp(self) -> None:
self.fixture = fixtures.SlurmFixture()
self.association = self.fixture.association
self.allocation = self.fixture.allocation
self.client.force_login(self.fixture.user)
def test_get_association(self):
url = factories.AssociationFactory.get_url(self.association)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertEqual(self.association.uuid.hex, response.data['uuid'])
def test_filter_associations_by_allocation_uuid(self):
second_association = factories.AssociationFactory(allocation=self.allocation)
url = factories.AssociationFactory.get_list_url()
response = self.client.get(url, {'allocation_uuid': self.allocation.uuid.hex})
self.assertEqual(2, len(response.data))
self.assertEqual(
[self.association.uuid.hex, second_association.uuid.hex],
[item['uuid'] for item in response.data],
)
|
fuselock/odoo
|
refs/heads/8.0
|
addons/l10n_bo/__openerp__.py
|
259
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Bolivia Localization Chart Account",
"version": "1.0",
"description": """
Bolivian accounting chart and tax localization.
Plan contable boliviano e impuestos de acuerdo a disposiciones vigentes
""",
"author": "Cubic ERP",
"website": "http://cubicERP.com",
"category": "Localization/Account Charts",
"depends": [
"account_chart",
],
"data":[
"account_tax_code.xml",
"l10n_bo_chart.xml",
"account_tax.xml",
"l10n_bo_wizard.xml",
],
"demo_xml": [
],
"data": [
],
"active": False,
"installable": True,
"certificate" : "",
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
colemanja91/PyEloqua-Examples
|
refs/heads/master
|
venv/lib/python3.4/site-packages/pip/req/__init__.py
|
806
|
from __future__ import absolute_import
from .req_install import InstallRequirement
from .req_set import RequirementSet, Requirements
from .req_file import parse_requirements
__all__ = [
"RequirementSet", "Requirements", "InstallRequirement",
"parse_requirements",
]
|
hyperspy/hyperspy
|
refs/heads/RELEASE_next_minor
|
hyperspy/drawing/_widgets/vertical_line.py
|
2
|
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from hyperspy.drawing.widgets import Widget1DBase
from hyperspy.drawing.utils import picker_kwargs
from hyperspy.defaults_parser import preferences
class VerticalLineWidget(Widget1DBase):
"""A draggable, vertical line widget.
"""
def _update_patch_position(self):
if self.is_on and self.patch:
self.patch[0].set_xdata(self._pos[0])
self.draw_patch()
def _set_patch(self):
ax = self.ax
kwargs = picker_kwargs(preferences.Plot.pick_tolerance)
self.patch = [ax.axvline(self._pos[0],
color=self.color,
alpha=self.alpha,
**kwargs)]
def _onmousemove(self, event):
"""on mouse motion draw the cursor if picked"""
if self.picked is True and event.inaxes:
self.position = (event.xdata,)
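# A hedged usage sketch; ``ax`` is an assumed matplotlib axes, and the
# attribute/method names (color, set_mpl_ax, position) come from the widget
# base classes as used above:
#
#     w = VerticalLineWidget(axes_manager=None)
#     w.color = 'red'
#     w.set_mpl_ax(ax)       # draws the axvline via _set_patch()
#     w.position = (3.5,)    # moves it via _update_patch_position()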
|
Juniper/ceilometer
|
refs/heads/master
|
ceilometer/alarm/rpc.py
|
2
|
#
# Copyright 2013 eNovance <licensing@enovance.com>
#
# Authors: Mehdi Abaakouk <mehdi.abaakouk@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_context import context
import six
from ceilometer.alarm.storage import models
from ceilometer.i18n import _
from ceilometer import messaging
from ceilometer.openstack.common import log
OPTS = [
cfg.StrOpt('notifier_rpc_topic',
default='alarm_notifier',
help='The topic that ceilometer uses for alarm notifier '
'messages.'),
cfg.StrOpt('partition_rpc_topic',
default='alarm_partition_coordination',
help='The topic that ceilometer uses for alarm partition '
                    'coordination messages. DEPRECATED: RPC-based partitioned '
'alarm evaluation service will be removed in Kilo in '
'favour of the default alarm evaluation service using '
'tooz for partitioning.'),
]
cfg.CONF.register_opts(OPTS, group='alarm')
LOG = log.getLogger(__name__)
class RPCAlarmNotifier(object):
def __init__(self):
transport = messaging.get_transport()
self.client = messaging.get_rpc_client(
transport, topic=cfg.CONF.alarm.notifier_rpc_topic,
version="1.0")
def notify(self, alarm, previous, reason, reason_data):
actions = getattr(alarm, models.Alarm.ALARM_ACTIONS_MAP[alarm.state])
if not actions:
LOG.debug(_('alarm %(alarm_id)s has no action configured '
'for state transition from %(previous)s to '
'state %(state)s, skipping the notification.') %
{'alarm_id': alarm.alarm_id,
'previous': previous,
'state': alarm.state})
return
self.client.cast(context.get_admin_context(),
'notify_alarm', data={
'actions': actions,
'alarm_id': alarm.alarm_id,
'alarm_name': alarm.name,
'severity': alarm.severity,
'previous': previous,
'current': alarm.state,
'reason': six.text_type(reason),
'reason_data': reason_data})
class RPCAlarmPartitionCoordination(object):
def __init__(self):
transport = messaging.get_transport()
self.client = messaging.get_rpc_client(
transport, topic=cfg.CONF.alarm.partition_rpc_topic,
version="1.0")
def presence(self, uuid, priority):
cctxt = self.client.prepare(fanout=True)
return cctxt.cast(context.get_admin_context(),
'presence', data={'uuid': uuid,
'priority': priority})
def assign(self, uuid, alarms):
cctxt = self.client.prepare(fanout=True)
return cctxt.cast(context.get_admin_context(),
'assign', data={'uuid': uuid,
'alarms': alarms})
def allocate(self, uuid, alarms):
cctxt = self.client.prepare(fanout=True)
return cctxt.cast(context.get_admin_context(),
'allocate', data={'uuid': uuid,
'alarms': alarms})
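# A hedged usage sketch of the notifier (the alarm object follows the
# models.Alarm interface used in notify() above):
#
#     notifier = RPCAlarmNotifier()
#     notifier.notify(alarm, previous='ok',
#                     reason='threshold crossed',
#                     reason_data={'value': 42})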
|
s-macke/Kerasimo
|
refs/heads/master
|
models/mnist_acgan.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Train an Auxiliary Classifier Generative Adversarial Network (ACGAN) on the
MNIST dataset. See https://arxiv.org/abs/1610.09585 for more details.
You should start to see reasonable images after ~5 epochs, and good images
by ~15 epochs. You should use a GPU, as the convolution-heavy operations are
very slow on the CPU. Prefer the TensorFlow backend if you plan on iterating,
as the compilation time can be a blocker using Theano.
Timings:
Hardware | Backend | Time / Epoch
-------------------------------------------
CPU | TF | 3 hrs
Titan X (maxwell) | TF | 4 min
Titan X (maxwell) | TH | 7 min
Consult https://github.com/lukedeo/keras-acgan for more information and
example output
"""
from __future__ import print_function
from collections import defaultdict
try:
import cPickle as pickle
except ImportError:
import pickle
from PIL import Image
from six.moves import range
import keras.backend as K
from keras.datasets import mnist
from keras import layers
from keras.layers import Input, Dense, Reshape, Flatten, Embedding, Dropout
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.utils.generic_utils import Progbar
import numpy as np
import sys
from keras.utils import plot_model
np.random.seed(1337)
K.set_image_data_format('channels_first')
def build_generator(latent_size):
# we will map a pair of (z, L), where z is a latent vector and L is a
# label drawn from P_c, to image space (..., 1, 28, 28)
cnn = Sequential()
cnn.add(Dense(1024, input_dim=latent_size, activation='relu'))
cnn.add(Dense(128 * 7 * 7, activation='relu'))
cnn.add(Reshape((128, 7, 7)))
# upsample to (..., 14, 14)
cnn.add(UpSampling2D(size=(2, 2)))
cnn.add(Conv2D(256, 5, padding='same',
activation='relu',
kernel_initializer='glorot_normal'))
# upsample to (..., 28, 28)
cnn.add(UpSampling2D(size=(2, 2)))
cnn.add(Conv2D(128, 5, padding='same',
activation='relu',
kernel_initializer='glorot_normal'))
# take a channel axis reduction
cnn.add(Conv2D(1, 2, padding='same',
activation='tanh',
kernel_initializer='glorot_normal'))
    # this is the z space commonly referred to in GAN papers
latent = Input(shape=(latent_size, ))
# this will be our label
image_class = Input(shape=(1,), dtype='int32')
# 10 classes in MNIST
cls = Flatten()(Embedding(10, latent_size,
embeddings_initializer='glorot_normal')(image_class))
# hadamard product between z-space and a class conditional embedding
h = layers.multiply([latent, cls])
fake_image = cnn(h)
return Model([latent, image_class], fake_image)
def build_discriminator():
# build a relatively standard conv net, with LeakyReLUs as suggested in
# the reference paper
cnn = Sequential()
cnn.add(Conv2D(32, 3, padding='same', strides=2,
input_shape=(1, 28, 28)))
cnn.add(LeakyReLU())
cnn.add(Dropout(0.3))
cnn.add(Conv2D(64, 3, padding='same', strides=1))
cnn.add(LeakyReLU())
cnn.add(Dropout(0.3))
cnn.add(Conv2D(128, 3, padding='same', strides=2))
cnn.add(LeakyReLU())
cnn.add(Dropout(0.3))
cnn.add(Conv2D(256, 3, padding='same', strides=1))
cnn.add(LeakyReLU())
cnn.add(Dropout(0.3))
cnn.add(Flatten())
image = Input(shape=(1, 28, 28))
features = cnn(image)
# first output (name=generation) is whether or not the discriminator
# thinks the image that is being shown is fake, and the second output
# (name=auxiliary) is the class that the discriminator thinks the image
# belongs to.
fake = Dense(1, activation='sigmoid', name='generation')(features)
aux = Dense(10, activation='softmax', name='auxiliary')(features)
return Model(image, [fake, aux])
if __name__ == '__main__':
# batch and latent size taken from the paper
epochs = 100
batch_size = 100
latent_size = 100
# Adam parameters suggested in https://arxiv.org/abs/1511.06434
adam_lr = 0.00005
adam_beta_1 = 0.5
# build the discriminator
discriminator = build_discriminator()
discriminator.compile(
optimizer=Adam(lr=adam_lr, beta_1=adam_beta_1),
loss=['binary_crossentropy', 'sparse_categorical_crossentropy']
)
# build the generator
generator = build_generator(latent_size)
generator.compile(optimizer=Adam(lr=adam_lr, beta_1=adam_beta_1),
loss='binary_crossentropy')
plot_model(generator, to_file='acgan_generator.png', show_shapes=True)
plot_model(discriminator, to_file='acgan_discriminator.png', show_shapes=True)
discriminator.summary()
generator.summary()
latent = Input(shape=(latent_size, ))
image_class = Input(shape=(1,), dtype='int32')
# get a fake image
fake = generator([latent, image_class])
# we only want to be able to train generation for the combined model
discriminator.trainable = False
fake, aux = discriminator(fake)
combined = Model([latent, image_class], [fake, aux])
combined.compile(
optimizer=Adam(lr=adam_lr, beta_1=adam_beta_1),
loss=['binary_crossentropy', 'sparse_categorical_crossentropy']
)
# get our mnist data, and force it to be of shape (..., 1, 28, 28) with
# range [-1, 1]
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
X_train = np.expand_dims(X_train, axis=1)
X_test = (X_test.astype(np.float32) - 127.5) / 127.5
X_test = np.expand_dims(X_test, axis=1)
num_train, num_test = X_train.shape[0], X_test.shape[0]
train_history = defaultdict(list)
test_history = defaultdict(list)
for epoch in range(epochs):
print('Epoch {} of {}'.format(epoch + 1, epochs))
num_batches = int(X_train.shape[0] / batch_size)
progress_bar = Progbar(target=num_batches)
epoch_gen_loss = []
epoch_disc_loss = []
for index in range(num_batches):
progress_bar.update(index)
# generate a new batch of noise
noise = np.random.uniform(-1, 1, (batch_size, latent_size))
# get a batch of real images
image_batch = X_train[index * batch_size:(index + 1) * batch_size]
label_batch = y_train[index * batch_size:(index + 1) * batch_size]
# sample some labels from p_c
sampled_labels = np.random.randint(0, 10, batch_size)
# generate a batch of fake images, using the generated labels as a
# conditioner. We reshape the sampled labels to be
# (batch_size, 1) so that we can feed them into the embedding
# layer as a length one sequence
generated_images = generator.predict(
[noise, sampled_labels.reshape((-1, 1))], verbose=0)
X = np.concatenate((image_batch, generated_images))
y = np.array([1] * batch_size + [0] * batch_size)
aux_y = np.concatenate((label_batch, sampled_labels), axis=0)
# see if the discriminator can figure itself out...
epoch_disc_loss.append(discriminator.train_on_batch(X, [y, aux_y]))
# make new noise. we generate 2 * batch size here such that we have
# the generator optimize over an identical number of images as the
# discriminator
noise = np.random.uniform(-1, 1, (2 * batch_size, latent_size))
sampled_labels = np.random.randint(0, 10, 2 * batch_size)
# we want to train the generator to trick the discriminator
# For the generator, we want all the {fake, not-fake} labels to say
# not-fake
trick = np.ones(2 * batch_size)
epoch_gen_loss.append(combined.train_on_batch(
[noise, sampled_labels.reshape((-1, 1))],
[trick, sampled_labels]))
print('\nTesting for epoch {}:'.format(epoch + 1))
# evaluate the testing loss here
# generate a new batch of noise
noise = np.random.uniform(-1, 1, (num_test, latent_size))
# sample some labels from p_c and generate images from them
sampled_labels = np.random.randint(0, 10, num_test)
generated_images = generator.predict(
[noise, sampled_labels.reshape((-1, 1))], verbose=False)
X = np.concatenate((X_test, generated_images))
y = np.array([1] * num_test + [0] * num_test)
aux_y = np.concatenate((y_test, sampled_labels), axis=0)
# see if the discriminator can figure itself out...
discriminator_test_loss = discriminator.evaluate(
X, [y, aux_y], verbose=False)
discriminator_train_loss = np.mean(np.array(epoch_disc_loss), axis=0)
# make new noise
noise = np.random.uniform(-1, 1, (2 * num_test, latent_size))
sampled_labels = np.random.randint(0, 10, 2 * num_test)
trick = np.ones(2 * num_test)
generator_test_loss = combined.evaluate(
[noise, sampled_labels.reshape((-1, 1))],
[trick, sampled_labels], verbose=False)
generator_train_loss = np.mean(np.array(epoch_gen_loss), axis=0)
# generate an epoch report on performance
train_history['generator'].append(generator_train_loss)
train_history['discriminator'].append(discriminator_train_loss)
test_history['generator'].append(generator_test_loss)
test_history['discriminator'].append(discriminator_test_loss)
print('{0:<22s} | {1:4s} | {2:15s} | {3:5s}'.format(
'component', *discriminator.metrics_names))
print('-' * 65)
ROW_FMT = '{0:<22s} | {1:<4.2f} | {2:<15.2f} | {3:<5.2f}'
print(ROW_FMT.format('generator (train)',
*train_history['generator'][-1]))
print(ROW_FMT.format('generator (test)',
*test_history['generator'][-1]))
print(ROW_FMT.format('discriminator (train)',
*train_history['discriminator'][-1]))
print(ROW_FMT.format('discriminator (test)',
*test_history['discriminator'][-1]))
# save weights every epoch
generator.save_weights(
'params_generator_epoch_{0:03d}.hdf5'.format(epoch), True)
discriminator.save_weights(
'params_discriminator_epoch_{0:03d}.hdf5'.format(epoch), True)
# generate some digits to display
noise = np.random.uniform(-1, 1, (100, latent_size))
sampled_labels = np.array([
[i] * 10 for i in range(10)
]).reshape(-1, 1)
# get a batch to display
generated_images = generator.predict(
[noise, sampled_labels], verbose=0)
# arrange them into a grid
img = (np.concatenate([r.reshape(-1, 28)
for r in np.split(generated_images, 10)
], axis=-1) * 127.5 + 127.5).astype(np.uint8)
Image.fromarray(img).save(
'plot_epoch_{0:03d}_generated.png'.format(epoch))
pickle.dump({'train': train_history, 'test': test_history},
open('acgan-history.pkl', 'wb'))
|
stefan-jonasson/home-assistant
|
refs/heads/dev
|
homeassistant/components/light/isy994.py
|
9
|
"""
Support for ISY994 lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.isy994/
"""
import logging
from typing import Callable
from homeassistant.components.light import (
Light, SUPPORT_BRIGHTNESS)
import homeassistant.components.isy994 as isy
from homeassistant.const import STATE_ON, STATE_OFF
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
UOM = ['2', '51', '78']
STATES = [STATE_OFF, STATE_ON, 'true', 'false', '%']
# pylint: disable=unused-argument
def setup_platform(hass, config: ConfigType,
add_devices: Callable[[list], None], discovery_info=None):
"""Set up the ISY994 light platform."""
if isy.ISY is None or not isy.ISY.connected:
_LOGGER.error("A connection has not been made to the ISY controller")
return False
devices = []
for node in isy.filter_nodes(isy.NODES, units=UOM, states=STATES):
if node.dimmable or '51' in node.uom:
devices.append(ISYLightDevice(node))
add_devices(devices)
class ISYLightDevice(isy.ISYDevice, Light):
"""Representation of an ISY994 light devie."""
def __init__(self, node: object) -> None:
"""Initialize the ISY994 light device."""
isy.ISYDevice.__init__(self, node)
@property
def is_on(self) -> bool:
"""Get whether the ISY994 light is on."""
return self.value > 0
@property
def brightness(self) -> float:
"""Get the brightness of the ISY994 light."""
return self.value
def turn_off(self, **kwargs) -> None:
"""Send the turn off command to the ISY994 light device."""
if not self._node.off():
_LOGGER.debug("Unable to turn off light")
def turn_on(self, brightness=None, **kwargs) -> None:
"""Send the turn on command to the ISY994 light device."""
if not self._node.on(val=brightness):
_LOGGER.debug("Unable to turn on light")
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
|
smunaut/gnuradio
|
refs/heads/master
|
gnuradio-runtime/python/gnuradio/gru/gnuplot_freqz.py
|
59
|
#!/usr/bin/env python
#
# Copyright 2005,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
__all__ = ['gnuplot_freqz']
import tempfile
import os
import math
import numpy
from gnuradio import gr
from gnuradio.gru.freqz import freqz
def gnuplot_freqz (hw, Fs=None, logfreq=False):
"""hw is a tuple of the form (h, w) where h is sequence of complex
freq responses, and w is a sequence of corresponding frequency
points. Plot the frequency response using gnuplot. If Fs is
    provided, use it as the sampling frequency, else use 2*pi.
Returns a handle to the gnuplot graph. When the handle is reclaimed
the graph is torn down."""
data_file = tempfile.NamedTemporaryFile ()
cmd_file = os.popen ('gnuplot', 'w')
h, w = hw
ampl = 20 * numpy.log10 (numpy.absolute (h) + 1e-9)
phase = map (lambda x: math.atan2 (x.imag, x.real), h)
if Fs:
w *= (Fs/(2*math.pi))
for freq, a, ph in zip (w, ampl, phase):
data_file.write ("%g\t%g\t%g\n" % (freq, a, ph))
data_file.flush ()
cmd_file.write ("set grid\n")
if logfreq:
cmd_file.write ("set logscale x\n")
else:
cmd_file.write ("unset logscale x\n")
cmd_file.write ("plot '%s' using 1:2 with lines\n" % (data_file.name,))
cmd_file.flush ()
return (cmd_file, data_file)
def test_plot ():
sample_rate = 2.0e6
#taps = firdes.low_pass(1, sample_rate, 200000, 100000, firdes.WIN_HAMMING)
taps = (0.0007329441141337156, 0.0007755281985737383, 0.0005323155201040208,
-7.679847761841656e-19, -0.0007277769618667662, -0.001415981911122799,
-0.0017135187517851591, -0.001282231998629868, 1.61239866282397e-18,
0.0018589380197227001, 0.0035909228026866913, 0.004260237794369459,
0.00310456077568233, -3.0331308923229716e-18, -0.004244099836796522,
-0.007970594801008701, -0.009214458055794239, -0.006562007591128349,
4.714311174044374e-18, 0.008654761128127575, 0.01605774275958538,
0.01841980405151844, 0.013079923577606678, -6.2821650235090215e-18,
-0.017465557903051376, -0.032989680767059326, -0.03894065320491791,
-0.028868533670902252, 7.388111706347014e-18, 0.04517475143074989,
0.09890196472406387, 0.14991308748722076, 0.18646684288978577,
0.19974154233932495, 0.18646684288978577, 0.14991308748722076,
0.09890196472406387, 0.04517475143074989, 7.388111706347014e-18,
-0.028868533670902252, -0.03894065320491791, -0.032989680767059326,
-0.017465557903051376, -6.2821650235090215e-18, 0.013079923577606678,
0.01841980405151844, 0.01605774275958538, 0.008654761128127575,
4.714311174044374e-18, -0.006562007591128349, -0.009214458055794239,
-0.007970594801008701, -0.004244099836796522, -3.0331308923229716e-18,
0.00310456077568233, 0.004260237794369459, 0.0035909228026866913,
0.0018589380197227001, 1.61239866282397e-18, -0.001282231998629868,
-0.0017135187517851591, -0.001415981911122799, -0.0007277769618667662,
-7.679847761841656e-19, 0.0005323155201040208, 0.0007755281985737383,
0.0007329441141337156)
# print len (taps)
return gnuplot_freqz (freqz (taps, 1), sample_rate)
if __name__ == '__main__':
handle = test_plot ()
raw_input ('Press Enter to continue: ')
|
s0lst1c3/eaphammer
|
refs/heads/master
|
local/hostapd-eaphammer/wpaspy/test.py
|
2
|
#!/usr/bin/python
#
# Test script for wpaspy
# Copyright (c) 2013, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import os
import sys
import time
import wpaspy
wpas_ctrl = '/var/run/wpa_supplicant'
def wpas_connect(host=None, port=9877):
ifaces = []
    if host is not None:
try:
wpas = wpaspy.Ctrl(host, port)
return wpas
        except Exception:
print("Could not connect to host: ", host)
return None
if os.path.isdir(wpas_ctrl):
try:
ifaces = [os.path.join(wpas_ctrl, i) for i in os.listdir(wpas_ctrl)]
except OSError as error:
print("Could not find wpa_supplicant: ", error)
return None
if len(ifaces) < 1:
print("No wpa_supplicant control interface found")
return None
for ctrl in ifaces:
try:
wpas = wpaspy.Ctrl(ctrl)
return wpas
        except Exception:
            # this control interface could not be opened; try the next one
            pass
return None
def main(host=None, port=9877):
print("Testing wpa_supplicant control interface connection")
wpas = wpas_connect(host, port)
if wpas is None:
return
print("Connected to wpa_supplicant")
print(wpas.request('PING'))
mon = wpas_connect(host, port)
if mon is None:
print("Could not open event monitor connection")
return
mon.attach()
print("Scan")
print(wpas.request('SCAN'))
count = 0
while count < 10:
count += 1
time.sleep(1)
while mon.pending():
ev = mon.recv()
print(ev)
if 'CTRL-EVENT-SCAN-RESULTS' in ev:
print('Scan completed')
print(wpas.request('SCAN_RESULTS'))
count = 10
pass
if __name__ == "__main__":
if len(sys.argv) > 2:
main(host=sys.argv[1], port=int(sys.argv[2]))
else:
main()
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
cpython/213_test_pty.py
|
2
|
import pty, os, sys
from test_support import verbose, TestFailed, TestSkipped
TEST_STRING_1 = "I wish to buy a fish license.\n"
TEST_STRING_2 = "For my pet fish, Eric.\n"
if verbose:
def debug(msg):
print msg
else:
def debug(msg):
pass
# Marginal testing of pty suite. Cannot do extensive 'do or fail' testing
# because pty code is not too portable.
try:
debug("Calling master_open()")
master_fd, slave_name = pty.master_open()
debug("Got master_fd '%d', slave_name '%s'"%(master_fd, slave_name))
debug("Calling slave_open(%s)"%`slave_name`)
slave_fd = pty.slave_open(slave_name)
debug("Got slave_fd '%d'"%slave_fd)
except OSError:
# " An optional feature could not be imported " ... ?
raise TestSkipped, "Pseudo-terminals (seemingly) not functional."
if not os.isatty(slave_fd):
raise TestFailed, "slave_fd is not a tty"
# IRIX apparently turns \n into \r\n. Allow that, but avoid allowing other
# differences (like extra whitespace, trailing garbage, etc.)
debug("Writing to slave_fd")
os.write(slave_fd, TEST_STRING_1)
s1 = os.read(master_fd, 1024)
sys.stdout.write(s1.replace("\r\n", "\n"))
debug("Writing chunked output")
os.write(slave_fd, TEST_STRING_2[:5])
os.write(slave_fd, TEST_STRING_2[5:])
s2 = os.read(master_fd, 1024)
sys.stdout.write(s2.replace("\r\n", "\n"))
os.close(slave_fd)
os.close(master_fd)
# basic pty passed.
debug("calling pty.fork()")
pid, master_fd = pty.fork()
if pid == pty.CHILD:
# stdout should be connected to a tty.
if not os.isatty(1):
debug("Child's fd 1 is not a tty?!")
os._exit(3)
# After pty.fork(), the child should already be a session leader.
# (on those systems that have that concept.)
debug("In child, calling os.setsid()")
try:
os.setsid()
except OSError:
# Good, we already were session leader
debug("Good: OSError was raised.")
pass
except AttributeError:
# Have pty, but not setsid() ?
debug("No setsid() available ?")
pass
except:
# We don't want this error to propagate, escaping the call to
# os._exit() and causing very peculiar behavior in the calling
# regrtest.py !
# Note: could add traceback printing here.
debug("An unexpected error was raised.")
os._exit(1)
else:
debug("os.setsid() succeeded! (bad!)")
os._exit(2)
os._exit(4)
else:
debug("Waiting for child (%d) to finish."%pid)
(pid, status) = os.waitpid(pid, 0)
res = status / 256
debug("Child (%d) exited with status %d (%d)."%(pid, res, status))
if res == 1:
raise TestFailed, "Child raised an unexpected exception in os.setsid()"
elif res == 2:
raise TestFailed, "pty.fork() failed to make child a session leader."
elif res == 3:
raise TestFailed, "Child spawned by pty.fork() did not have a tty as stdout"
elif res != 4:
raise TestFailed, "pty.fork() failed for unknown reasons."
os.close(master_fd)
# pty.fork() passed.
|
eviljeff/zamboni
|
refs/heads/master
|
mkt/ratings/validators.py
|
25
|
from django.core.exceptions import ValidationError
def validate_rating(value):
if value > 5 or value < 1 or not isinstance(value, (int, long)):
raise ValidationError('Rating must be an integer between 1 and 5, '
'inclusive')
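# Behavior sketch (not part of the original module):
#   validate_rating(3)    # passes silently
#   validate_rating(6)    # raises ValidationError (out of range)
#   validate_rating(4.5)  # raises ValidationError (not an int/long)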
|
airbnb/caravel
|
refs/heads/master
|
tests/base_tests.py
|
1
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for Superset"""
import json
import unittest
from flask_appbuilder.security.sqla import models as ab_models
from mock import Mock, patch
import pandas as pd
from superset import app, db, is_feature_enabled, security_manager
from superset.connectors.druid.models import DruidCluster, DruidDatasource
from superset.connectors.sqla.models import SqlaTable
from superset.models import core as models
from superset.models.core import Database
from superset.utils.core import get_main_database
BASE_DIR = app.config.get('BASE_DIR')
class SupersetTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(SupersetTestCase, self).__init__(*args, **kwargs)
self.client = app.test_client()
self.maxDiff = None
@classmethod
def create_druid_test_objects(cls):
# create druid cluster and druid datasources
session = db.session
cluster = (
session.query(DruidCluster)
.filter_by(cluster_name='druid_test')
.first()
)
if not cluster:
cluster = DruidCluster(cluster_name='druid_test')
session.add(cluster)
session.commit()
druid_datasource1 = DruidDatasource(
datasource_name='druid_ds_1',
cluster_name='druid_test',
)
session.add(druid_datasource1)
druid_datasource2 = DruidDatasource(
datasource_name='druid_ds_2',
cluster_name='druid_test',
)
session.add(druid_datasource2)
session.commit()
def get_table(self, table_id):
return (
db.session
.query(SqlaTable)
.filter_by(id=table_id)
.one()
)
def get_or_create(self, cls, criteria, session, **kwargs):
obj = session.query(cls).filter_by(**criteria).first()
if not obj:
obj = cls(**criteria)
obj.__dict__.update(**kwargs)
session.add(obj)
session.commit()
return obj
def login(self, username='admin', password='general'):
resp = self.get_resp(
'/login/',
data=dict(username=username, password=password))
self.assertNotIn('User confirmation needed', resp)
def get_slice(self, slice_name, session):
slc = (
session.query(models.Slice)
.filter_by(slice_name=slice_name)
.one()
)
session.expunge_all()
return slc
def get_table_by_name(self, name):
return db.session.query(SqlaTable).filter_by(table_name=name).one()
def get_database_by_id(self, db_id):
return db.session.query(Database).filter_by(id=db_id).one()
def get_druid_ds_by_name(self, name):
return db.session.query(DruidDatasource).filter_by(
datasource_name=name).first()
def get_datasource_mock(self):
datasource = Mock()
results = Mock()
results.query = Mock()
results.status = Mock()
results.error_message = None
results.df = pd.DataFrame()
datasource.type = 'table'
datasource.query = Mock(return_value=results)
mock_dttm_col = Mock()
datasource.get_col = Mock(return_value=mock_dttm_col)
datasource.query = Mock(return_value=results)
datasource.database = Mock()
datasource.database.db_engine_spec = Mock()
datasource.database.db_engine_spec.mutate_expression_label = lambda x: x
return datasource
def get_resp(
self, url, data=None, follow_redirects=True, raise_on_error=True):
"""Shortcut to get the parsed results while following redirects"""
if data:
resp = self.client.post(
url, data=data, follow_redirects=follow_redirects)
else:
resp = self.client.get(url, follow_redirects=follow_redirects)
if raise_on_error and resp.status_code > 400:
raise Exception(
'http request failed with code {}'.format(resp.status_code))
return resp.data.decode('utf-8')
def get_json_resp(
self, url, data=None, follow_redirects=True, raise_on_error=True):
"""Shortcut to get the parsed results while following redirects"""
resp = self.get_resp(url, data, follow_redirects, raise_on_error)
return json.loads(resp)
def get_access_requests(self, username, ds_type, ds_id):
DAR = models.DatasourceAccessRequest
return (
db.session.query(DAR)
.filter(
DAR.created_by == security_manager.find_user(username=username),
DAR.datasource_type == ds_type,
DAR.datasource_id == ds_id,
)
.first()
)
def logout(self):
self.client.get('/logout/', follow_redirects=True)
def grant_public_access_to_table(self, table):
public_role = security_manager.find_role('Public')
perms = db.session.query(ab_models.PermissionView).all()
for perm in perms:
if (perm.permission.name == 'datasource_access' and
perm.view_menu and table.perm in perm.view_menu.name):
security_manager.add_permission_role(public_role, perm)
def revoke_public_access_to_table(self, table):
public_role = security_manager.find_role('Public')
perms = db.session.query(ab_models.PermissionView).all()
for perm in perms:
if (perm.permission.name == 'datasource_access' and
perm.view_menu and table.perm in perm.view_menu.name):
security_manager.del_permission_role(public_role, perm)
def run_sql(self, sql, client_id=None, user_name=None, raise_on_error=False,
query_limit=None):
if user_name:
self.logout()
self.login(username=(user_name if user_name else 'admin'))
dbid = get_main_database(db.session).id
resp = self.get_json_resp(
'/superset/sql_json/',
raise_on_error=False,
data=dict(database_id=dbid, sql=sql, select_as_create_as=False,
client_id=client_id, queryLimit=query_limit),
)
if raise_on_error and 'error' in resp:
raise Exception('run_sql failed')
return resp
@patch.dict('superset.feature_flags', {'FOO': True}, clear=True)
def test_existing_feature_flags(self):
self.assertTrue(is_feature_enabled('FOO'))
@patch.dict('superset.feature_flags', {}, clear=True)
def test_nonexistent_feature_flags(self):
self.assertFalse(is_feature_enabled('FOO'))
|
jernsthausen/datesplitter
|
refs/heads/master
|
lib/python2.7/site-packages/setuptools/tests/test_integration.py
|
125
|
"""Run some integration tests.
Try to install a few packages.
"""
import glob
import os
import sys
import pytest
from setuptools.command.easy_install import easy_install
from setuptools.command import easy_install as easy_install_pkg
from setuptools.dist import Distribution
from setuptools.compat import urlopen
def setup_module(module):
packages = 'stevedore', 'virtualenvwrapper', 'pbr', 'novaclient'
for pkg in packages:
try:
__import__(pkg)
tmpl = "Integration tests cannot run when {pkg} is installed"
pytest.skip(tmpl.format(**locals()))
except ImportError:
pass
try:
urlopen('https://pypi.python.org/pypi')
except Exception as exc:
pytest.skip(reason=str(exc))
@pytest.fixture
def install_context(request, tmpdir, monkeypatch):
"""Fixture to set up temporary installation directory.
"""
# Save old values so we can restore them.
new_cwd = tmpdir.mkdir('cwd')
user_base = tmpdir.mkdir('user_base')
user_site = tmpdir.mkdir('user_site')
install_dir = tmpdir.mkdir('install_dir')
def fin():
# undo the monkeypatch, particularly needed under
# windows because of kept handle on cwd
monkeypatch.undo()
new_cwd.remove()
user_base.remove()
user_site.remove()
install_dir.remove()
request.addfinalizer(fin)
# Change the environment and site settings to control where the
# files are installed and ensure we do not overwrite anything.
monkeypatch.chdir(new_cwd)
monkeypatch.setattr(easy_install_pkg, '__file__', user_site.strpath)
monkeypatch.setattr('site.USER_BASE', user_base.strpath)
monkeypatch.setattr('site.USER_SITE', user_site.strpath)
monkeypatch.setattr('sys.path', sys.path + [install_dir.strpath])
monkeypatch.setenv('PYTHONPATH', os.path.pathsep.join(sys.path))
# Set up the command for performing the installation.
dist = Distribution()
cmd = easy_install(dist)
cmd.install_dir = install_dir.strpath
return cmd
def _install_one(requirement, cmd, pkgname, modulename):
cmd.args = [requirement]
cmd.ensure_finalized()
cmd.run()
target = cmd.install_dir
dest_path = glob.glob(os.path.join(target, pkgname + '*.egg'))
assert dest_path
assert os.path.exists(os.path.join(dest_path[0], pkgname, modulename))
def test_stevedore(install_context):
_install_one('stevedore', install_context,
'stevedore', 'extension.py')
@pytest.mark.xfail
def test_virtualenvwrapper(install_context):
_install_one('virtualenvwrapper', install_context,
'virtualenvwrapper', 'hook_loader.py')
def test_pbr(install_context):
_install_one('pbr', install_context,
'pbr', 'core.py')
@pytest.mark.xfail
def test_python_novaclient(install_context):
_install_one('python-novaclient', install_context,
'novaclient', 'base.py')
|
bliti/django-nonrel-1.5
|
refs/heads/nonrel-1.5
|
tests/regressiontests/decorators/tests.py
|
48
|
import warnings
from functools import wraps
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required, permission_required, user_passes_test
from django.http import HttpResponse, HttpRequest, HttpResponseNotAllowed
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.utils.decorators import method_decorator
from django.utils.functional import allow_lazy, lazy, memoize
from django.utils.unittest import TestCase
from django.views.decorators.cache import cache_page, never_cache, cache_control
from django.views.decorators.clickjacking import xframe_options_deny, xframe_options_sameorigin, xframe_options_exempt
from django.views.decorators.http import require_http_methods, require_GET, require_POST, require_safe, condition
from django.views.decorators.vary import vary_on_headers, vary_on_cookie
def fully_decorated(request):
"""Expected __doc__"""
return HttpResponse('<html><body>dummy</body></html>')
fully_decorated.anything = "Expected __dict__"
def compose(*functions):
# compose(f, g)(*args, **kwargs) == f(g(*args, **kwargs))
functions = list(reversed(functions))
def _inner(*args, **kwargs):
result = functions[0](*args, **kwargs)
for f in functions[1:]:
result = f(result)
return result
return _inner
full_decorator = compose(
# django.views.decorators.http
require_http_methods(["GET"]),
require_GET,
require_POST,
require_safe,
condition(lambda r: None, lambda r: None),
# django.views.decorators.vary
vary_on_headers('Accept-language'),
vary_on_cookie,
# django.views.decorators.cache
cache_page(60*15),
cache_control(private=True),
never_cache,
# django.contrib.auth.decorators
# Apply user_passes_test twice to check #9474
user_passes_test(lambda u:True),
login_required,
permission_required('change_world'),
# django.contrib.admin.views.decorators
staff_member_required,
# django.utils.functional
lambda f: memoize(f, {}, 1),
allow_lazy,
lazy,
)
fully_decorated = full_decorator(fully_decorated)
class DecoratorsTest(TestCase):
def test_attributes(self):
"""
Tests that django decorators set certain attributes of the wrapped
function.
"""
self.assertEqual(fully_decorated.__name__, 'fully_decorated')
self.assertEqual(fully_decorated.__doc__, 'Expected __doc__')
self.assertEqual(fully_decorated.__dict__['anything'], 'Expected __dict__')
def test_user_passes_test_composition(self):
"""
Test that the user_passes_test decorator can be applied multiple times
(#9474).
"""
def test1(user):
user.decorators_applied.append('test1')
return True
def test2(user):
user.decorators_applied.append('test2')
return True
def callback(request):
return request.user.decorators_applied
callback = user_passes_test(test1)(callback)
callback = user_passes_test(test2)(callback)
class DummyUser(object): pass
class DummyRequest(object): pass
request = DummyRequest()
request.user = DummyUser()
request.user.decorators_applied = []
response = callback(request)
self.assertEqual(response, ['test2', 'test1'])
def test_cache_page_new_style(self):
"""
Test that we can call cache_page the new way
"""
def my_view(request):
return "response"
my_view_cached = cache_page(123)(my_view)
self.assertEqual(my_view_cached(HttpRequest()), "response")
my_view_cached2 = cache_page(123, key_prefix="test")(my_view)
self.assertEqual(my_view_cached2(HttpRequest()), "response")
def test_cache_page_old_style(self):
"""
Test that we can call cache_page the old way
"""
def my_view(request):
return "response"
with warnings.catch_warnings(record=True):
my_view_cached = cache_page(my_view, 123)
self.assertEqual(my_view_cached(HttpRequest()), "response")
my_view_cached2 = cache_page(my_view, 123, key_prefix="test")
self.assertEqual(my_view_cached2(HttpRequest()), "response")
my_view_cached3 = cache_page(my_view)
self.assertEqual(my_view_cached3(HttpRequest()), "response")
my_view_cached4 = cache_page()(my_view)
self.assertEqual(my_view_cached4(HttpRequest()), "response")
def test_require_safe_accepts_only_safe_methods(self):
"""
Test for the require_safe decorator.
A view returns either a response or an exception.
Refs #15637.
"""
def my_view(request):
return HttpResponse("OK")
my_safe_view = require_safe(my_view)
request = HttpRequest()
request.method = 'GET'
self.assertTrue(isinstance(my_safe_view(request), HttpResponse))
request.method = 'HEAD'
self.assertTrue(isinstance(my_safe_view(request), HttpResponse))
request.method = 'POST'
self.assertTrue(isinstance(my_safe_view(request), HttpResponseNotAllowed))
request.method = 'PUT'
self.assertTrue(isinstance(my_safe_view(request), HttpResponseNotAllowed))
request.method = 'DELETE'
self.assertTrue(isinstance(my_safe_view(request), HttpResponseNotAllowed))
# For testing method_decorator, a decorator that assumes a single argument.
# We will get a TypeError if there is a mismatch in the number of arguments.
def simple_dec(func):
def wrapper(arg):
return func("test:" + arg)
return wraps(func)(wrapper)
simple_dec_m = method_decorator(simple_dec)
# For testing method_decorator, two decorators that add an attribute to the function
def myattr_dec(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.myattr = True
return wraps(func)(wrapper)
myattr_dec_m = method_decorator(myattr_dec)
def myattr2_dec(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.myattr2 = True
return wraps(func)(wrapper)
myattr2_dec_m = method_decorator(myattr2_dec)
class MethodDecoratorTests(TestCase):
"""
Tests for method_decorator
"""
def test_preserve_signature(self):
class Test(object):
@simple_dec_m
def say(self, arg):
return arg
self.assertEqual("test:hello", Test().say("hello"))
def test_preserve_attributes(self):
# Sanity check myattr_dec and myattr2_dec
@myattr_dec
@myattr2_dec
def func():
pass
self.assertEqual(getattr(func, 'myattr', False), True)
self.assertEqual(getattr(func, 'myattr2', False), True)
# Now check method_decorator
class Test(object):
@myattr_dec_m
@myattr2_dec_m
def method(self):
"A method"
pass
self.assertEqual(getattr(Test().method, 'myattr', False), True)
self.assertEqual(getattr(Test().method, 'myattr2', False), True)
self.assertEqual(getattr(Test.method, 'myattr', False), True)
self.assertEqual(getattr(Test.method, 'myattr2', False), True)
self.assertEqual(Test.method.__doc__, 'A method')
self.assertEqual(Test.method.__name__, 'method')
class XFrameOptionsDecoratorsTests(TestCase):
"""
Tests for the X-Frame-Options decorators.
"""
def test_deny_decorator(self):
"""
Ensures @xframe_options_deny properly sets the X-Frame-Options header.
"""
@xframe_options_deny
def a_view(request):
return HttpResponse()
r = a_view(HttpRequest())
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_sameorigin_decorator(self):
"""
Ensures @xframe_options_sameorigin properly sets the X-Frame-Options
header.
"""
@xframe_options_sameorigin
def a_view(request):
return HttpResponse()
r = a_view(HttpRequest())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_exempt_decorator(self):
"""
Ensures @xframe_options_exempt properly instructs the
XFrameOptionsMiddleware to NOT set the header.
"""
@xframe_options_exempt
def a_view(request):
return HttpResponse()
req = HttpRequest()
resp = a_view(req)
self.assertEqual(resp.get('X-Frame-Options', None), None)
self.assertTrue(resp.xframe_options_exempt)
# Since the real purpose of the exempt decorator is to suppress
# the middleware's functionality, let's make sure it actually works...
r = XFrameOptionsMiddleware().process_response(req, resp)
self.assertEqual(r.get('X-Frame-Options', None), None)
|
codysk/BGmi
|
refs/heads/master
|
tests/test_cli.py
|
1
|
from unittest import mock
from bgmi.lib.models import Bangumi, Filter, Followed
from bgmi.main import main
from bgmi.website.bangumi_moe import BangumiMoe
def test_gen_nginx_conf():
main("gen nginx.conf --server-name _".split())
def test_cal_force_update(clean_bgmi):
class MockWebsite(BangumiMoe):
def fetch_bangumi_calendar(self):
bangumi = BangumiMoe().fetch_bangumi_calendar()
bangumi[0].update_time = "Unknown"
return bangumi
with mock.patch("bgmi.lib.controllers.website", MockWebsite()):
main("cal -f".split())
assert [
x.name for x in Bangumi.select().where(Bangumi.update_time == "Unknown")
], "at least 1 bangumi's update_time is 'Unknown'"
def test_cal_config():
main("config".split())
main("config ADMIN_TOKEN 233".split())
main("config DOWNLOAD_DELEGATE xunlei".split())
main("config BANGUMI_MOE_URL https://bangumi.moe".split())
def test_add(bangumi_names, clean_bgmi):
main("add {} {} {}".format(*bangumi_names).split())
def test_update(bangumi_names, clean_bgmi):
main("add {} {} {}".format(*bangumi_names).split())
main(["update"])
def test_update_single(bangumi_names, clean_bgmi):
name = bangumi_names[0]
main("add {}".format(name).split())
main(["update", name])
def test_search(bangumi_names, clean_bgmi):
main(["search", "海贼王", "--regex-filter", ".*MP4.*720P.*"])
def test_delete(bangumi_names, clean_bgmi):
name = bangumi_names[0]
main("add {} --episode 0".format(name).split())
main("delete --name {}".format(name).split())
def test_delete_batch(bangumi_names, clean_bgmi):
main("add {} {} {} --episode 0".format(*bangumi_names).split())
main("delete --clear-all --batch".split())
def test_filter(bangumi_names, clean_bgmi):
name = bangumi_names[0]
main("add {} --episode 0".format(name).split())
main(["filter", name, "--subtitle", "", "--exclude", "MKV", "--regex", "720p|720P"])
f = Filter.get(bangumi_name=name, exclude="MKV", regex="720p|720P")
assert not f.include
assert not f.subtitle
def test_fetch(bangumi_names, clean_bgmi):
name = bangumi_names[0]
main("add {} --episode 0".format(name).split())
main("fetch {}".format(name).split())
def test_mark(bangumi_names, clean_bgmi):
name = bangumi_names[0]
main("add {} --episode 0".format(name).split())
main("mark {} 1".format(name).split())
assert Followed.get(bangumi_name=name).episode == 1
|
catchmrbharath/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/example/echo_wsh.py
|
494
|
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
_GOODBYE_MESSAGE = u'Goodbye'
def web_socket_do_extra_handshake(request):
# This example handler accepts any request. See origin_check_wsh.py for how
# to reject access from untrusted scripts based on origin value.
pass # Always accept.
def web_socket_transfer_data(request):
while True:
line = request.ws_stream.receive_message()
if line is None:
return
if isinstance(line, unicode):
request.ws_stream.send_message(line, binary=False)
if line == _GOODBYE_MESSAGE:
return
else:
request.ws_stream.send_message(line, binary=True)
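# Sketch of exercising this handler (assuming the stock pywebsocket tree):
# serve it with the standalone server, e.g.
#   python standalone.py -p 9998 -w <handler_root>
# then point a WebSocket client at ws://localhost:9998/echo; text frames
# are echoed as text, binary frames as binary, and sending u'Goodbye'
# ends the transfer loop.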
# vi:sts=4 sw=4 et
|
defionscode/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/aci/aci_epg_to_contract.py
|
15
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_epg_to_contract
short_description: Bind EPGs to Contracts (fv:RsCons, fv:RsProv)
description:
- Bind EPGs to Contracts on Cisco ACI fabrics.
notes:
- The C(tenant), C(app_profile), C(EPG), and C(Contract) used must exist before using this module in your playbook.
The M(aci_tenant), M(aci_ap), M(aci_epg), and M(aci_contract) modules can be used for this.
- More information about the internal APIC classes B(fv:RsCons) and B(fv:RsProv) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
options:
ap:
description:
- Name of an existing application network profile, that will contain the EPGs.
aliases: [ app_profile, app_profile_name ]
contract:
description:
- The name of the contract.
aliases: [ contract_name ]
contract_type:
description:
- Determines if the EPG should Provide or Consume the Contract.
required: yes
choices: [ consumer, provider ]
epg:
description:
- The name of the end point group.
aliases: [ epg_name ]
priority:
description:
- QoS class.
- The APIC defaults to C(unspecified) when unset during creation.
choices: [ level1, level2, level3, unspecified ]
provider_match:
description:
- The matching algorithm for Provided Contracts.
- The APIC defaults to C(at_least_one) when unset during creation.
choices: [ all, at_least_one, at_most_one, none ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
tenant:
description:
- Name of an existing tenant.
aliases: [ tenant_name ]
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new contract to EPG binding
aci_epg_to_contract:
host: apic
username: admin
password: SomeSecretPassword
tenant: anstest
ap: anstest
epg: anstest
contract: anstest_http
contract_type: provider
state: present
delegate_to: localhost
- name: Remove an existing contract to EPG binding
aci_epg_to_contract:
host: apic
username: admin
password: SomeSecretPassword
tenant: anstest
ap: anstest
epg: anstest
contract: anstest_http
contract_type: provider
state: absent
delegate_to: localhost
- name: Query a specific contract to EPG binding
aci_epg_to_contract:
host: apic
username: admin
password: SomeSecretPassword
tenant: anstest
ap: anstest
epg: anstest
contract: anstest_http
contract_type: provider
state: query
delegate_to: localhost
register: query_result
- name: Query all provider contract to EPG bindings
aci_epg_to_contract:
host: apic
username: admin
password: SomeSecretPassword
contract_type: provider
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
ACI_CLASS_MAPPING = {"consumer": {"class": "fvRsCons", "rn": "rscons-"}, "provider": {"class": "fvRsProv", "rn": "rsprov-"}}
PROVIDER_MATCH_MAPPING = {"all": "All", "at_least_one": "AtleastOne", "at_most_one": "AtmostOne", "none": "None"}
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
ap=dict(type='str', aliases=['app_profile', 'app_profile_name']), # Not required for querying all objects
epg=dict(type='str', aliases=['epg_name']), # Not required for querying all objects
contract=dict(type='str', aliases=['contract_name']), # Not required for querying all objects
contract_type=dict(type='str', required=True, choices=['consumer', 'provider']),
priority=dict(type='str', choices=['level1', 'level2', 'level3', 'unspecified']),
provider_match=dict(type='str', choices=['all', 'at_least_one', 'at_most_one', 'none']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['ap', 'contract', 'epg', 'tenant']],
['state', 'present', ['ap', 'contract', 'epg', 'tenant']],
],
)
ap = module.params['ap']
contract = module.params['contract']
contract_type = module.params['contract_type']
epg = module.params['epg']
priority = module.params['priority']
provider_match = module.params['provider_match']
if provider_match is not None:
provider_match = PROVIDER_MATCH_MAPPING[provider_match]
state = module.params['state']
tenant = module.params['tenant']
aci_class = ACI_CLASS_MAPPING[contract_type]["class"]
aci_rn = ACI_CLASS_MAPPING[contract_type]["rn"]
if contract_type == "consumer" and provider_match is not None:
module.fail_json(msg="the 'provider_match' is only configurable for Provided Contracts")
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
module_object=tenant,
target_filter={'name': tenant},
),
subclass_1=dict(
aci_class='fvAp',
aci_rn='ap-{0}'.format(ap),
module_object=ap,
target_filter={'name': ap},
),
subclass_2=dict(
aci_class='fvAEPg',
aci_rn='epg-{0}'.format(epg),
module_object=epg,
target_filter={'name': epg},
),
subclass_3=dict(
aci_class=aci_class,
aci_rn='{0}{1}'.format(aci_rn, contract),
module_object=contract,
target_filter={'tnVzBrCPName': contract},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class=aci_class,
class_config=dict(
matchT=provider_match,
prio=priority,
tnVzBrCPName=contract,
),
)
aci.get_diff(aci_class=aci_class)
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
|
kubeflow/kfp-tekton
|
refs/heads/master
|
backend/src/apiserver/visualization/types/table.py
|
2
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# gcsfs is required for pandas GCS integration.
import gcsfs
from itables import show
# itables is required, as importing it changes the way pandas DataFrames
# are rendered.
import itables.interactive
from itables.javascript import load_datatables
import itables.options as opts
import pandas as pd
from tensorflow.python.lib.io import file_io
# Forcefully load required JavaScript and CSS for datatables.
load_datatables()
# Remove the maxBytes limit to prevent issues where the entire table cannot
# be rendered due to the size of the data.
opts.maxBytes = 0
dfs = []
files = file_io.get_matching_files(source)
# Read data from each matching file into a DataFrame object.
if not variables.get("headers", False):
    # If no custom headers are provided, let pandas use the first row of
    # each file as the header.
    for f in files:
        dfs.append(pd.read_csv(f))
else:
    # If custom headers are provided, read without inferring headers; the
    # custom names are applied to the concatenated DataFrame below.
    for f in files:
        dfs.append(pd.read_csv(f, header=None))
# Display DataFrame as output.
df = pd.concat(dfs)
if variables.get("headers", False):
df.columns = variables.get("headers")
show(df)
|
tudennis/LeetCode---kamyu104-11-24-2015
|
refs/heads/master
|
Python/max-area-of-island.py
|
2
|
# Time: O(m * n)
# Space: O(m * n), the max depth of dfs may be m * n
# Given a non-empty 2D array grid of 0's and 1's,
# an island is a group of 1's (representing land) connected 4-directionally (horizontal or vertical).
# You may assume all four edges of the grid are surrounded by water.
#
# Find the maximum area of an island in the given 2D array. (If there is no island, the maximum area is 0.)
#
# Example 1:
# [[0,0,1,0,0,0,0,1,0,0,0,0,0],
# [0,0,0,0,0,0,0,1,1,1,0,0,0],
# [0,1,1,0,1,0,0,0,0,0,0,0,0],
# [0,1,0,0,1,1,0,0,1,0,1,0,0],
# [0,1,0,0,1,1,0,0,1,1,1,0,0],
# [0,0,0,0,0,0,0,0,0,0,1,0,0],
# [0,0,0,0,0,0,0,1,1,1,0,0,0],
# [0,0,0,0,0,0,0,1,1,0,0,0,0]]
#
# Given the above grid, return 6. Note the answer is not 11,
# because the island must be connected 4-directionally.
#
# Example 2:
# [[0,0,0,0,0,0,0,0]]
#
# Given the above grid, return 0.
#
# Note: The length of each dimension in the given grid does not exceed 50.
class Solution(object):
def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
directions = [[-1, 0], [ 1, 0], [ 0, 1], [ 0, -1]]
def dfs(i, j, grid, area):
if not (0 <= i < len(grid) and \
0 <= j < len(grid[0]) and \
grid[i][j] > 0):
return False
grid[i][j] *= -1
area[0] += 1
for d in directions:
dfs(i+d[0], j+d[1], grid, area)
return True
result = 0
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
area = [0]
if dfs(i, j, grid, area):
result = max(result, area[0])
return result
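# The recursive dfs above can exceed CPython's default recursion limit
# (a 50 x 50 all-land grid nests ~2500 calls, past the default 1000), so
# here is an equivalent iterative sketch using an explicit stack. Same
# O(m * n) time; like the original, it marks visited cells by negating
# them. This is an illustrative alternative, not part of the original
# solution.
class Solution2(object):
    def maxAreaOfIsland(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int
        """
        directions = [(-1, 0), (1, 0), (0, 1), (0, -1)]
        result = 0
        for i in xrange(len(grid)):
            for j in xrange(len(grid[0])):
                if grid[i][j] <= 0:
                    continue
                area, stack = 0, [(i, j)]
                grid[i][j] *= -1          # mark the seed cell as visited
                while stack:
                    r, c = stack.pop()
                    area += 1
                    for dr, dc in directions:
                        nr, nc = r + dr, c + dc
                        if 0 <= nr < len(grid) and 0 <= nc < len(grid[0]) \
                                and grid[nr][nc] > 0:
                            grid[nr][nc] *= -1   # mark before pushing
                            stack.append((nr, nc))
                result = max(result, area)
        return result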
|
MatthewWilkes/django
|
refs/heads/master
|
django/db/models/fields/related_lookups.py
|
287
|
from django.db.models.lookups import (
Exact, GreaterThan, GreaterThanOrEqual, In, LessThan, LessThanOrEqual,
)
class MultiColSource(object):
contains_aggregate = False
def __init__(self, alias, targets, sources, field):
self.targets, self.sources, self.field, self.alias = targets, sources, field, alias
self.output_field = self.field
def __repr__(self):
return "{}({}, {})".format(
self.__class__.__name__, self.alias, self.field)
def relabeled_clone(self, relabels):
return self.__class__(relabels.get(self.alias, self.alias),
self.targets, self.sources, self.field)
def get_normalized_value(value, lhs):
from django.db.models import Model
if isinstance(value, Model):
value_list = []
# A case like Restaurant.objects.filter(place=restaurant_instance),
        # where place is a OneToOneField that is also the primary key of Restaurant.
if getattr(lhs.output_field, 'primary_key', False):
return (value.pk,)
sources = lhs.output_field.get_path_info()[-1].target_fields
for source in sources:
while not isinstance(value, source.model) and source.remote_field:
source = source.remote_field.model._meta.get_field(source.remote_field.field_name)
value_list.append(getattr(value, source.attname))
return tuple(value_list)
if not isinstance(value, tuple):
return (value,)
return value
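# Illustration (hypothetical models): for
#   Restaurant.objects.filter(place=place_instance)
# where place is a relation, get_normalized_value(place_instance, lhs)
# extracts the raw column values from the instance, e.g. (place_instance.pk,),
# so plain values and model instances both normalize to a tuple.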
class RelatedIn(In):
def get_prep_lookup(self):
if not isinstance(self.lhs, MultiColSource) and self.rhs_is_direct_value():
# If we get here, we are dealing with single-column relations.
self.rhs = [get_normalized_value(val, self.lhs)[0] for val in self.rhs]
# We need to run the related field's get_prep_lookup(). Consider case
# ForeignKey to IntegerField given value 'abc'. The ForeignKey itself
# doesn't have validation for non-integers, so we must run validation
# using the target field.
if hasattr(self.lhs.output_field, 'get_path_info'):
# Run the target field's get_prep_lookup. We can safely assume there is
# only one as we don't get to the direct value branch otherwise.
self.rhs = self.lhs.output_field.get_path_info()[-1].target_fields[-1].get_prep_lookup(
self.lookup_name, self.rhs)
return super(RelatedIn, self).get_prep_lookup()
def as_sql(self, compiler, connection):
if isinstance(self.lhs, MultiColSource):
# For multicolumn lookups we need to build a multicolumn where clause.
# This clause is either a SubqueryConstraint (for values that need to be compiled to
# SQL) or a OR-combined list of (col1 = val1 AND col2 = val2 AND ...) clauses.
from django.db.models.sql.where import WhereNode, SubqueryConstraint, AND, OR
root_constraint = WhereNode(connector=OR)
if self.rhs_is_direct_value():
values = [get_normalized_value(value, self.lhs) for value in self.rhs]
for value in values:
value_constraint = WhereNode()
for source, target, val in zip(self.lhs.sources, self.lhs.targets, value):
lookup_class = target.get_lookup('exact')
lookup = lookup_class(target.get_col(self.lhs.alias, source), val)
value_constraint.add(lookup, AND)
root_constraint.add(value_constraint, OR)
else:
root_constraint.add(
SubqueryConstraint(
self.lhs.alias, [target.column for target in self.lhs.targets],
[source.name for source in self.lhs.sources], self.rhs),
AND)
return root_constraint.as_sql(compiler, connection)
else:
return super(RelatedIn, self).as_sql(compiler, connection)
class RelatedLookupMixin(object):
def get_prep_lookup(self):
if not isinstance(self.lhs, MultiColSource) and self.rhs_is_direct_value():
# If we get here, we are dealing with single-column relations.
self.rhs = get_normalized_value(self.rhs, self.lhs)[0]
# We need to run the related field's get_prep_lookup(). Consider case
# ForeignKey to IntegerField given value 'abc'. The ForeignKey itself
# doesn't have validation for non-integers, so we must run validation
# using the target field.
if hasattr(self.lhs.output_field, 'get_path_info'):
# Get the target field. We can safely assume there is only one
# as we don't get to the direct value branch otherwise.
self.rhs = self.lhs.output_field.get_path_info()[-1].target_fields[-1].get_prep_lookup(
self.lookup_name, self.rhs)
return super(RelatedLookupMixin, self).get_prep_lookup()
def as_sql(self, compiler, connection):
if isinstance(self.lhs, MultiColSource):
assert self.rhs_is_direct_value()
self.rhs = get_normalized_value(self.rhs, self.lhs)
from django.db.models.sql.where import WhereNode, AND
root_constraint = WhereNode()
for target, source, val in zip(self.lhs.targets, self.lhs.sources, self.rhs):
lookup_class = target.get_lookup(self.lookup_name)
root_constraint.add(
lookup_class(target.get_col(self.lhs.alias, source), val), AND)
return root_constraint.as_sql(compiler, connection)
return super(RelatedLookupMixin, self).as_sql(compiler, connection)
class RelatedExact(RelatedLookupMixin, Exact):
pass
class RelatedLessThan(RelatedLookupMixin, LessThan):
pass
class RelatedGreaterThan(RelatedLookupMixin, GreaterThan):
pass
class RelatedGreaterThanOrEqual(RelatedLookupMixin, GreaterThanOrEqual):
pass
class RelatedLessThanOrEqual(RelatedLookupMixin, LessThanOrEqual):
pass
|
sigma-random/scrapy
|
refs/heads/master
|
scrapy/utils/response.py
|
28
|
"""
This module provides some useful functions for working with
scrapy.http.Response objects
"""
import os
import re
import weakref
import webbrowser
import tempfile
from twisted.web import http
from twisted.web.http import RESPONSES
from w3lib import html
from scrapy.http import HtmlResponse, TextResponse
from scrapy.utils.decorator import deprecated
@deprecated
def body_or_str(*a, **kw):
from scrapy.utils.iterators import _body_or_str
return _body_or_str(*a, **kw)
_baseurl_cache = weakref.WeakKeyDictionary()
def get_base_url(response):
"""Return the base url of the given response, joined with the response url"""
if response not in _baseurl_cache:
text = response.body_as_unicode()[0:4096]
_baseurl_cache[response] = html.get_base_url(text, response.url, \
response.encoding)
return _baseurl_cache[response]
_noscript_re = re.compile(u'<noscript>.*?</noscript>', re.IGNORECASE | re.DOTALL)
_script_re = re.compile(u'<script.*?>.*?</script>', re.IGNORECASE | re.DOTALL)
_metaref_cache = weakref.WeakKeyDictionary()
def get_meta_refresh(response):
"""Parse the http-equiv refrsh parameter from the given response"""
if response not in _metaref_cache:
text = response.body_as_unicode()[0:4096]
text = _noscript_re.sub(u'', text)
text = _script_re.sub(u'', text)
_metaref_cache[response] = html.get_meta_refresh(text, response.url, \
response.encoding)
return _metaref_cache[response]
def response_status_message(status):
"""Return status code plus status text descriptive message
>>> response_status_message(200)
'200 OK'
>>> response_status_message(404)
'404 Not Found'
"""
return '%s %s' % (status, http.responses.get(int(status)))
def response_httprepr(response):
"""Return raw HTTP representation (as string) of the given response. This
is provided only for reference, since it's not the exact stream of bytes
that was received (that's not exposed by Twisted).
"""
s = "HTTP/1.1 %d %s\r\n" % (response.status, RESPONSES.get(response.status, ''))
if response.headers:
s += response.headers.to_string() + "\r\n"
s += "\r\n"
s += response.body
return s
def open_in_browser(response, _openfunc=webbrowser.open):
"""Open the given response in a local web browser, populating the <base>
tag for external links to work
"""
# XXX: this implementation is a bit dirty and could be improved
body = response.body
if isinstance(response, HtmlResponse):
if '<base' not in body:
body = body.replace('<head>', '<head><base href="%s">' % response.url)
ext = '.html'
elif isinstance(response, TextResponse):
ext = '.txt'
else:
raise TypeError("Unsupported response type: %s" % \
response.__class__.__name__)
fd, fname = tempfile.mkstemp(ext)
os.write(fd, body)
os.close(fd)
return _openfunc("file://%s" % fname)
|
fxfitz/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovirt/ovirt_vmpools.py
|
75
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_vmpools
short_description: Module to manage VM pools in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage VM pools in oVirt/RHV."
options:
name:
description:
- "Name of the VM pool to manage."
required: true
state:
description:
- "Should the VM pool be present/absent."
- "Note that when C(state) is I(absent) all VMs in VM pool are stopped and removed."
choices: ['present', 'absent']
default: present
template:
description:
- "Name of the template, which will be used to create VM pool."
description:
description:
- "Description of the VM pool."
cluster:
description:
- "Name of the cluster, where VM pool should be created."
type:
description:
- "Type of the VM pool. Either manual or automatic."
- "C(manual) - The administrator is responsible for explicitly returning the virtual machine to the pool.
The virtual machine reverts to the original base image after the administrator returns it to the pool."
- "C(Automatic) - When the virtual machine is shut down, it automatically reverts to its base image and
is returned to the virtual machine pool."
- "Default value is set by engine."
choices: ['manual', 'automatic']
vm_per_user:
description:
- "Maximum number of VMs a single user can attach to from this pool."
- "Default value is set by engine."
prestarted:
description:
- "Number of pre-started VMs defines the number of VMs in run state, that are waiting
to be attached to Users."
- "Default value is set by engine."
vm_count:
description:
- "Number of VMs in the pool."
- "Default value is set by engine."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create VM pool from template
- ovirt_vmpools:
cluster: mycluster
name: myvmpool
template: rhel7
vm_count: 2
prestarted: 2
vm_per_user: 1
# Remove vmpool, note that all VMs in pool will be stopped and removed:
- ovirt_vmpools:
state: absent
name: myvmpool
'''
RETURN = '''
id:
description: ID of the VM pool which is managed
returned: On success if VM pool is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
vm_pool:
description: "Dictionary of all the VM pool attributes. VM pool attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm_pool."
returned: On success if VM pool is found.
type: dict
'''
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_params,
check_sdk,
create_connection,
equal,
get_link_name,
ovirt_full_argument_spec,
wait,
)
class VmPoolsModule(BaseModule):
def build_entity(self):
return otypes.VmPool(
name=self._module.params['name'],
description=self._module.params['description'],
comment=self._module.params['comment'],
cluster=otypes.Cluster(
name=self._module.params['cluster']
) if self._module.params['cluster'] else None,
template=otypes.Template(
name=self._module.params['template']
) if self._module.params['template'] else None,
max_user_vms=self._module.params['vm_per_user'],
prestarted_vms=self._module.params['prestarted'],
size=self._module.params['vm_count'],
type=otypes.VmPoolType(
self._module.params['type']
) if self._module.params['type'] else None,
)
def update_check(self, entity):
return (
equal(self._module.params.get('cluster'), get_link_name(self._connection, entity.cluster)) and
equal(self._module.params.get('description'), entity.description) and
equal(self._module.params.get('comment'), entity.comment) and
equal(self._module.params.get('vm_per_user'), entity.max_user_vms) and
equal(self._module.params.get('prestarted'), entity.prestarted_vms) and
equal(self._module.params.get('vm_count'), entity.size)
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
name=dict(default=None, required=True),
template=dict(default=None),
cluster=dict(default=None),
description=dict(default=None),
comment=dict(default=None),
vm_per_user=dict(default=None, type='int'),
prestarted=dict(default=None, type='int'),
vm_count=dict(default=None, type='int'),
type=dict(default=None, choices=['automatic', 'manual']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
check_params(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
vm_pools_service = connection.system_service().vm_pools_service()
vm_pools_module = VmPoolsModule(
connection=connection,
module=module,
service=vm_pools_service,
)
state = module.params['state']
if state == 'present':
ret = vm_pools_module.create()
# Wait for all VM pool VMs to be created:
if module.params['wait']:
vms_service = connection.system_service().vms_service()
for vm in vms_service.list(search='pool=%s' % module.params['name']):
wait(
service=vms_service.service(vm.id),
condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
timeout=module.params['timeout'],
)
elif state == 'absent':
ret = vm_pools_module.remove()
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
|
Mhynlo/SickRage
|
refs/heads/master
|
lib/unidecode/x1d7.py
|
248
|
data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'', # 0x6f
'', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'', # 0x74
'', # 0x75
'', # 0x76
'', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'', # 0x7d
'', # 0x7e
'', # 0x7f
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'', # 0xcd
'0', # 0xce
'1', # 0xcf
'2', # 0xd0
'3', # 0xd1
'4', # 0xd2
'5', # 0xd3
'6', # 0xd4
'7', # 0xd5
'8', # 0xd6
'9', # 0xd7
'0', # 0xd8
'1', # 0xd9
'2', # 0xda
'3', # 0xdb
'4', # 0xdc
'5', # 0xdd
'6', # 0xde
'7', # 0xdf
'8', # 0xe0
'9', # 0xe1
'0', # 0xe2
'1', # 0xe3
'2', # 0xe4
'3', # 0xe5
'4', # 0xe6
'5', # 0xe7
'6', # 0xe8
'7', # 0xe9
'8', # 0xea
'9', # 0xeb
'0', # 0xec
'1', # 0xed
'2', # 0xee
'3', # 0xef
'4', # 0xf0
'5', # 0xf1
'6', # 0xf2
'7', # 0xf3
'8', # 0xf4
'9', # 0xf5
'0', # 0xf6
'1', # 0xf7
'2', # 0xf8
'3', # 0xf9
'4', # 0xfa
'5', # 0xfb
'6', # 0xfc
'7', # 0xfd
'8', # 0xfe
'9', # 0xff
)
|
bjornlevi/5thpower
|
refs/heads/master
|
afmaeli/env/lib/python3.6/site-packages/setuptools/command/py36compat.py
|
286
|
import os
from glob import glob
from distutils.util import convert_path
from distutils.command import sdist
from setuptools.extern.six.moves import filter
class sdist_add_defaults:
"""
Mix-in providing forward-compatibility for functionality as found in
distutils on Python 3.7.
Do not edit the code in this class except to update functionality
as implemented in distutils. Instead, override in the subclass.
"""
def add_defaults(self):
"""Add all the default files to self.filelist:
- README or README.txt
- setup.py
- test/test*.py
- all pure Python modules mentioned in setup script
- all files pointed by package_data (build_py)
- all files defined in data_files.
- all files defined as scripts.
- all C sources listed as part of extensions or C libraries
in the setup script (doesn't catch C headers!)
Warns if (README or README.txt) or setup.py are missing; everything
else is optional.
"""
self._add_defaults_standards()
self._add_defaults_optional()
self._add_defaults_python()
self._add_defaults_data_files()
self._add_defaults_ext()
self._add_defaults_c_libs()
self._add_defaults_scripts()
@staticmethod
def _cs_path_exists(fspath):
"""
Case-sensitive path existence check
>>> sdist_add_defaults._cs_path_exists(__file__)
True
>>> sdist_add_defaults._cs_path_exists(__file__.upper())
False
"""
if not os.path.exists(fspath):
return False
# make absolute so we always have a directory
abspath = os.path.abspath(fspath)
directory, filename = os.path.split(abspath)
return filename in os.listdir(directory)
def _add_defaults_standards(self):
standards = [self.READMES, self.distribution.script_name]
for fn in standards:
if isinstance(fn, tuple):
alts = fn
got_it = False
for fn in alts:
if self._cs_path_exists(fn):
got_it = True
self.filelist.append(fn)
break
if not got_it:
self.warn("standard file not found: should have one of " +
', '.join(alts))
else:
if self._cs_path_exists(fn):
self.filelist.append(fn)
else:
self.warn("standard file '%s' not found" % fn)
def _add_defaults_optional(self):
optional = ['test/test*.py', 'setup.cfg']
for pattern in optional:
files = filter(os.path.isfile, glob(pattern))
self.filelist.extend(files)
def _add_defaults_python(self):
# build_py is used to get:
# - python modules
# - files defined in package_data
build_py = self.get_finalized_command('build_py')
# getting python files
if self.distribution.has_pure_modules():
self.filelist.extend(build_py.get_source_files())
# getting package_data files
# (computed in build_py.data_files by build_py.finalize_options)
for pkg, src_dir, build_dir, filenames in build_py.data_files:
for filename in filenames:
self.filelist.append(os.path.join(src_dir, filename))
def _add_defaults_data_files(self):
# getting distribution.data_files
if self.distribution.has_data_files():
for item in self.distribution.data_files:
if isinstance(item, str):
# plain file
item = convert_path(item)
if os.path.isfile(item):
self.filelist.append(item)
else:
# a (dirname, filenames) tuple
dirname, filenames = item
for f in filenames:
f = convert_path(f)
if os.path.isfile(f):
self.filelist.append(f)
def _add_defaults_ext(self):
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
self.filelist.extend(build_ext.get_source_files())
def _add_defaults_c_libs(self):
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.filelist.extend(build_clib.get_source_files())
def _add_defaults_scripts(self):
if self.distribution.has_scripts():
build_scripts = self.get_finalized_command('build_scripts')
self.filelist.extend(build_scripts.get_source_files())
if hasattr(sdist.sdist, '_add_defaults_standards'):
# disable the functionality already available upstream
class sdist_add_defaults:
pass
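# Usage sketch (illustrative, not part of the original module): the mix-in is
# intended to be combined with setuptools' sdist command, e.g.:
#
#   from setuptools.command.sdist import sdist as _sdist
#
#   class sdist(sdist_add_defaults, _sdist):
#       """sdist that picks up the Python 3.7-style default file set."""
#
# On interpreters whose distutils already provides _add_defaults_standards,
# the hasattr guard above turns the mix-in into a no-op.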
|
viewdy/phantomjs2
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/changelog_unittest.py
|
122
|
# Copyright (C) 2010 Apple Inc. All rights reserved.
# Copyright (C) 2011 Patrick Gansterer <paroga@paroga.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for changelog.py."""
import changelog
import unittest2 as unittest
class ChangeLogCheckerTest(unittest.TestCase):
"""Tests ChangeLogChecker class."""
def assert_no_error(self, lines_to_check, changelog_data):
def handle_style_error(line_number, category, confidence, message):
self.fail('Unexpected error: %d %s %d %s for\n%s' % (line_number, category, confidence, message, changelog_data))
self.lines_to_check = set(lines_to_check)
checker = changelog.ChangeLogChecker('ChangeLog', handle_style_error, self.mock_should_line_be_checked)
checker.check(changelog_data.split('\n'))
def assert_error(self, expected_line_number, lines_to_check, expected_category, changelog_data):
self.had_error = False
def handle_style_error(line_number, category, confidence, message):
self.had_error = True
self.assertEqual(expected_line_number, line_number)
self.assertEqual(expected_category, category)
self.lines_to_check = set(lines_to_check)
checker = changelog.ChangeLogChecker('ChangeLog', handle_style_error, self.mock_should_line_be_checked)
checker.check(changelog_data.split('\n'))
self.assertTrue(self.had_error)
def mock_handle_style_error(self):
pass
def mock_should_line_be_checked(self, line_number):
return line_number in self.lines_to_check
def test_init(self):
checker = changelog.ChangeLogChecker('ChangeLog', self.mock_handle_style_error, self.mock_should_line_be_checked)
self.assertEqual(checker.file_path, 'ChangeLog')
self.assertEqual(checker.handle_style_error, self.mock_handle_style_error)
self.assertEqual(checker.should_line_be_checked, self.mock_should_line_be_checked)
def test_missing_bug_number(self):
self.assert_error(1, range(1, 20), 'changelog/bugnumber',
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug\n')
self.assert_error(1, range(1, 20), 'changelog/bugnumber',
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug\n'
' http://bugs.webkit.org/show_bug.cgi?id=\n')
self.assert_error(1, range(1, 20), 'changelog/bugnumber',
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug\n'
' https://bugs.webkit.org/show_bug.cgi?id=\n')
self.assert_error(1, range(1, 20), 'changelog/bugnumber',
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug\n'
' http://webkit.org/b/\n')
self.assert_error(1, range(1, 20), 'changelog/bugnumber',
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug'
'\n'
' http://trac.webkit.org/changeset/12345\n')
self.assert_error(2, range(2, 5), 'changelog/bugnumber',
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
' Example bug\n'
' https://bugs.webkit.org/show_bug.cgi\n'
'\n'
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
' Another change\n')
self.assert_error(2, range(2, 6), 'changelog/bugnumber',
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
' Example bug\n'
' More text about bug.\n'
'\n'
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' No bug in this change.\n')
def test_file_descriptions(self):
self.assert_error(5, range(1, 20), 'changelog/filechangedescriptionwhitespace',
'2011-01-01 Dmitry Lomov <dslomov@google.com>\n'
' ExampleBug\n'
' http://bugs.webkit.org/show_bug.cgi?id=12345\n'
'\n'
' * Source/Tools/random-script.py:Fixed')
self.assert_error(6, range(1, 20), 'changelog/filechangedescriptionwhitespace',
'2011-01-01 Dmitry Lomov <dslomov@google.com>\n'
' ExampleBug\n'
' http://bugs.webkit.org/show_bug.cgi?id=12345\n'
'\n'
' * Source/Tools/another-file: Done\n'
' * Source/Tools/random-script.py:Fixed\n'
' * Source/Tools/one-morefile:\n')
def test_no_new_tests(self):
self.assert_error(5, range(1, 20), 'changelog/nonewtests',
'2011-01-01 Dmitry Lomov <dslomov@google.com>\n'
' ExampleBug\n'
' http://bugs.webkit.org/show_bug.cgi?id=12345\n'
'\n'
' No new tests. (OOPS!)\n'
' * Source/Tools/random-script.py: Fixed')
def test_no_error(self):
self.assert_no_error([],
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example ChangeLog entry out of range\n'
' http://example.com/\n')
self.assert_no_error([],
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug\n'
' http://bugs.webkit.org/show_bug.cgi?id=12345\n')
self.assert_no_error(range(1, 20),
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug\n'
' http://bugs.webkit.org/show_bug.cgi?id=12345\n')
self.assert_no_error(range(1, 20),
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug\n'
' https://bugs.webkit.org/show_bug.cgi?id=12345\n')
self.assert_no_error(range(1, 20),
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Example bug\n'
' http://webkit.org/b/12345\n')
self.assert_no_error(range(1, 20),
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Unreview build fix for r12345.\n')
self.assert_no_error(range(1, 20),
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Fix build after a bad change.\n')
self.assert_no_error(range(1, 20),
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
'\n'
' Fix example port build.\n')
self.assert_no_error(range(2, 6),
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
' Example bug\n'
' https://bugs.webkit.org/show_bug.cgi?id=12345\n'
'\n'
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
' No bug here!\n')
self.assert_no_error(range(1, 20),
'2011-01-01 Patrick Gansterer <paroga@paroga.com>\n'
' Example bug\n'
' https://bugs.webkit.org/show_bug.cgi?id=12345\n'
' * Source/WebKit/foo.cpp: \n'
' * Source/WebKit/bar.cpp:\n'
' * Source/WebKit/foobar.cpp: Description\n')
|
dcowden/cadquery-freecad-module
|
refs/heads/master
|
CadQuery/Examples/Ex009_Polylines.py
|
1
|
#This example is meant to be used from within the CadQuery module of FreeCAD.
import cadquery
import Part
#Set up our Length, Height, Width, and thickness that will be used to define the locations that the polyline
#is drawn to/thru
(L, H, W, t) = (100.0, 20.0, 20.0, 1.0)
#Define the locations that the polyline will be drawn to/thru
pts = [
(0, H/2.0),
(W/2.0, H/2.0),
(W/2.0, (H/2.0 - t)),
(t/2.0, (H/2.0-t)),
(t/2.0, (t - H/2.0)),
(W/2.0, (t - H/2.0)),
(W/2.0, H/-2.0),
(0, H/-2.0)
]
#We generate half of the I-beam outline and then mirror it to create the full I-beam
result = cadquery.Workplane("front").polyline(pts).mirrorY().extrude(L)
#Boiler plate code to render our solid in FreeCAD's GUI
Part.show(result.toFreecad())
|
ehashman/oh-mainline
|
refs/heads/master
|
vendor/packages/docutils/test/functional/tests/_standalone_rst_defaults.py
|
18
|
# Keyword parameters passed to publish_file.
reader_name = "standalone"
parser_name = "rst"
# Settings.
settings_overrides['sectsubtitle_xform'] = 1
settings_overrides['syntax_highlight'] = 'none'
|
tracierenea/gnuradio
|
refs/heads/master
|
gr-vocoder/python/vocoder/__init__.py
|
57
|
#
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
This is the gr-vocoder package. This package includes the various
vocoder blocks in GNU Radio.
'''
import os
try:
from vocoder_swig import *
except ImportError:
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "..", "..", "swig"))
from vocoder_swig import *
from cvsd import *
|
roblad/sensmon
|
refs/heads/master
|
sensnode/decoders/gaz.py
|
1
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
import time
import datetime
import inspect
import simplejson as json
import sys
import os
import cPickle as pickle
def gaz(data):
"""Pomiar:
- swiatła,
- wlgotności
- temperatury
- ciśnienia
- stanu baterii
- napięcia beterii
>> a = "OK 2 0 0 70 1 242 0 201 38 0 15 17"
>> raw = a.split(" ")
>> weathernode(raw, "weathernode")
'{"name": "weathernode", "temp": "242", "lobat": "0", "humi": "326", "timestamp": 1364553092, "light": "0", "press": "9929", "batvol": "4367"}'
"""
#---------------------------------- pickled values: gas usage and price ---------------------
pickledir = os.path.abspath((os.path.dirname(__file__)) + '/../../cpickle')
picklefile = 'datarelay.pic'
openpicklefileread = open(pickledir + '/' + picklefile, 'rb')
get_data = pickle.load(openpicklefileread)
openpicklefileread.close()
g1 = get_data[6]
g2 = get_data[7]
a = float(data[2]) # temperature
b = int(data[3])
c = float(data[4]) # battery
d = float(data[5]) # gas reading
#e = int(data[6])
#f = int(data[7])
#g = int(data[8])
#h = int(data[9])
#i = int(data[10])
#j = int(data[11])
#k = int(data[12])
#nodeid = str(data[1])
name = inspect.stack()[0][3] # taken from the function name
timestamp = int(time.mktime(datetime.datetime.now().timetuple())) # unix timestamp
template = ({
'name':name,
#'humi': str((256 * d) + c),
'batvol': c,
'zzztemp': a,
'pulse': d,
'zuzycieoplata' : round((d * 1.97),2),
'zd' : round(g1,2),
'zdoplata' : round(g2,2),
'timestamp':timestamp
})
return dict((k,v) for (k,v) in template.iteritems())
|
ParticulateSolutions/django-sofortueberweisung
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import re
import os
import sys
def get_version(package):
"""
Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.match("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if 'tests' not in dirnames and not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
REQUIREMENTS = [
'Django>=1.8',
'xmltodict>=0.9.2',
'six>=1.10.0'
]
version = get_version('django_sofortueberweisung')
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist upload")
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
setup(
name='django-sofortueberweisung',
author='Particulate Solutions GmbH',
author_email='tech@particulate.me',
description=u'Django integration of Sofort.com',
version=version,
url='https://github.com/ParticulateSolutions/django-sofortueberweisung',
packages=get_packages('django_sofortueberweisung'),
package_data=get_package_data('django_sofortueberweisung'),
include_package_data=True,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules'],
install_requires=REQUIREMENTS,
zip_safe=False)
|
msshapira/yowsup
|
refs/heads/master
|
yowsup/layers/protocol_groups/protocolentities/iq_groups_list.py
|
20
|
from yowsup.common import YowConstants
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from .iq_groups import GroupsIqProtocolEntity
class ListGroupsIqProtocolEntity(GroupsIqProtocolEntity):
'''
<iq id="{{id}}"" type="get" to="g.us" xmlns="w:g2">
<"{{participating | owning}}"></"{{participating | owning}}">
</iq>
result (processed in iq_result_groups_list.py):
<iq type="result" from="g.us" id="{{IQ_ID}}">
<groups>
<group s_t="{{SUBJECT_TIME}}" creation="{{CREATING_TIME}}" creator="{{OWNER_JID}}" id="{{GROUP_ID}}" s_o="{{SUBJECT_OWNER_JID}}" subject="{{SUBJECT}}">
<participant jid="{{JID}}" type="admin">
</participant>
<participant jid="{{JID}}">
</participant>
</group>
<group s_t="{{SUBJECT_TIME}}" creation="{{CREATING_TIME}}" creator="{{OWNER_JID}}" id="{{GROUP_ID}}" s_o="{{SUBJECT_OWNER_JID}}" subject="{{SUBJECT}}">
<participant jid="{{JID}}" type="admin">
</participant>
</group>
</groups>
</iq>
'''
GROUP_TYPE_PARTICIPATING = "participating"
GROUP_TYPE_OWNING = "owning"
GROUPS_TYPES = (GROUP_TYPE_PARTICIPATING, GROUP_TYPE_OWNING)
def __init__(self, groupsType = GROUP_TYPE_PARTICIPATING, _id = None):
super(ListGroupsIqProtocolEntity, self).__init__(_id=_id, to = YowConstants.WHATSAPP_GROUP_SERVER, _type = "get")
self.setProps(groupsType)
def setProps(self, groupsType):
assert groupsType in self.__class__.GROUPS_TYPES,\
"Groups type must be %s, not %s" % (" or ".join(self.__class__.GROUPS_TYPES), groupsType)
self.groupsType = groupsType
def toProtocolTreeNode(self):
node = super(ListGroupsIqProtocolEntity, self).toProtocolTreeNode()
node.addChild(ProtocolTreeNode(self.groupsType))
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = super(ListGroupsIqProtocolEntity, ListGroupsIqProtocolEntity).fromProtocolTreeNode(node)
entity.__class__ = ListGroupsIqProtocolEntity
entity.setProps(node.getChild(0).tag)
return entity
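# Usage sketch (illustrative; assumes a yowsup stack that can send entities):
#   entity = ListGroupsIqProtocolEntity(
#       ListGroupsIqProtocolEntity.GROUP_TYPE_PARTICIPATING)
#   node = entity.toProtocolTreeNode()  # serializes to the <iq> shown above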
|
idigbio-api-hackathon/LifemapperQgis
|
refs/heads/master
|
lifemapperTools/LmShared/LmCommon/common/lmAttObject.py
|
1
|
"""
@summary: Module containing some base Lifemapper objects
@author: CJ Grady
@status: beta
@version: 2.0
@license: gpl2
@copyright: Copyright (C) 2015, University of Kansas Center for Research
Lifemapper Project, lifemapper [at] ku [dot] edu,
Biodiversity Institute,
1345 Jayhawk Boulevard, Lawrence, Kansas, 66045, USA
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
"""
# .............................................................................
class LmAttObj(object):
"""
@summary: Object that includes attributes. Compare this to the attributes
attached to XML elements and the object members would be the
sub-elements of the element.
@note: <someElement att1="value1" att2="value2">
<subEl1>1</subEl1>
<subEl2>banana</subEl2>
</someElement>
translates to:
obj.subEl1 = 1
obj.subEl2 = 'banana'
obj.getAttributes() = {'att1': 'value1', 'att2': 'value2'}
obj.att1 = 'value1'
obj.att2 = 'value2'
"""
# ......................................
def __init__(self, attrib={}, name="LmObj"):
"""
@summary: Constructor
@param attrib: (optional) Dictionary of attributes to attach to the
object
@param name: (optional) The name of the object (useful for serialization)
"""
self.__name__ = name
# Use a fresh dict per instance to avoid the shared-mutable-default bug
self._attrib = attrib if attrib is not None else {}
# ......................................
def __getattr__(self, name):
"""
@summary: Called if the default getattribute method fails. This will
attempt to return the value from the attribute dictionary
@param name: The name of the attribute to return
@return: The value of the attribute
"""
return self._attrib[name]
# ......................................
def getAttributes(self):
"""
@summary: Gets the dictionary of attributes attached to the object
@return: The attribute dictionary
@rtype: Dictionary
"""
return self._attrib
# ......................................
def setAttribute(self, name, value):
"""
@summary: Sets the value of an attribute in the attribute dictionary
@param name: The name of the attribute to set
@param value: The new value for the attribute
"""
self._attrib[name] = value
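# Example (sketch), mirroring the XML in the class docstring above:
#   obj = LmAttObj({'att1': 'value1', 'att2': 'value2'}, name='someElement')
#   obj.subEl1 = 1
#   obj.att1            # -> 'value1', resolved via __getattr__
#   obj.getAttributes() # -> {'att1': 'value1', 'att2': 'value2'}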
# .............................................................................
class LmAttList(list, LmAttObj):
"""
@summary: Extension to lists that adds attributes
@note: obj = LmAttList([1, 2, 3], {'id': 'attList'})
print obj[0] >> 1
obj.append('apple')
print obj >> [1, 2, 3, 'apple']
print obj.id >> 'attList'
"""
def __init__(self, items=[], attrib={}, name="LmList"):
"""
@summary: Constructor
@param items: (optional) A list of initial values for the list
@param attrib: (optional) Dictionary of attributes to attach to the list
@param name: (optional) The name of the object (useful for serialization)
"""
LmAttObj.__init__(self, attrib, name)
# Avoid mutable default arguments; treat None as an empty list
for item in (items if items is not None else []):
self.append(item)
|
openstack/vitrage
|
refs/heads/master
|
vitrage/evaluator/template_functions/__init__.py
|
1
|
# Copyright 2019 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Function names
GET_ATTR = 'get_attr'
GET_PARAM = 'get_param'
|
int19h/PTVS
|
refs/heads/master
|
Python/Product/TestAdapter/testlauncher.py
|
3
|
# Python Tools for Visual Studio
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABILITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
import io
import os
import sys
import traceback
def main():
cwd, testRunner, secret, port, mixed_mode, coverage_file, test_file, args = parse_argv()
load_debugger(secret, port, mixed_mode)
os.chdir(cwd)
sys.path[0] = cwd
run(testRunner, coverage_file, test_file, args)
def parse_argv():
"""Parses arguments for use with the test launcher.
Arguments are:
1. Working directory.
2. Test runner, `pytest` or `nose`
3. debugSecret
4. debugPort
5. Mixed-mode debugging (non-empty string to enable, empty string to disable)
6. Enable code coverage and specify filename
7. TestFile, with a list of testIds to run
8. Rest of the arguments are passed into the test runner.
"""
return (sys.argv[1], sys.argv[2], sys.argv[3], int(sys.argv[4]), sys.argv[5], sys.argv[6], sys.argv[7], sys.argv[8:])
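# Example command line (hypothetical values, matching the positions above):
#   python testlauncher.py C:\proj pytest "" 5678 "" "" tests.txt -q
# which attaches the new debugger on port 5678, runs the ids listed in
# tests.txt, and passes -q through to pytest.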
def load_debugger(secret, port, mixed_mode):
try:
if secret and port:
# Start tests with legacy debugger
import ptvsd
from ptvsd.debugger import DONT_DEBUG, DEBUG_ENTRYPOINTS, get_code
from ptvsd import enable_attach, wait_for_attach
DONT_DEBUG.append(os.path.normcase(__file__))
DEBUG_ENTRYPOINTS.add(get_code(main))
enable_attach(secret, ('127.0.0.1', port), redirect_output = True)
wait_for_attach()
elif port:
# Start tests with new debugger
import debugpy
debugpy.listen(('localhost', port))
debugpy.wait_for_client()
elif mixed_mode:
# For mixed-mode attach, there's no ptvsd and hence no wait_for_attach(),
# so we have to use Win32 API in a loop to do the same thing.
from time import sleep
from ctypes import windll, c_char
while True:
if windll.kernel32.IsDebuggerPresent() != 0:
break
sleep(0.1)
try:
debugger_helper = windll['Microsoft.PythonTools.Debugger.Helper.x86.dll']
except WindowsError:
debugger_helper = windll['Microsoft.PythonTools.Debugger.Helper.x64.dll']
isTracing = c_char.in_dll(debugger_helper, "isTracing")
while True:
if isTracing.value != 0:
break
sleep(0.1)
except:
traceback.print_exc()
print('''
Internal error detected. Please copy the above traceback and report at
https://github.com/Microsoft/vscode-python/issues/new
Press Enter to close. . .''')
try:
raw_input()
except NameError:
input()
sys.exit(1)
def run(testRunner, coverage_file, test_file, args):
"""Runs the test
testRunner -- test runner to be used `pytest` or `nose`
args -- arguments passed into the test runner
"""
if test_file and os.path.exists(test_file):
with io.open(test_file, 'r', encoding='utf-8') as tests:
args.extend(t.strip() for t in tests)
cov = None
try:
if coverage_file:
try:
import coverage
cov = coverage.coverage(coverage_file)
cov.load()
cov.start()
except:
pass
if testRunner == 'pytest':
import pytest
patch_translate_non_printable()
_plugin = TestCollector()
pytest.main(args, [_plugin])
else:
import nose
nose.run(argv=args)
sys.exit(0)
finally:
# sys.exit() raises SystemExit, so the coverage teardown must run from the
# finally block; placed after the try statement it would be unreachable.
if cov is not None:
cov.stop()
cov.save()
cov.xml_report(outfile = coverage_file + '.xml', omit=__file__)
#note: this must match adapter\pytest\_discovery.py
def patch_translate_non_printable():
import _pytest.compat
translate_non_printable = getattr(_pytest.compat, "_translate_non_printable")
if translate_non_printable:
def _translate_non_printable_patched(s):
s = translate_non_printable(s)
s = s.replace(':', '/:') # pytest testcase not found error and VS TestExplorer FQN parsing
s = s.replace('.', '_') # VS TestExplorer FQN parsing
s = s.replace('\n', '/n') # pytest testcase not found error
s = s.replace('\\', '/') # pytest testcase not found error, fixes cases (actual backslash followed by n)
s = s.replace('\r', '/r') # pytest testcase not found error
return s
_pytest.compat._translate_non_printable = _translate_non_printable_patched
else:
print("ERROR: failed to patch pytest, _pytest.compat._translate_non_printable")
class TestCollector(object):
"""This is a pytest plugin that prevents notfound errors from ending execution of tests."""
def __init__(self, tests=None):
pass
#Pytest Hook
def pytest_collectstart(self, collector):
self.patch_collect_test_notfound(collector)
def patch_collect_test_notfound(self, collector):
originalCollect = getattr(collector, "collect")
if not originalCollect:
print("ERROR: failed to patch pytest, collector.collect")
pass
# Fix for RunAll in VS, when a single parameterized test isn't found
# Wrap the actual collect() call and clear any _notfound errors to prevent exceptions that would skip the remaining tests
# We still print the same errors to the user
def collect_wrapper():
for item in originalCollect():
yield item
notfound = getattr(collector, '_notfound', [])
if notfound:
for arg, exc in notfound:
line = "(no name {!r} in any of {!r})".format(arg, exc.args[0])
print("ERROR: not found: {}\n{}".format(arg, line))
#clear errors
collector._notfound = []
collector.collect = collect_wrapper
if __name__ == '__main__':
main()
|
datakortet/django-cms
|
refs/heads/master
|
cms/migrations/0022_login_required_added.py
|
385
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Dummy migration
pass
def backwards(self, orm):
# Dummy migration
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'})
},
'auth.permission': {
'Meta': {
'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)",
'object_name': 'Permission'},
'codename': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['contenttypes.ContentType']"}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [],
{'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Group']", 'symmetrical': 'False',
'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'password': (
'django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': (
'django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.CMSPlugin']", 'null': 'True',
'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': "orm['sites.Site']",
'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')",
'object_name': 'Page'},
'changed_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'created_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'limit_visibility_in_menu': (
'django.db.models.fields.SmallIntegerField', [],
{'default': 'None', 'null': 'True', 'db_index': 'True',
'blank': 'True'}),
'login_required': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '80',
'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'children'",
'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['cms.Placeholder']",
'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'published': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'publisher_public': (
'django.db.models.fields.related.OneToOneField', [],
{'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True',
'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '40', 'null': 'True',
'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'template': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_descendants': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_page': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.User']"})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')",
'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [],
{'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [],
{'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.User']", 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': (
'django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': ['auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_users'",
'to': "orm['auth.User']"}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['auth.User']", 'unique': 'True',
'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_usergroups'",
'to': "orm['auth.User']"}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['auth.Group']", 'unique': 'True',
'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': (
'django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)",
'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '200',
'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': (
'django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': (
'django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"},
'app_label': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site',
'db_table': "'django_site'"},
'domain': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
|
unreal666/outwiker
|
refs/heads/master
|
src/outwiker/gui/editorfilesdroptarget.py
|
2
|
# -*- coding: utf-8 -*-
import os.path
import wx
from outwiker.core.attachment import Attachment
class EditorFilesDropTarget(wx.FileDropTarget):
"""
Base class to drag files to the editors
"""
def __init__(self, application, editor):
wx.FileDropTarget.__init__(self)
self._application = application
self._editor = editor
self._editor.SetDropTarget(self)
def destroy(self):
self._editor.SetDropTarget(None)
self._editor = None
def OnDropFiles(self, x, y, files):
assert self._application.selectedPage is not None
if len(files) == 1 and '\n' in files[0]:
files = files[0].split('\n')
file_protocol = 'file://'
# Prepare absolute path for attach folder
attach = Attachment(self._application.selectedPage)
attach_path = os.path.realpath(
os.path.abspath(attach.getAttachPath(False)))
if not attach_path.endswith(os.sep):
attach_path += os.sep
correctedFiles = []
is_attached = False
for fname in files:
if not fname.strip():
continue
# Remove file:// protocol
if fname.startswith(file_protocol):
fname = fname[len(file_protocol):]
corrected_fname = os.path.realpath(os.path.abspath(fname))
# Is attached file?
prefix = os.path.commonprefix([corrected_fname, attach_path])
if prefix == attach_path:
is_attached = True
corrected_fname = corrected_fname[len(prefix):]
correctedFiles.append(corrected_fname)
if is_attached:
self._application.onAttachmentPaste(correctedFiles)
else:
text = ' '.join(correctedFiles)
self._editor.replaceText(text)
return True
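# Wiring sketch (illustrative; assumes an outwiker Application and a wx-based
# editor control):
#   drop_target = EditorFilesDropTarget(application, editor)
#   ...
#   drop_target.destroy()  # detach before the editor itself is destroyed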
|
Valera1978/android_kernel_samsung_viennalte
|
refs/heads/cm14.0
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack, drawn as a textual but
hierarchical tree of calls. Only the function names and call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait for some time, but not too long; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If no such parent exists,
create the function as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
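# Illustrative example: a function-tracer line such as
#   "<idle>-0 [001] 1234.56789: do_IRQ <-ret_from_intr"
# parses to ('1234.56789', 'do_IRQ', 'ret_from_intr'),
# i.e. (calltime, callee, caller).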
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
insideyourgovernment/insideyourgovernment_api
|
refs/heads/master
|
crawlers/socrata_dataset_catalog.py
|
1
|
from joblib import Parallel, delayed
import multiprocessing
import traceback
import requests
num_cores = multiprocessing.cpu_count()*20
import rethinkdb as r
import dateutil
import dateutil.parser
from pytz import timezone
from datetime import date
from datetime import datetime
tz = timezone('America/Los_Angeles')
def run_count(i, theid, api_url, app_token, tables_list, d):
conn = r.connect( "localhost", 28015).repl()
count_url = '%s?$select=count(*)&$$app_token=%s' % (api_url, app_token)
count_data = None
try:
count_data = requests.get(count_url, verify=False).json()
number_of_rows = int(count_data[0][count_data[0].keys()[0]]) # sometimes key is count_1 instead of count
r.db('public').table('datasets').get(theid).update({"number_of_rows": int(number_of_rows), "is_number_of_rows_error": False}).run(conn, noreply=True)
print i, theid, int(number_of_rows)
except Exception, err:
r.db('public').table('datasets').get(theid).update({"is_number_of_rows_error": True, "number_of_rows_error": traceback.format_exc()}).run(conn, noreply=True)
print count_url
print count_data, traceback.print_exc()
url = '%s?$select=:created_at&$order=:created_at&$limit=1&$$app_token=%s' % (api_url, app_token)
try:
created_at = requests.get(url).json()[0][':created_at']
r.db('public').table('datasets').get(theid).update({"created_at": created_at}).run(conn, noreply=True)
except Exception, err:
print url, traceback.print_exc()
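# Example of the SoQL count query built above (illustrative URL; token redacted):
#   http://host/resource/abcd-1234.json?$select=count(*)&$$app_token=...
# which typically returns [{"count": "12345"}] (or a "count_1" key, as noted above).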
def do():
import rethinkdb as r
import traceback
conn = r.connect( "localhost", 28015).repl()
import requests
app_token = r.db('nonpublic').table('third_party_creds').get('socrata').run()['app_token']
results = []
for i in range(10):
results.extend(requests.get('http://api.us.socrata.com/api/catalog/v1?only=datasets&limit=10000&offset='+str(10000*i)).json()['results'])
data = results
print 'number_of_datasets', len(data)
modified_data = []
inputs = []
tables_list = r.db('public').table_list().run(conn)
for i, row in enumerate(data):
d = {}
for key in row.keys():
if isinstance(row[key], dict):
d.update(row[key])
else:
d[key] = row[key]
d['api_url'] = d['permalink'].replace('https', 'http').replace('/d/', '/resource/') + '.json'
d['api_url'] = d['api_url'][:-14]+d['id']+'.json'
inputs.append([i, d['id'], d['api_url'], app_token, tables_list, d])
modified_data.append(d)
print 'trying insert'
for i in range(len(data)/200+1):
t = r.db('public').table('datasets').insert(modified_data[i*200:(i+1)*200]).run(conn, conflict='update', noreply=True)
results = Parallel(n_jobs=num_cores)(delayed(run_count)(*inp) for inp in inputs)
while True:
do()
|
mlperf/training_results_v0.6
|
refs/heads/master
|
Google/benchmarks/transformer/implementations/tpu-v3-1024-transformer/dataset_preproc/data_generators/lm1b_imdb.py
|
7
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for LM1B and IMDb combined data-set."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.data_generators import imdb
from tensor2tensor.data_generators import lm1b
from tensor2tensor.data_generators import multi_problem
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import registry
@registry.register_problem
class LanguagemodelLm1bSentimentIMDB(multi_problem.MultiProblem):
"""LM1b and IMDb mixed problem class for multitask learning."""
def __init__(self, was_reversed=False, was_copy=False):
super(LanguagemodelLm1bSentimentIMDB, self).__init__(was_reversed, was_copy)
self.task_list.append(lm1b.LanguagemodelLm1bCharacters())
self.task_list.append(imdb.SentimentIMDBCharacters())
@property
def vocab_type(self):
return text_problems.VocabType.CHARACTER
|
orangeduck/PyAutoC
|
refs/heads/master
|
Python27/Lib/lib2to3/tests/data/py2_test_grammar.py
|
285
|
# Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
# NOTE: When you run this test as a script from the command line, you
# get warnings about certain hex/oct constants. Since those are
# issued by the parser, you can't suppress them by adding a
# filterwarnings() call to this module. Therefore, to shut up the
# regression test, the filterwarnings() call has been added to
# regrtest.py.
from test.test_support import run_unittest, check_syntax_error
import unittest
import sys
# testing import *
from sys import *
class TokenTests(unittest.TestCase):
def testBackslash(self):
# Backslash means line continuation:
x = 1 \
+ 1
self.assertEquals(x, 2, 'backslash for line continuation')
# Backslash does not mean continuation in comments :\
x = 0
self.assertEquals(x, 0, 'backslash ending comment')
def testPlainIntegers(self):
self.assertEquals(0xff, 255)
self.assertEquals(0377, 255)
self.assertEquals(2147483647, 017777777777)
# "0x" is not a valid literal
self.assertRaises(SyntaxError, eval, "0x")
from sys import maxint
if maxint == 2147483647:
self.assertEquals(-2147483647-1, -020000000000)
# XXX -2147483648
self.assert_(037777777777 > 0)
self.assert_(0xffffffff > 0)
for s in '2147483648', '040000000000', '0x100000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
elif maxint == 9223372036854775807:
self.assertEquals(-9223372036854775807-1, -01000000000000000000000)
self.assert_(01777777777777777777777 > 0)
self.assert_(0xffffffffffffffff > 0)
for s in '9223372036854775808', '02000000000000000000000', \
'0x10000000000000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
else:
self.fail('Weird maxint value %r' % maxint)
def testLongIntegers(self):
x = 0L
x = 0l
x = 0xffffffffffffffffL
x = 0xffffffffffffffffl
x = 077777777777777777L
x = 077777777777777777l
x = 123456789012345678901234567890L
x = 123456789012345678901234567890l
def testFloats(self):
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
def testStringLiterals(self):
x = ''; y = ""; self.assert_(len(x) == 0 and x == y)
x = '\''; y = "'"; self.assert_(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; self.assert_(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
self.assert_(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
self.assert_(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
self.assertEquals(x, y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''
self.assertEquals(x, y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"
self.assertEquals(x, y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'
self.assertEquals(x, y)
class GrammarTests(unittest.TestCase):
# single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
# XXX can't test in a script -- this rule is only used when interactive
# file_input: (NEWLINE | stmt)* ENDMARKER
# Being tested as this very moment this very module
# expr_input: testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
def testEvalInput(self):
# testlist ENDMARKER
x = eval('1, 0 or 1')
def testFuncdef(self):
### 'def' NAME parameters ':' suite
### parameters: '(' [varargslist] ')'
### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
### | ('**'|'*' '*') NAME)
### | fpdef ['=' test] (',' fpdef ['=' test])* [',']
### fpdef: NAME | '(' fplist ')'
### fplist: fpdef (',' fpdef)* [',']
### arglist: (argument ',')* (argument | '*' test [',' '**' test] | '**' test)
### argument: [test '='] test # Really [keyword '='] test
def f1(): pass
f1()
f1(*())
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
def f4(two, (compound, (argument, list))): pass
def f5((compound, first), two): pass
self.assertEquals(f2.func_code.co_varnames, ('one_argument',))
self.assertEquals(f3.func_code.co_varnames, ('two', 'arguments'))
if sys.platform.startswith('java'):
self.assertEquals(f4.func_code.co_varnames,
('two', '(compound, (argument, list))', 'compound', 'argument',
'list',))
self.assertEquals(f5.func_code.co_varnames,
('(compound, first)', 'two', 'compound', 'first'))
else:
self.assertEquals(f4.func_code.co_varnames,
('two', '.1', 'compound', 'argument', 'list'))
self.assertEquals(f5.func_code.co_varnames,
('.0', 'two', 'compound', 'first'))
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
def v3(a, (b, c), *rest): return a, b, c, rest
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
f4(1, (2, (3, 4)))
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
v3(1,(2,3))
v3(1,(2,3),4)
v3(1,(2,3),4,5,6,7,8,9,0)
# ceval unpacks the formal arguments into the first argcount names;
# thus, the names nested inside tuples must appear after these names.
if sys.platform.startswith('java'):
self.assertEquals(v3.func_code.co_varnames, ('a', '(b, c)', 'rest', 'b', 'c'))
else:
self.assertEquals(v3.func_code.co_varnames, ('a', '.1', 'rest', 'b', 'c'))
self.assertEquals(v3(1, (2, 3), 4), (1, 2, 3, (4,)))
def d01(a=1): pass
d01()
d01(1)
d01(*(1,))
d01(**{'a':2})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
d11(1, **{'b':2})
def d21(a, b, c=1): pass
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
def d02(a=1, b=2): pass
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
def d12(a, b=1, c=2): pass
d12(1)
d12(1, 2)
d12(1, 2, 3)
def d22(a, b, c=1, d=2): pass
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
def d01v(a=1, *rest): pass
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
def d11v(a, b=1, *rest): pass
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
def d21v(a, b, c=1, *rest): pass
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
def d02v(a=1, b=2, *rest): pass
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
def d12v(a, b=1, c=2, *rest): pass
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
def d22v(a, b, c=1, d=2, *rest): pass
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
def d31v((x)): pass
d31v(1)
def d32v((x,)): pass
d32v((1,))
# keyword arguments after *arglist
def f(*args, **kwargs):
return args, kwargs
self.assertEquals(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
{'x':2, 'y':5}))
self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
# Check ast errors in *args and *kwargs
check_syntax_error(self, "f(*g(1=2))")
check_syntax_error(self, "f(**g(1=2))")
def testLambdef(self):
### lambdef: 'lambda' [varargslist] ':' test
l1 = lambda : 0
self.assertEquals(l1(), 0)
l2 = lambda : a[d] # XXX just testing the expression
l3 = lambda : [2 < x for x in [-1, 3, 0L]]
self.assertEquals(l3(), [0, 1, 0])
l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
self.assertEquals(l4(), 1)
l5 = lambda x, y, z=2: x + y + z
self.assertEquals(l5(1, 2), 5)
self.assertEquals(l5(1, 2, 3), 6)
check_syntax_error(self, "lambda x: x = 2")
check_syntax_error(self, "lambda (None,): None")
### stmt: simple_stmt | compound_stmt
# Tested below
def testSimpleStmt(self):
### simple_stmt: small_stmt (';' small_stmt)* [';']
x = 1; pass; del x
def foo():
# verify statements that end with semi-colons
x = 1; pass; del x;
foo()
### small_stmt: expr_stmt | print_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
# Tested below
def testExprStmt(self):
# (exprlist '=')* exprlist
1
1, 2, 3
x = 1
x = 1, 2, 3
x = y = z = 1, 2, 3
x, y, z = 1, 2, 3
abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
check_syntax_error(self, "x + 1 = 1")
check_syntax_error(self, "a + 1 = b + 2")
def testPrintStmt(self):
# 'print' (test ',')* [test]
import StringIO
# Can't test printing to real stdout without comparing output
# which is not available in unittest.
save_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
print 1, 2, 3
print 1, 2, 3,
print
print 0 or 1, 0 or 1,
print 0 or 1
# 'print' '>>' test ','
print >> sys.stdout, 1, 2, 3
print >> sys.stdout, 1, 2, 3,
print >> sys.stdout
print >> sys.stdout, 0 or 1, 0 or 1,
print >> sys.stdout, 0 or 1
# test printing to an instance
class Gulp:
def write(self, msg): pass
gulp = Gulp()
print >> gulp, 1, 2, 3
print >> gulp, 1, 2, 3,
print >> gulp
print >> gulp, 0 or 1, 0 or 1,
print >> gulp, 0 or 1
# test print >> None
def driver():
oldstdout = sys.stdout
sys.stdout = Gulp()
try:
tellme(Gulp())
tellme()
finally:
sys.stdout = oldstdout
# we should see this once
def tellme(file=sys.stdout):
print >> file, 'hello world'
driver()
# we should not see this at all
def tellme(file=None):
print >> file, 'goodbye universe'
driver()
self.assertEqual(sys.stdout.getvalue(), '''\
1 2 3
1 2 3
1 1 1
1 2 3
1 2 3
1 1 1
hello world
''')
sys.stdout = save_stdout
# syntax errors
check_syntax_error(self, 'print ,')
check_syntax_error(self, 'print >> x,')
def testDelStmt(self):
# 'del' exprlist
abc = [1,2,3]
x, y, z = abc
xyz = x, y, z
del abc
del x, y, (z, xyz)
def testPassStmt(self):
# 'pass'
pass
# flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
def testBreakStmt(self):
# 'break'
while 1: break
def testContinueStmt(self):
# 'continue'
i = 1
while i: i = 0; continue
msg = ""
while not msg:
msg = "ok"
try:
continue
msg = "continue failed to continue inside try"
except:
msg = "continue inside try called except block"
if msg != "ok":
self.fail(msg)
msg = ""
while not msg:
msg = "finally block not called"
try:
continue
finally:
msg = "ok"
if msg != "ok":
self.fail(msg)
def test_break_continue_loop(self):
# This test warrants an explanation. It is a test specifically for SF bugs
# #463359 and #462937. The bug is that a 'break' statement executed or
# exception raised inside a try/except inside a loop, *after* a continue
# statement has been executed in that loop, will cause the wrong number of
# arguments to be popped off the stack and the instruction pointer reset to
# a very small number (usually 0.) Because of this, the following test
        # *must* be written as a function, and the tracking vars *must* be function
# arguments with default values. Otherwise, the test will loop and loop.
def test_inner(extra_burning_oil = 1, count=0):
big_hippo = 2
while big_hippo:
count += 1
try:
if extra_burning_oil and big_hippo == 1:
extra_burning_oil -= 1
break
big_hippo -= 1
continue
except:
raise
if count > 2 or big_hippo <> 1:
self.fail("continue then break in try/except in loop broken!")
test_inner()
def testReturn(self):
# 'return' [testlist]
def g1(): return
def g2(): return 1
g1()
x = g2()
check_syntax_error(self, "class foo:return 1")
def testYield(self):
check_syntax_error(self, "class foo:yield 1")
def testRaise(self):
# 'raise' test [',' test]
try: raise RuntimeError, 'just testing'
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
def testImport(self):
# 'import' dotted_as_names
import sys
import time, sys
# 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
from time import time
from time import (time)
# not testable inside a function, but already done at top of the module
# from sys import *
from sys import path, argv
from sys import (path, argv)
from sys import (path, argv,)
def testGlobal(self):
# 'global' NAME (',' NAME)*
global a
global a, b
global one, two, three, four, five, six, seven, eight, nine, ten
def testExec(self):
# 'exec' expr ['in' expr [',' expr]]
z = None
del z
exec 'z=1+1\n'
if z != 2: self.fail('exec \'z=1+1\'\\n')
del z
exec 'z=1+1'
if z != 2: self.fail('exec \'z=1+1\'')
z = None
del z
import types
if hasattr(types, "UnicodeType"):
exec r"""if 1:
exec u'z=1+1\n'
if z != 2: self.fail('exec u\'z=1+1\'\\n')
del z
exec u'z=1+1'
if z != 2: self.fail('exec u\'z=1+1\'')"""
g = {}
exec 'z = 1' in g
if g.has_key('__builtins__'): del g['__builtins__']
if g != {'z': 1}: self.fail('exec \'z = 1\' in g')
g = {}
l = {}
import warnings
warnings.filterwarnings("ignore", "global statement", module="<string>")
exec 'global a; a = 1; b = 2' in g, l
if g.has_key('__builtins__'): del g['__builtins__']
if l.has_key('__builtins__'): del l['__builtins__']
if (g, l) != ({'a':1}, {'b':2}):
self.fail('exec ... in g (%s), l (%s)' %(g,l))
def testAssert(self):
# assert_stmt: 'assert' test [',' test]
assert 1
assert 1, 1
assert lambda x:x
assert 1, lambda x:x+1
try:
assert 0, "msg"
except AssertionError, e:
self.assertEquals(e.args[0], "msg")
else:
if __debug__:
self.fail("AssertionError not raised by assert 0")
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
def testIf(self):
# 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
def testWhile(self):
# 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
else: pass
# Issue1920: "while 0" is optimized away,
# ensure that the "else" clause is still present.
x = 0
while 0:
x = 1
else:
x = 2
self.assertEquals(x, 2)
def testFor(self):
# 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
else: pass
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n = n+1
return self.sofar[i]
n = 0
for x in Squares(10): n = n+x
if n != 285:
self.fail('for over growing sequence')
result = []
for x, in [(1,), (2,), (3,)]:
result.append(x)
self.assertEqual(result, [1, 2, 3])
def testTry(self):
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
### | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr [('as' | ',') expr]]
try:
1/0
except ZeroDivisionError:
pass
else:
pass
try: 1/0
except EOFError: pass
except TypeError as msg: pass
except RuntimeError, msg: pass
except: pass
else: pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError): pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError), msg: pass
try: pass
finally: pass
def testSuite(self):
# simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
pass
if 1:
#
#
#
pass
pass
#
pass
#
def testTest(self):
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
def testComparison(self):
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 <> 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
def testBinaryMaskOps(self):
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
def testShiftOps(self):
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
def testAdditiveOps(self):
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
def testMultiplicativeOps(self):
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
def testUnaryOps(self):
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
def testSelectors(self):
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
import sys, time
c = sys.path[0]
x = time.time()
x = sys.modules['time'].time()
a = '01234'
c = a[0]
c = a[-1]
s = a[0:5]
s = a[:5]
s = a[0:]
s = a[:]
s = a[-5:]
s = a[:-1]
s = a[-4:-3]
# A rough test of SF bug 1333982. http://python.org/sf/1333982
# The testing here is fairly incomplete.
# Test cases should include: commas with 1 and 2 colons
d = {}
d[1] = 1
d[1,] = 2
d[1,2] = 3
d[1,2,3] = 4
L = list(d)
L.sort()
self.assertEquals(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
def testAtoms(self):
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
### dictmaker: test ':' test (',' test ':' test)* [',']
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = `x`
x = `1 or 2 or 3`
self.assertEqual(`1,2`, '(1, 2)')
x = x
x = 'x'
x = 123
### exprlist: expr (',' expr)* [',']
### testlist: test (',' test)* [',']
# These have been exercised enough above
def testClassdef(self):
# 'class' NAME ['(' [testlist] ')'] ':' suite
class B: pass
class B2(): pass
class C1(B): pass
class C2(B): pass
class D(C1, C2, B): pass
class C:
def meth1(self): pass
def meth2(self, arg): pass
def meth3(self, a1, a2): pass
# decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
# decorators: decorator+
# decorated: decorators (classdef | funcdef)
def class_decorator(x):
x.decorated = True
return x
@class_decorator
class G:
pass
self.assertEqual(G.decorated, True)
def testListcomps(self):
# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
spcs = [" Apple", " Banana ", "Coco nut "]
self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco nut'])
self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
self.assertEqual([(i, s) for i in nums for s in strs],
[(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
(2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
(4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
[(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
[[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
def test_in_func(l):
return [None < x < 3 for x in l if x > 2]
self.assertEqual(test_in_func(nums), [False, False, False])
def test_nested_front():
self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
[[1, 2], [3, 4], [5, 6]])
test_nested_front()
check_syntax_error(self, "[i, s for i in nums for s in strs]")
check_syntax_error(self, "[x if y]")
suppliers = [
(1, "Boeing"),
(2, "Ford"),
(3, "Macdonalds")
]
parts = [
(10, "Airliner"),
(20, "Engine"),
(30, "Cheeseburger")
]
suppart = [
(1, 10), (1, 20), (2, 20), (3, 30)
]
x = [
(sname, pname)
for (sno, sname) in suppliers
for (pno, pname) in parts
for (sp_sno, sp_pno) in suppart
if sno == sp_sno and pno == sp_pno
]
self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
('Macdonalds', 'Cheeseburger')])
def testGenexps(self):
# generator expression tests
g = ([x for x in range(10)] for x in range(1))
self.assertEqual(g.next(), [x for x in range(10)])
try:
g.next()
self.fail('should produce StopIteration exception')
except StopIteration:
pass
a = 1
try:
g = (a for d in a)
g.next()
self.fail('should produce TypeError')
except TypeError:
pass
self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
a = [x for x in range(10)]
b = (x for x in (y for y in a))
self.assertEqual(sum(b), sum([x for x in range(10)]))
self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
check_syntax_error(self, "foo(x for x in range(10), 100)")
check_syntax_error(self, "foo(100, x for x in range(10))")
def testComprehensionSpecials(self):
        # test for outermost iterable precomputation
x = 10; g = (i for i in range(x)); x = 5
self.assertEqual(len(list(g)), 10)
        # This should hold, since we're only precomputing the outermost iterable.
x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
x = 5; t = True;
self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
# Grammar allows multiple adjacent 'if's in listcomps and genexps,
# even though it's silly. Make sure it works (ifelse broke this.)
self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
# verify unpacking single element tuples in listcomp/genexp.
self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
def test_with_statement(self):
class manager(object):
def __enter__(self):
return (1, 2)
def __exit__(self, *args):
pass
with manager():
pass
with manager() as x:
pass
with manager() as (x, y):
pass
with manager(), manager():
pass
with manager() as x, manager() as y:
pass
with manager() as x, manager():
pass
def testIfElseExpr(self):
# Test ifelse expressions in various cases
def _checkeval(msg, ret):
"helper to check that evaluation of expressions is done correctly"
print x
return ret
self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
self.assertEqual((5 and 6 if 0 else 1), 1)
self.assertEqual(((5 and 6) if 0 else 1), 1)
self.assertEqual((5 and (6 if 1 else 1)), 6)
self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
self.assertEqual((not 5 if 1 else 1), False)
self.assertEqual((not 5 if 0 else 1), 1)
self.assertEqual((6 + 1 if 1 else 2), 7)
self.assertEqual((6 - 1 if 1 else 2), 5)
self.assertEqual((6 * 2 if 1 else 4), 12)
self.assertEqual((6 / 2 if 1 else 3), 3)
self.assertEqual((6 < 4 if 0 else 2), 2)
def test_main():
run_unittest(TokenTests, GrammarTests)
if __name__ == '__main__':
test_main()
|
xenserver/xs-cim
|
refs/heads/master
|
test/pywbem-tests/MetricTests.py
|
1
|
#!/usr/bin/env python
'''Copyright (C) 2008 Citrix Systems Inc.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
=========================================================================
'''
import sys
import pywbem
from datetime import datetime, timedelta
from pywbem import CIMDateTime  # used below when building the StartTime parameter
import time
import getpass
import os
from xen_cim_operations import *
from TestSetUp import *
'''
Exercises the methods of the Xen_MetricService class to gather metrics.
This allows the caller to get historical metrics for VMs and hosts in Xport
XML format, or instantaneous metrics for devices as CIM objects.
'''
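# A minimal sketch of the call pattern these tests exercise (host_ref is a
# hypothetical CIM reference; the WBEM connection is assumed to come from
# TestSetUp):
#
#   mss = conn.EnumerateInstanceNames("Xen_MetricService")
#   in_params = {"System": host_ref, "TimeDuration": pywbem.Uint32(60)}
#   [rc, out] = conn.InvokeMethod("GetPerformanceMetricsForSystem",
#                                 mss[0], **in_params)
#   # rc == 0 indicates success; out["Metrics"] holds the Xport XML payload.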
class MetricTests(TestSetUp):
def __init__(self, Ip, userName, password):
TestSetUp.__init__(self, Ip, userName, password, False, True)
self.mss = self.conn.EnumerateInstanceNames("Xen_MetricService")
# start the PV VM, so we get some metrics
in_params = {'RequestedState':'2'}
ChangeVMState(self.conn, self.pv_test_vm, in_params, True, '2')
def get_historical_host_metrics (self):
self.TestBegin()
interval = 0
rc = 1
hosts = self.conn.EnumerateInstanceNames("Xen_HostComputerSystem")
for host in hosts:
in_params = { "System": host, "TimeDuration":pywbem.Uint32(60) } # last 1 hour of metrics
print 'Getting Metrics for host %s from the last 60 mins' % (host['Name'])
try:
[rc, out_params] = self.conn.InvokeMethod("GetPerformanceMetricsForSystem", self.mss[0], **in_params)
except pywbem.cim_operations.CIMError:
print 'Exception caught getting metrics'
if rc == 0:
print ' Metrics: %s' % out_params["Metrics"]
else:
print ' NO METRICS AVAILABLE'
self.TestEnd2(rc)
def get_historical_vm_metrics (self):
self.TestBegin()
vms = self.conn.EnumerateInstanceNames("Xen_ComputerSystem")
rc = 1
for vm in vms:
one_hour_delta = timedelta(hours=1)
one_hour_ago = datetime.now() - one_hour_delta
starttime = CIMDateTime(one_hour_ago)
in_params = {"System": vm, "StartTime" : starttime} # 'EndTime' defaults to 'Now'
print 'Getting Metrics for VM %s' % vm['Name']
try:
[rc, out_params] = self.conn.InvokeMethod("GetPerformanceMetricsForSystem", self.mss[0], **in_params)
except pywbem.cim_operations.CIMError:
print 'Exception caught getting metrics'
if rc == 0:
outxml = out_params["Metrics"]
print ' Metrics: %s' % outxml
else:
print ' NO METRICS AVAILABLE'
self.TestEnd2(rc)
def test_instantaneous_metrics (self):
self.TestBegin()
rc = 0
proc_utilizations = self.conn.EnumerateInstances("Xen_ProcessorUtilization")
print 'Xen_ProcessorUtilization (VM)'
for metric in proc_utilizations:
print ' InstanceID: %s, util:%s at %s' % (metric['InstanceID'], metric['MetricValue'], metric['TimeStamp'])
host_proc_utilizations = self.conn.EnumerateInstances("Xen_HostProcessorUtilization")
print 'Xen_HostProcessorUtilization (hosts)'
for metric in host_proc_utilizations:
print ' InstanceID: %s, util:%s at %s' % (metric['InstanceID'], metric['MetricValue'], metric['TimeStamp'])
network_port_tx_thrput = self.conn.EnumerateInstances("Xen_NetworkPortTransmitThroughput")
print 'Xen_NetworkPortTransmitThroughput (VM NICs)'
for metric in network_port_tx_thrput:
print ' InstanceID: %s, util:%s at %s' % (metric['InstanceID'], metric['MetricValue'], metric['TimeStamp'])
network_port_rx_thrput = self.conn.EnumerateInstances("Xen_NetworkPortReceiveThroughput")
print 'Xen_NetworkPortReceiveThroughput (VM NICs)'
for metric in network_port_rx_thrput:
print ' InstanceID: %s, util:%s at %s' % (metric['InstanceID'], metric['MetricValue'], metric['TimeStamp'])
host_network_port_tx_thrput = self.conn.EnumerateInstances("Xen_HostNetworkPortTransmitThroughput")
print 'Xen_HostNetworkPortTransmitThroughput (Host NICs)'
for metric in host_network_port_tx_thrput:
print ' InstanceID: %s, util:%s at %s' % (metric['InstanceID'], metric['MetricValue'], metric['TimeStamp'])
host_network_port_rx_thrput = self.conn.EnumerateInstances("Xen_HostNetworkPortReceiveThroughput")
print 'Xen_HostNetworkPortReceiveThroughput (Host NICs)'
for metric in host_network_port_rx_thrput:
print ' InstanceID: %s, util:%s at %s' % (metric['InstanceID'], metric['MetricValue'], metric['TimeStamp'])
disk_reads = self.conn.EnumerateInstances("Xen_DiskReadThroughput")
print 'Xen_DiskReadThroughput (VM Disks)'
for metric in disk_reads:
print ' InstanceID: %s, util:%s at %s' % (metric['InstanceID'], metric['MetricValue'], metric['TimeStamp'])
disk_writes = self.conn.EnumerateInstances("Xen_DiskWriteThroughput")
print 'Xen_DiskWriteThroughput (VM Disks)'
for metric in disk_writes:
print ' InstanceID: %s, util:%s at %s' % (metric['InstanceID'], metric['MetricValue'], metric['TimeStamp'])
disk_read_latencies = self.conn.EnumerateInstances("Xen_DiskReadLatency")
print 'Xen_DiskReadLatency (VM Disks)'
for metric in disk_read_latencies:
print ' InstanceID: %s, util:%s at %s' % (metric['InstanceID'], metric['MetricValue'], metric['TimeStamp'])
disk_write_latencies = self.conn.EnumerateInstances("Xen_DiskWriteLatency")
print 'Xen_DiskWriteLatency (VM Disks)'
for metric in disk_write_latencies:
print ' InstanceID: %s, util:%s at %s' % (metric['InstanceID'], metric['MetricValue'], metric['TimeStamp'])
# Test is successful if we got at least one object out of enumerating these classes
if(len(proc_utilizations) > 0 and
len(host_proc_utilizations) > 0 and
len(network_port_tx_thrput) > 0 and
len(network_port_rx_thrput) > 0 and
len(host_network_port_tx_thrput) > 0 and
len(host_network_port_rx_thrput) > 0 and
len(disk_reads) > 0 and
len(disk_writes) > 0 and
len(disk_read_latencies) > 0 and
len(disk_write_latencies) > 0):
rc = 1
self.TestEnd(rc)
def LocalCleanup (self):
in_params = {'RequestedState':'4'}
ChangeVMState(self.conn, self.pv_test_vm, in_params, True, '4')
if __name__ == '__main__':
count = len(sys.argv[1:])
    if (count != 3):
        print "Wrong arg count: Must pass IP, username, and password as arguments"
        print "Count is " + str(count)
        sys.exit(1)
mt = MetricTests(sys.argv[1], sys.argv[2], sys.argv[3])
try:
mt.get_historical_host_metrics() # Get historical metrics for a Host, in Xport form
mt.get_historical_vm_metrics() # get historical metrics for a VM, in Xport form
mt.test_instantaneous_metrics() # Test all classes that represent instantaneous metrics (proc utilization, nic reads and writes/s etc)
finally:
mt.LocalCleanup()
mt.TestCleanup()
sys.exit(0)
|
CentreForResearchInAppliedLinguistics/clic
|
refs/heads/develop
|
clic/scripts/create_bookcountsjson.py
|
2
|
'''
TO CREATE bookcounts.json
Output format:
# [
# ["dickens",
# "All Dickens (15 novels)", [3835807, 1348611, 2487196]],
# [
# [
# "BH",
# "Bleak House",
# [
# 354362,
# 138453,
# 215909
# ],
# [
# 0,
# 2615,
# 5516,
# 13412,
# 18306,
# 24006,
# 32710,
# 36945,
# 45254,
# 51516,
# 55738,
# 61593,
# 67150,
# 73195,
# 81758,
# 88075,
# 91585,
# 97267,
# 104365,
# 109781,
# 115431,
# 122324,
# 127515,
# 134900,
# 142311,
# 145826,
# 150990,
# 155914,
# 160518,
# 164337,
# 170880,
# 177434,
# 182839,
# 188727,
# 194973,
# 201171,
# 207936,
# 215669,
# 219939,
# 226230,
# 230804,
# 234250,
# 237045,
# 243367,
# 246462,
# 251840,
# 255335,
# 261121,
# 267438,
# 272923,
# 277024,
# 281664,
# 286148,
# 290585,
# 299106,
# 305563,
# 308796,
# 316277,
# 321946,
# 327295,
# 332508,
# 336768,
# 340374,
# 343552,
# 348007,
# 350965,
# 352686
# ]
# ],
# ...
#
# ["ntc",
# "All 19thC (29 Non-Dickens novels)", [4513076, 1606974, 2906102]],
'''
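# A minimal consumer sketch (hypothetical usage; assumes the JSON printed by
# this script has been redirected into a file named bookcounts.json):
#
#   import json
#   with open('bookcounts.json') as f:
#       corpora = json.load(f)
#   # Entries alternate [corpus_id, label, [total, quote, nonquote]] summaries
#   # with lists of per-book [abbrev, title, counts, cumulative_word_offsets].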
import os
import re
from lxml import etree
import json
from cheshire3.document import StringDocument
from cheshire3.internal import cheshire3Root
from cheshire3.server import SimpleServer
from cheshire3.baseObjects import Session
### read info from booklist: cumulative word count in chapters
# booklist = open('/home/aezros/workspace/testClic/staticFiles_test/booklist2')
# booklist = json.load(booklist)
# for b1 in booklist:
#
session = Session()
session.database = 'db_dickens'
serv = SimpleServer(session,
os.path.join(cheshire3Root, 'configs', 'serverConfig.xml')
)
db = serv.get_object(session, session.database)
qf = db.get_object(session, 'defaultQueryFactory')
resultSetStore = db.get_object(session, 'resultSetStore')
idxStore = db.get_object(session, 'indexStore')
dnov_books = ['BH', 'BR', 'DC', 'DS', 'ED', 'GE', 'HT', 'LD', 'MC', 'NN',
'OCS', 'OMF', 'OT', 'PP', 'TTC']
list_books = ['BH', 'BR', 'DC', 'DS', 'ED', 'GE', 'HT', 'LD', 'MC', 'NN',
'OCS', 'OMF', 'OT', 'PP', 'TTC',
'AgnesG', 'Antoni', 'arma', 'cran', 'Deronda', 'dracula', 'emma', 'frank', 'jane', 'Jude',
'LadyAud', 'mary', 'NorthS', 'persuasion', 'pride', 'sybil', 'Tess', 'basker', 'Pomp', 'mill',
'dorian', 'Prof', 'native', 'alli', 'Jekyll', 'wwhite', 'vanity', 'VivianG', 'wh'
]
titles = {'BH': 'Bleak House', 'BR': 'Barnaby Rudge', 'DC': 'David Copperfield', 'DS': 'Dombey and Son',
'ED': 'The Mystery of Edwin Drood', 'GE': 'Great Expectations', 'HT': 'Hard Times', 'LD': 'Little Dorrit',
'MC': 'Martin Chuzzlewit', 'NN': 'Nicholas Nickleby', 'OCS': 'The Old Curiosity Shop', 'OMF': 'Our Mutual Friend',
'OT': 'Oliver Twist', 'PP': 'Pickwick Papers', 'TTC': 'A Tale of Two Cities',
'AgnesG': 'Agnes Grey', 'Antoni': 'Antonina, or the Fall of Rome', 'arma': 'Armadale', 'cran': 'Cranford',
'Deronda': 'Daniel Deronda', 'dracula': 'Dracula', 'emma': 'Emma', 'frank': 'Frankenstein', 'jane': 'Jane Eyre',
'Jude': 'Jude the Obscure', 'LadyAud': 'Lady Audley\'s Secret', 'mary': 'Mary Barton', 'NorthS': 'North and South',
          'persuasion': 'Persuasion', 'pride': 'Pride and Prejudice', 'sybil': 'Sybil, or The Two Nations',
'Tess': 'Tess of the D\'Urbervilles', 'basker': 'The Hound of the Baskervilles', 'Pomp': 'The Last Days of Pompeii',
'mill': 'The Mill on the Floss', 'dorian': 'The Picture of Dorian Gray', 'Prof': 'The Professor',
'native': 'The Return of the Native', 'alli': 'The Small House at Allington',
          'Jekyll': 'The Strange Case of Dr Jekyll and Mr Hyde', 'wwhite': 'The Woman in White',
'vanity': 'Vanity Fair', 'VivianG': 'Vivian Grey', 'wh': 'Wuthering Heights'
}
list_all_books = []
#list_all_books.insert(0, 'dickens')
all_dickens = 0
quotes_dickens = 0
nonquotes_dickens = 0
all_ntc = 0
quotes_ntc = 0
nonquotes_ntc = 0
within_dickens = []
within_ntc = []
for b in list_books:
query = qf.get_query(session, 'c3.book-idx = "%s"' % b)
results = db.search(session, query)
sent_idx = db.get_object(session, 'sentence-idx')
quote_idx = db.get_object(session, 'quote-idx')
nonquote_idx = db.get_object(session, 'non-quote-idx')
sent_facets = sent_idx.facets(session, results)
all_words = 0
for x in sent_facets:
all_words += x[1][2]
quote_facets = quote_idx.facets(session, results)
quote_words = 0
for x in quote_facets:
quote_words += x[1][2]
nonquote_facets = nonquote_idx.facets(session, results)
nonquote_words = 0
for x in nonquote_facets:
nonquote_words += x[1][2]
###
query = qf.get_query(session, 'c3.book-idx = "{0}"'.format(b))
results = db.search(session, query)
wordTotal = 0
wordCumulative = []
for i, r in enumerate(results):
rec = r.fetch_record(session)
tree = rec.get_dom(session).getroottree()
wordInChap = len(tree.xpath('//div/descendant::w'))
wordStartChap = wordTotal
wordTotal = wordStartChap + wordInChap
wordCumulative.append(wordStartChap)
## find title
book_title = ""
for t in titles.iteritems():
if b == t[0]:
book_title = t[1]
break
if b in dnov_books:
# update the total counts per corpus
all_dickens += all_words
quotes_dickens += quote_words
nonquotes_dickens += nonquote_words
# add the count per book
within_dickens.append([b, book_title, [all_words, quote_words, nonquote_words], wordCumulative])
else:
all_ntc += all_words
quotes_ntc += quote_words
nonquotes_ntc += nonquote_words
within_ntc.append([b, book_title, [all_words, quote_words, nonquote_words], wordCumulative])
#break
list_all_books.append(["dickens", "All Dickens (15 novels)", [all_dickens, quotes_dickens, nonquotes_dickens]])
list_all_books.append(within_dickens)
list_all_books.append(["ntc", "All 19thC (29 Non-Dickens novels)", [all_ntc, quotes_ntc, nonquotes_ntc]],)
list_all_books.append(within_ntc)
print json.dumps(list_all_books)
|
XiaodunServerGroup/xiaodun-platform
|
refs/heads/master
|
lms/lib/xblock/field_data.py
|
63
|
"""
:class:`~xblock.field_data.FieldData` subclasses used by the LMS
"""
from xblock.field_data import ReadOnlyFieldData, SplitFieldData
from xblock.fields import Scope
class LmsFieldData(SplitFieldData):
"""
A :class:`~xblock.field_data.FieldData` that
reads all UserScope.ONE and UserScope.ALL fields from `student_data`
and all UserScope.NONE fields from `authored_data`. It also prevents
writing to `authored_data`.
"""
def __init__(self, authored_data, student_data):
# Make sure that we don't repeatedly nest LmsFieldData instances
if isinstance(authored_data, LmsFieldData):
            authored_data = authored_data._authored_data  # pylint: disable=protected-access
else:
authored_data = ReadOnlyFieldData(authored_data)
self._authored_data = authored_data
self._student_data = student_data
super(LmsFieldData, self).__init__({
Scope.content: authored_data,
Scope.settings: authored_data,
Scope.parent: authored_data,
Scope.children: authored_data,
Scope.user_state_summary: student_data,
Scope.user_state: student_data,
Scope.user_info: student_data,
Scope.preferences: student_data,
})
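# A minimal usage sketch (illustrative only; DictFieldData is xblock's
# dict-backed FieldData and stands in here for the LMS's real stores):
#
#   from xblock.field_data import DictFieldData
#   field_data = LmsFieldData(DictFieldData({}), DictFieldData({}))
#   # Reads of Scope.settings go to the read-only authored side; reads and
#   # writes of Scope.user_state go to the student side.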
|
jmuhlich/indra
|
refs/heads/master
|
indra/tests/test_statements.py
|
1
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
from nose.tools import raises
from indra.preassembler.hierarchy_manager import HierarchyManager
from indra.preassembler.hierarchy_manager import hierarchies
from indra.statements import *
from indra.util import unicode_strs
# Argument checking for ActiveForms ----------------------------
def test_activitymod_sitelist_of_ints():
"""Check that mod positions specified as ints get promoted to strings."""
st = ActiveForm(Agent('MAP2K1', mods=
[ModCondition('phosphorylation', 'serine', 218),
ModCondition('phosphorylation', 'serine', 222)]),
'kinase', True)
assert not isinstance(st.agent.mods[0].position, int)
assert not isinstance(st.agent.mods[1].position, int)
assert unicode_strs(st)
def test_activitymod_string_string():
"""Check that string mod position is preserved"""
st = ActiveForm(Agent('MAP2K1', mods=
[ModCondition('phosphorylation', 'serine', '218')]),
'kinase', True)
assert not isinstance(st.agent.mods[0].position, int)
assert unicode_strs(st)
def test_activitymod_string_none():
"""Check that None mod position is preserved"""
st = ActiveForm(Agent('MAP2K1', mods=
[ModCondition('phosphorylation', 'serine', None)]),
'kinase', True)
assert (st.agent.mods[0].position is None)
assert unicode_strs(st)
def test_activitymod_nolist():
"""Make sure mod is correctly turned into a list if it's
a single ModCondition"""
mc = ModCondition('phosphorylation')
st = ActiveForm(Agent('MAP2K1', mods=mc),
'kinase', True)
assert isinstance(st.agent.mods, list)
assert unicode_strs(st)
assert unicode_strs(mc)
# Checking for exact matching (except Evidence) between Agents/stmts ---------
def test_matches():
ras = Agent('Ras')
raf = Agent('Raf')
st1 = Complex([ras, raf])
st2 = Complex([ras, raf])
assert(st1.matches(st2))
assert unicode_strs(st1)
def test_matches_key():
ras = Agent('Ras')
raf = Agent('Raf')
st1 = Complex([ras, raf])
st2 = Complex([ras, raf])
assert(st1.matches_key() == st2.matches_key())
assert unicode_strs(st1)
def test_matches_key_unicode():
ras = Agent('Ras')
rasu = Agent(u'Ras')
raf = Agent('Raf')
st1 = Complex([ras, raf])
st2 = Complex([rasu, raf])
assert(st1.matches_key() == st2.matches_key())
assert unicode_strs(st1)
assert unicode_strs(st2)
def test_matches_key_unicode2():
raf = Agent('Raf')
mek = Agent('Mek')
st1 = Phosphorylation(raf, mek, u'S')
st2 = Phosphorylation(raf, mek, 'S')
assert(st1.matches_key() == st2.matches_key())
assert unicode_strs(st1)
assert unicode_strs(st2)
def test_matches_key_unicode3():
raf = Agent('Raf')
mek = Agent('Mek')
st1 = Phosphorylation(raf, mek, 'S', u'222')
st2 = Phosphorylation(raf, mek, 'S', '222')
assert(st1.matches_key() == st2.matches_key())
assert unicode_strs(st1)
assert unicode_strs(st2)
def test_matches2():
raf = Agent('Raf')
mek = Agent('Mek')
st1 = Phosphorylation(raf, mek)
st2 = Phosphorylation(raf, mek)
assert(st1.matches(st2))
assert unicode_strs(st1)
assert unicode_strs(st2)
def test_matches_key2():
raf = Agent('Raf')
mek = Agent('Mek')
st1 = Phosphorylation(raf, mek)
st2 = Phosphorylation(raf, mek)
assert(st1.matches_key() == st2.matches_key())
assert unicode_strs(st1)
assert unicode_strs(st2)
def test_not_matches():
raf = Agent('Raf')
mek = Agent('Mek')
st1 = Phosphorylation(raf, mek)
st2 = Phosphorylation(raf, mek, 'tyrosine')
assert(not st1.matches(st2))
assert unicode_strs(st1)
assert unicode_strs(st2)
def test_not_matches_key():
raf = Agent('Raf')
mek = Agent('Mek')
st1 = Phosphorylation(raf, mek)
st2 = Phosphorylation(raf, mek, 'tyrosine')
assert(st1.matches_key() != st2.matches_key())
assert unicode_strs(st1)
assert unicode_strs(st2)
def test_matches_dbrefs():
hras1 = Agent('HRAS', db_refs={'hgnc': 1111})
hras2 = Agent('HRAS', db_refs={'hgnc': 9999})
assert(hras1.matches(hras2))
assert unicode_strs(hras1)
assert unicode_strs(hras2)
def test_matches_key_dbrefs():
hras1 = Agent('HRAS', db_refs={'hgnc': 1111})
hras2 = Agent('HRAS', db_refs={'hgnc': 9999})
assert(hras1.matches_key() == hras2.matches_key())
assert unicode_strs((hras1, hras2))
def test_matches_bound():
hras1 = Agent('HRAS',
bound_conditions=[BoundCondition(Agent('BRAF'), True)])
hras2 = Agent('HRAS',
bound_conditions=[BoundCondition(Agent('BRAF'), True)])
assert(hras1.matches(hras2))
assert unicode_strs((hras1, hras2))
def test_matches_key_bound():
hras1 = Agent('HRAS',
bound_conditions=[BoundCondition(Agent('BRAF'), True)])
hras2 = Agent('HRAS',
bound_conditions=[BoundCondition(Agent('BRAF'), True)])
assert(hras1.matches_key() == hras2.matches_key())
assert unicode_strs((hras1, hras2))
def test_not_matches_bound():
hras1 = Agent('HRAS',
bound_conditions=[BoundCondition(Agent('BRAF'), True)])
hras2 = Agent('HRAS',
bound_conditions=[BoundCondition(Agent('RAF1'), True)])
assert(not hras1.matches(hras2))
assert unicode_strs((hras1, hras2))
def test_not_matches_key_bound():
hras1 = Agent('HRAS',
bound_conditions=[BoundCondition(Agent('BRAF'), True)])
hras2 = Agent('HRAS',
bound_conditions=[BoundCondition(Agent('RAF1'), True)])
assert(hras1.matches_key() != hras2.matches_key())
assert unicode_strs((hras1, hras2))
def test_not_matches_bound2():
hras1 = Agent('HRAS',
bound_conditions=[BoundCondition(Agent('BRAF'), True)])
hras2 = Agent('HRAS',
bound_conditions=[BoundCondition(Agent('BRAF'), False)])
assert(not hras1.matches(hras2))
assert unicode_strs((hras1, hras2))
def test_not_matches_key_bound2():
hras1 = Agent('HRAS',
bound_conditions=[BoundCondition(Agent('BRAF'), True)])
hras2 = Agent('HRAS',
bound_conditions=[BoundCondition(Agent('BRAF'), False)])
assert(hras1.matches_key() != hras2.matches_key())
assert unicode_strs((hras1, hras2))
def test_matches_bound_multiple():
hras1 = Agent('HRAS', bound_conditions=[BoundCondition(Agent('BRAF'), True),
BoundCondition(Agent('RAF1'), True)])
hras2 = Agent('HRAS', bound_conditions=[BoundCondition(Agent('BRAF'), True),
BoundCondition(Agent('RAF1'), True)])
assert(hras1.matches(hras2))
assert unicode_strs((hras1, hras2))
def test_matches_key_bound_multiple():
hras1 = Agent('HRAS', bound_conditions=[BoundCondition(Agent('BRAF'), True),
BoundCondition(Agent('RAF1'), True)])
hras2 = Agent('HRAS', bound_conditions=[BoundCondition(Agent('BRAF'), True),
BoundCondition(Agent('RAF1'), True)])
assert(hras1.matches_key() == hras2.matches_key())
assert unicode_strs((hras1, hras2))
def test_matches_bound_multiple_order():
hras1 = Agent('HRAS', bound_conditions=[BoundCondition(Agent('RAF1'), True),
BoundCondition(Agent('BRAF'), True)])
hras2 = Agent('HRAS', bound_conditions=[BoundCondition(Agent('BRAF'), True),
BoundCondition(Agent('RAF1'), True)])
assert(hras1.matches(hras2))
assert unicode_strs((hras1, hras2))
def test_matches_key_bound_multiple_order():
hras1 = Agent('HRAS', bound_conditions=[BoundCondition(Agent('RAF1'), True),
BoundCondition(Agent('BRAF'), True)])
hras2 = Agent('HRAS', bound_conditions=[BoundCondition(Agent('BRAF'), True),
BoundCondition(Agent('RAF1'), True)])
assert(hras1.matches_key() == hras2.matches_key())
assert unicode_strs((hras1, hras2))
def test_matches_agent_mod_order():
hras1 = Agent('MAP2K1',
mods=[ModCondition('phosphorylation'), ModCondition('ubiquitination')])
hras2 = Agent('MAP2K1',
mods=[ModCondition('ubiquitination'), ModCondition('phosphorylation')])
assert(hras1.matches(hras2))
assert unicode_strs((hras1, hras2))
def test_refinement_agent_mod_order():
hras1 = Agent('MAP2K1',
mods=[ModCondition('phosphorylation', 'S'),
ModCondition('ubiquitination')])
hras2 = Agent('MAP2K1',
mods=[ModCondition('ubiquitination'), ModCondition('phosphorylation')])
assert(hras1.refinement_of(hras2, hierarchies))
assert(not hras2.refinement_of(hras1, hierarchies))
assert unicode_strs((hras1, hras2))
def test_refinement_agent_mod_same_order():
hras1 = Agent('MAP2K1',
mods=[ModCondition('phosphorylation'),
ModCondition('phosphorylation')])
hras2 = Agent('MAP2K1',
mods=[ModCondition('phosphorylation')])
assert(hras1.refinement_of(hras2, hierarchies))
assert(not hras2.refinement_of(hras1, hierarchies))
assert unicode_strs((hras1, hras2))
def test_refinement_agent_mod_multiple():
mc1 = ModCondition('phosphorylation', 'S', '218')
mc2 = ModCondition('phosphorylation', 'S', '298')
mc3 = ModCondition('phosphorylation', 'S', '222')
mc4 = ModCondition('phosphorylation')
mc5 = ModCondition('phosphorylation')
mek1 = Agent('MAP2K1', mods=[mc1, mc2, mc3])
mek2 = Agent('MAP2K1', mods=[mc4, mc5])
erk = Agent('MAPK1')
st1 = Phosphorylation(mek2, erk)
st2 = Phosphorylation(mek1, erk, 'T', '185')
st3 = Phosphorylation(mek1, erk, 'Y', '187')
assert(st2.refinement_of(st1, hierarchies))
assert(st3.refinement_of(st1, hierarchies))
assert(not st1.refinement_of(st2, hierarchies))
assert(not st1.refinement_of(st3, hierarchies))
assert unicode_strs((st1, st2, st3))
def test_refinement_agent_mod_generic():
p = ModCondition('phosphorylation')
raf3p = Phosphorylation(Agent('RAF', mods=[p,p,p]), Agent('MAP2K1'))
raf2p = Phosphorylation(Agent('RAF', mods=[p,p]), Agent('MAP2K1'))
assert(raf3p.refinement_of(raf2p, hierarchies))
assert(not raf2p.refinement_of(raf3p, hierarchies))
assert unicode_strs((raf3p, raf2p))
# Check matches implementations for all statement types ---------------------
def test_matches_selfmod():
"""Test matching of entities only."""
nras1 = Agent('NRAS', db_refs = {'HGNC': '7989'})
nras2 = Agent('NRAS', db_refs = {'HGNC': 'dummy'})
st1 = Autophosphorylation(nras1, 'tyrosine', '32',
evidence=Evidence(text='foo'))
st2 = Autophosphorylation(nras1, 'tyrosine', '32',
evidence=Evidence(text='bar'))
st3 = Autophosphorylation(nras2, evidence=Evidence(text='bar'))
assert(st1.matches(st2))
assert(not st1.matches(st3))
assert unicode_strs((st1, st2, st3))
def test_matches_activation():
"""Test matching of entities only."""
src = Agent('SRC', db_refs = {'HGNC': '11283'})
nras1 = Agent('NRAS', db_refs = {'HGNC': '7989'})
nras2 = Agent('NRAS', db_refs = {'HGNC': 'dummy'})
st1 = Activation(src, nras1, 'gtpbound',
evidence=Evidence(text='foo'))
st2 = Activation(src, nras1, 'gtpbound',
evidence=Evidence(text='bar'))
st3 = Activation(src, nras2, 'phosphatase',
evidence=Evidence(text='bar'))
st4 = Inhibition(src, nras2, 'phosphatase',
evidence=Evidence(text='bar'))
assert(st1.matches(st2))
assert(not st1.matches(st3))
assert(not st3.matches(st4))
assert unicode_strs((st1, st2, st3))
def test_matches_activitymod():
"""Test matching of entities only."""
mc = ModCondition('phosphorylation', 'Y', '32')
mc2 = ModCondition('phosphorylation')
nras1 = Agent('NRAS', mods=[mc], db_refs = {'HGNC': '7989'})
nras2 = Agent('NRAS', mods=[mc2], db_refs = {'HGNC': 'dummy'})
st1 = ActiveForm(nras1, 'gtpbound', True,
evidence=Evidence(text='foo'))
st2 = ActiveForm(nras1, 'gtpbound', True,
evidence=Evidence(text='bar'))
st3 = ActiveForm(nras2, 'phosphatase', True,
evidence=Evidence(text='bar'))
assert(st1.matches(st2))
assert(not st1.matches(st3))
assert unicode_strs((st1, st2, st3))
def test_matches_activatingsub():
"""Test matching of entities only."""
mut1 = MutCondition('12', 'G', 'D')
mut2 = MutCondition('61', 'Q', 'L')
nras1 = Agent('NRAS', mutations=[mut1], db_refs = {'HGNC': '7989'})
nras2 = Agent('NRAS', mutations=[mut2], db_refs = {'HGNC': 'dummy'})
st1 = ActiveForm(nras1, 'gtpbound', True,
evidence=Evidence(text='foo'))
st2 = ActiveForm(nras1, 'gtpbound', True,
evidence=Evidence(text='bar'))
st3 = ActiveForm(nras2, 'phosphatase', True,
evidence=Evidence(text='bar'))
st4 = ActiveForm(nras2, 'phosphatase', False,
evidence=Evidence(text='bar'))
st5 = ActiveForm(nras2, 'kinase', True,
evidence=Evidence(text='bar'))
assert(st1.matches(st2))
assert(not st1.matches(st3))
assert(not st3.matches(st4)) # Differ only in relationship
assert(not st3.matches(st5)) # Differ only in activity
assert unicode_strs((st1, st2, st3, st4, st5))
def test_matches_rasgef():
"""Test matching of entities only."""
sos1 = Agent('SOS1', db_refs = {'HGNC': 'sos1'})
sos2 = Agent('SOS1', db_refs = {'HGNC': 'sos2'})
nras1 = Agent('NRAS', db_refs = {'HGNC': '7989'})
nras2 = Agent('NRAS', db_refs = {'HGNC': 'dummy'})
st1 = RasGef(sos1, nras1,
evidence=Evidence(text='foo'))
st2 = RasGef(sos1, nras1,
evidence=Evidence(text='bar'))
st3 = RasGef(sos2, nras2,
evidence=Evidence(text='bar'))
assert(st1.matches(st2))
assert(not st1.matches(st3))
assert unicode_strs((st1, st2, st3))
def test_matches_rasgap():
rasa1 = Agent('RASA1', db_refs = {'HGNC': 'rasa1'})
rasa2 = Agent('RASA1', db_refs = {'HGNC': 'rasa2'})
nras1 = Agent('NRAS', db_refs = {'HGNC': '7989'})
nras2 = Agent('NRAS', db_refs = {'HGNC': 'dummy'})
st1 = RasGap(rasa1, nras1,
evidence=Evidence(text='foo'))
st2 = RasGap(rasa1, nras1,
evidence=Evidence(text='bar'))
st3 = RasGap(rasa2, nras2,
evidence=Evidence(text='bar'))
assert(st1.matches(st2))
assert(not st1.matches(st3))
assert unicode_strs((st1, st2, st3))
def test_matches_complex():
ksr1 = Agent('KSR1', db_refs = {'HGNC': 'ksr1'})
ksr2 = Agent('KSR1', db_refs = {'HGNC': 'ksr2'})
braf1 = Agent('BRAF', db_refs = {'HGNC': 'braf1'})
braf2 = Agent('BRAF', db_refs = {'HGNC': 'braf2'})
map2k1 = Agent('MAP2K1', db_refs = {'HGNC': 'map2k1'})
map2k2 = Agent('MAP2K1', db_refs = {'HGNC': 'map2k2'})
st1 = Complex([ksr1, braf1, map2k1], evidence=Evidence(text='foo'))
st2 = Complex([ksr1, braf1, map2k1], evidence=Evidence(text='bar'))
st3 = Complex([braf1, map2k1, ksr1], evidence=Evidence(text='bax'))
assert(st1.matches(st2))
assert(st2.matches(st3))
assert(st3.matches(st1))
assert unicode_strs((st1, st2, st3))
# Entity matching between statements ----------------------------------------
def test_agent_entity_match():
"""Agents match on name and grounding."""
nras1 = Agent('NRAS', db_refs = {'HGNC': '7989'})
nras2 = Agent('NRAS', db_refs = {'HGNC': '7989'})
nras3 = Agent('NRAS', db_refs = {'HGNC': 'dummy'})
assert(nras1.entity_matches(nras2))
assert(not nras1.entity_matches(nras3))
assert unicode_strs((nras1, nras2, nras3))
def test_entities_match_mod():
"""Test matching of entities only, entities match on name and grounding."""
src = Agent('SRC', db_refs = {'HGNC': '11283'})
nras1 = Agent('NRAS', db_refs = {'HGNC': '7989'})
nras2 = Agent('NRAS', db_refs = {'HGNC': '7989'})
nras3 = Agent('NRAS', db_refs = {'HGNC': 'dummy'})
st1 = Phosphorylation(src, nras1, 'tyrosine', '32',
evidence=Evidence(text='foo'))
st2 = Phosphorylation(src, nras2,
evidence=Evidence(text='bar'))
st3 = Phosphorylation(src, nras3,
evidence=Evidence(text='baz'))
assert(st1.entities_match(st2))
assert(not st1.entities_match(st3))
assert unicode_strs((st1, st2, st3))
def test_entities_match_selfmod():
"""Test matching of entities only, entities match on name and grounding."""
nras1 = Agent('NRAS', db_refs = {'HGNC': '7989'})
nras2 = Agent('NRAS', db_refs = {'HGNC': '7989'})
nras3 = Agent('NRAS', db_refs = {'HGNC': 'dummy'})
st1 = Autophosphorylation(nras1, 'tyrosine', '32',
evidence=Evidence(text='foo'))
st2 = Autophosphorylation(nras2,
evidence=Evidence(text='bar'))
st3 = Autophosphorylation(nras3,
evidence=Evidence(text='baz'))
assert(st1.entities_match(st2))
assert(not st1.entities_match(st3))
assert unicode_strs((st1, st2, st3))
def test_entities_match_activation():
"""Test matching of entities only, entities match on name and grounding."""
src = Agent('SRC', db_refs = {'HGNC': '11283'})
nras1 = Agent('NRAS', db_refs = {'HGNC': '7989'})
nras2 = Agent('NRAS', db_refs = {'HGNC': '7989'})
nras3 = Agent('NRAS', db_refs = {'HGNC': 'dummy'})
st1 = Activation(src, nras1, 'gtpbound',
evidence=Evidence(text='foo'))
st2 = Activation(src, nras2, 'phosphatase',
evidence=Evidence(text='bar'))
st3 = Activation(src, nras3, 'phosphatase',
evidence=Evidence(text='baz'))
assert(st1.entities_match(st2))
assert(not st1.entities_match(st3))
assert unicode_strs((st1, st2, st3))
def test_entities_match_activitymod():
"""Test matching of entities only, entities match on name and grounding."""
mc1 = ModCondition('phosphorylation', 'tyrosine', '32')
mc2 = ModCondition('phosphorylation')
nras1 = Agent('NRAS', mods=[mc1], db_refs={'HGNC': '7989'})
nras2 = Agent('NRAS', mods=[mc2], db_refs={'HGNC': '7989'})
nras3 = Agent('NRAS', mods=[mc1], db_refs={'HGNC': 'dummy'})
st1 = ActiveForm(nras1, 'gtpbound', True,
evidence=Evidence(text='foo'))
st2 = ActiveForm(nras2, 'phosphatase', False,
evidence=Evidence(text='bar'))
st3 = ActiveForm(nras3, 'gtpbound', False,
evidence=Evidence(text='baz'))
assert(st1.entities_match(st2))
assert(not st1.entities_match(st3))
assert unicode_strs((st1, st2, st3))
def test_entities_match_activatingsub():
"""Test matching of entities only, entities match on name and grounding."""
mc1 = MutCondition('12', 'G', 'D')
mc2 = MutCondition('61', 'Q', 'L')
nras1 = Agent('NRAS', mutations=[mc1], db_refs = {'HGNC': '7989'})
nras2 = Agent('NRAS', mutations=[mc2], db_refs = {'HGNC': '7989'})
nras3 = Agent('NRAS', mutations=[mc1], db_refs = {'HGNC': 'dummy'})
st1 = ActiveForm(nras1, 'gtpbound', True,
evidence=Evidence(text='foo'))
st2 = ActiveForm(nras2, 'phosphatase', False,
evidence=Evidence(text='bar'))
st3 = ActiveForm(nras3, 'gtpbound', False,
evidence=Evidence(text='baz'))
assert(st1.entities_match(st2))
assert(not st1.entities_match(st3))
assert unicode_strs((st1, st2, st3))
def test_entities_match_rasgef():
"""Test matching of entities only, entities match on name and grounding."""
sos1 = Agent('SOS1', db_refs = {'HGNC': 'sos1'})
sos2 = Agent('SOS1', db_refs = {'HGNC': 'sos2'})
sos3 = Agent('SOS1', db_refs = {'HGNC': 'sos1'})
nras1 = Agent('NRAS', db_refs = {'HGNC': '7989'})
nras2 = Agent('NRAS', db_refs = {'HGNC': '7989'})
nras3 = Agent('NRAS', db_refs = {'HGNC': 'dummy'})
st1 = RasGef(sos1, nras1,
evidence=Evidence(text='foo'))
st2 = RasGef(sos2, nras2,
evidence=Evidence(text='bar'))
st3 = RasGef(sos1, nras2,
evidence=Evidence(text='bar'))
assert(not st1.entities_match(st2))
assert(not st2.entities_match(st3))
assert(st1.entities_match(st3))
assert unicode_strs((st1, st2, st3))
def test_entities_match_rasgap():
"""Test matching of entities only, entities match on name and grounding."""
rasa1 = Agent('RASA1', db_refs = {'HGNC': 'rasa1'})
rasa2 = Agent('RASA1', db_refs = {'HGNC': 'rasa2'})
nras1 = Agent('NRAS', db_refs = {'HGNC': '7989'})
nras2 = Agent('NRAS', db_refs = {'HGNC': 'dummy'})
st1 = RasGap(rasa1, nras1,
evidence=Evidence(text='foo'))
st2 = RasGap(rasa2, nras2,
evidence=Evidence(text='bar'))
assert(not st1.entities_match(st2))
def test_entities_match_complex():
"""Test matching of entities only, entities match on name and grounding."""
ksr1 = Agent('KSR1', db_refs = {'HGNC': 'ksr1'})
ksr2 = Agent('KSR1', db_refs = {'HGNC': 'ksr2'})
braf1 = Agent('BRAF', db_refs = {'HGNC': 'braf1'})
braf2 = Agent('BRAF', db_refs = {'HGNC': 'braf2'})
map2k1 = Agent('MAP2K1', db_refs = {'HGNC': 'map2k1'})
map2k2 = Agent('MAP2K1', db_refs = {'HGNC': 'map2k2'})
st1 = Complex([ksr1, braf1, map2k1], evidence=Evidence(text='foo'))
st2 = Complex([ksr2, braf2, map2k2], evidence=Evidence(text='bar'))
st3 = Complex([braf2, map2k2, ksr2], evidence=Evidence(text='baz'))
assert(not st1.entities_match(st2))
assert(st2.entities_match(st3))
assert(not st3.entities_match(st1))
def test_agent_superfamily_refinement():
"""A gene-level statement should be supported by a family-level
statement."""
ras = Agent('RAS', db_refs = {'BE': 'RAS'})
nras = Agent('NRAS', db_refs = {'HGNC': '7989'})
assert nras.refinement_of(ras, hierarchies)
assert not ras.refinement_of(nras, hierarchies)
# The top-level list should contain only one statement, the gene-level
# one, supported by the family one.
def test_agent_boundcondition_refinement():
"""A gene-level statement should be supported by a family-level
statement."""
bc1 = BoundCondition(Agent('BRAF', db_refs = {'HGNC': '1097'}), True)
bc2 = BoundCondition(Agent('RAF1', db_refs = {'HGNC': '9829'}), True)
bc3 = BoundCondition(Agent('RAF1', db_refs = {'HGNC': '9829'}), False)
bc4 = BoundCondition(Agent('RAF', db_refs = {'BE': 'RAF'}), True)
nras1 = Agent('NRAS', db_refs = {'HGNC': '7989'}, bound_conditions=[bc1])
nras2 = Agent('NRAS', db_refs = {'HGNC': '7989'}, bound_conditions=[bc2])
nras3 = Agent('NRAS', db_refs = {'HGNC': '7989'}, bound_conditions=[bc3])
nras4 = Agent('NRAS', db_refs = {'HGNC': '7989'})
nras5 = Agent('NRAS', db_refs = {'HGNC': '7989'},
bound_conditions=[bc4])
# nras1 (bound to BRAF)
assert not nras2.refinement_of(nras1, hierarchies)
assert not nras3.refinement_of(nras1, hierarchies)
assert not nras4.refinement_of(nras1, hierarchies)
assert not nras5.refinement_of(nras1, hierarchies)
# nras2 (bound to CRAF)
assert not nras1.refinement_of(nras2, hierarchies)
assert not nras3.refinement_of(nras2, hierarchies) # Not bound condition
assert not nras4.refinement_of(nras2, hierarchies)
assert not nras5.refinement_of(nras2, hierarchies)
# nras3 (not bound to CRAF)
assert not nras1.refinement_of(nras3, hierarchies)
assert not nras2.refinement_of(nras3, hierarchies) # Not bound condition
assert not nras4.refinement_of(nras3, hierarchies)
assert not nras5.refinement_of(nras3, hierarchies)
# nras4 (no bound condition)
assert nras1.refinement_of(nras4, hierarchies)
assert nras2.refinement_of(nras4, hierarchies)
assert nras3.refinement_of(nras4, hierarchies)
assert nras5.refinement_of(nras4, hierarchies)
# nras5 (RAF family bound condition)
assert nras1.refinement_of(nras5, hierarchies)
assert nras2.refinement_of(nras5, hierarchies)
assert not nras3.refinement_of(nras5, hierarchies)
assert not nras4.refinement_of(nras5, hierarchies)
def test_agent_modification_refinement():
"""A gene-level statement should be supported by a family-level
statement."""
mek1 = Agent('MAP2K1', db_refs = {'HGNC': 'asdf'},
mods=ModCondition('phosphorylation'))
mek2 = Agent('MAP2K1', db_refs = {'HGNC': 'asdf'},
mods=ModCondition('phosphorylation', position='218'))
mek3 = Agent('MAP2K1', db_refs = {'HGNC': 'asdf'},
mods=ModCondition('phosphorylation', position='222'))
mek4 = Agent('MAP2K1', db_refs = {'HGNC': 'asdf'},
mods=[ModCondition('phosphorylation', position='218'),
ModCondition('phosphorylation', position='222')])
mek5 = Agent('MAP2K1', db_refs = {'HGNC': 'asdf'},
mods=ModCondition('phosphorylation', 'serine', None))
mek6 = Agent('MAP2K1', db_refs = {'HGNC': 'asdf'},
mods=ModCondition('phosphorylation', 'serine', '218'))
mek7 = Agent('MAP2K1', db_refs = {'HGNC': 'asdf'},
mods=ModCondition('phosphorylation', 'serine', '222'))
mek8 = Agent('MAP2K1', db_refs = {'HGNC': 'asdf'},
mods=[ModCondition('phosphorylation', 'serine', '218'),
ModCondition('phosphorylation', 'serine', '222')])
# mek1 agent is refined by all others
assert mek2.refinement_of(mek1, hierarchies)
assert mek3.refinement_of(mek1, hierarchies)
assert mek4.refinement_of(mek1, hierarchies)
assert mek5.refinement_of(mek1, hierarchies)
assert mek6.refinement_of(mek1, hierarchies)
assert mek7.refinement_of(mek1, hierarchies)
assert mek8.refinement_of(mek1, hierarchies)
# mek2
assert not mek1.refinement_of(mek2, hierarchies)
assert not mek3.refinement_of(mek2, hierarchies) # Different site
assert mek4.refinement_of(mek2, hierarchies)
assert not mek5.refinement_of(mek2, hierarchies) # Cross-relationship
assert mek6.refinement_of(mek2, hierarchies)
assert not mek7.refinement_of(mek2, hierarchies) # Different site
assert mek8.refinement_of(mek2, hierarchies)
# mek3
assert not mek1.refinement_of(mek3, hierarchies)
assert not mek2.refinement_of(mek3, hierarchies)
assert mek4.refinement_of(mek3, hierarchies)
assert not mek5.refinement_of(mek3, hierarchies)
assert not mek6.refinement_of(mek3, hierarchies)
assert mek7.refinement_of(mek3, hierarchies)
assert mek8.refinement_of(mek3, hierarchies)
# mek4
assert not mek1.refinement_of(mek4, hierarchies)
assert not mek2.refinement_of(mek4, hierarchies)
assert not mek3.refinement_of(mek4, hierarchies)
assert not mek5.refinement_of(mek4, hierarchies)
assert not mek6.refinement_of(mek4, hierarchies)
assert not mek7.refinement_of(mek4, hierarchies)
assert mek8.refinement_of(mek4, hierarchies)
# mek5
assert not mek1.refinement_of(mek5, hierarchies)
assert not mek2.refinement_of(mek5, hierarchies)
assert not mek3.refinement_of(mek5, hierarchies)
assert not mek4.refinement_of(mek5, hierarchies)
assert mek6.refinement_of(mek5, hierarchies)
assert mek7.refinement_of(mek5, hierarchies)
assert mek8.refinement_of(mek5, hierarchies)
# mek6
assert not mek1.refinement_of(mek6, hierarchies)
assert not mek2.refinement_of(mek6, hierarchies)
assert not mek3.refinement_of(mek6, hierarchies)
assert not mek4.refinement_of(mek6, hierarchies)
assert not mek5.refinement_of(mek6, hierarchies)
assert not mek7.refinement_of(mek6, hierarchies)
assert mek8.refinement_of(mek6, hierarchies)
# mek7
assert not mek1.refinement_of(mek7, hierarchies)
assert not mek2.refinement_of(mek7, hierarchies)
assert not mek3.refinement_of(mek7, hierarchies)
assert not mek4.refinement_of(mek7, hierarchies)
assert not mek5.refinement_of(mek7, hierarchies)
assert not mek6.refinement_of(mek7, hierarchies)
assert mek8.refinement_of(mek7, hierarchies)
# mek8
assert not mek1.refinement_of(mek8, hierarchies)
assert not mek2.refinement_of(mek8, hierarchies)
assert not mek3.refinement_of(mek8, hierarchies)
assert not mek4.refinement_of(mek8, hierarchies)
assert not mek5.refinement_of(mek8, hierarchies)
assert not mek6.refinement_of(mek8, hierarchies)
assert not mek7.refinement_of(mek8, hierarchies)
def test_phosphorylation_modification_refinement():
braf = Agent('BRAF', db_refs = {'HGNC': 'braf'})
mek1 = Agent('MAP2K1', db_refs = {'HGNC': 'map2k1'})
p1 = Phosphorylation(braf, mek1)
p2 = Phosphorylation(braf, mek1, position='218')
p3 = Phosphorylation(braf, mek1, position='222')
p4 = Phosphorylation(braf, mek1, 'serine')
p5 = Phosphorylation(braf, mek1, 'serine', '218')
p6 = Phosphorylation(braf, mek1, 'serine', '222')
# p1
assert p2.refinement_of(p1, hierarchies)
assert p3.refinement_of(p1, hierarchies)
assert p4.refinement_of(p1, hierarchies)
assert p5.refinement_of(p1, hierarchies)
assert p6.refinement_of(p1, hierarchies)
# p2
assert not p1.refinement_of(p2, hierarchies)
assert not p3.refinement_of(p2, hierarchies)
assert not p4.refinement_of(p2, hierarchies)
assert p5.refinement_of(p2, hierarchies)
assert not p6.refinement_of(p2, hierarchies)
# p3
assert not p1.refinement_of(p3, hierarchies)
assert not p2.refinement_of(p3, hierarchies)
assert not p4.refinement_of(p3, hierarchies)
assert not p5.refinement_of(p3, hierarchies)
assert p6.refinement_of(p3, hierarchies)
# p4
assert not p1.refinement_of(p4, hierarchies)
assert not p2.refinement_of(p4, hierarchies)
assert not p3.refinement_of(p4, hierarchies)
assert p5.refinement_of(p4, hierarchies)
assert p6.refinement_of(p4, hierarchies)
# p5
assert not p1.refinement_of(p5, hierarchies)
assert not p2.refinement_of(p5, hierarchies)
assert not p3.refinement_of(p5, hierarchies)
assert not p4.refinement_of(p5, hierarchies)
assert not p6.refinement_of(p5, hierarchies)
# p6
assert not p1.refinement_of(p6, hierarchies)
assert not p2.refinement_of(p6, hierarchies)
assert not p3.refinement_of(p6, hierarchies)
assert not p4.refinement_of(p6, hierarchies)
assert not p5.refinement_of(p6, hierarchies)
def test_autophosphorylation_modification_refinement():
    braf = Agent('BRAF', db_refs={'HGNC': 'braf'})
    p1 = Autophosphorylation(braf)
p2 = Autophosphorylation(braf, position='218')
p3 = Autophosphorylation(braf, position='222')
p4 = Autophosphorylation(braf, 'serine')
p5 = Autophosphorylation(braf, 'serine', '218')
p6 = Autophosphorylation(braf, 'serine', '222')
# p1
assert p2.refinement_of(p1, hierarchies)
assert p3.refinement_of(p1, hierarchies)
assert p4.refinement_of(p1, hierarchies)
assert p5.refinement_of(p1, hierarchies)
assert p6.refinement_of(p1, hierarchies)
# p2
assert not p1.refinement_of(p2, hierarchies)
assert not p3.refinement_of(p2, hierarchies)
assert not p4.refinement_of(p2, hierarchies)
assert p5.refinement_of(p2, hierarchies)
assert not p6.refinement_of(p2, hierarchies)
# p3
assert not p1.refinement_of(p3, hierarchies)
assert not p2.refinement_of(p3, hierarchies)
assert not p4.refinement_of(p3, hierarchies)
assert not p5.refinement_of(p3, hierarchies)
assert p6.refinement_of(p3, hierarchies)
# p4
assert not p1.refinement_of(p4, hierarchies)
assert not p2.refinement_of(p4, hierarchies)
assert not p3.refinement_of(p4, hierarchies)
assert p5.refinement_of(p4, hierarchies)
assert p6.refinement_of(p4, hierarchies)
# p5
assert not p1.refinement_of(p5, hierarchies)
assert not p2.refinement_of(p5, hierarchies)
assert not p3.refinement_of(p5, hierarchies)
assert not p4.refinement_of(p5, hierarchies)
assert not p6.refinement_of(p5, hierarchies)
# p6
assert not p1.refinement_of(p6, hierarchies)
assert not p2.refinement_of(p6, hierarchies)
assert not p3.refinement_of(p6, hierarchies)
assert not p4.refinement_of(p6, hierarchies)
assert not p5.refinement_of(p6, hierarchies)
def test_activation_modification_refinement():
raf = Agent('RAF', db_refs={'BE': 'RAF'})
braf = Agent('BRAF', db_refs={'HGNC': '1097'})
mek = Agent('MEK', db_refs={'BE': 'MEK'})
mek1 = Agent('MAP2K1', db_refs={'HGNC': '6840'})
st1 = Activation(raf, mek, 'kinase')
st2 = Activation(braf, mek, 'kinase')
st3 = Activation(raf, mek1, 'kinase')
st4 = Activation(braf, mek1, 'kinase')
st5 = Inhibition(braf, mek1, 'kinase')
# st1
assert st2.refinement_of(st1, hierarchies)
assert st3.refinement_of(st1, hierarchies)
assert st4.refinement_of(st1, hierarchies)
assert not st5.refinement_of(st1, hierarchies)
# st2
assert not st1.refinement_of(st2, hierarchies)
assert not st3.refinement_of(st2, hierarchies)
assert st4.refinement_of(st2, hierarchies)
assert not st5.refinement_of(st2, hierarchies)
# st3
assert not st1.refinement_of(st3, hierarchies)
assert not st2.refinement_of(st3, hierarchies)
assert st4.refinement_of(st3, hierarchies)
assert not st5.refinement_of(st3, hierarchies)
# st4
assert not st1.refinement_of(st4, hierarchies)
assert not st2.refinement_of(st4, hierarchies)
assert not st3.refinement_of(st4, hierarchies)
assert not st5.refinement_of(st4, hierarchies)
# st5
assert not st1.refinement_of(st5, hierarchies)
assert not st2.refinement_of(st5, hierarchies)
assert not st3.refinement_of(st5, hierarchies)
assert not st4.refinement_of(st5, hierarchies)
def test_activation_activity_hierarchy_refinement():
raf_k = Agent('RAF', activity=ActivityCondition('kinase', True),
db_refs={'BE': 'RAF'})
raf_c = Agent('RAF', activity=ActivityCondition('catalytic', True),
db_refs={'BE': 'RAF'})
raf_a = Agent('RAF', activity=ActivityCondition('activity', True),
db_refs={'BE': 'RAF'})
mek = Agent('MEK', db_refs={'BE': 'MEK'})
st1 = Activation(raf_k, mek, 'kinase')
st2 = Inhibition(raf_k, mek, 'kinase')
st3 = Activation(raf_c, mek, 'kinase')
st4 = Activation(raf_k, mek, 'catalytic')
st5 = Activation(raf_c, mek, 'activity')
st6 = Activation(raf_a, mek, 'activity')
assert(not st1.refinement_of(st2, hierarchies))
assert(not st2.refinement_of(st1, hierarchies))
assert(st1.refinement_of(st3, hierarchies))
assert(st1.refinement_of(st4, hierarchies))
assert(st5.refinement_of(st6, hierarchies))
assert(st1.refinement_of(st6, hierarchies))
assert(not st3.refinement_of(st4, hierarchies))
assert(not st4.refinement_of(st3, hierarchies))
def test_activitymod_refinement():
mc1 = ModCondition('phosphorylation')
mc2 = ModCondition('phosphorylation', 'S')
mc3 = ModCondition('phosphorylation', 'S', '218')
mc4 = ModCondition('phosphorylation', 'S', '222')
mek_fam = Agent('MEK')
mek1 = Agent('MAP2K1')
p1 = ActiveForm(Agent('MEK', mods=[mc1], db_refs={'BE':'MEK'}),
'kinase', True)
p2 = ActiveForm(Agent('MEK', mods=[mc3], db_refs={'BE':'MEK'}),
'kinase', True)
p3 = ActiveForm(Agent('MAP2K1', mods=[mc1], db_refs={'HGNC':'6840'}),
'kinase', True)
p4 = ActiveForm(Agent('MAP2K1', mods=[mc2], db_refs={'HGNC':'6840'}),
'kinase', True)
p5 = ActiveForm(Agent('MAP2K1', mods=[mc3], db_refs={'HGNC':'6840'}),
'kinase', True)
p6 = ActiveForm(Agent('MAP2K1', mods=[mc4], db_refs={'HGNC':'6840'}),
'kinase', True)
p7 = ActiveForm(Agent('MAP2K1', mods=[mc3, mc4], db_refs={'HGNC':'6840'}),
'kinase', True)
# p1
assert p2.refinement_of(p1, hierarchies)
assert p3.refinement_of(p1, hierarchies)
assert p4.refinement_of(p1, hierarchies)
assert p5.refinement_of(p1, hierarchies)
assert p6.refinement_of(p1, hierarchies)
assert p7.refinement_of(p1, hierarchies)
# p2
assert not p1.refinement_of(p2, hierarchies)
assert not p3.refinement_of(p2, hierarchies)
assert not p4.refinement_of(p2, hierarchies)
assert p5.refinement_of(p2, hierarchies)
assert not p6.refinement_of(p2, hierarchies)
assert p7.refinement_of(p2, hierarchies)
# p3
assert not p1.refinement_of(p3, hierarchies)
assert not p2.refinement_of(p3, hierarchies)
assert p4.refinement_of(p3, hierarchies)
assert p5.refinement_of(p3, hierarchies)
assert p6.refinement_of(p3, hierarchies)
assert p7.refinement_of(p3, hierarchies)
# p4
assert not p1.refinement_of(p4, hierarchies)
assert not p2.refinement_of(p4, hierarchies)
assert not p3.refinement_of(p4, hierarchies)
assert p5.refinement_of(p4, hierarchies)
assert p6.refinement_of(p4, hierarchies)
assert p7.refinement_of(p4, hierarchies)
# p5
assert not p1.refinement_of(p5, hierarchies)
assert not p2.refinement_of(p5, hierarchies)
assert not p3.refinement_of(p5, hierarchies)
assert not p4.refinement_of(p5, hierarchies)
assert not p6.refinement_of(p5, hierarchies)
assert p7.refinement_of(p5, hierarchies)
# p6
assert not p1.refinement_of(p6, hierarchies)
assert not p2.refinement_of(p6, hierarchies)
assert not p3.refinement_of(p6, hierarchies)
assert not p4.refinement_of(p6, hierarchies)
assert not p5.refinement_of(p6, hierarchies)
assert p7.refinement_of(p6, hierarchies)
# p7
assert not p1.refinement_of(p7, hierarchies)
assert not p2.refinement_of(p7, hierarchies)
assert not p3.refinement_of(p7, hierarchies)
assert not p4.refinement_of(p7, hierarchies)
assert not p5.refinement_of(p7, hierarchies)
assert not p6.refinement_of(p7, hierarchies)
def test_activeform_activity_hierarchy_refinement():
p1 = ActiveForm(Agent('MEK'), 'kinase', True)
p2 = ActiveForm(Agent('MEK'), 'kinase', False)
p3 = ActiveForm(Agent('MEK'), 'catalytic', True)
p4 = ActiveForm(Agent('MEK'), 'activity', True)
assert(not p1.refinement_of(p2, hierarchies))
assert(p1.refinement_of(p3, hierarchies))
assert(p1.refinement_of(p4, hierarchies))
assert(p3.refinement_of(p4, hierarchies))
assert(not p4.refinement_of(p3, hierarchies))
def test_activatingsub_family_refinement():
mc = MutCondition('12', 'G', 'D')
ras = Agent('RAS', mutations=[mc], db_refs={'BE':'RAS'})
kras = Agent('KRAS', mutations=[mc], db_refs={'HGNC':'6407'})
nras = Agent('NRAS', mutations=[mc], db_refs={'HGNC':'7989'})
st1 = ActiveForm(ras, 'activity', True)
st2 = ActiveForm(kras, 'activity', True)
st3 = ActiveForm(nras, 'activity', True)
st4 = ActiveForm(kras, 'activity', False)
# st1
assert st2.refinement_of(st1, hierarchies)
assert st3.refinement_of(st1, hierarchies)
assert not st4.refinement_of(st1, hierarchies)
# st2
assert not st1.refinement_of(st2, hierarchies)
assert not st3.refinement_of(st2, hierarchies)
assert not st4.refinement_of(st2, hierarchies)
# st3
assert not st1.refinement_of(st3, hierarchies)
assert not st2.refinement_of(st3, hierarchies)
assert not st4.refinement_of(st3, hierarchies)
# st4
assert not st1.refinement_of(st4, hierarchies)
assert not st2.refinement_of(st4, hierarchies)
assert not st3.refinement_of(st4, hierarchies)
def test_rasgef_family_refinement():
sos = Agent('SOS', db_refs={'BE':'SOS'})
sos1 = Agent('SOS1', db_refs={'HGNC':'11187'})
sos1_a = Agent('SOS1', activity=ActivityCondition('activity', True),
db_refs={'HGNC': '11187'})
sos1_c = Agent('SOS1', activity=ActivityCondition('catalytic', True),
db_refs={'HGNC': '11187'})
ras = Agent('RAS', db_refs={'BE':'RAS'})
kras = Agent('KRAS', db_refs={'HGNC':'6407'})
# Statements
st1 = RasGef(sos, ras)
st2 = RasGef(sos1, ras)
st3 = RasGef(sos, kras)
st4 = RasGef(sos1, kras)
st5 = RasGef(sos1_a, kras)
st6 = RasGef(sos1_c, kras)
# st1
assert st2.refinement_of(st1, hierarchies)
assert st3.refinement_of(st1, hierarchies)
assert st4.refinement_of(st1, hierarchies)
assert st5.refinement_of(st1, hierarchies)
assert st6.refinement_of(st1, hierarchies)
# st2
assert not st1.refinement_of(st2, hierarchies)
assert not st3.refinement_of(st2, hierarchies)
assert st4.refinement_of(st2, hierarchies)
assert st5.refinement_of(st2, hierarchies)
assert st6.refinement_of(st2, hierarchies)
# st3
assert not st1.refinement_of(st3, hierarchies)
assert not st2.refinement_of(st3, hierarchies)
assert st4.refinement_of(st3, hierarchies)
assert st5.refinement_of(st3, hierarchies)
assert st6.refinement_of(st3, hierarchies)
# st4
assert not st1.refinement_of(st4, hierarchies)
assert not st2.refinement_of(st4, hierarchies)
assert not st3.refinement_of(st4, hierarchies)
assert st5.refinement_of(st4, hierarchies)
assert st6.refinement_of(st4, hierarchies)
# st5
assert not st1.refinement_of(st5, hierarchies)
assert not st2.refinement_of(st5, hierarchies)
assert not st3.refinement_of(st5, hierarchies)
assert not st4.refinement_of(st5, hierarchies)
assert st6.refinement_of(st5, hierarchies)
# st6
assert not st5.refinement_of(st6, hierarchies)
def test_rasgap_family_refinement():
rasa = Agent('RASA', db_refs={'BE':'RASA'})
rasa1 = Agent('RASA1', db_refs={'HGNC':'9871'})
ras = Agent('RAS', db_refs={'BE':'RAS'})
kras = Agent('KRAS', db_refs={'HGNC':'6407'})
rasa1_a = Agent('RASA1', activity=ActivityCondition('activity', True),
db_refs={'HGNC': '9871'})
rasa1_c = Agent('RASA1', activity=ActivityCondition('catalytic', True),
db_refs={'HGNC': '9871'})
# Statements
st1 = RasGap(rasa, ras)
st2 = RasGap(rasa1, ras)
st3 = RasGap(rasa, kras)
st4 = RasGap(rasa1, kras)
st5 = RasGap(rasa1_a, kras)
st6 = RasGap(rasa1_c, kras)
# st1
assert st2.refinement_of(st1, hierarchies)
assert st3.refinement_of(st1, hierarchies)
assert st4.refinement_of(st1, hierarchies)
assert st5.refinement_of(st1, hierarchies)
assert st6.refinement_of(st1, hierarchies)
# st2
assert not st1.refinement_of(st2, hierarchies)
assert not st3.refinement_of(st2, hierarchies)
assert st4.refinement_of(st2, hierarchies)
assert st5.refinement_of(st2, hierarchies)
assert st6.refinement_of(st2, hierarchies)
# st3
assert not st1.refinement_of(st3, hierarchies)
assert not st2.refinement_of(st3, hierarchies)
assert st4.refinement_of(st3, hierarchies)
assert st5.refinement_of(st3, hierarchies)
assert st6.refinement_of(st3, hierarchies)
# st4
assert not st1.refinement_of(st4, hierarchies)
assert not st2.refinement_of(st4, hierarchies)
assert not st3.refinement_of(st4, hierarchies)
assert st5.refinement_of(st4, hierarchies)
assert st6.refinement_of(st4, hierarchies)
# st5
assert not st1.refinement_of(st5, hierarchies)
assert not st2.refinement_of(st5, hierarchies)
assert not st3.refinement_of(st5, hierarchies)
assert not st4.refinement_of(st5, hierarchies)
assert st6.refinement_of(st5, hierarchies)
# st6
assert not st5.refinement_of(st6, hierarchies)
def test_complex_family_refinement():
raf = Agent('RAF', db_refs={'BE':'RAF'})
braf = Agent('BRAF', db_refs={'HGNC':'1097'})
raf1 = Agent('RAF1', db_refs={'HGNC':'9829'})
mek = Agent('MEK', db_refs={'BE':'MEK'})
mek1 = Agent('MAP2K1', db_refs={'HGNC':'6840'})
st1 = Complex([raf, mek])
st2 = Complex([braf, mek])
st3 = Complex([mek1, raf])
st4 = Complex([braf, mek1])
st5 = Complex([braf, raf1])
# st1
assert st2.refinement_of(st1, hierarchies)
assert st3.refinement_of(st1, hierarchies)
assert st4.refinement_of(st1, hierarchies)
assert not st5.refinement_of(st1, hierarchies)
# st2
assert not st1.refinement_of(st2, hierarchies)
assert not st3.refinement_of(st2, hierarchies)
assert st4.refinement_of(st2, hierarchies)
assert not st5.refinement_of(st2, hierarchies)
# st3
assert not st1.refinement_of(st3, hierarchies)
assert not st2.refinement_of(st3, hierarchies)
assert st4.refinement_of(st3, hierarchies)
assert not st5.refinement_of(st3, hierarchies)
# st4
assert not st1.refinement_of(st4, hierarchies)
assert not st2.refinement_of(st4, hierarchies)
assert not st3.refinement_of(st4, hierarchies)
assert not st5.refinement_of(st4, hierarchies)
# st5
assert not st1.refinement_of(st5, hierarchies)
assert not st2.refinement_of(st5, hierarchies)
assert not st3.refinement_of(st5, hierarchies)
assert not st4.refinement_of(st5, hierarchies)
def test_mismatched_complex_refinement():
ras = Agent('RAS')
raf = Agent('RAF')
mek = Agent('MEK')
st1 = Complex([ras, raf])
st2 = Complex([mek, ras, raf])
assert not st1.refinement_of(st2, hierarchies)
assert not st2.refinement_of(st1, hierarchies)
@raises(InvalidResidueError)
def test_residue_mod_condition():
mc = ModCondition('phosphorylation', 'xyz')
@raises(InvalidResidueError)
def test_residue_mod():
Phosphorylation(Agent('a'), Agent('b'), 'xyz')
@raises(InvalidResidueError)
def test_residue_selfmod():
Autophosphorylation(Agent('a'), 'xyz')
def test_valid_mod_residue():
mc = ModCondition('phosphorylation', 'serine')
assert(mc.residue == 'S')
assert unicode_strs(mc)
def test_valid_residue():
assert(get_valid_residue('serine') == 'S')
assert(get_valid_residue('ser') == 'S')
assert(get_valid_residue('Serine') == 'S')
assert(get_valid_residue('SERINE') == 'S')
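    # Illustrative note (not part of the original tests): by analogy with the
    # InvalidResidueError cases above, an unrecognized residue name such as
    # 'xyz' passed to get_valid_residue is expected to raise
    # InvalidResidueError rather than return a value.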
def test_modcondition_order_actmod():
    mc1 = ModCondition('phosphorylation', 'S', '222')
    mc2 = ModCondition('phosphorylation', 'S', '224')
p1 = ActiveForm(Agent('MAP2K1', mods=[mc1, mc2]), 'kinase', True)
p2 = ActiveForm(Agent('MAP2K1', mods=[mc2, mc1]), 'kinase', True)
assert(p1.matches(p2))
assert unicode_strs((p1, p2))
def test_modcondition_order_agent():
    mc1 = ModCondition('phosphorylation', 'S', '222')
    mc2 = ModCondition('phosphorylation', 'S', '224')
p1 = Agent('MAP2K1', mods=[mc1, mc2])
p2 = Agent('MAP2K1', mods=[mc2, mc1])
assert(p1.matches(p2))
assert unicode_strs((p1, p2))
def test_eq_mut():
assert(MutCondition('600', 'V', 'E').equals(MutCondition('600', 'V', 'E')))
assert(not MutCondition('600', 'V', 'E').equals(
MutCondition('600', 'V', 'D')))
def test_eq_agent():
assert(Agent('one').equals(Agent('one')))
assert(not Agent('one').equals(Agent('two')))
assert(not Agent('one', db_refs={'UP': '123'}).equals(
Agent('one', db_refs={'UP': '999'})))
assert(Agent('one', mods=[ModCondition('phosphorylation')]).equals(
Agent('one', mods=[ModCondition('phosphorylation')])))
assert(not Agent('one', mods=[ModCondition('phosphorylation')]).equals(
Agent('one', mods=[ModCondition('ubiquitination')])))
assert(Agent('one', mutations=[MutCondition('600', 'V', 'E')]).equals(
Agent('one', mutations=[MutCondition('600', 'V', 'E')])))
assert(not Agent('one', mutations=[MutCondition('600', 'V', 'E')]).equals(
Agent('one', mutations=[MutCondition('600', 'V', 'D')])))
assert(Agent('one',
bound_conditions=[BoundCondition(Agent('two'), True)]).equals(
Agent('one',
bound_conditions=[BoundCondition(Agent('two'), True)])))
assert(not Agent('one',
bound_conditions=[BoundCondition(Agent('two'),
True)]).equals(
Agent('one', bound_conditions=[BoundCondition(Agent('two'),
False)])))
assert(not Agent('one', bound_conditions=[BoundCondition(Agent('two'),
True)]).equals(
Agent('one', bound_conditions=[BoundCondition(Agent('three'),
True)])))
def test_eq_stmt():
ev1 = Evidence(text='1')
ev2 = Evidence(text='2')
assert(Phosphorylation(Agent('a'), Agent('b'), evidence=[ev1]).equals(
Phosphorylation(Agent('a'), Agent('b'), evidence=[ev1])))
assert(not Phosphorylation(Agent('a'), Agent('b'), evidence=[ev1]).equals(
Phosphorylation(Agent('a'), Agent('b'), evidence=[ev2])))
assert(not Phosphorylation(Agent('a'), Agent('b'), evidence=[ev1]).equals(
Phosphorylation(Agent('a'), Agent('c'), evidence=[ev2])))
assert(not Phosphorylation(Agent('a'), Agent('b'), evidence=[ev1]).equals(
Phosphorylation(Agent('a'), Agent('b'), 'S', evidence=[ev2])))
assert(Complex([Agent('a'), Agent('b')], evidence=[ev1]).equals(
Complex([Agent('a'), Agent('b')], evidence=[ev1])))
assert(not Complex([Agent('a'), Agent('b')], evidence=[ev1]).equals(
Complex([Agent('a'), Agent('b')], evidence=[ev2])))
assert(Activation(Agent('a'), Agent('b'), evidence=[ev1]).equals(
Activation(Agent('a'), Agent('b'), evidence=[ev1])))
assert(not Activation(Agent('a'), Agent('b'), evidence=[ev1]).equals(
Activation(Agent('a'), Agent('c'), evidence=[ev1])))
assert(not Activation(Agent('a'), Agent('b'), evidence=[ev1]).equals(
Activation(Agent('a'), Agent('b'), 'kinase', evidence=[ev1])))
assert(not Activation(Agent('a'), Agent('b'), evidence=[ev1]).equals(
Activation(Agent('a'), Agent('b'), evidence=[ev2])))
def test_serialize():
ev1 = Evidence(text='1\U0001F4A9')
st = Phosphorylation(Agent('a\U0001F4A9'), Agent('b'), evidence=[ev1])
jstr = st.to_json()
st2 = Phosphorylation.from_json(jstr)
assert(st.equals(st2))
assert unicode_strs((ev1, st, st2))
def test_serialize_errors():
st = Phosphorylation(Agent('a\U0001F4A9'), Agent('b\U0001F4A9'))
jstr = st.to_json()
st2 = Complex.from_json(jstr)
assert(st2 is None)
st3 = Phosphorylation.from_json('{}')
assert(st3 is None)
st4 = Phosphorylation.from_json('xyz' + jstr)
assert(st4 is None)
assert unicode_strs((st, st2, st3, st4))
def test_location_refinement():
a1 = Agent('a', location='plasma membrane')
a2 = Agent('a', location='cell')
a3 = Agent('a', location='cytoplasm')
a4 = Agent('a')
a5 = Agent('a')
assert(a1.refinement_of(a2, hierarchies))
assert(not a2.refinement_of(a3, hierarchies))
assert(a4.refinement_of(a5, hierarchies))
assert(not a1.refinement_of(a3, hierarchies))
assert(not a3.refinement_of(a1, hierarchies))
assert(a2.refinement_of(a4, hierarchies))
assert(a3.refinement_of(a4, hierarchies))
def test_activity_refinement():
a1 = Agent('a', activity=ActivityCondition('kinase', True))
a2 = Agent('a', activity=ActivityCondition('activity', True))
a3 = Agent('a', activity=ActivityCondition('catalytic', True))
a4 = Agent('a')
a5 = Agent('a', activity=ActivityCondition('catalytic', False))
a6 = Agent('a', activity=ActivityCondition('kinase', False))
assert(a1.refinement_of(a2, hierarchies))
assert(not a2.refinement_of(a3, hierarchies))
assert(not a4.refinement_of(a1, hierarchies))
assert(a1.refinement_of(a3, hierarchies))
assert(a3.refinement_of(a2, hierarchies))
assert(not a3.refinement_of(a1, hierarchies))
assert(a1.refinement_of(a4, hierarchies))
assert(a2.refinement_of(a4, hierarchies))
assert(a5.refinement_of(a4, hierarchies))
assert(not a5.refinement_of(a3, hierarchies))
assert(not a5.refinement_of(a1, hierarchies))
assert(a6.refinement_of(a5, hierarchies))
assert(not a5.refinement_of(a6, hierarchies))
def test_translocation_refinement():
st1 = Translocation(Agent('a'), 'plasma membrane', 'cytoplasm')
st2 = Translocation(Agent('a'), 'plasma membrane', None)
st3 = Translocation(Agent('a'), None, 'cytoplasm')
st4 = Translocation(Agent('a'), 'cell', 'cytoplasm')
st5 = Translocation(Agent('a'), 'cell', 'cell')
st6 = Translocation(Agent('a'), 'plasma membrane', 'cell')
st7 = Translocation(Agent('a'), 'nucleus', 'cytoplasm')
st8 = Translocation(Agent('a'), None, 'cell')
st9 = Translocation(Agent('a'), None, None)
assert(st3.refinement_of(st8, hierarchies))
assert(st1.refinement_of(st2, hierarchies))
assert(st1.refinement_of(st3, hierarchies))
assert(not st2.refinement_of(st3, hierarchies))
assert(st1.refinement_of(st4, hierarchies))
assert(not st2.refinement_of(st4, hierarchies))
assert(st4.refinement_of(st5, hierarchies))
assert(st6.refinement_of(st5, hierarchies))
assert(not st1.refinement_of(st7, hierarchies))
assert(st7.refinement_of(st4, hierarchies))
assert(st8.refinement_of(st9, hierarchies))
assert(st7.refinement_of(st9, hierarchies))
def test_decrease_amt_refinement():
raf = Agent('RAF', db_refs={'BE':'RAF'})
braf = Agent('BRAF', db_refs={'HGNC':'1097'})
brafk = Agent('BRAF', activity=ActivityCondition('kinase', True),
db_refs={'HGNC':'1097'})
raf1 = Agent('RAF1', db_refs={'HGNC':'9829'})
mek = Agent('MEK', db_refs={'BE':'MEK'})
mek1 = Agent('MAP2K1', db_refs={'HGNC':'6840'})
st1 = DecreaseAmount(raf, mek)
st2 = DecreaseAmount(braf, mek)
st3 = DecreaseAmount(raf, mek1)
st4 = DecreaseAmount(brafk, mek1)
assert unicode_strs((st1, st2, st3, st4))
# st1
assert st2.refinement_of(st1, hierarchies)
assert st3.refinement_of(st1, hierarchies)
assert st4.refinement_of(st1, hierarchies)
# st2
assert not st1.refinement_of(st2, hierarchies)
assert not st3.refinement_of(st2, hierarchies)
assert st4.refinement_of(st2, hierarchies)
# st3
assert not st1.refinement_of(st3, hierarchies)
assert not st2.refinement_of(st3, hierarchies)
assert st4.refinement_of(st3, hierarchies)
# st4
assert not st1.refinement_of(st4, hierarchies)
assert not st2.refinement_of(st4, hierarchies)
assert not st3.refinement_of(st4, hierarchies)
def test_increase_amt_refinement():
raf = Agent('RAF', db_refs={'BE':'RAF'})
braf = Agent('BRAF', db_refs={'HGNC':'1097'})
brafk = Agent('BRAF', activity=ActivityCondition('kinase', True),
db_refs={'HGNC':'1097'})
raf1 = Agent('RAF1', db_refs={'HGNC':'9829'})
mek = Agent('MEK', db_refs={'BE':'MEK'})
mek1 = Agent('MAP2K1', db_refs={'HGNC':'6840'})
st1 = IncreaseAmount(raf, mek)
st2 = IncreaseAmount(braf, mek)
st3 = IncreaseAmount(raf, mek1)
st4 = IncreaseAmount(brafk, mek1)
assert unicode_strs((st1, st2, st3, st4))
# st1
assert st2.refinement_of(st1, hierarchies)
assert st3.refinement_of(st1, hierarchies)
assert st4.refinement_of(st1, hierarchies)
# st2
assert not st1.refinement_of(st2, hierarchies)
assert not st3.refinement_of(st2, hierarchies)
assert st4.refinement_of(st2, hierarchies)
# st3
assert not st1.refinement_of(st3, hierarchies)
assert not st2.refinement_of(st3, hierarchies)
assert st4.refinement_of(st3, hierarchies)
# st4
assert not st1.refinement_of(st4, hierarchies)
assert not st2.refinement_of(st4, hierarchies)
assert not st3.refinement_of(st4, hierarchies)
def test_complex_refinement_order():
st1 = Complex([Agent('MED23'), Agent('ELK1')])
st2 = Complex([Agent('ELK1', mods=[ModCondition('phosphorylation')]),
Agent('MED23')])
assert(st2.refinement_of(st1, hierarchies))
assert(not st1.refinement_of(st2, hierarchies))
def test_homodimer_bound_to():
KRAS = Agent('KRAS')
HRAS = Agent('HRAS')
NRAS = Agent('NRAS')
BRAFK = Agent('BRAF', bound_conditions=[BoundCondition(KRAS, True)])
BRAFH = Agent('BRAF', bound_conditions=[BoundCondition(HRAS, True)])
BRAFN = Agent('BRAF', bound_conditions=[BoundCondition(NRAS, True)])
st1 = Complex([BRAFK, BRAFN])
st2 = Complex([BRAFN, BRAFK])
st3 = Complex([BRAFK, BRAFH])
assert st1.matches(st2)
assert st2.matches(st1)
assert not st1.matches(st3)
assert not st3.matches(st2)
def test_mod_condition_is_mod():
mc1 = ModCondition('ubiquitination', 'K', '99', True)
mc2 = ModCondition('ubiquitination', 'K', '99', False)
assert not mc1.refinement_of(mc2, hierarchies)
def test_unicode_str_methods():
ag = Agent('MAPK1\U0001F4A9')
print(ag)
ev = Evidence(text='foo \U0001F4A9 bar')
print(ev)
print(repr(ev))
st = Phosphorylation(ag, ag, evidence=ev)
print(st)
print(repr(st))
st1 = Autophosphorylation(ag, evidence=ev)
print(st1)
print(repr(st1))
st = Activation(ag, ag, 'activity', evidence=ev)
print(st)
print(repr(st))
st = Inhibition(ag, ag, 'activity', evidence=ev)
print(st)
print(repr(st))
st = ActiveForm(ag, 'activity', True)
print(st)
print(repr(st))
st = HasActivity(ag, 'activity', True)
print(st)
print(repr(st))
st = RasGef(ag, ag, evidence=ev)
print(st)
print(repr(st))
st = RasGap(ag, ag, evidence=ev)
print(st)
print(repr(st))
st = Complex([ag, ag], evidence=ev)
print(st)
print(repr(st))
|
mixja/eap-sim-lab
|
refs/heads/master
|
lib/pyserial-2.7/build/lib/serial/serialutil.py
|
143
|
#! python
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# see __init__.py
#
# (C) 2001-2010 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
# compatibility for older Python < 2.6
try:
bytes
bytearray
except (NameError, AttributeError):
    # Python versions older than 2.6 do not have these types. As in Python
    # 2.6, they should behave like str. For Python versions older than 3.0 we
    # want to work with strings anyway; only later versions have a true bytes
    # type.
bytes = str
# bytearray is a mutable type that is easily turned into an instance of
# bytes
class bytearray(list):
# for bytes(bytearray()) usage
def __str__(self): return ''.join(self)
def __repr__(self): return 'bytearray(%r)' % ''.join(self)
# append automatically converts integers to characters
def append(self, item):
if isinstance(item, str):
list.append(self, item)
else:
list.append(self, chr(item))
# +=
def __iadd__(self, other):
for byte in other:
self.append(byte)
return self
def __getslice__(self, i, j):
return bytearray(list.__getslice__(self, i, j))
def __getitem__(self, item):
if isinstance(item, slice):
return bytearray(list.__getitem__(self, item))
else:
return ord(list.__getitem__(self, item))
def __eq__(self, other):
if isinstance(other, basestring):
other = bytearray(other)
return list.__eq__(self, other)
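# Illustrative sketch (not part of the original source): under this shim a
# bytearray behaves like a list of single-character strings, e.g.
#
#     b = bytearray()
#     b.append(65)        # integers are converted via chr()
#     b.append('B')       # strings are appended as-is
#     str(b)              # -> 'AB'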
# ``memoryview`` was introduced in Python 2.7 and ``bytes(some_memoryview)``
# isn't returning the contents (very unfortunate). Therefore we need special
# cases and test for it. Ensure that there is a ``memoryview`` object for older
# Python versions. This is easier than making every test dependent on its
# existence.
try:
memoryview
except (NameError, AttributeError):
    # implementation does not matter as we do not really use it.
    # It just must not inherit from something else we might care for.
class memoryview:
pass
# all Python versions prior to 3.x convert ``str([17])`` to '[17]' instead of
# '\x11', so a simple ``bytes(sequence)`` doesn't work for all versions
def to_bytes(seq):
"""convert a sequence to a bytes type"""
if isinstance(seq, bytes):
return seq
elif isinstance(seq, bytearray):
return bytes(seq)
elif isinstance(seq, memoryview):
return seq.tobytes()
else:
b = bytearray()
for item in seq:
b.append(item) # this one handles int and str for our emulation and ints for Python 3.x
return bytes(b)
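# Illustrative sketch (not part of the original source): to_bytes normalizes
# the common input types; on a Python 2 runtime (where bytes is str):
#
#     to_bytes('AT')              # -> 'AT' (bytes/str passed through)
#     to_bytes([0x41, 0x54])      # -> 'AT' (integers converted one by one)
#     to_bytes(bytearray('AT'))   # -> 'AT'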
# create control bytes
XON = to_bytes([17])
XOFF = to_bytes([19])
CR = to_bytes([13])
LF = to_bytes([10])
PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, PARITY_SPACE = 'N', 'E', 'O', 'M', 'S'
STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO = (1, 1.5, 2)
FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS = (5, 6, 7, 8)
PARITY_NAMES = {
PARITY_NONE: 'None',
PARITY_EVEN: 'Even',
PARITY_ODD: 'Odd',
PARITY_MARK: 'Mark',
PARITY_SPACE: 'Space',
}
class SerialException(IOError):
"""Base class for serial port related exceptions."""
class SerialTimeoutException(SerialException):
"""Write timeouts give an exception"""
writeTimeoutError = SerialTimeoutException('Write timeout')
portNotOpenError = SerialException('Attempting to use a port that is not open')
class FileLike(object):
"""An abstract file like class.
    This class implements readline and readlines based on read, and
    writelines based on write.
    This class is used to provide the above functions for Serial port
    objects.
    Note that when the serial port was opened with _NO_ timeout, readline
    blocks until it sees a newline (or until the specified size is reached),
    and readlines would never return; it therefore refuses to work and
    raises an exception in this case!
"""
def __init__(self):
self.closed = True
def close(self):
self.closed = True
# so that ports are closed when objects are discarded
def __del__(self):
"""Destructor. Calls close()."""
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
def writelines(self, sequence):
for line in sequence:
self.write(line)
def flush(self):
"""flush of file like objects"""
pass
# iterator for e.g. "for line in Serial(0): ..." usage
def next(self):
line = self.readline()
if not line: raise StopIteration
return line
def __iter__(self):
return self
def readline(self, size=None, eol=LF):
"""read a line which is terminated with end-of-line (eol) character
('\n' by default) or until timeout."""
leneol = len(eol)
line = bytearray()
while True:
c = self.read(1)
if c:
line += c
if line[-leneol:] == eol:
break
if size is not None and len(line) >= size:
break
else:
break
return bytes(line)
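    # Usage sketch (illustrative, not part of the original source): with a
    # read timeout configured, readline() can return a partial line, so
    # callers may want to check for the eol suffix:
    #
    #     line = ser.readline()        # 'ser' is an opened Serial object
    #     if not line.endswith(LF):
    #         pass                     # timed out before a full line arrived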
def readlines(self, sizehint=None, eol=LF):
"""read a list of lines, until timeout.
sizehint is ignored."""
if self.timeout is None:
raise ValueError("Serial port MUST have enabled timeout for this function!")
leneol = len(eol)
lines = []
while True:
line = self.readline(eol=eol)
if line:
lines.append(line)
if line[-leneol:] != eol: # was the line received with a timeout?
break
else:
break
return lines
def xreadlines(self, sizehint=None):
"""Read lines, implemented as generator. It will raise StopIteration on
timeout (empty read). sizehint is ignored."""
while True:
line = self.readline()
if not line: break
yield line
# other functions of file-likes - not used by pySerial
#~ readinto(b)
def seek(self, pos, whence=0):
raise IOError("file is not seekable")
def tell(self):
raise IOError("file is not seekable")
def truncate(self, n=None):
raise IOError("file is not seekable")
def isatty(self):
return False
class SerialBase(object):
"""Serial port base class. Provides __init__ function and properties to
get/set port settings."""
# default values, may be overridden in subclasses that do not support all values
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200, 230400, 460800, 500000,
576000, 921600, 1000000, 1152000, 1500000, 2000000, 2500000,
3000000, 3500000, 4000000)
BYTESIZES = (FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS)
PARITIES = (PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, PARITY_SPACE)
STOPBITS = (STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO)
def __init__(self,
port = None, # number of device, numbering starts at
# zero. if everything fails, the user
# can specify a device string, note
# that this isn't portable anymore
# port will be opened if one is specified
baudrate=9600, # baud rate
bytesize=EIGHTBITS, # number of data bits
parity=PARITY_NONE, # enable parity checking
stopbits=STOPBITS_ONE, # number of stop bits
timeout=None, # set a timeout value, None to wait forever
xonxoff=False, # enable software flow control
rtscts=False, # enable RTS/CTS flow control
writeTimeout=None, # set a timeout for writes
dsrdtr=False, # None: use rtscts setting, dsrdtr override if True or False
interCharTimeout=None # Inter-character timeout, None to disable
):
"""Initialize comm port object. If a port is given, then the port will be
opened immediately. Otherwise a Serial port object in closed state
is returned."""
self._isOpen = False
self._port = None # correct value is assigned below through properties
self._baudrate = None # correct value is assigned below through properties
self._bytesize = None # correct value is assigned below through properties
self._parity = None # correct value is assigned below through properties
self._stopbits = None # correct value is assigned below through properties
self._timeout = None # correct value is assigned below through properties
self._writeTimeout = None # correct value is assigned below through properties
self._xonxoff = None # correct value is assigned below through properties
self._rtscts = None # correct value is assigned below through properties
self._dsrdtr = None # correct value is assigned below through properties
self._interCharTimeout = None # correct value is assigned below through properties
# assign values using get/set methods using the properties feature
self.port = port
self.baudrate = baudrate
self.bytesize = bytesize
self.parity = parity
self.stopbits = stopbits
self.timeout = timeout
self.writeTimeout = writeTimeout
self.xonxoff = xonxoff
self.rtscts = rtscts
self.dsrdtr = dsrdtr
self.interCharTimeout = interCharTimeout
if port is not None:
self.open()
def isOpen(self):
"""Check if the port is opened."""
return self._isOpen
# - - - - - - - - - - - - - - - - - - - - - - - -
    # TODO: these are not really needed as there are the BAUDRATES etc.
    # attributes... maybe remove them before the final release...
def getSupportedBaudrates(self):
return [(str(b), b) for b in self.BAUDRATES]
def getSupportedByteSizes(self):
return [(str(b), b) for b in self.BYTESIZES]
def getSupportedStopbits(self):
return [(str(b), b) for b in self.STOPBITS]
def getSupportedParities(self):
return [(PARITY_NAMES[b], b) for b in self.PARITIES]
# - - - - - - - - - - - - - - - - - - - - - - - -
def setPort(self, port):
"""Change the port. The attribute portstr is set to a string that
contains the name of the port."""
was_open = self._isOpen
if was_open: self.close()
if port is not None:
if isinstance(port, basestring):
self.portstr = port
else:
self.portstr = self.makeDeviceName(port)
else:
self.portstr = None
self._port = port
self.name = self.portstr
if was_open: self.open()
def getPort(self):
"""Get the current port setting. The value that was passed on init or using
setPort() is passed back. See also the attribute portstr which contains
the name of the port as a string."""
return self._port
port = property(getPort, setPort, doc="Port setting")
def setBaudrate(self, baudrate):
"""Change baud rate. It raises a ValueError if the port is open and the
baud rate is not possible. If the port is closed, then the value is
accepted and the exception is raised when the port is opened."""
try:
b = int(baudrate)
        except (TypeError, ValueError):
raise ValueError("Not a valid baudrate: %r" % (baudrate,))
else:
if b <= 0:
raise ValueError("Not a valid baudrate: %r" % (baudrate,))
self._baudrate = b
if self._isOpen: self._reconfigurePort()
def getBaudrate(self):
"""Get the current baud rate setting."""
return self._baudrate
baudrate = property(getBaudrate, setBaudrate, doc="Baud rate setting")
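    # Usage sketch (illustrative, not part of the original source): because
    # baudrate is a property, assigning to it goes through setBaudrate, which
    # validates the value and reconfigures the port if it is open:
    #
    #     ser.baudrate = 115200        # 'ser' is a Serial instance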
def setByteSize(self, bytesize):
"""Change byte size."""
if bytesize not in self.BYTESIZES: raise ValueError("Not a valid byte size: %r" % (bytesize,))
self._bytesize = bytesize
if self._isOpen: self._reconfigurePort()
def getByteSize(self):
"""Get the current byte size setting."""
return self._bytesize
bytesize = property(getByteSize, setByteSize, doc="Byte size setting")
def setParity(self, parity):
"""Change parity setting."""
if parity not in self.PARITIES: raise ValueError("Not a valid parity: %r" % (parity,))
self._parity = parity
if self._isOpen: self._reconfigurePort()
def getParity(self):
"""Get the current parity setting."""
return self._parity
parity = property(getParity, setParity, doc="Parity setting")
def setStopbits(self, stopbits):
"""Change stop bits size."""
if stopbits not in self.STOPBITS: raise ValueError("Not a valid stop bit size: %r" % (stopbits,))
self._stopbits = stopbits
if self._isOpen: self._reconfigurePort()
def getStopbits(self):
"""Get the current stop bits setting."""
return self._stopbits
stopbits = property(getStopbits, setStopbits, doc="Stop bits setting")
def setTimeout(self, timeout):
"""Change timeout setting."""
if timeout is not None:
try:
timeout + 1 # test if it's a number, will throw a TypeError if not...
except TypeError:
raise ValueError("Not a valid timeout: %r" % (timeout,))
if timeout < 0: raise ValueError("Not a valid timeout: %r" % (timeout,))
self._timeout = timeout
if self._isOpen: self._reconfigurePort()
def getTimeout(self):
"""Get the current timeout setting."""
return self._timeout
timeout = property(getTimeout, setTimeout, doc="Timeout setting for read()")
def setWriteTimeout(self, timeout):
"""Change timeout setting."""
if timeout is not None:
if timeout < 0: raise ValueError("Not a valid timeout: %r" % (timeout,))
try:
                timeout + 1  # test if it's a number, will throw a TypeError if not...
except TypeError:
raise ValueError("Not a valid timeout: %r" % timeout)
self._writeTimeout = timeout
if self._isOpen: self._reconfigurePort()
def getWriteTimeout(self):
"""Get the current timeout setting."""
return self._writeTimeout
writeTimeout = property(getWriteTimeout, setWriteTimeout, doc="Timeout setting for write()")
def setXonXoff(self, xonxoff):
"""Change XON/XOFF setting."""
self._xonxoff = xonxoff
if self._isOpen: self._reconfigurePort()
def getXonXoff(self):
"""Get the current XON/XOFF setting."""
return self._xonxoff
xonxoff = property(getXonXoff, setXonXoff, doc="XON/XOFF setting")
def setRtsCts(self, rtscts):
"""Change RTS/CTS flow control setting."""
self._rtscts = rtscts
if self._isOpen: self._reconfigurePort()
def getRtsCts(self):
"""Get the current RTS/CTS flow control setting."""
return self._rtscts
rtscts = property(getRtsCts, setRtsCts, doc="RTS/CTS flow control setting")
def setDsrDtr(self, dsrdtr=None):
"""Change DsrDtr flow control setting."""
if dsrdtr is None:
# if not set, keep backwards compatibility and follow rtscts setting
self._dsrdtr = self._rtscts
else:
# if defined independently, follow its value
self._dsrdtr = dsrdtr
if self._isOpen: self._reconfigurePort()
def getDsrDtr(self):
"""Get the current DSR/DTR flow control setting."""
return self._dsrdtr
dsrdtr = property(getDsrDtr, setDsrDtr, "DSR/DTR flow control setting")
def setInterCharTimeout(self, interCharTimeout):
"""Change inter-character timeout setting."""
if interCharTimeout is not None:
if interCharTimeout < 0: raise ValueError("Not a valid timeout: %r" % interCharTimeout)
try:
interCharTimeout + 1 # test if it's a number, will throw a TypeError if not...
except TypeError:
raise ValueError("Not a valid timeout: %r" % interCharTimeout)
self._interCharTimeout = interCharTimeout
if self._isOpen: self._reconfigurePort()
def getInterCharTimeout(self):
"""Get the current inter-character timeout setting."""
return self._interCharTimeout
interCharTimeout = property(getInterCharTimeout, setInterCharTimeout, doc="Inter-character timeout setting for read()")
# - - - - - - - - - - - - - - - - - - - - - - - -
_SETTINGS = ('baudrate', 'bytesize', 'parity', 'stopbits', 'xonxoff',
'dsrdtr', 'rtscts', 'timeout', 'writeTimeout', 'interCharTimeout')
def getSettingsDict(self):
"""Get current port settings as a dictionary. For use with
applySettingsDict"""
return dict([(key, getattr(self, '_'+key)) for key in self._SETTINGS])
def applySettingsDict(self, d):
"""apply stored settings from a dictionary returned from
getSettingsDict. it's allowed to delete keys from the dictionary. these
values will simply left unchanged."""
for key in self._SETTINGS:
if d[key] != getattr(self, '_'+key): # check against internal "_" value
setattr(self, key, d[key]) # set non "_" value to use properties write function
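    # Usage sketch (illustrative, not part of the original source): the two
    # methods above allow a port configuration to be captured, persisted and
    # re-applied:
    #
    #     settings = ser.getSettingsDict()    # 'ser' is a Serial instance
    #     settings['baudrate'] = 115200
    #     ser.applySettingsDict(settings)     # only changed keys are re-set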
# - - - - - - - - - - - - - - - - - - - - - - - -
def __repr__(self):
"""String representation of the current port settings and its state."""
return "%s<id=0x%x, open=%s>(port=%r, baudrate=%r, bytesize=%r, parity=%r, stopbits=%r, timeout=%r, xonxoff=%r, rtscts=%r, dsrdtr=%r)" % (
self.__class__.__name__,
id(self),
self._isOpen,
self.portstr,
self.baudrate,
self.bytesize,
self.parity,
self.stopbits,
self.timeout,
self.xonxoff,
self.rtscts,
self.dsrdtr,
)
# - - - - - - - - - - - - - - - - - - - - - - - -
# compatibility with io library
def readable(self): return True
def writable(self): return True
def seekable(self): return False
def readinto(self, b):
data = self.read(len(b))
n = len(data)
try:
b[:n] = data
except TypeError, err:
import array
if not isinstance(b, array.array):
raise err
b[:n] = array.array('b', data)
return n
if __name__ == '__main__':
import sys
s = SerialBase()
sys.stdout.write('port name: %s\n' % s.portstr)
sys.stdout.write('baud rates: %s\n' % s.getSupportedBaudrates())
sys.stdout.write('byte sizes: %s\n' % s.getSupportedByteSizes())
sys.stdout.write('parities: %s\n' % s.getSupportedParities())
sys.stdout.write('stop bits: %s\n' % s.getSupportedStopbits())
sys.stdout.write('%s\n' % s)
|
hilaskis/UAV_MissionPlanner
|
refs/heads/master
|
Lib/email/utils.py
|
114
|
# Copyright (C) 2001-2010 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Miscellaneous utilities."""
__all__ = [
'collapse_rfc2231_value',
'decode_params',
'decode_rfc2231',
'encode_rfc2231',
'formataddr',
'formatdate',
'getaddresses',
'make_msgid',
'mktime_tz',
'parseaddr',
'parsedate',
'parsedate_tz',
'unquote',
]
import os
import re
import time
import base64
import random
import socket
import urllib
import warnings
from email._parseaddr import quote
from email._parseaddr import AddressList as _AddressList
from email._parseaddr import mktime_tz
# We need workarounds for bugs in these methods in older Pythons (see below)
from email._parseaddr import parsedate as _parsedate
from email._parseaddr import parsedate_tz as _parsedate_tz
from quopri import decodestring as _qdecode
# Intrapackage imports
from email.encoders import _bencode, _qencode
COMMASPACE = ', '
EMPTYSTRING = ''
UEMPTYSTRING = u''
CRLF = '\r\n'
TICK = "'"
specialsre = re.compile(r'[][\\()<>@,:;".]')
escapesre = re.compile(r'[][\\()"]')
# Helpers
def _identity(s):
return s
def _bdecode(s):
"""Decodes a base64 string.
This function is equivalent to base64.decodestring and it's retained only
for backward compatibility. It used to remove the last \n of the decoded
string, if it had any (see issue 7143).
"""
if not s:
return s
return base64.decodestring(s)
def fix_eols(s):
"""Replace all line-ending characters with \r\n."""
# Fix newlines with no preceding carriage return
s = re.sub(r'(?<!\r)\n', CRLF, s)
# Fix carriage returns with no following newline
s = re.sub(r'\r(?!\n)', CRLF, s)
return s
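# Illustrative examples (not part of the original module): fix_eols
# normalizes bare CR and bare LF without touching existing CRLF pairs:
#
#     fix_eols('a\nb')      # -> 'a\r\nb'
#     fix_eols('a\rb')      # -> 'a\r\nb'
#     fix_eols('a\r\nb')    # -> 'a\r\nb' (unchanged)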
def formataddr(pair):
"""The inverse of parseaddr(), this takes a 2-tuple of the form
(realname, email_address) and returns the string value suitable
for an RFC 2822 From, To or Cc header.
If the first element of pair is false, then the second element is
returned unmodified.
"""
name, address = pair
if name:
quotes = ''
if specialsre.search(name):
quotes = '"'
name = escapesre.sub(r'\\\g<0>', name)
return '%s%s%s <%s>' % (quotes, name, quotes, address)
return address
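# Illustrative examples (not part of the original module):
#
#     formataddr(('Jane Doe', 'jane@example.com'))
#     # -> 'Jane Doe <jane@example.com>'
#     formataddr(('Doe, Jane', 'jane@example.com'))
#     # -> '"Doe, Jane" <jane@example.com>' (specials trigger quoting)
#     formataddr(('', 'jane@example.com'))
#     # -> 'jane@example.com' (empty realname returns the address alone)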
def getaddresses(fieldvalues):
"""Return a list of (REALNAME, EMAIL) for each fieldvalue."""
all = COMMASPACE.join(fieldvalues)
a = _AddressList(all)
return a.addresslist
ecre = re.compile(r'''
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<atom>.*?) # non-greedy up to the next ?= is the atom
\?= # literal ?=
''', re.VERBOSE | re.IGNORECASE)
def formatdate(timeval=None, localtime=False, usegmt=False):
"""Returns a date string as specified by RFC 2822, e.g.:
Fri, 09 Nov 2001 01:08:47 -0000
Optional timeval if given is a floating point time value as accepted by
gmtime() and localtime(), otherwise the current time is used.
Optional localtime is a flag that when True, interprets timeval, and
returns a date relative to the local timezone instead of UTC, properly
taking daylight savings time into account.
Optional argument usegmt means that the timezone is written out as
    an ascii string, not a numeric one (so "GMT" instead of "+0000"). This
is needed for HTTP, and is only used when localtime==False.
"""
# Note: we cannot use strftime() because that honors the locale and RFC
# 2822 requires that day and month names be the English abbreviations.
if timeval is None:
timeval = time.time()
if localtime:
now = time.localtime(timeval)
# Calculate timezone offset, based on whether the local zone has
# daylight savings time, and whether DST is in effect.
if time.daylight and now[-1]:
offset = time.altzone
else:
offset = time.timezone
        hours, remainder = divmod(abs(offset), 3600)
        # Remember offset is in seconds west of UTC, but the timezone is in
        # minutes east of UTC, so the signs differ. The remainder from divmod
        # is in seconds and is converted to minutes below.
        if offset > 0:
            sign = '-'
        else:
            sign = '+'
        zone = '%s%02d%02d' % (sign, hours, remainder // 60)
else:
now = time.gmtime(timeval)
# Timezone offset is always -0000
if usegmt:
zone = 'GMT'
else:
zone = '-0000'
return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][now[6]],
now[2],
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][now[1] - 1],
now[0], now[3], now[4], now[5],
zone)
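# Illustrative examples (not part of the original module):
#
#     formatdate(0)                 # -> 'Thu, 01 Jan 1970 00:00:00 -0000'
#     formatdate(0, usegmt=True)    # -> 'Thu, 01 Jan 1970 00:00:00 GMT'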
def make_msgid(idstring=None):
"""Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
<20020201195627.33539.96671@nightshade.la.mastaler.com>
Optional idstring if given is a string used to strengthen the
uniqueness of the message id.
"""
timeval = time.time()
utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
pid = os.getpid()
randint = random.randrange(100000)
if idstring is None:
idstring = ''
else:
idstring = '.' + idstring
idhost = socket.getfqdn()
msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost)
return msgid
# These functions are in the standalone mimelib version only because they've
# subsequently been fixed in the latest Python versions. We use this to work
# around broken older Pythons.
def parsedate(data):
if not data:
return None
return _parsedate(data)
def parsedate_tz(data):
if not data:
return None
return _parsedate_tz(data)
def parseaddr(addr):
addrs = _AddressList(addr).addresslist
if not addrs:
return '', ''
return addrs[0]
# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3.
def unquote(str):
"""Remove quotes from a string."""
if len(str) > 1:
if str.startswith('"') and str.endswith('"'):
return str[1:-1].replace('\\\\', '\\').replace('\\"', '"')
if str.startswith('<') and str.endswith('>'):
return str[1:-1]
return str
# RFC2231-related functions - parameter encoding and decoding
def decode_rfc2231(s):
"""Decode string according to RFC 2231"""
parts = s.split(TICK, 2)
if len(parts) <= 2:
return None, None, s
return parts
def encode_rfc2231(s, charset=None, language=None):
"""Encode string according to RFC 2231.
If neither charset nor language is given, then s is returned as-is. If
charset is given but not language, the string is encoded using the empty
string for language.
"""
    s = urllib.quote(s, safe='')
if charset is None and language is None:
return s
if language is None:
language = ''
return "%s'%s'%s" % (charset, language, s)
rfc2231_continuation = re.compile(r'^(?P<name>\w+)\*((?P<num>[0-9]+)\*?)?$')
def decode_params(params):
"""Decode parameters list according to RFC 2231.
params is a sequence of 2-tuples containing (param name, string value).
"""
# Copy params so we don't mess with the original
params = params[:]
new_params = []
# Map parameter's name to a list of continuations. The values are a
# 3-tuple of the continuation number, the string value, and a flag
# specifying whether a particular segment is %-encoded.
rfc2231_params = {}
name, value = params.pop(0)
new_params.append((name, value))
while params:
name, value = params.pop(0)
if name.endswith('*'):
encoded = True
else:
encoded = False
value = unquote(value)
mo = rfc2231_continuation.match(name)
if mo:
name, num = mo.group('name', 'num')
if num is not None:
num = int(num)
rfc2231_params.setdefault(name, []).append((num, value, encoded))
else:
new_params.append((name, '"%s"' % quote(value)))
if rfc2231_params:
for name, continuations in rfc2231_params.items():
value = []
extended = False
# Sort by number
continuations.sort()
# And now append all values in numerical order, converting
# %-encodings for the encoded segments. If any of the
# continuation names ends in a *, then the entire string, after
# decoding segments and concatenating, must have the charset and
# language specifiers at the beginning of the string.
for num, s, encoded in continuations:
if encoded:
s = urllib.unquote(s)
extended = True
value.append(s)
value = quote(EMPTYSTRING.join(value))
if extended:
charset, language, value = decode_rfc2231(value)
new_params.append((name, (charset, language, '"%s"' % value)))
else:
new_params.append((name, '"%s"' % value))
return new_params
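# Illustrative sketch (not part of the original module): continuation
# segments such as
#
#     [('content-disposition', 'attachment'),
#      ('filename*0*', "us-ascii'en'This%20is"),
#      ('filename*1', '" not encoded"')]
#
# are reassembled by decode_params into a single 'filename' entry whose value
# is the (charset, language, quoted-value) triple consumed by
# collapse_rfc2231_value below.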
def collapse_rfc2231_value(value, errors='replace',
fallback_charset='us-ascii'):
if isinstance(value, tuple):
rawval = unquote(value[2])
charset = value[0] or 'us-ascii'
try:
return unicode(rawval, charset, errors)
except LookupError:
# XXX charset is unknown to Python.
return unicode(rawval, fallback_charset, errors)
else:
return unquote(value)
|