code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2016, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_routedomain
short_description: Manage route domains on a BIG-IP
description:
- Manage route domains on a BIG-IP.
version_added: 2.2
options:
name:
description:
- The name of the route domain.
version_added: 2.5
bwc_policy:
description:
- The bandwidth controller for the route domain.
connection_limit:
description:
- The maximum number of concurrent connections allowed for the
route domain. Setting this to C(0) turns off connection limits.
description:
description:
- Specifies descriptive text that identifies the route domain.
flow_eviction_policy:
description:
- The eviction policy to use with this route domain. Apply an eviction
policy to provide customized responses to flow overflows and slow
flows on the route domain.
id:
description:
- The unique identifying integer representing the route domain.
- This field is required when creating a new route domain.
- In version 2.5, this value is no longer used to reference a route domain when
making modifications to it (for instance during update and delete operations).
Instead, the C(name) parameter is used. In version 2.6, the C(name) value will
become a required parameter.
parent:
description:
- Specifies the route domain the system searches when it cannot
find a route in the configured domain.
partition:
description:
- Partition to create the route domain on. Partitions cannot be updated
once they are created.
default: Common
version_added: 2.5
routing_protocol:
description:
- Dynamic routing protocols for the system to use in the route domain.
choices:
- none
- BFD
- BGP
- IS-IS
- OSPFv2
- OSPFv3
- PIM
- RIP
- RIPng
service_policy:
description:
- Service policy to associate with the route domain.
state:
description:
- Whether the route domain should exist or not.
default: present
choices:
- present
- absent
strict:
description:
- Specifies whether the system enforces cross-routing restrictions or not.
type: bool
vlans:
description:
- VLANs for the system to use in the route domain.
fw_enforced_policy:
description:
- Specifies AFM policy to be attached to route domain.
version_added: 2.8
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a route domain
bigip_routedomain:
name: foo
id: 1234
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Set VLANs on the route domain
bigip_routedomain:
name: bar
state: present
vlans:
- net1
- foo
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
id:
description: The ID of the route domain that was changed.
returned: changed
type: int
sample: 2
description:
description: The description of the route domain.
returned: changed
type: string
sample: route domain foo
strict:
description: The new strict isolation setting.
returned: changed
type: string
sample: enabled
parent:
description: The new parent route domain.
returned: changed
type: int
sample: 0
vlans:
description: List of new VLANs the route domain is applied to.
returned: changed
type: list
sample: ['/Common/http-tunnel', '/Common/socks-tunnel']
routing_protocol:
description: List of routing protocols applied to the route domain.
returned: changed
type: list
sample: ['bfd', 'bgp']
bwc_policy:
description: The new bandwidth controller.
returned: changed
type: string
sample: /Common/foo
connection_limit:
description: The new connection limit for the route domain.
returned: changed
type: int
sample: 100
flow_eviction_policy:
description: The new eviction policy to use with this route domain.
returned: changed
type: string
sample: /Common/default-eviction-policy
service_policy:
description: The new service policy to use with this route domain.
returned: changed
type: string
sample: /Common/my-service-policy
fw_enforced_policy:
description: Specifies AFM policy to be attached to route domain.
returned: changed
type: string
sample: /Common/afm-blocking-policy
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.compare import cmp_simple_list
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.compare import cmp_simple_list
class Parameters(AnsibleF5Parameters):
    # Shared parameter definitions for this module. Value storage
    # (``self._values``) and attribute fallback behavior are inherited
    # from AnsibleF5Parameters.
    # Maps REST API (camelCase) attribute names to module (snake_case) names.
    api_map = {
        'connectionLimit': 'connection_limit',
        'servicePolicy': 'service_policy',
        'bwcPolicy': 'bwc_policy',
        'flowEvictionPolicy': 'flow_eviction_policy',
        'routingProtocol': 'routing_protocol',
        'fwEnforcedPolicy': 'fw_enforced_policy',
        'fwEnforcedPolicyReference': 'fw_policy_link',
    }
    # Attributes (API-side naming) that may be sent to the device.
    api_attributes = [
        'connectionLimit',
        'description',
        'strict',
        'parent',
        'servicePolicy',
        'bwcPolicy',
        'flowEvictionPolicy',
        'routingProtocol',
        'vlans',
        'id',
        'fwEnforcedPolicy',
        'fwEnforcedPolicyReference',
    ]
    # Values reported back to the user in the module result.
    returnables = [
        'description',
        'strict',
        'parent',
        'service_policy',
        'bwc_policy',
        'flow_eviction_policy',
        'routing_protocol',
        'vlans',
        'connection_limit',
        'id',
    ]
    # Values compared by Difference to decide whether an update is needed.
    updatables = [
        'description',
        'strict',
        'parent',
        'service_policy',
        'bwc_policy',
        'flow_eviction_policy',
        'routing_protocol',
        'vlans',
        'connection_limit',
        'id',
        'fw_enforced_policy',
        'fw_policy_link',
    ]
    @property
    def connection_limit(self):
        # The device may report the limit as a string; normalize to int.
        if self._values['connection_limit'] is None:
            return None
        return int(self._values['connection_limit'])
    @property
    def id(self):
        # Route domain IDs are integers; normalize string input from the user.
        if self._values['id'] is None:
            return None
        return int(self._values['id'])
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IP REST API."""

    @property
    def strict(self):
        # The device reports 'enabled'/'disabled'; normalize to a boolean.
        if self._values['strict'] is None:
            return None
        return self._values['strict'] == 'enabled'

    @property
    def domains(self):
        """Full paths of every route domain currently on the device."""
        collection = self.read_domains_from_device()
        # iControl REST omits the 'items' key entirely when the collection
        # is empty, so guard against a KeyError on a fresh device.
        return [x['fullPath'] for x in collection.get('items', [])]

    def read_domains_from_device(self):
        """GET the route-domain collection; raise F5ModuleError on failure."""
        uri = "https://{0}:{1}/mgmt/tm/net/route-domain/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response
class ModuleParameters(Parameters):
    # Parameters as supplied by the user in the Ansible task.
    @property
    def bwc_policy(self):
        # Expand a bare policy name to fully-qualified /Partition/name form.
        if self._values['bwc_policy'] is None:
            return None
        return fq_name(self.partition, self._values['bwc_policy'])
    @property
    def flow_eviction_policy(self):
        if self._values['flow_eviction_policy'] is None:
            return None
        return fq_name(self.partition, self._values['flow_eviction_policy'])
    @property
    def service_policy(self):
        if self._values['service_policy'] is None:
            return None
        return fq_name(self.partition, self._values['service_policy'])
    @property
    def parent(self):
        if self._values['parent'] is None:
            return None
        result = fq_name(self.partition, self._values['parent'])
        return result
    @property
    def vlans(self):
        if self._values['vlans'] is None:
            return None
        # A single empty-string element means "remove all VLANs".
        if len(self._values['vlans']) == 1 and self._values['vlans'][0] == '':
            return ''
        return [fq_name(self.partition, x) for x in self._values['vlans']]
    @property
    def name(self):
        # Fall back to the numeric id when no name was given (pre-2.5 tasks
        # identified route domains by id only).
        if self._values['name'] is None:
            return str(self.id)
        return self._values['name']
    @property
    def routing_protocol(self):
        if self._values['routing_protocol'] is None:
            return None
        # A lone '' or 'none' element means "remove all routing protocols".
        if len(self._values['routing_protocol']) == 1 and self._values['routing_protocol'][0] in ['', 'none']:
            return ''
        return self._values['routing_protocol']
    @property
    def fw_enforced_policy(self):
        if self._values['fw_enforced_policy'] is None:
            return None
        # 'none'/'' are sentinel values meaning "no policy".
        if self._values['fw_enforced_policy'] in ['none', '']:
            return None
        name = self._values['fw_enforced_policy']
        return fq_name(self.partition, name)
    @property
    def fw_policy_link(self):
        # Self-link reference dict for the AFM policy, in the shape the
        # REST API expects for fwEnforcedPolicyReference.
        policy = self.fw_enforced_policy
        if policy is None:
            return None
        # fq_name yields '/Partition/name', so after split tmp[1] is the
        # partition and tmp[2] is the policy name.
        tmp = policy.split('/')
        link = dict(link='https://localhost/mgmt/tm/security/firewall/policy/~{0}~{1}'.format(tmp[1], tmp[2]))
        return link
class Changes(Parameters):
    """Base class for the set of values reported back to the user."""

    def to_return(self):
        """Collect all returnable values into a filtered dict."""
        collected = {}
        try:
            for key in self.returnables:
                collected[key] = getattr(self, key)
            collected = self._filter_params(collected)
        except Exception:
            # Best effort: report whatever could be gathered so far.
            pass
        return collected
class UsableChanges(Changes):
    """Changes shaped for sending to the device API."""

    @property
    def strict(self):
        # The API expects the strings 'enabled'/'disabled', not booleans.
        if self._values['strict'] is None:
            return None
        return 'enabled' if self._values['strict'] else 'disabled'
class ReportableChanges(Changes):
    """Changes shaped for presentation in the module result."""

    @property
    def strict(self):
        # Report 'yes'/'no' to the user instead of the API's enabled/disabled.
        if self._values['strict'] is None:
            return None
        return 'yes' if self._values['strict'] == 'enabled' else 'no'
class Difference(object):
    """Computes which attributes differ between desired and current state.

    ``compare`` first looks for a same-named property on this class (used
    for attributes that need custom comparison semantics); otherwise it
    falls back to a plain inequality check between want and have.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        # Prefer a custom comparator property when one is defined below.
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        wanted = getattr(self.want, param)
        try:
            if wanted != getattr(self.have, param):
                return wanted
        except AttributeError:
            # Nothing on the device side; the wanted value is the change.
            return wanted
        return None

    @property
    def routing_protocol(self):
        # List comparison must ignore ordering; delegate to the helper.
        return cmp_simple_list(self.want.routing_protocol, self.have.routing_protocol)

    @property
    def vlans(self):
        return cmp_simple_list(self.want.vlans, self.have.vlans)

    @property
    def fw_policy_link(self):
        # Only report a link change when the underlying policy changed.
        if self.want.fw_enforced_policy is None:
            return None
        if self.want.fw_enforced_policy == self.have.fw_enforced_policy:
            return None
        if self.want.fw_policy_link != self.have.fw_policy_link:
            return self.want.fw_policy_link
        return None
class ModuleManager(object):
    """Drives the create/update/delete lifecycle of a route domain.

    ``want`` holds the user-supplied (desired) state, ``have`` the state
    read from the device, and ``changes`` whatever needs to be sent to
    the device and/or reported back to the user.
    """

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        self.want = ModuleParameters(params=self.module.params, client=self.client)
        self.have = ApiParameters(client=self.client)
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # On create, every option the user supplied counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        """Diff desired vs. current state into ``self.changes``.

        Returns:
            bool: True if at least one updatable attribute differs.
        """
        diff = Difference(self.want, self.have)
        changed = dict()
        for k in Parameters.updatables:
            change = diff.compare(k)
            if change is None:
                continue
            # Custom comparators may return a dict of API attributes.
            if isinstance(change, dict):
                changed.update(change)
            else:
                changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        # Kept as a separate hook for symmetry with other F5 modules.
        return self._update_changed_options()

    def exec_module(self):
        """Run the module and return the result dict for exit_json()."""
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Deprecations must be emitted through the AnsibleModule instance
        # stored on self.module; the REST client is constructed from
        # module.params only and does not carry a ``module`` attribute.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        """Ensure the route domain exists, creating or updating as needed."""
        if self.exists():
            return self.update()
        return self.create()

    def absent(self):
        """Ensure the route domain does not exist."""
        if self.exists():
            return self.remove()
        return False

    def update(self):
        """Update an existing route domain; return True if anything changed."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        # Validate the parent reference before touching the device.
        if self.want.parent and self.want.parent not in self.have.domains:
            raise F5ModuleError(
                "The parent route domain was not found."
            )
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True

    def remove(self):
        """Delete the route domain, verifying it is actually gone."""
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        """Create a new route domain; 'id' is mandatory on creation."""
        if self.want.id is None:
            raise F5ModuleError(
                "The 'id' parameter is required when creating new route domains."
            )
        if self.want.parent and self.want.parent not in self.have.domains:
            raise F5ModuleError(
                "The parent route domain was not found."
            )
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def exists(self):
        """Return True if the route domain exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/net/route-domain/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name),
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True

    def create_on_device(self):
        """POST the new route domain, then PATCH the AFM policy if given.

        The firewall policy attributes are applied in a second request
        after the route domain itself has been created.
        """
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/net/route-domain/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        if self.want.fw_enforced_policy:
            payload = dict(
                fwEnforcedPolicy=self.want.fw_enforced_policy,
                fwEnforcedPolicyReference=self.want.fw_policy_link
            )
            uri = "https://{0}:{1}/mgmt/tm/net/route-domain/{2}".format(
                self.client.provider['server'],
                self.client.provider['server_port'],
                transform_name(self.want.partition, self.want.name),
            )
            resp = self.client.api.patch(uri, json=payload)
            try:
                response = resp.json()
            except ValueError as ex:
                raise F5ModuleError(str(ex))
            if 'code' in response and response['code'] in [400, 403]:
                if 'message' in response:
                    raise F5ModuleError(response['message'])
                else:
                    raise F5ModuleError(resp.content)
        return True

    def update_on_device(self):
        """PATCH only the attributes that changed."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/net/route-domain/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name),
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def remove_from_device(self):
        """DELETE the route domain; raise on any non-200 response."""
        uri = "https://{0}:{1}/mgmt/tm/net/route-domain/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name),
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)

    def read_current_from_device(self):
        """GET the route domain and wrap the response in ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/net/route-domain/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name),
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response, client=self.client)
class ArgumentSpec(object):
    # Declarative description of the module's accepted options.
    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            name=dict(),
            id=dict(type='int'),
            description=dict(),
            strict=dict(type='bool'),
            parent=dict(),
            vlans=dict(type='list'),
            routing_protocol=dict(
                type='list',
                choices=['BFD', 'BGP', 'IS-IS', 'OSPFv2', 'OSPFv3', 'PIM', 'RIP', 'RIPng', 'none']
            ),
            bwc_policy=dict(),
            connection_limit=dict(type='int'),
            flow_eviction_policy=dict(),
            service_policy=dict(),
            fw_enforced_policy=dict(),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            ),
            state=dict(
                default='present',
                choices=['present', 'absent']
            )
        )
        # Merge the shared F5 connection options with the module-specific ones.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
        # A route domain must be addressable by either name or id.
        self.required_one_of = [
            ['name', 'id']
        ]
def main():
    """Module entry point: wire up AnsibleModule, run the manager, report."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        # ArgumentSpec declares that at least one of name/id must be given;
        # pass the constraint through so AnsibleModule actually enforces it.
        required_one_of=spec.required_one_of
    )
    client = F5RestClient(**module.params)
    try:
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        cleanup_tokens(client)
        exit_json(module, results, client)
    except F5ModuleError as ex:
        # Release any acquired REST auth tokens even on failure.
        cleanup_tokens(client)
        fail_json(module, ex, client)


if __name__ == '__main__':
    main()
| veger/ansible | lib/ansible/modules/network/f5/bigip_routedomain.py | Python | gpl-3.0 | 22,327 |
from __future__ import unicode_literals
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field
from crispy_forms.bootstrap import AppendedText, PrependedText, FormActions
from django.contrib.auth import get_user_model
from . import models
User = get_user_model()
class UserForm(forms.ModelForm):
    # Edits the user's display name, rendered via crispy-forms.
    def __init__(self, *args, **kwargs):
        super(UserForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        # The template supplies the surrounding <form> tag (this form is
        # rendered together with ProfileForm), so don't emit one here.
        self.helper.form_tag = False
        self.helper.layout = Layout(
            Field('name'),
        )
    class Meta:
        model = User
        fields = ['name']
class ProfileForm(forms.ModelForm):
    # Edits the extended profile attached to a user.
    def __init__(self, *args, **kwargs):
        super(ProfileForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        # Rendered inside a page-level <form> shared with UserForm.
        self.helper.form_tag = False
        self.helper.layout = Layout(
            Field('picture'),
            Field('bio'),
            Field('age'),
            Field('gender'),
            # NOTE(review): 'ethenic' mirrors the model field name and looks
            # like a misspelling of 'ethnic' -- fixing it requires a model
            # rename/migration, not just a form change.
            Field('ethenic'),
            Field('interest'),
            Submit('update', 'Update', css_class="btn-success"),
        )
    class Meta:
        model = models.Profile
        fields = ['picture', 'bio','age','gender','ethenic','interest']
| focode/buddy2 | src/profiles/forms.py | Python | mit | 1,321 |
# Copyright 2013 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from neutron.common import exceptions as n_exc
from neutron import manager
from neutron.plugins.common import constants
from neutron.services import provider_configuration as provconf
from neutron.tests import base
class ParseServiceProviderConfigurationTestCase(base.BaseTestCase):
    """Tests for provconf.parse_service_provider_opt()."""
    def test_default_service_provider_configuration(self):
        # With no override, the option defaults to an empty list.
        providers = cfg.CONF.service_providers.service_provider
        self.assertEqual(providers, [])
    def test_parse_single_service_provider_opt(self):
        # A 'type:name:driver' entry parses with default=False.
        cfg.CONF.set_override('service_provider',
                              [constants.LOADBALANCER +
                               ':lbaas:driver_path'],
                              'service_providers')
        expected = {'service_type': constants.LOADBALANCER,
                    'name': 'lbaas',
                    'driver': 'driver_path',
                    'default': False}
        res = provconf.parse_service_provider_opt()
        self.assertEqual(len(res), 1)
        self.assertEqual(res, [expected])
    def test_parse_single_default_service_provider_opt(self):
        # The optional fourth ':default' token marks the provider default.
        cfg.CONF.set_override('service_provider',
                              [constants.LOADBALANCER +
                               ':lbaas:driver_path:default'],
                              'service_providers')
        expected = {'service_type': constants.LOADBALANCER,
                    'name': 'lbaas',
                    'driver': 'driver_path',
                    'default': True}
        res = provconf.parse_service_provider_opt()
        self.assertEqual(len(res), 1)
        self.assertEqual(res, [expected])
    def test_parse_multi_service_provider_opt(self):
        cfg.CONF.set_override('service_provider',
                              [constants.LOADBALANCER +
                               ':lbaas:driver_path',
                               constants.LOADBALANCER + ':name1:path1',
                               constants.LOADBALANCER +
                               ':name2:path2:default'],
                              'service_providers')
        res = provconf.parse_service_provider_opt()
        # This parsing crosses repos if additional projects are installed,
        # so check that at least what we expect is there; there may be more.
        self.assertTrue(len(res) >= 3)
    def test_parse_service_provider_opt_not_allowed_raises(self):
        # An unknown service type must be rejected.
        cfg.CONF.set_override('service_provider',
                              [constants.LOADBALANCER +
                               ':lbaas:driver_path',
                               'svc_type:name1:path1'],
                              'service_providers')
        self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)
    def test_parse_service_provider_invalid_format(self):
        # Wrong token counts or empty fields must be rejected.
        cfg.CONF.set_override('service_provider',
                              [constants.LOADBALANCER +
                               ':lbaas:driver_path',
                               'svc_type:name1:path1:def'],
                              'service_providers')
        self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)
        cfg.CONF.set_override('service_provider',
                              [constants.LOADBALANCER +
                               ':',
                               'svc_type:name1:path1:def'],
                              'service_providers')
        self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)
    def test_parse_service_provider_name_too_long(self):
        # Provider names are limited to 255 characters.
        name = 'a' * 256
        cfg.CONF.set_override('service_provider',
                              [constants.LOADBALANCER +
                               ':' + name + ':driver_path',
                               'svc_type:name1:path1:def'],
                              'service_providers')
        self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)
class ProviderConfigurationTestCase(base.BaseTestCase):
    """Tests for the provconf.ProviderConfiguration registry."""
    # A setUp that only called super().setUp() was removed; the base
    # class hook runs regardless.

    def test_ensure_driver_unique(self):
        # A driver path may be registered only once across all providers.
        pconf = provconf.ProviderConfiguration([])
        pconf.providers[('svctype', 'name')] = {'driver': 'driver',
                                                'default': True}
        self.assertRaises(n_exc.Invalid,
                          pconf._ensure_driver_unique, 'driver')
        self.assertIsNone(pconf._ensure_driver_unique('another_driver1'))

    def test_ensure_default_unique(self):
        # Only one provider per service type may be flagged as default.
        pconf = provconf.ProviderConfiguration([])
        pconf.providers[('svctype', 'name')] = {'driver': 'driver',
                                                'default': True}
        self.assertRaises(n_exc.Invalid,
                          pconf._ensure_default_unique,
                          'svctype', True)
        self.assertIsNone(pconf._ensure_default_unique('svctype', False))
        self.assertIsNone(pconf._ensure_default_unique('svctype1', True))
        self.assertIsNone(pconf._ensure_default_unique('svctype1', False))

    def test_add_provider(self):
        pconf = provconf.ProviderConfiguration([])
        prov = {'service_type': constants.LOADBALANCER,
                'name': 'name',
                'driver': 'path',
                'default': False}
        pconf.add_provider(prov)
        self.assertEqual(len(pconf.providers), 1)
        # Providers are keyed on (service_type, name); the stored value
        # holds the remaining attributes.
        self.assertEqual(list(pconf.providers.keys()),
                         [(constants.LOADBALANCER, 'name')])
        self.assertEqual(list(pconf.providers.values()),
                         [{'driver': 'path', 'default': False}])

    def test_add_duplicate_provider(self):
        # Registering the same (type, name) twice must fail and leave the
        # registry unchanged.
        pconf = provconf.ProviderConfiguration([])
        prov = {'service_type': constants.LOADBALANCER,
                'name': 'name',
                'driver': 'path',
                'default': False}
        pconf.add_provider(prov)
        self.assertRaises(n_exc.Invalid, pconf.add_provider, prov)
        self.assertEqual(len(pconf.providers), 1)

    def test_get_service_providers(self):
        # Filtering by name + service_type returns exactly that provider.
        provs = [{'service_type': constants.LOADBALANCER,
                  'name': 'name',
                  'driver': 'path',
                  'default': False},
                 {'service_type': constants.LOADBALANCER,
                  'name': 'name2',
                  'driver': 'path2',
                  'default': False},
                 {'service_type': 'st2',
                  'name': 'name',
                  'driver': 'driver',
                  'default': True
                  },
                 {'service_type': 'st3',
                  'name': 'name2',
                  'driver': 'driver2',
                  'default': True}]
        pconf = provconf.ProviderConfiguration(provs)
        for prov in provs:
            p = pconf.get_service_providers(
                filters={'name': [prov['name']],
                         'service_type': prov['service_type']}
            )
            self.assertEqual(p, [prov])

    def test_get_service_providers_with_fields(self):
        # The 'fields' argument projects the result to the named keys only.
        provs = [{'service_type': constants.LOADBALANCER,
                  'name': 'name',
                  'driver': 'path',
                  'default': False},
                 {'service_type': constants.LOADBALANCER,
                  'name': 'name2',
                  'driver': 'path2',
                  'default': False}]
        pconf = provconf.ProviderConfiguration(provs)
        for prov in provs:
            p = pconf.get_service_providers(
                filters={'name': [prov['name']],
                         'service_type': prov['service_type']},
                fields=['name']
            )
            self.assertEqual(p, [{'name': prov['name']}])
class GetProviderDriverClassTestCase(base.BaseTestCase):
    """Tests for provconf.get_provider_driver_class()."""
    def test_get_provider_driver_class_hit(self):
        # A known alias in the namespace resolves to its full class path.
        driver = 'ml2'
        expected = 'neutron.plugins.ml2.plugin.Ml2Plugin'
        actual = provconf.get_provider_driver_class(
            driver,
            namespace=manager.CORE_PLUGINS_NAMESPACE)
        self.assertEqual(expected, actual)
    def test_get_provider_driver_class_miss(self):
        # Unknown aliases are passed through unchanged.
        retval = provconf.get_provider_driver_class('foo')
        self.assertEqual('foo', retval)
| paninetworks/neutron | neutron/tests/unit/services/test_provider_configuration.py | Python | apache-2.0 | 8,876 |
#!/usr/bin/python
# -* encoding: utf-8 *-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import re
import sys
from distutils.core import setup
from setuptools import find_packages
_package_root = "src/py"
_root_package = 'gopythongo'
_HERE = os.path.abspath(os.path.dirname(__file__))

# Read the package version from the source tree so it is defined in exactly
# one place (src/py/gopythongo/__init__.py).  If several matching lines
# exist, the last one wins (unchanged historical behavior).
_version = "0.0.0+local"  # fallback for local/dev builds without a version line
with open("src/py/gopythongo/__init__.py", "rt", encoding="utf-8") as vf:
    for line in vf:
        m = re.match(r"version = \"(.*?)\"", line)
        if m:
            _version = m.group(1)

_packages = find_packages(_package_root, exclude=["*.tests", "*.tests.*", "tests.*", "tests"])

# Pinned runtime dependencies.
_requirements = [
    'Jinja2==3.0.1',
    'ConfigArgParse==1.5.2',
    'Sphinx==4.1.2',
    'sphinx-rtd-theme==0.5.2',
    'colorama==0.4.4',
    'semantic-version==2.8.5',
    'packaging==21.0',
    'typing-extensions==3.10.0.0',
    'hvac==0.11.0',
    'docker-py==1.10.6',
    'dockerpty==0.4.1',
    'pyopenssl==20.0.1',
    'bumpversion==0.6.0',
    'aptly-api-client==0.2.3',
]

# shutil.get_terminal_size() exists only on Python >= 3.3; backport otherwise.
if sys.version_info < (3, 3):
    _requirements.append('backports.shutil_get_terminal_size==1.0.0')

# Long description is optional (README.md may be absent in sdists).
# A context manager closes the file handle deterministically.
try:
    with open(os.path.join(_HERE, 'README.md')) as readme:
        long_description = readme.read()
except IOError:
    long_description = None

setup(
    name='gopythongo',
    version=_version,
    packages=_packages,
    package_dir={
        '': _package_root,
    },
    entry_points={
        "console_scripts": [
            "gopythongo = gopythongo.main:main",
            "vaultwrapper = gopythongo.vaultwrapper:main",
            "vaultgetcert = gopythongo.vaultgetcert:main",
        ]
    },
    install_requires=_requirements,
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "Environment :: Console",
        "Programming Language :: Python :: 3 :: Only",
        "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
        "Operating System :: POSIX",
    ],
    author="Jonas Maurus (@jdelic)",
    author_email="jonas@gopythongo.com",
    maintainer="GoPythonGo.com",
    maintainer_email="info@gopythongo.com",
    description="Build shippable virtualenvs",
    long_description=long_description,
)
| gopythongo/gopythongo | setup.py | Python | mpl-2.0 | 2,451 |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from collections import OrderedDict
import datetime
from app import app
db = SQLAlchemy(app)
class CommonFuncs(object):
    """Mixin with serialization helpers shared by the SQLAlchemy models."""

    def _asdict(self):
        """Return this row's mapped columns as an OrderedDict.

        date/datetime values are converted to ISO-8601 strings so the
        result can be fed straight to a JSON encoder.
        """
        result = OrderedDict()
        for key in self.__mapper__.c.keys():
            attr = getattr(self, key)
            # isinstance (not ``type() is``) so datetime.datetime -- a
            # subclass of datetime.date -- is serialized as well.
            if isinstance(attr, datetime.date):
                result[key] = attr.isoformat()
            else:
                result[key] = attr
        return result
class Users(db.Model,CommonFuncs):
    # Application user account.
    __tablename__='users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True)
    email = db.Column(db.String(120), unique=True)
    firstName = db.Column(db.String(20))
    lastName = db.Column(db.String(20))
    passwordHash = db.Column(db.String(128))
    # One-to-many: lists owned by this user.
    toDoLists = db.relationship('ToDoLists')
    def __init__(self, username,password,email):
        # NOTE(review): the 'password' argument is stored verbatim in
        # passwordHash -- presumably callers hash it before constructing
        # this object; confirm, since no hashing happens here.
        self.username = username
        self.email = email
        self.passwordHash = password
    def __repr__(self):
        return '<User %r>' % self.username
class ToDoLists(db.Model,CommonFuncs):
    # A named to-do list belonging to one user.
    __tablename__ = 'toDoLists'
    id = db.Column(db.Integer, primary_key=True)
    userId = db.Column(db.Integer,db.ForeignKey('users.id'))
    name = db.Column(db.String(64))
    private = db.Column(db.Boolean)
    date = db.Column(db.Date)
    # Deleting a list cascades to its items.
    items = db.relationship('ToDoItems', cascade="save-update, merge, delete")
class ToDoItems(db.Model,CommonFuncs):
    # A single task line within a to-do list.
    __tablename__ = 'toDoItems'
    id = db.Column(db.Integer, primary_key=True)
    listId = db.Column(db.Integer,db.ForeignKey('toDoLists.id'))
    task = db.Column(db.String(128))
    completed = db.Column(db.Boolean)
| agilman/flask-template | app/models.py | Python | mit | 1,788 |
import sys
import networkx as nx
import cPickle as pickle
import re
class Structure(object):
    """Wrapper around a networkx directed graph representing a tree.

    ``structure`` is the graph; node attribute 'name' holds the display
    name of each node.  ``root`` is the id of the tree's root node.
    """

    def __init__(self, structure, root):
        self.structure = structure
        self.root = root

    def _parent_of(self, node):
        # Return the parent node id, or "" when the node has no incoming
        # edge (i.e. it is a root).  Previously this lookup was duplicated
        # in both traversal methods.
        preds = self.structure.in_edges(node)
        if len(preds) == 0:
            return ""
        return preds[0][0]

    def getStructure(self):
        """Return [(node, name, parent)] in DFS preorder from self.root."""
        nodes = []
        for n in nx.dfs_preorder_nodes(self.structure, self.root):
            nodes.append((n, self.structure.node[n]['name'], self._parent_of(n)))
        return nodes

    def getDescendents(self, root):
        """Return (node id strings, names, (parent, child) edges) for the
        subtree rooted at ``root``, in DFS preorder.  The root itself is
        included in nodes/names but contributes no edge."""
        nodes = []
        edges = []
        names = []
        for n in nx.dfs_preorder_nodes(self.structure, root):
            p = self._parent_of(n)
            nodes.append(str(n))
            names.append(self.structure.node[n]['name'])
            if n == root:
                continue
            edges.append((p, str(n)))
        return nodes, names, edges
| yhalpern/anchorExplorer | Structures.py | Python | bsd-2-clause | 1,065 |
#from django.contrib import admin
from django.contrib.gis import admin
from modeltranslation.admin import TranslationAdmin, TranslationTabularInline
from django.contrib.contenttypes.generic import GenericTabularInline
from cigno.mdtools.models import Connection
from django.utils.translation import ugettext_lazy as _
from geonode.core.models import UserObjectRoleMapping
from django.http import HttpResponseRedirect
from models import *
# Reference for resizing fields in the Django admin:
# http://stackoverflow.com/questions/910169/resize-fields-in-django-admin
# Static assets used by django-modeltranslation to render language-tabbed
# translation fields in the admin (jQuery UI + the tabbed widget script).
translation_js = (
    '/static/modeltranslation/js/force_jquery.js',
    'http://ajax.googleapis.com/ajax/libs/jqueryui/1.8.2/jquery-ui.min.js',
    '/static/modeltranslation/js/tabbed_translation_fields.js',
)
translation_css = {
    'screen': ('/static/modeltranslation/css/tabbed_translation_fields.css',),
}
# Generic-FK inlines for Connection: one for connections originating from
# the edited object ('o_' fields), one for connections pointing to it.
class ConnectionInline(GenericTabularInline):
    model = Connection
    ct_field = 'o_content_type'
    ct_fk_field = 'o_object_id'
class InverseConnectionInline(GenericTabularInline):
    model = Connection
    ct_field = 'd_content_type'
    ct_fk_field = 'd_object_id'
class OnlineResourceInline(admin.TabularInline):
model = OnlineResource
classes = ('collapse closed',)
class TemporalExtentInline(admin.TabularInline):
model = TemporalExtent
classes = ('collapse closed',)
extra = 1
class ResourceTemporalExtentInline(admin.TabularInline):
model = ResourceTemporalExtent
classes = ('collapse closed',)
class ReferenceDateInline(admin.TabularInline):
model = ReferenceDate
classes = ('collapse closed',)
extra = 1
class ResourceReferenceDateInline(admin.TabularInline):
model = ResourceReferenceDate
classes = ('collapse closed',)
extra = 1
class ConformityInline(admin.TabularInline):
model = Conformity
classes = ('collapse closed',)
extra = 1
class ResourceConformityInline(admin.TabularInline):
    """Collapsed inline editor for a resource's conformity statements."""
    model = ResourceConformity
    classes = ('collapse closed',)
    extra = 1
class ResponsiblePartyRoleInline(admin.TabularInline):
    """Collapsed inline editor for responsible-party roles on a layer."""
    model = ResponsiblePartyRole
    classes = ('collapse closed',)
    extra = 1
class ResourceResponsiblePartyRoleInline(admin.TabularInline):
    """Collapsed inline editor for responsible-party roles on a resource."""
    model = ResourceResponsiblePartyRole
    classes = ('collapse closed',)
    extra = 1
class MdResponsiblePartyRoleInline(admin.TabularInline):
    """Collapsed inline for metadata point-of-contact roles on a layer;
    the ``role`` value is fixed and therefore shown read-only."""
    model = MdResponsiblePartyRole
    #exclude = ('role',)
    readonly_fields = ('role',)
    classes = ('collapse closed',)
    extra = 1
class ResourceMdResponsiblePartyRoleInline(admin.TabularInline):
    """Collapsed inline for metadata point-of-contact roles on a resource;
    the ``role`` value is fixed and therefore shown read-only."""
    model = ResourceMdResponsiblePartyRole
    #exclude = ('role',)
    readonly_fields = ('role',)
    classes = ('collapse closed',)
    extra = 1
class BaseCodeAdmin(TranslationAdmin):
    """Change list for simple code tables: the translated ``label`` is
    editable directly in the list view."""
    list_editable = ['label',]
    list_display = ['id', 'label']
    class Media:
        js = translation_js
        css = translation_css
class BaseCodeIsoAdmin(TranslationAdmin):
    """Change list for ISO code tables: label and ISO identifier are
    editable directly in the list view."""
    list_editable = ['label','isoid']
    list_display = ['id', 'label', 'isoid']
    class Media:
        js = translation_js
        css = translation_css
class CodeRefSysAdmin(TranslationAdmin):
    """Change list for spatial reference systems (label + SRID)."""
    list_editable = ['label', 'srid']
    list_display = ['id', 'label', 'srid']
    class Media:
        js = translation_js
        css = translation_css
class CodeLicenseAdmin(TranslationAdmin):
    """Change list for licenses (label + abstract)."""
    list_editable = ['label', 'abstract']
    list_display = ['id', 'label', 'abstract']
    class Media:
        js = translation_js
        css = translation_css
class CodeDistributionFormatAdmin(TranslationAdmin):
    """Change list for distribution formats; all descriptive columns are
    editable directly in the list view."""
    list_editable = ['format','label', 'version', 'mimetype', 'ordering']
    list_display = ['id', 'format', 'label', 'version', 'mimetype', 'ordering']
    class Media:
        js = translation_js
        css = translation_css
class ResponsiblePartyAdmin(TranslationAdmin):
    """Translated admin for ResponsibleParty; the list customisations
    below are currently disabled, so Django's defaults apply."""
    # list_editable = ['label', 'version', 'ordering']
    # list_display = ['id', 'label', 'version', 'ordering']
    class Media:
        js = translation_js
        css = translation_css
class LayerExtAdmin(TranslationAdmin):
    """Admin for LayerExt with row-level permissions and ISO-19115-style
    metadata fieldsets.

    Non-superusers only see the layers on which they hold the
    'layer_readwrite' or 'layer_admin' role.
    """
    # row-level permissions
    # http://www.ibm.com/developerworks/opensource/library/os-django-admin/index.html
    def queryset(self, request):
        """Restrict the change list to layers the user may edit."""
        qs = super(LayerExtAdmin, self).queryset(request)
        if request.user.is_superuser:
            return qs
        return qs.filter(id__in = UserObjectRoleMapping.objects.filter(user=request.user,
                                                                       role__codename__in =('layer_readwrite','layer_admin')
                                                                       ).values_list('object_id',flat=True)
                         )
    inlines = [ # OnlineResourceInline,
        TemporalExtentInline,
        ReferenceDateInline,
        ConformityInline,
        ResponsiblePartyRoleInline,
        MdResponsiblePartyRoleInline,
        # ConnectionInline,
        # InverseConnectionInline,
        ]
    #raw_id_fields = ("parent_identifier",)
    filter_horizontal = ['presentation_form','spatial_representation_type_ext','topic_category_ext','responsible_party_role','distribution_format','md_responsible_party_role']
    #readonly_fields = ['uuid', 'geographic_bounding_box']
    # readonly_fields = ['uuid', 'md_uuid', 'geographic_bounding_box', 'md_standard_name', 'md_version_name', 'md_character_set']
    search_fields = ['titleml', 'abstractml']
    search_fields_verbose = ['Titolo', 'Descrizione'] #GRAPPELLI
    list_filter = ('resource_type', 'spatial_representation_type_ext', 'topic_category', 'distribution_format')
    # NOTE: an earlier duplicate assignment ``list_display = ('titleml',)``
    # was dead code (this one always won) and has been removed.
    list_display = ('id', 'titleml', 'inspire', 'completeness_bar')
    fieldsets = (
        (_('Metadata'), {
            'classes': ('collapse closed',),
            'fields': (
                'md_uuid',
                #'lingua_metadata',
                'md_date_stamp',
                ('md_character_set', 'md_standard_name', 'md_version_name')
                )
            }),
        (_('Identification'), {
            'classes': ('collapse closed',),
            'fields': (
                'titleml', 'abstractml', # 'source_document', # override by resources connections
                #'resource_type', 'parent_identifier', 'other_citation_details',
                'other_citation_details',
                'presentation_form',
                'distribution_format'
                )
            }),
        (_('Identification2'), {
            'classes': ('collapse closed',),
            'fields': (
                ('resource_type', 'uuid'),
                ('language', 'character_set'),
                'supplemental_information_ml',
                'update_frequency',
                'spatial_representation_type_ext'
                )
            }),
        (_('Responsible Party'), {
            'classes': ('collapse closed',),
            'fields': []
            }),
        (_('Classification e Keywords'), {
            'classes': ('collapse closed',),
            'fields': (
                'inspire', 'topic_category_ext', 'gemetkeywords'
                )
            }),
        (_('Geographic extent'), {
            'classes': ('collapse',),
            'fields': (
                ('ref_sys', 'geographic_bounding_box'),
                #'geo',
                ('vertical_datum', 'vertical_extent_min', 'vertical_extent_max', 'uom_vertical_extent')
                )
            }),
        (_('Temporal extent'), {
            'classes': ('collapse',),
            'fields': []
            }),
        (_('DataQuality'), {
            'classes': ('collapse closed',),
            'fields': (
                'lineage', ('equivalent_scale', 'distance', 'uom_distance')
                )
            }),
        (_('Conformity'), {
            'classes': ('collapse closed',),
            'fields': []
            }),
        (_('Constraints'), {
            'classes': ('collapse closed',),
            'fields': (
                'license',
                'use_limitation',
                ('access_constraints', 'use_constraints'),
                'other_constraints',
                'security_constraints',
                )
            }),
        )
    class Media:
        js = translation_js
        css = translation_css
    def response_change(self, request, obj):
        """After "Save", redirect to the layer's own page instead of the
        default change list; other buttons keep the default behaviour."""
        res = super(LayerExtAdmin, self).response_change(request, obj)
        # ``dict.has_key()`` is deprecated; membership test is equivalent.
        if "_save" in request.POST:
            return HttpResponseRedirect(obj.get_absolute_url())
        else:
            return res
class ResourceAdmin(TranslationAdmin):
    """Admin for Resource with row-level permissions and ISO-19115-style
    metadata fieldsets.

    Non-superusers only see the resources on which they hold the
    'resource_readwrite' or 'resource_admin' role.
    """
    # row-level permissions
    # http://www.ibm.com/developerworks/opensource/library/os-django-admin/index.html
    def queryset(self, request):
        """Restrict the change list to resources the user may edit."""
        qs = super(ResourceAdmin, self).queryset(request)
        if request.user.is_superuser:
            return qs
        return qs.filter(id__in = UserObjectRoleMapping.objects.filter(user=request.user,
                                                                       role__codename__in =('resource_readwrite','resource_admin')
                                                                       ).values_list('object_id',flat=True)
                         )
    inlines = [ # OnlineResourceInline,
        ResourceTemporalExtentInline,
        ResourceReferenceDateInline,
        ResourceConformityInline,
        ResourceResponsiblePartyRoleInline,
        ResourceMdResponsiblePartyRoleInline,
        # ConnectionInline,
        # InverseConnectionInline,
        ]
    #raw_id_fields = ("parent_identifier",)
    filter_horizontal = ['presentation_form','spatial_representation_type_ext','topic_category_ext','responsible_party_role','distribution_format','md_responsible_party_role']
    #readonly_fields = ['uuid', 'md_uuid', 'geographic_bounding_box', 'md_standard_name', 'md_version_name', 'md_character_set']
    readonly_fields = ['uuid', 'md_uuid', 'md_standard_name', 'md_version_name', 'md_character_set']
    search_fields = ['titleml', 'abstractml']
    search_fields_verbose = ['Titolo', 'Descrizione'] #GRAPPELLI
    list_filter = ('resource_type', 'spatial_representation_type_ext', 'topic_category', 'distribution_format')
    # NOTE: an earlier duplicate assignment ``list_display = ('titleml',)``
    # was dead code (this one always won) and has been removed.
    list_display = ('id', 'titleml', 'inspire') #, 'completeness_bar')
    list_editable = ['titleml',]
    fieldsets = (
        (_('Metadata'), {
            'classes': ('collapse closed',),
            'fields': (
                'md_uuid',
                #'lingua_metadata',
                'md_date_stamp',
                ('md_character_set', 'md_standard_name', 'md_version_name')
                )
            }),
        (_('Identification'), {
            'classes': ('collapse closed',),
            'fields': (
                'titleml', 'abstractml', # 'source_document', # override by resources connections
                #'resource_type', 'parent_identifier', 'other_citation_details',
                'other_citation_details',
                'presentation_form',
                'distribution_format'
                )
            }),
        (_('Identification2'), {
            'classes': ('collapse closed',),
            'fields': (
                ('resource_type', 'uuid'),
                ('language', 'character_set'),
                'supplemental_information_ml',
                'update_frequency',
                'spatial_representation_type_ext'
                )
            }),
        (_('Responsible Party'), {
            'classes': ('collapse closed',),
            'fields': []
            }),
        (_('Classification e Keywords'), {
            'classes': ('collapse closed',),
            'fields': (
                'inspire', 'topic_category_ext', 'gemetkeywords'
                )
            }),
        (_('Geographic extent'), {
            'classes': ('collapse',),
            'fields': (
                #('ref_sys', 'geographic_bounding_box'),
                #'geo',
                ('vertical_datum', 'vertical_extent_min', 'vertical_extent_max', 'uom_vertical_extent')
                )
            }),
        (_('Temporal extent'), {
            'classes': ('collapse',),
            'fields': []
            }),
        (_('DataQuality'), {
            'classes': ('collapse closed',),
            'fields': (
                'lineage', ('equivalent_scale', 'distance', 'uom_distance')
                )
            }),
        (_('Conformity'), {
            'classes': ('collapse closed',),
            'fields': []
            }),
        (_('Constraints'), {
            'classes': ('collapse closed',),
            'fields': (
                'license',
                'use_limitation',
                ('access_constraints', 'use_constraints'),
                'other_constraints',
                'security_constraints',
                )
            }),
        )
    class Media:
        js = translation_js
        css = translation_css
    def response_change(self, request, obj):
        """After "Save", redirect to the resource's own page instead of the
        default change list; other buttons keep the default behaviour."""
        res = super(ResourceAdmin, self).response_change(request, obj)
        # ``dict.has_key()`` is deprecated; membership test is equivalent.
        if "_save" in request.POST:
            return HttpResponseRedirect(obj.get_absolute_url())
        else:
            return res
# Register every code-list and metadata model with its admin class.
# The pairing table keeps the original registration order.
for _model, _admin_class in (
    (DcCodeResourceType, BaseCodeAdmin),
    (CodeScope, BaseCodeIsoAdmin),
    (CodeTopicCategory, BaseCodeIsoAdmin),
    (CodePresentationForm, BaseCodeIsoAdmin),
    (CodeSpatialRepresentationType, BaseCodeIsoAdmin),
    (CodeRefSys, CodeRefSysAdmin),
    (CodeLicense, CodeLicenseAdmin),
    (CodeCharacterSet, BaseCodeIsoAdmin),
    (CodeVerticalDatum, BaseCodeAdmin),
    (CodeMaintenanceFrequency, BaseCodeIsoAdmin),
    (CodeSampleFrequency, BaseCodeIsoAdmin),
    (CodeRestriction, BaseCodeIsoAdmin),
    (CodeClassification, BaseCodeIsoAdmin),
    (CodeTitle, BaseCodeAdmin),
    (CodeDateType, BaseCodeIsoAdmin),
    (CodeRole, BaseCodeIsoAdmin),
    (CodeDistributionFormat, CodeDistributionFormatAdmin),
    (ResponsibleParty, ResponsiblePartyAdmin),
    (LayerExt, LayerExtAdmin),
    (Resource, ResourceAdmin),
):
    admin.site.register(_model, _admin_class)
class ConnectionTypeAdmin(admin.ModelAdmin):
    """Admin for connection types; every descriptive field is editable
    straight from the change list."""
    # The redundant trailing ``pass`` was removed: the class body is non-empty.
    list_display = ('id', 'url', 'label', 'code', 'inverse')
    list_editable = ('url', 'label', 'code', 'inverse')
# Connection types get a customised admin; plain connections use the default.
admin.site.register(ConnectionType, ConnectionTypeAdmin)
admin.site.register(Connection)
| CIGNo-project/CIGNo | cigno/metadata/admin.py | Python | gpl-3.0 | 16,379 |
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
import visvis as vv
def title(text, axes=None):
    """ title(text, axes=None)

    Display a title above an axes.

    Parameters
    ----------
    text : string
        The text to show. Pass an empty string to remove an existing title.
    axes : Axes instance
        The axes to attach the title to, or the current axes if not given.

    Returns the Title object, or None when the title was removed.
    """
    target = axes if axes is not None else vv.gca()

    # Reuse an existing Title child when present, otherwise create one.
    existing = None
    for child in target.children:
        if isinstance(child, vv.Title):
            existing = child
            break
    if existing is not None:
        existing.text = text
        ob = existing
    else:
        ob = vv.Title(target, text)

    # An empty string means: remove the title.
    if not text:
        ob.Destroy()
        return None
    return ob
if __name__=='__main__':
    # Quick visual smoke test: put a title on the current axes.
    a = vv.gca()
    vv.title('test title')
"""
flask-urls
----------
A collection of URL-related functions for Flask applications.
Links
`````
* `documentation <http://sjl.bitbucket.org/flask-urls/>`_
* `development version
  <http://bitbucket.org/sjl/flask-urls/get/tip.gz#egg=flask-urls-dev>`_
"""
from setuptools import setup
# Packaging metadata for the flask-urls extension.
setup(
    name='flask-urls',
    version='0.9.2',
    url='http://sjl.bitbucket.org/flask-urls/',
    license='MIT',
    author='Steve Losh',
    author_email='steve@stevelosh.com',
    description='A collection of URL-related functions for Flask applications.',
    # The module docstring doubles as the PyPI long description.
    long_description=__doc__,
    # Flask extensions of this era install into the 'flaskext' namespace package.
    packages=['flaskext'],
    namespace_packages=['flaskext'],
    zip_safe=False,
    platforms='any',
    install_requires=[
        'Flask'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
| sjl/flask-urls | setup.py | Python | mit | 1,159 |
import unittest
from python_template import python_template
class Test(unittest.TestCase):
def test_returns_true(self):
self.assertEquals(True, python_template.returns_true())
self.assertTrue(python_template.returns_true())
def test_will_pass(self):
self.assertTrue(True)
| VDBWRAIR/python_template | tests/test_example.py | Python | gpl-2.0 | 310 |
"""Configuration options for the web_theme app"""
from django.utils.translation import ugettext_lazy as _
from smart_settings.api import register_settings
# Declare this app's options with smart_settings; each entry maps an internal
# name to its WEB_THEME_* global override and a default value.
register_settings(
    namespace=u'web_theme',
    module=u'web_theme.settings',
    settings=[
        {'name': u'THEME', 'global_name': u'WEB_THEME_THEME', 'default': u'activo', 'description': _(u'CSS theme to apply, options are: amro, bec, bec-green, blue, default, djime-cerulean, drastic-dark, kathleene, olive, orange, red, reidb-greenish and warehouse.')},
        {'name': u'ENABLE_SCROLL_JS', 'global_name': u'WEB_THEME_ENABLE_SCROLL_JS', 'default': True, 'hidden': True},
        {'name': u'VERBOSE_LOGIN', 'global_name': u'WEB_THEME_VERBOSE_LOGIN', 'default': True, 'description': _(u'Display extra information in the login screen.')},
    ]
)
| rosarior/rua | rua/apps/web_theme/settings.py | Python | gpl-3.0 | 812 |
import requests
from pyfibot import pyfibot
from pyfibot import botcore
class BotMock(botcore.CoreCommands):
    """Minimal bot stand-in for tests: URLs are really fetched with
    requests, while say() just echoes its arguments back to the caller."""
    config = {}

    def __init__(self, config=None, network=None):
        # Use None + per-instance dict: the previous ``config={}`` default
        # was a shared mutable default argument.
        self.config = {} if config is None else config
        if network:
            self.network = network
            self.nickname = self.network.nickname
            self.lineRate = self.network.linerate
            self.password = self.network.password

    def get_url(self, url, params=None, nocache=False, cookies=None):
        """Fetch *url* with requests; *nocache* is accepted but unused."""
        if params is None:
            # Fresh dict per call -- avoids the mutable-default pitfall.
            params = {}
        print("Getting url %s" % url)
        if cookies:
            return requests.get(url, params=params, cookies=cookies)
        return requests.get(url, params=params)

    def say(self, channel, message, length=None):
        # Return the would-be IRC output so tests can assert on it.
        return (channel, message)

    def to_utf8(self, _string):
        """Convert string to UTF-8 if it is unicode (Python 2 helper)."""
        if isinstance(_string, unicode):
            _string = _string.encode("UTF-8")
        return _string

    def to_unicode(self, _string):
        """Best-effort conversion of a byte string to unicode (Python 2)."""
        if not isinstance(_string, unicode):
            # NOTE(review): broad excepts kept deliberately -- input may be
            # arbitrarily encoded; try str(), then UTF-8, then Latin-1.
            try:
                _string = unicode(_string)
            except:
                try:
                    _string = _string.decode('utf-8')
                except:
                    _string = _string.decode('iso-8859-1')
        return _string
class FactoryMock(pyfibot.PyFiBotFactory):
    """Factory stand-in that wires BotMock protocols to two fake networks
    ('nerv' and 'localhost') so multi-network code paths can be tested."""
    protocol = BotMock
    def __init__(self, config={}):
        # NOTE(review): config={} is a shared mutable default -- harmless as
        # long as callers never mutate it, but worth confirming.
        pyfibot.PyFiBotFactory.__init__(self, config)
        self.createNetwork(('localhost', 6667), 'nerv', 'pyfibot', ['#pyfibot'], 0.5, None, False)
        self.createNetwork(('localhost', 6667), 'localhost', 'pyfibot', ['#pyfibot'], 0.5, None, False)
        self.startFactory()
        self.buildProtocol(None)
    def startFactory(self):
        # Point the module loader at the source tree and reset the registry.
        self.moduledir = './pyfibot/modules/'
        self.allBots = {}
    def buildProtocol(self, address):
        """Create one BotMock per configured network; *address* is unused."""
        # Go through all defined networks
        for network, server in self.data['networks'].items():
            p = self.protocol(network=server)
            self.allBots[server.alias] = p
            p.factory = self
| EArmour/pyfibot | tests/bot_mock.py | Python | bsd-3-clause | 2,165 |
# This file is part of the Minecraft Overviewer.
#
# Minecraft Overviewer is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# Minecraft Overviewer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the Overviewer. If not, see <http://www.gnu.org/licenses/>.
"""This module has supporting functions for the caching logic used in world.py.
Each cache class should implement the standard container type interface
(__getitem__ and __setitem__), as well as provide a "hits" and "misses"
attribute.
"""
class LRUCache(object):
    """A simple, generic, in-memory least-recently-used cache.

    Implements the standard container interface (__getitem__, __setitem__,
    __delitem__) and exposes ``hits`` and ``misses`` counters.

    Entries live on a doubly linked list ordered from least- to
    most-recently used, with a dict mapping keys to list nodes, so every
    operation runs in (amortized) constant time.  Pickling a cache (e.g.
    for a worker process) transfers only its size: the copy starts empty.
    """

    class _LinkNode(object):
        # One element of the doubly linked usage list.
        __slots__ = ['left', 'right', 'key', 'value']
        def __init__(self, l=None, r=None, k=None, v=None):
            self.left = l
            self.right = r
            self.key = k
            self.value = v

    def __init__(self, size=100, destructor=None):
        """Create a cache holding at most *size* items.

        If given, *destructor* is called with the stored value whenever an
        item is evicted or explicitly deleted.
        """
        self.cache = {}
        # Sentinels at both ends keep link/unlink free of boundary checks:
        # listhead.right is the LRU entry, listtail.left the MRU entry.
        self.listhead = LRUCache._LinkNode()
        self.listtail = LRUCache._LinkNode()
        self.listhead.right = self.listtail
        self.listtail.left = self.listhead
        self.hits = 0
        self.misses = 0
        self.size = size
        self.destructor = destructor

    def _unlink(self, node):
        # Detach *node* from the usage list.
        node.left.right = node.right
        node.right.left = node.left

    def _push_mru(self, node):
        # Insert *node* just before the tail sentinel (most-recent end).
        tail = self.listtail
        node.left = tail.left
        node.right = tail
        tail.left.right = node
        tail.left = node

    # Worker processes receive an empty cache of the same size.
    def __getstate__(self):
        return self.size

    def __setstate__(self, size):
        self.__init__(size)

    def __getitem__(self, key):
        try:
            node = self.cache[key]
        except KeyError:
            self.misses += 1
            raise
        # A hit promotes the entry to the most-recently-used position.
        self._unlink(node)
        self._push_mru(node)
        self.hits += 1
        return node.value

    def __setitem__(self, key, value):
        node = self.cache.get(key)
        if node is not None:
            # Overwrite in place; usage order is deliberately untouched.
            node.value = value
            return
        if len(self.cache) >= self.size:
            # At capacity: drop the least-recently-used entry.
            victim = self.listhead.right
            del self.cache[victim.key]
            self._unlink(victim)
            if self.destructor:
                self.destructor(victim.value)
        fresh = LRUCache._LinkNode(None, None, key, value)
        self._push_mru(fresh)
        self.cache[key] = fresh

    def __delitem__(self, key):
        # Explicitly flush one entry, invoking the destructor on its value.
        node = self.cache.pop(key)
        self._unlink(node)
        if self.destructor:
            self.destructor(node.value)
| overviewer/Minecraft-Overviewer | overviewer_core/cache.py | Python | gpl-3.0 | 4,522 |
# Wake-On-LAN
import socket, ping, os, sys, time
import xbmc, xbmcgui, xbmcaddon
def main(isAutostart=False):
    """Send a Wake-On-LAN packet and optionally wait for the target host
    to come up, firing XBMC notifications along the way.

    isAutostart -- True when invoked from XBMC autostart; enables the
    post-wakeup library-update steps configured in the addon settings.
    """
    print 'script.advanced.wol: Starting WOL script'
    ####### Read Settings
    settings = xbmcaddon.Addon( id="script.advanced.wol" )
    language = settings.getLocalizedString
    # basic settings
    macAddress = settings.getSetting("macAddress")
    hostOrIp = settings.getSetting("hostOrIp")
    #notification settings
    enableLaunchNotifies = settings.getSetting("enableLaunchNotifies")
    enablePingCounterNotifies = settings.getSetting("enablePingCounterNotifies")
    enableHostupNotifies = settings.getSetting("enableHostupNotifies")
    enableErrorNotifies = settings.getSetting("enableErrorNotifies")
    #advanced settings
    pingTimeout = int(settings.getSetting("pingTimeout"))
    hostupWaitTime = int(settings.getSetting("hostupWaitTime"))
    disablePingHostupCheck = settings.getSetting("disablePingHostupCheck")
    continuousWol = settings.getSetting("continuousWol")
    continuousWolDelay = int(settings.getSetting("continuousWolDelay"))
    continuousWolAfterStandby = settings.getSetting("continuousWolAfterStandby")
    updateVideoLibraryAfterWol = settings.getSetting("updateVideoLibraryAfterWol")
    updateMusicLibraryAfterWol = settings.getSetting("updateMusicLibraryAfterWol")
    libraryUpdatesDelay = int(settings.getSetting("libraryUpdatesDelay"))
    #if the scrpit was called with a 3rd parameter,
    #use the mac-address and host/ip from there
    # (format: host@mac; any error falls back to the configured values)
    try:
        if (len(sys.argv[3])>0):
            arrCustomServer = sys.argv[3].split('@')
            hostOrIp = arrCustomServer[0]
            macAddress = arrCustomServer[1]
    except:
        pass
    # Set Icons
    rootDir = settings.getAddonInfo('path')
    if rootDir[-1] == ';':rootDir = rootDir[0:-1]
    resDir = os.path.join(rootDir, 'resources')
    iconDir = os.path.join(resDir, 'icons')
    iconConnect = os.path.join(iconDir, 'server_connect.png')
    iconError = os.path.join(iconDir, 'server_error.png')
    iconSuccess = os.path.join(iconDir, 'server.png')
    # Optional extra XBMC builtin command in argv[1]; argv[2] == "True"
    # delays it until the host is confirmed awake.
    launchcommand = False
    delaycommand = False
    try:
        if (len(sys.argv[1])>0):
            launchcommand = True
            if (str(sys.argv[2]) == str(True)):
                delaycommand = True
    except:
        pass
    # Launch additional command passed with parameters, if it should not be delayed to after successful wakeup
    if ((launchcommand == True) & (delaycommand == False)):
        xbmc.executebuiltin(sys.argv[1])
    # Send WOL-Packet
    xbmc.executebuiltin('XBMC.WakeOnLan("'+macAddress+'")')
    print 'script.advanced.wol: WakeOnLan signal sent to MAC-Address '+macAddress
    # Send Connection Notification
    if (enableLaunchNotifies == "true"):
        xbmc.executebuiltin('XBMC.Notification("'+language(60000).replace("%hostOrIp%",hostOrIp)+'","",5000,"'+iconConnect+'")')
    # Determine wakeup-success
    hostupConfirmed = False
    if (disablePingHostupCheck == "true"):
        #with this setting, we just wait for "hostupWaitTime" seconds and assume a successful wakeup then.
        timecount = 1
        while timecount <= hostupWaitTime:
            xbmc.sleep(1000)
            if (enablePingCounterNotifies == "true"):
                xbmc.executebuiltin('XBMC.Notification("'+language(60001).replace("%hostOrIp%",hostOrIp)+'","'+language(60002).replace("%timecount%",str(timecount)).replace("%timeout%",str(hostupWaitTime))+'",5000,"'+iconConnect+'")')
            timecount = timecount+1
        if (enableHostupNotifies == "true"):
            xbmc.executebuiltin('XBMC.Notification("'+language(60011).replace("%hostOrIp%",hostOrIp)+'","",5000,"'+iconSuccess+'")')
        hostupConfirmed = True
    else:
        #otherwise we determine the success by pinging (default behaviour)
        try:
            timecount = 1
            while timecount <= pingTimeout:
                delay = ping.do_one(hostOrIp, 1)
                if delay == None:
                    if (enablePingCounterNotifies == "true"):
                        xbmc.executebuiltin('XBMC.Notification("'+language(60001).replace("%hostOrIp%",hostOrIp)+'","'+language(60002).replace("%timecount%",str(timecount)).replace("%timeout%",str(pingTimeout))+'",5000,"'+iconConnect+'")')
                    timecount = timecount+1
                else:
                    break
            if delay == None:
                # no ping reply within the timeout: assume wakeup failed
                xbmc.sleep(1000)
                if (enableHostupNotifies == "true"):
                    xbmc.executebuiltin('XBMC.Notification("'+language(60003).replace("%hostOrIp%",hostOrIp)+'","",5000,"'+iconError+'")')
            else:
                xbmc.sleep(1000)
                if (enableHostupNotifies == "true"):
                    xbmc.executebuiltin('XBMC.Notification("'+language(60004).replace("%hostOrIp%",hostOrIp)+'","",5000,"'+iconSuccess+'")')
                hostupConfirmed = True
        except socket.error, (errno, msg):
            xbmc.sleep(1000)
            print 'script.advanced.wol: Error No.: '+str(errno)+' / Error Msg.: '+msg.decode("utf-8","ignore")
            # map the common winsock/linux error codes to localized messages
            if (enablePingCounterNotifies == "true"):
                if errno == 11004:
                    xbmc.executebuiltin('XBMC.Notification("'+language(60005)+'","'+language(60006).replace("%hostOrIp%",hostOrIp)+'",10000,"'+iconError+'")')
                elif errno == 10013:
                    if sys.platform == 'win32':
                        xbmc.executebuiltin('XBMC.Notification("'+language(60005)+'","'+language(60009)+'",20000,"'+iconError+'")')
                elif errno == 1:
                    if sys.platform == 'linux2':
                        xbmc.executebuiltin('XBMC.Notification("'+language(60005)+'","'+language(60010)+'",20000,"'+iconError+'")')
                else:
                    xbmc.executebuiltin('XBMC.Notification("'+language(60005)+'","'+msg.decode("utf-8","ignore")+'",20000,"'+iconError+'")')
    # Things to perform after successful wake-up
    if (hostupConfirmed == True):
        # Launch additional command passed with parameters, if it should be delayed to after successful wakeup
        if ((launchcommand == True) & (delaycommand == True)):
            if (enableHostupNotifies == "true"):
                xbmc.executebuiltin('XBMC.Notification("'+language(60004).replace("%hostOrIp%",hostOrIp)+'","'+language(60007)+'",5000,"'+iconSuccess+'")')
            xbmc.sleep(1000)
            xbmc.executebuiltin(sys.argv[1])
        # Initiate XBMC-library-updates, if we are in autostart and it is set in the settings.
        if (isAutostart == True):
            if (((updateVideoLibraryAfterWol == "true") or (updateMusicLibraryAfterWol == "true")) and (libraryUpdatesDelay > 0)):
                xbmc.sleep(libraryUpdatesDelay*1000)
                if (updateVideoLibraryAfterWol == "true"):
                    print 'script.advanced.wol: Initiating Video Library Update'
                    xbmc.executebuiltin('UpdateLibrary("video")')
                if (updateMusicLibraryAfterWol == "true"):
                    print 'script.advanced.wol: Initiating Music Library Update'
                    xbmc.executebuiltin('UpdateLibrary("music")')
        # Continue sending WOL-packets, if configured in the settings
        if (continuousWol == "true"):
            xbmc.sleep(5000)
            if (enableLaunchNotifies == "true"):
                # Send Notification regarding continuous WOL-packets
                xbmc.executebuiltin('XBMC.Notification("'+language(53020)+'","'+language(60008).replace("%continuousWolDelay%",str(continuousWolDelay))+'",5000,"'+iconSuccess+'")')
            # the previousTime-functionality to stop continuous WOL-packets after XBMC returns from standby was suggested by XBMC-forum-user "jandias" (THANKS!)
            previousTime = time.time()
            countingSeconds = 0
            while (not xbmc.abortRequested):
                # a >5s gap between iterations means XBMC slept in between
                if ((continuousWolAfterStandby == "false") and ( time.time()-previousTime > 5)):
                    break
                else:
                    previousTime = time.time()
                xbmc.sleep(1000)
                if (countingSeconds == continuousWolDelay):
                    xbmc.executebuiltin('XBMC.WakeOnLan("'+macAddress+'")')
                    print 'script.advanced.wol: WakeOnLan signal sent to MAC-Address '+macAddress
                    countingSeconds = 0
                else:
                    countingSeconds+=1
    print 'script.advanced.wol: Closing WOL script'
    return
if __name__ == '__main__':
main() | mandark/xbmc-script.advanced.wol | default.py | Python | gpl-2.0 | 7,465 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RElemstatlearn(RPackage):
    """ElemStatLearn: Data Sets, Functions and Examples from the Book: "The
    Elements of Statistical Learning, Data Mining, Inference, and Prediction" by
    Trevor Hastie, Robert Tibshirani and Jerome Friedman"""

    homepage = "https://cloud.r-project.org/package=ElemStatLearn"
    # Fetched from the CRAN Archive: the package has been archived upstream.
    url      = "https://cloud.r-project.org/src/contrib/Archive/ElemStatLearn/ElemStatLearn_2015.6.26.2.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/ElemStatLearn"

    version('2015.6.26.2', sha256='a0f94a72f6188a0a5c855e3362b6b849bf7fd14efc2d824a8d2581f8bb1bd7fa')

    depends_on('r@2.10.0:', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/r-elemstatlearn/package.py | Python | lgpl-2.1 | 883 |
# -*- coding: cp1252 -*-
##
# <p> Portions copyright © 2005-2012 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under a BSD-style licence.</p>
##
# 2010-04-25 SJM fix zoom factors cooking logic
# 2010-04-15 CW r4253 fix zoom factors cooking logic
# 2010-04-09 CW r4248 add a flag so xlutils knows whether or not to write a PANE record
# 2010-03-29 SJM Fixed bug in adding new empty rows in put_cell_ragged
# 2010-03-28 SJM Tailored put_cell method for each of ragged_rows=False (fixed speed regression) and =True (faster)
# 2010-03-25 CW r4236 Slight refactoring to remove method calls
# 2010-03-25 CW r4235 Collapse expand_cells into put_cell and enhance the raggedness. This should save even more memory!
# 2010-03-25 CW r4234 remove duplicate chunks for extend_cells; refactor to remove put_number_cell and put_blank_cell which essentially duplicated the code of put_cell
# 2010-03-10 SJM r4222 Added reading of the PANE record.
# 2010-03-10 SJM r4221 Preliminary work on "cooked" mag factors; use at own peril
# 2010-03-01 SJM Reading SCL record
# 2010-03-01 SJM Added ragged_rows functionality
# 2009-08-23 SJM Reduced CPU time taken by parsing MULBLANK records.
# 2009-08-18 SJM Used __slots__ and sharing to reduce memory consumed by Rowinfo instances
# 2009-05-31 SJM Fixed problem with no CODEPAGE record on extremely minimal BIFF2.x 3rd-party file
# 2009-04-27 SJM Integrated on_demand patch by Armando Serrano Lombillo
# 2008-02-09 SJM Excel 2.0: build XFs on the fly from cell attributes
# 2007-12-04 SJM Added support for Excel 2.x (BIFF2) files.
# 2007-10-11 SJM Added missing entry for blank cell type to ctype_text
# 2007-07-11 SJM Allow for BIFF2/3-style FORMAT record in BIFF4/8 file
# 2007-04-22 SJM Remove experimental "trimming" facility.
from __future__ import print_function
from struct import unpack, calcsize
import time
from .biffh import *
from .timemachine import *
from .formula import dump_formula, decompile_formula, rangename2d, FMLA_TYPE_CELL, FMLA_TYPE_SHARED
from .formatting import nearest_colour_index, Format
# Module debug switches; set non-zero to enable verbose diagnostic output.
DEBUG = 0
OBJ_MSO_DEBUG = 0
# Defaults for sheet view options, applied when the file lacks a WINDOW2 record.
_WINDOW2_options = (
    # Attribute names and initial values to use in case
    # a WINDOW2 record is not written.
    ("show_formulas", 0),
    ("show_grid_lines", 1),
    ("show_sheet_headers", 1),
    ("panes_are_frozen", 0),
    ("show_zero_values", 1),
    ("automatic_grid_line_colour", 1),
    ("columns_from_right_to_left", 0),
    ("show_outline_symbols", 1),
    ("remove_splits_if_pane_freeze_is_removed", 0),
    # Multiple sheets can be selected, but only one can be active
    # (hold down Ctrl and click multiple tabs in the file in OOo)
    ("sheet_selected", 0),
    # "sheet_visible" should really be called "sheet_active"
    # and is 1 when this sheet is the sheet displayed when the file
    # is open. More than likely only one sheet should ever be set as
    # visible.
    # This would correspond to the Book's sheet_active attribute, but
    # that doesn't exist as WINDOW1 records aren't currently processed.
    # The real thing is the visibility attribute from the BOUNDSHEET record.
    ("sheet_visible", 0),
    ("show_in_page_break_preview", 0),
    )
##
# <p>Contains the data for one worksheet.</p>
#
# <p>In the cell access functions, "rowx" is a row index, counting from zero, and "colx" is a
# column index, counting from zero.
# Negative values for row/column indexes and slice positions are supported in the expected fashion.</p>
#
# <p>For information about cell types and cell values, refer to the documentation of the {@link #Cell} class.</p>
#
# <p>WARNING: You don't call this class yourself. You access Sheet objects via the Book object that
# was returned when you called xlrd.open_workbook("myfile.xls").</p>
class Sheet(BaseObject):
##
# Name of sheet.
name = ''
##
# A reference to the Book object to which this sheet belongs.
# Example usage: some_sheet.book.datemode
book = None
##
# Number of rows in sheet. A row index is in range(thesheet.nrows).
nrows = 0
##
# Nominal number of columns in sheet. It is 1 + the maximum column index
# found, ignoring trailing empty cells. See also open_workbook(ragged_rows=?)
# and Sheet.{@link #Sheet.row_len}(row_index).
ncols = 0
##
# The map from a column index to a {@link #Colinfo} object. Often there is an entry
# in COLINFO records for all column indexes in range(257).
# Note that xlrd ignores the entry for the non-existent
# 257th column. On the other hand, there may be no entry for unused columns.
# <br /> -- New in version 0.6.1. Populated only if open_workbook(formatting_info=True).
colinfo_map = {}
##
# The map from a row index to a {@link #Rowinfo} object. Note that it is possible
# to have missing entries -- at least one source of XLS files doesn't
# bother writing ROW records.
# <br /> -- New in version 0.6.1. Populated only if open_workbook(formatting_info=True).
rowinfo_map = {}
##
# List of address ranges of cells containing column labels.
# These are set up in Excel by Insert > Name > Labels > Columns.
# <br> -- New in version 0.6.0
# <br>How to deconstruct the list:
# <pre>
# for crange in thesheet.col_label_ranges:
# rlo, rhi, clo, chi = crange
# for rx in xrange(rlo, rhi):
# for cx in xrange(clo, chi):
# print "Column label at (rowx=%d, colx=%d) is %r" \
# (rx, cx, thesheet.cell_value(rx, cx))
# </pre>
col_label_ranges = []
##
# List of address ranges of cells containing row labels.
# For more details, see <i>col_label_ranges</i> above.
# <br> -- New in version 0.6.0
row_label_ranges = []
##
# List of address ranges of cells which have been merged.
# These are set up in Excel by Format > Cells > Alignment, then ticking
# the "Merge cells" box.
# <br> -- New in version 0.6.1. Extracted only if open_workbook(formatting_info=True).
# <br>How to deconstruct the list:
# <pre>
# for crange in thesheet.merged_cells:
# rlo, rhi, clo, chi = crange
# for rowx in xrange(rlo, rhi):
# for colx in xrange(clo, chi):
# # cell (rlo, clo) (the top left one) will carry the data
# # and formatting info; the remainder will be recorded as
# # blank cells, but a renderer will apply the formatting info
# # for the top left cell (e.g. border, pattern) to all cells in
# # the range.
# </pre>
merged_cells = []
##
# Mapping of (rowx, colx) to list of (offset, font_index) tuples. The offset
# defines where in the string the font begins to be used.
# Offsets are expected to be in ascending order.
# If the first offset is not zero, the meaning is that the cell's XF's font should
# be used from offset 0.
# <br /> This is a sparse mapping. There is no entry for cells that are not formatted with
# rich text.
# <br>How to use:
# <pre>
# runlist = thesheet.rich_text_runlist_map.get((rowx, colx))
# if runlist:
# for offset, font_index in runlist:
# # do work here.
# pass
# </pre>
# Populated only if open_workbook(formatting_info=True).
# <br /> -- New in version 0.7.2.
# <br />
rich_text_runlist_map = {}
##
# Default column width from DEFCOLWIDTH record, else None.
# From the OOo docs:<br />
# """Column width in characters, using the width of the zero character
# from default font (first FONT record in the file). Excel adds some
# extra space to the default width, depending on the default font and
# default font size. The algorithm how to exactly calculate the resulting
# column width is not known.<br />
# Example: The default width of 8 set in this record results in a column
# width of 8.43 using Arial font with a size of 10 points."""<br />
# For the default hierarchy, refer to the {@link #Colinfo} class.
# <br /> -- New in version 0.6.1
defcolwidth = None
##
# Default column width from STANDARDWIDTH record, else None.
# From the OOo docs:<br />
# """Default width of the columns in 1/256 of the width of the zero
# character, using default font (first FONT record in the file)."""<br />
# For the default hierarchy, refer to the {@link #Colinfo} class.
# <br /> -- New in version 0.6.1
standardwidth = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_row_height = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_row_height_mismatch = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_row_hidden = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_additional_space_above = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_additional_space_below = None
##
# Visibility of the sheet. 0 = visible, 1 = hidden (can be unhidden
# by user -- Format/Sheet/Unhide), 2 = "very hidden" (can be unhidden
# only by VBA macro).
visibility = 0
##
# A 256-element tuple corresponding to the contents of the GCW record for this sheet.
# If no such record, treat as all bits zero.
# Applies to BIFF4-7 only. See docs of the {@link #Colinfo} class for discussion.
gcw = (0, ) * 256
##
# <p>A list of {@link #Hyperlink} objects corresponding to HLINK records found
# in the worksheet.<br />-- New in version 0.7.2 </p>
hyperlink_list = []
##
# <p>A sparse mapping from (rowx, colx) to an item in {@link #Sheet.hyperlink_list}.
# Cells not covered by a hyperlink are not mapped.
# It is possible using the Excel UI to set up a hyperlink that
# covers a larger-than-1x1 rectangle of cells.
# Hyperlink rectangles may overlap (Excel doesn't check).
# When a multiply-covered cell is clicked on, the hyperlink that is activated
# (and the one that is mapped here) is the last in hyperlink_list.
# <br />-- New in version 0.7.2 </p>
hyperlink_map = {}
##
# <p>A sparse mapping from (rowx, colx) to a {@link #Note} object.
# Cells not containing a note ("comment") are not mapped.
# <br />-- New in version 0.7.2 </p>
cell_note_map = {}
##
# Number of columns in left pane (frozen panes; for split panes, see comments below in code)
vert_split_pos = 0
##
# Number of rows in top pane (frozen panes; for split panes, see comments below in code)
horz_split_pos = 0
##
# Index of first visible row in bottom frozen/split pane
horz_split_first_visible = 0
##
# Index of first visible column in right frozen/split pane
vert_split_first_visible = 0
##
# Frozen panes: ignore it. Split panes: explanation and diagrams in OOo docs.
split_active_pane = 0
##
# Boolean specifying if a PANE record was present, ignore unless you're xlutils.copy
has_pane_record = 0
##
# A list of the horizontal page breaks in this sheet.
# Breaks are tuples in the form (index of row after break, start col index, end col index).
# Populated only if open_workbook(formatting_info=True).
# <br /> -- New in version 0.7.2
horizontal_page_breaks = []
##
# A list of the vertical page breaks in this sheet.
# Breaks are tuples in the form (index of col after break, start row index, end row index).
# Populated only if open_workbook(formatting_info=True).
# <br /> -- New in version 0.7.2
vertical_page_breaks = []
    def __init__(self, book, position, name, number):
        """
        Initialise an (as yet unread) worksheet.

        book     -- the owning Book object (supplies options and shared state).
        position -- stream offset of this sheet's BOF record inside the book.
        name     -- sheet name from the BOUNDSHEET record.
        number   -- zero-based sheet index within the book.

        Note: this only sets up empty containers and copies options from the
        Book; the actual cell data is loaded later by the read() method.
        """
        self.book = book
        self.biff_version = book.biff_version
        self._position = position
        self.logfile = book.logfile
        self.pickleable = book.pickleable
        # bt/bf are one-element "blank type" / "blank format" templates;
        # multiplying them (e.g. bt * n) yields n empty-cell placeholders.
        # array.array is used when available and compatible with pickling.
        if array_array and (CAN_PICKLE_ARRAY or not book.pickleable):
            # use array
            self.bt = array_array('B', [XL_CELL_EMPTY])
            self.bf = array_array('h', [-1])
        else:
            # don't use array
            self.bt = [XL_CELL_EMPTY]
            self.bf = [-1]
        self.name = name
        self.number = number
        self.verbosity = book.verbosity
        self.formatting_info = book.formatting_info
        self.ragged_rows = book.ragged_rows
        # put_cell is bound once here so record handlers avoid a per-cell branch.
        if self.ragged_rows:
            self.put_cell = self.put_cell_ragged
        else:
            self.put_cell = self.put_cell_unragged
        self._xf_index_to_xl_type_map = book._xf_index_to_xl_type_map
        self.nrows = 0 # actual, including possibly empty cells
        self.ncols = 0
        self._maxdatarowx = -1 # highest rowx containing a non-empty cell
        self._maxdatacolx = -1 # highest colx containing a non-empty cell
        self._dimnrows = 0 # as per DIMENSIONS record
        self._dimncols = 0
        # Parallel per-row lists: values, cell types, and XF indexes.
        self._cell_values = []
        self._cell_types = []
        self._cell_xf_indexes = []
        self.defcolwidth = None
        self.standardwidth = None
        self.default_row_height = None
        self.default_row_height_mismatch = 0
        self.default_row_hidden = 0
        self.default_additional_space_above = 0
        self.default_additional_space_below = 0
        self.colinfo_map = {}
        self.rowinfo_map = {}
        self.col_label_ranges = []
        self.row_label_ranges = []
        self.merged_cells = []
        self.rich_text_runlist_map = {}
        self.horizontal_page_breaks = []
        self.vertical_page_breaks = []
        # Counts of how often cell_xf_index() resolved via cell/row/col/default.
        self._xf_index_stats = [0, 0, 0, 0]
        self.visibility = book._sheet_visibility[number] # from BOUNDSHEET record
        for attr, defval in _WINDOW2_options:
            setattr(self, attr, defval)
        self.first_visible_rowx = 0
        self.first_visible_colx = 0
        self.gridline_colour_index = 0x40
        self.gridline_colour_rgb = None # pre-BIFF8
        self.hyperlink_list = []
        self.hyperlink_map = {}
        self.cell_note_map = {}
        # Values calculated by xlrd to predict the mag factors that
        # will actually be used by Excel to display your worksheet.
        # Pass these values to xlwt when writing XLS files.
        # Warning 1: Behaviour of OOo Calc and Gnumeric has been observed to differ from Excel's.
        # Warning 2: A value of zero means almost exactly what it says. Your sheet will be
        # displayed as a very tiny speck on the screen. xlwt will reject attempts to set
        # a mag_factor that is not (10 <= mag_factor <= 400).
        self.cooked_page_break_preview_mag_factor = 60
        self.cooked_normal_view_mag_factor = 100
        # Values (if any) actually stored on the XLS file
        self.cached_page_break_preview_mag_factor = None # from WINDOW2 record
        self.cached_normal_view_mag_factor = None # from WINDOW2 record
        self.scl_mag_factor = None # from SCL record
        self._ixfe = None # BIFF2 only
        self._cell_attr_to_xfx = {} # BIFF2.0 only
        #### Don't initialise this here, use class attribute initialisation.
        #### self.gcw = (0, ) * 256 ####
        # Hard row limit depends on BIFF version: 65536 for BIFF8+, else 16384.
        if self.biff_version >= 80:
            self.utter_max_rows = 65536
        else:
            self.utter_max_rows = 16384
        self.utter_max_cols = 256
        self._first_full_rowx = -1
        # self._put_cell_exceptions = 0
        # self._put_cell_row_widenings = 0
        # self._put_cell_rows_appended = 0
        # self._put_cell_cells_appended = 0
##
# {@link #Cell} object in the given row and column.
def cell(self, rowx, colx):
if self.formatting_info:
xfx = self.cell_xf_index(rowx, colx)
else:
xfx = None
return Cell(
self._cell_types[rowx][colx],
self._cell_values[rowx][colx],
xfx,
)
##
# Value of the cell in the given row and column.
def cell_value(self, rowx, colx):
return self._cell_values[rowx][colx]
##
# Type of the cell in the given row and column.
# Refer to the documentation of the {@link #Cell} class.
def cell_type(self, rowx, colx):
return self._cell_types[rowx][colx]
##
# XF index of the cell in the given row and column.
# This is an index into Book.{@link #Book.xf_list}.
# <br /> -- New in version 0.6.1
def cell_xf_index(self, rowx, colx):
self.req_fmt_info()
xfx = self._cell_xf_indexes[rowx][colx]
if xfx > -1:
self._xf_index_stats[0] += 1
return xfx
# Check for a row xf_index
try:
xfx = self.rowinfo_map[rowx].xf_index
if xfx > -1:
self._xf_index_stats[1] += 1
return xfx
except KeyError:
pass
# Check for a column xf_index
try:
xfx = self.colinfo_map[colx].xf_index
if xfx == -1: xfx = 15
self._xf_index_stats[2] += 1
return xfx
except KeyError:
# If all else fails, 15 is used as hardwired global default xf_index.
self._xf_index_stats[3] += 1
return 15
##
# Returns the effective number of cells in the given row. For use with
# open_workbook(ragged_rows=True) which is likely to produce rows
# with fewer than {@link #Sheet.ncols} cells.
# <br /> -- New in version 0.7.2
def row_len(self, rowx):
return len(self._cell_values[rowx])
##
# Returns a sequence of the {@link #Cell} objects in the given row.
def row(self, rowx):
return [
self.cell(rowx, colx)
for colx in xrange(len(self._cell_values[rowx]))
]
##
# Returns a slice of the types
# of the cells in the given row.
def row_types(self, rowx, start_colx=0, end_colx=None):
if end_colx is None:
return self._cell_types[rowx][start_colx:]
return self._cell_types[rowx][start_colx:end_colx]
##
# Returns a slice of the values
# of the cells in the given row.
def row_values(self, rowx, start_colx=0, end_colx=None):
if end_colx is None:
return self._cell_values[rowx][start_colx:]
return self._cell_values[rowx][start_colx:end_colx]
##
# Returns a slice of the {@link #Cell} objects in the given row.
def row_slice(self, rowx, start_colx=0, end_colx=None):
nc = len(self._cell_values[rowx])
if start_colx < 0:
start_colx += nc
if start_colx < 0:
start_colx = 0
if end_colx is None or end_colx > nc:
end_colx = nc
elif end_colx < 0:
end_colx += nc
return [
self.cell(rowx, colx)
for colx in xrange(start_colx, end_colx)
]
##
# Returns a slice of the {@link #Cell} objects in the given column.
def col_slice(self, colx, start_rowx=0, end_rowx=None):
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self.cell(rowx, colx)
for rowx in xrange(start_rowx, end_rowx)
]
##
# Returns a slice of the values of the cells in the given column.
def col_values(self, colx, start_rowx=0, end_rowx=None):
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self._cell_values[rowx][colx]
for rowx in xrange(start_rowx, end_rowx)
]
##
# Returns a slice of the types of the cells in the given column.
def col_types(self, colx, start_rowx=0, end_rowx=None):
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self._cell_types[rowx][colx]
for rowx in xrange(start_rowx, end_rowx)
]
##
# Returns a sequence of the {@link #Cell} objects in the given column.
    def col(self, colx):
        return self.col_slice(colx)
    # Above two lines just for the docs. Here's the real McCoy:
    # rebinding col to col_slice lets callers use the start/end keyword
    # arguments too, while the doc tools still see a simple col(colx).
    col = col_slice
# === Following methods are used in building the worksheet.
# === They are not part of the API.
    def tidy_dimensions(self):
        """
        Post-processing after all records have been read: reconcile the
        sheet's nrows/ncols with merged-cell ranges, warn when they differ
        from the DIMENSIONS record, and (unless ragged_rows) pad every row
        out to ncols cells.  Not part of the public API.
        """
        if self.verbosity >= 3:
            fprintf(self.logfile,
                "tidy_dimensions: nrows=%d ncols=%d \n",
                self.nrows, self.ncols,
                )
        if 1 and self.merged_cells:
            # Merged ranges can extend beyond the last cell record seen;
            # grow nrows/ncols so those ranges are addressable.
            nr = nc = 0
            umaxrows = self.utter_max_rows
            umaxcols = self.utter_max_cols
            for crange in self.merged_cells:
                rlo, rhi, clo, chi = crange
                if not (0 <= rlo < rhi <= umaxrows) \
                or not (0 <= clo < chi <= umaxcols):
                    fprintf(self.logfile,
                        "*** WARNING: sheet #%d (%r), MERGEDCELLS bad range %r\n",
                        self.number, self.name, crange)
                if rhi > nr: nr = rhi
                if chi > nc: nc = chi
            if nc > self.ncols:
                self.ncols = nc
            if nr > self.nrows:
                # we put one empty cell at (nr-1,0) to make sure
                # we have the right number of rows. The ragged rows
                # will sort out the rest if needed.
                self.put_cell(nr-1, 0, XL_CELL_EMPTY, -1)
        if self.verbosity >= 1 \
        and (self.nrows != self._dimnrows or self.ncols != self._dimncols):
            fprintf(self.logfile,
                "NOTE *** sheet %d (%r): DIMENSIONS R,C = %d,%d should be %d,%d\n",
                self.number,
                self.name,
                self._dimnrows,
                self._dimncols,
                self.nrows,
                self.ncols,
                )
        if not self.ragged_rows:
            # fix ragged rows
            ncols = self.ncols
            s_cell_types = self._cell_types
            s_cell_values = self._cell_values
            s_cell_xf_indexes = self._cell_xf_indexes
            s_fmt_info = self.formatting_info
            # for rowx in xrange(self.nrows):
            # Rows from _first_full_rowx onward are already full width
            # (see put_cell_unragged), so only earlier rows need padding;
            # -2 is the sentinel meaning that optimisation is invalid.
            if self._first_full_rowx == -2:
                ubound = self.nrows
            else:
                ubound = self._first_full_rowx
            for rowx in xrange(ubound):
                trow = s_cell_types[rowx]
                rlen = len(trow)
                nextra = ncols - rlen
                if nextra > 0:
                    s_cell_values[rowx][rlen:] = [''] * nextra
                    # self.bt / self.bf are one-element blank templates.
                    trow[rlen:] = self.bt * nextra
                    if s_fmt_info:
                        s_cell_xf_indexes[rowx][rlen:] = self.bf * nextra
    def put_cell_ragged(self, rowx, colx, ctype, value, xf_index):
        """
        Store one cell, allowing rows to remain ragged (shorter than ncols).
        Used as self.put_cell when open_workbook(ragged_rows=True).

        ctype of None means "derive the cell type from the XF record"
        (numeric records share this path).  Not part of the public API.
        """
        if ctype is None:
            # we have a number, so look up the cell type
            ctype = self._xf_index_to_xl_type_map[xf_index]
        assert 0 <= colx < self.utter_max_cols
        assert 0 <= rowx < self.utter_max_rows
        fmt_info = self.formatting_info
        try:
            nr = rowx + 1
            if self.nrows < nr:
                # Append empty rows up to and including rowx.
                scta = self._cell_types.append
                scva = self._cell_values.append
                scxa = self._cell_xf_indexes.append
                bt = self.bt
                bf = self.bf
                for _unused in xrange(self.nrows, nr):
                    # bt * 0 / bf * 0 yield empty containers of the right type
                    # (array or list, matching what __init__ chose).
                    scta(bt * 0)
                    scva([])
                    if fmt_info:
                        scxa(bf * 0)
                self.nrows = nr
            types_row = self._cell_types[rowx]
            values_row = self._cell_values[rowx]
            if fmt_info:
                fmt_row = self._cell_xf_indexes[rowx]
            ltr = len(types_row)
            if colx >= self.ncols:
                self.ncols = colx + 1
            num_empty = colx - ltr
            if not num_empty:
                # most common case: colx == previous colx + 1
                # self._put_cell_cells_appended += 1
                types_row.append(ctype)
                values_row.append(value)
                if fmt_info:
                    fmt_row.append(xf_index)
                return
            if num_empty > 0:
                # Widen the row: num_empty blank cells plus a slot for the
                # new cell itself, which is then filled by index below.
                num_empty += 1
                # self._put_cell_row_widenings += 1
                # types_row.extend(self.bt * num_empty)
                # values_row.extend([''] * num_empty)
                # if fmt_info:
                #     fmt_row.extend(self.bf * num_empty)
                types_row[ltr:] = self.bt * num_empty
                values_row[ltr:] = [''] * num_empty
                if fmt_info:
                    fmt_row[ltr:] = self.bf * num_empty
            # num_empty < 0 falls through: overwrite an existing slot.
            types_row[colx] = ctype
            values_row[colx] = value
            if fmt_info:
                fmt_row[colx] = xf_index
        except:
            print("put_cell", rowx, colx, file=self.logfile)
            raise
    def put_cell_unragged(self, rowx, colx, ctype, value, xf_index):
        """
        Store one cell, keeping rows rectangular-ish (rows are widened to
        self.ncols on demand).  Used as self.put_cell when ragged_rows is
        False (the default).

        ctype of None means "derive the cell type from the XF record"
        (numeric records share this path).  The fast path is a plain
        indexed store; IndexError triggers the grow-the-grid slow path.
        Not part of the public API.
        """
        if ctype is None:
            # we have a number, so look up the cell type
            ctype = self._xf_index_to_xl_type_map[xf_index]
        # assert 0 <= colx < self.utter_max_cols
        # assert 0 <= rowx < self.utter_max_rows
        try:
            self._cell_types[rowx][colx] = ctype
            self._cell_values[rowx][colx] = value
            if self.formatting_info:
                self._cell_xf_indexes[rowx][colx] = xf_index
        except IndexError:
            # print >> self.logfile, "put_cell extending", rowx, colx
            # self.extend_cells(rowx+1, colx+1)
            # self._put_cell_exceptions += 1
            nr = rowx + 1
            nc = colx + 1
            assert 1 <= nc <= self.utter_max_cols
            assert 1 <= nr <= self.utter_max_rows
            if nc > self.ncols:
                self.ncols = nc
                # The row self._first_full_rowx and all subsequent rows
                # are guaranteed to have length == self.ncols. Thus the
                # "fix ragged rows" section of the tidy_dimensions method
                # doesn't need to examine them.
                if nr < self.nrows:
                    # cell data is not in non-descending row order *AND*
                    # self.ncols has been bumped up.
                    # This very rare case ruins this optimisation.
                    self._first_full_rowx = -2
                elif rowx > self._first_full_rowx > -2:
                    self._first_full_rowx = rowx
            if nr <= self.nrows:
                # New cell is in an existing row, so extend that row (if necessary).
                # Note that nr < self.nrows means that the cell data
                # is not in ascending row order!!
                trow = self._cell_types[rowx]
                nextra = self.ncols - len(trow)
                if nextra > 0:
                    # self._put_cell_row_widenings += 1
                    trow.extend(self.bt * nextra)
                    if self.formatting_info:
                        self._cell_xf_indexes[rowx].extend(self.bf * nextra)
                    self._cell_values[rowx].extend([''] * nextra)
            else:
                # Append full-width blank rows up to and including rowx.
                scta = self._cell_types.append
                scva = self._cell_values.append
                scxa = self._cell_xf_indexes.append
                fmt_info = self.formatting_info
                nc = self.ncols
                bt = self.bt
                bf = self.bf
                for _unused in xrange(self.nrows, nr):
                    # self._put_cell_rows_appended += 1
                    scta(bt * nc)
                    scva([''] * nc)
                    if fmt_info:
                        scxa(bf * nc)
                self.nrows = nr
            # === end of code from extend_cells()
            try:
                self._cell_types[rowx][colx] = ctype
                self._cell_values[rowx][colx] = value
                if self.formatting_info:
                    self._cell_xf_indexes[rowx][colx] = xf_index
            except:
                print("put_cell", rowx, colx, file=self.logfile)
                raise
        except:
            print("put_cell", rowx, colx, file=self.logfile)
            raise
# === Methods after this line neither know nor care about how cells are stored.
def read(self, bk):
global rc_stats
DEBUG = 0
blah = DEBUG or self.verbosity >= 2
blah_rows = DEBUG or self.verbosity >= 4
blah_formulas = 0 and blah
r1c1 = 0
oldpos = bk._position
bk._position = self._position
XL_SHRFMLA_ETC_ETC = (
XL_SHRFMLA, XL_ARRAY, XL_TABLEOP, XL_TABLEOP2,
XL_ARRAY2, XL_TABLEOP_B2,
)
self_put_cell = self.put_cell
local_unpack = unpack
bk_get_record_parts = bk.get_record_parts
bv = self.biff_version
fmt_info = self.formatting_info
do_sst_rich_text = fmt_info and bk._rich_text_runlist_map
rowinfo_sharing_dict = {}
txos = {}
eof_found = 0
while 1:
# if DEBUG: print "SHEET.READ: about to read from position %d" % bk._position
rc, data_len, data = bk_get_record_parts()
# if rc in rc_stats:
# rc_stats[rc] += 1
# else:
# rc_stats[rc] = 1
# if DEBUG: print "SHEET.READ: op 0x%04x, %d bytes %r" % (rc, data_len, data)
if rc == XL_NUMBER:
# [:14] in following stmt ignores extraneous rubbish at end of record.
# Sample file testEON-8.xls supplied by Jan Kraus.
rowx, colx, xf_index, d = local_unpack('<HHHd', data[:14])
# if xf_index == 0:
# fprintf(self.logfile,
# "NUMBER: r=%d c=%d xfx=%d %f\n", rowx, colx, xf_index, d)
self_put_cell(rowx, colx, None, d, xf_index)
elif rc == XL_LABELSST:
rowx, colx, xf_index, sstindex = local_unpack('<HHHi', data)
# print "LABELSST", rowx, colx, sstindex, bk._sharedstrings[sstindex]
self_put_cell(rowx, colx, XL_CELL_TEXT, bk._sharedstrings[sstindex], xf_index)
if do_sst_rich_text:
runlist = bk._rich_text_runlist_map.get(sstindex)
if runlist:
self.rich_text_runlist_map[(rowx, colx)] = runlist
elif rc == XL_LABEL:
rowx, colx, xf_index = local_unpack('<HHH', data[0:6])
if bv < BIFF_FIRST_UNICODE:
strg = unpack_string(data, 6, bk.encoding or bk.derive_encoding(), lenlen=2)
else:
strg = unpack_unicode(data, 6, lenlen=2)
self_put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
elif rc == XL_RSTRING:
rowx, colx, xf_index = local_unpack('<HHH', data[0:6])
if bv < BIFF_FIRST_UNICODE:
strg, pos = unpack_string_update_pos(data, 6, bk.encoding or bk.derive_encoding(), lenlen=2)
nrt = BYTES_ORD(data[pos])
pos += 1
runlist = []
for _unused in xrange(nrt):
runlist.append(unpack('<BB', data[pos:pos+2]))
pos += 2
assert pos == len(data)
else:
strg, pos = unpack_unicode_update_pos(data, 6, lenlen=2)
nrt = unpack('<H', data[pos:pos+2])[0]
pos += 2
runlist = []
for _unused in xrange(nrt):
runlist.append(unpack('<HH', data[pos:pos+4]))
pos += 4
assert pos == len(data)
self_put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
self.rich_text_runlist_map[(rowx, colx)] = runlist
elif rc == XL_RK:
rowx, colx, xf_index = local_unpack('<HHH', data[:6])
d = unpack_RK(data[6:10])
self_put_cell(rowx, colx, None, d, xf_index)
elif rc == XL_MULRK:
mulrk_row, mulrk_first = local_unpack('<HH', data[0:4])
mulrk_last, = local_unpack('<H', data[-2:])
pos = 4
for colx in xrange(mulrk_first, mulrk_last+1):
xf_index, = local_unpack('<H', data[pos:pos+2])
d = unpack_RK(data[pos+2:pos+6])
pos += 6
self_put_cell(mulrk_row, colx, None, d, xf_index)
elif rc == XL_ROW:
# Version 0.6.0a3: ROW records are just not worth using (for memory allocation).
# Version 0.6.1: now used for formatting info.
if not fmt_info: continue
rowx, bits1, bits2 = local_unpack('<H4xH4xi', data[0:16])
if not(0 <= rowx < self.utter_max_rows):
print("*** NOTE: ROW record has row index %d; " \
"should have 0 <= rowx < %d -- record ignored!" \
% (rowx, self.utter_max_rows), file=self.logfile)
continue
key = (bits1, bits2)
r = rowinfo_sharing_dict.get(key)
if r is None:
rowinfo_sharing_dict[key] = r = Rowinfo()
# Using upkbits() is far too slow on a file
# with 30 sheets each with 10K rows :-(
# upkbits(r, bits1, (
# ( 0, 0x7FFF, 'height'),
# (15, 0x8000, 'has_default_height'),
# ))
# upkbits(r, bits2, (
# ( 0, 0x00000007, 'outline_level'),
# ( 4, 0x00000010, 'outline_group_starts_ends'),
# ( 5, 0x00000020, 'hidden'),
# ( 6, 0x00000040, 'height_mismatch'),
# ( 7, 0x00000080, 'has_default_xf_index'),
# (16, 0x0FFF0000, 'xf_index'),
# (28, 0x10000000, 'additional_space_above'),
# (29, 0x20000000, 'additional_space_below'),
# ))
# So:
r.height = bits1 & 0x7fff
r.has_default_height = (bits1 >> 15) & 1
r.outline_level = bits2 & 7
r.outline_group_starts_ends = (bits2 >> 4) & 1
r.hidden = (bits2 >> 5) & 1
r.height_mismatch = (bits2 >> 6) & 1
r.has_default_xf_index = (bits2 >> 7) & 1
r.xf_index = (bits2 >> 16) & 0xfff
r.additional_space_above = (bits2 >> 28) & 1
r.additional_space_below = (bits2 >> 29) & 1
if not r.has_default_xf_index:
r.xf_index = -1
self.rowinfo_map[rowx] = r
if 0 and r.xf_index > -1:
fprintf(self.logfile,
"**ROW %d %d %d\n",
self.number, rowx, r.xf_index)
if blah_rows:
print('ROW', rowx, bits1, bits2, file=self.logfile)
r.dump(self.logfile,
header="--- sh #%d, rowx=%d ---" % (self.number, rowx))
elif rc in XL_FORMULA_OPCODES: # 06, 0206, 0406
# DEBUG = 1
# if DEBUG: print "FORMULA: rc: 0x%04x data: %r" % (rc, data)
if bv >= 50:
rowx, colx, xf_index, result_str, flags = local_unpack('<HHH8sH', data[0:16])
lenlen = 2
tkarr_offset = 20
elif bv >= 30:
rowx, colx, xf_index, result_str, flags = local_unpack('<HHH8sH', data[0:16])
lenlen = 2
tkarr_offset = 16
else: # BIFF2
rowx, colx, cell_attr, result_str, flags = local_unpack('<HH3s8sB', data[0:16])
xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx, colx)
lenlen = 1
tkarr_offset = 16
if blah_formulas: # testing formula dumper
#### XXXX FIXME
fprintf(self.logfile, "FORMULA: rowx=%d colx=%d\n", rowx, colx)
fmlalen = local_unpack("<H", data[20:22])[0]
decompile_formula(bk, data[22:], fmlalen, FMLA_TYPE_CELL,
browx=rowx, bcolx=colx, blah=1, r1c1=r1c1)
if result_str[6:8] == b"\xFF\xFF":
first_byte = BYTES_ORD(result_str[0])
if first_byte == 0:
# need to read next record (STRING)
gotstring = 0
# if flags & 8:
if 1: # "flags & 8" applies only to SHRFMLA
# actually there's an optional SHRFMLA or ARRAY etc record to skip over
rc2, data2_len, data2 = bk.get_record_parts()
if rc2 == XL_STRING or rc2 == XL_STRING_B2:
gotstring = 1
elif rc2 == XL_ARRAY:
row1x, rownx, col1x, colnx, array_flags, tokslen = \
local_unpack("<HHBBBxxxxxH", data2[:14])
if blah_formulas:
fprintf(self.logfile, "ARRAY: %d %d %d %d %d\n",
row1x, rownx, col1x, colnx, array_flags)
# dump_formula(bk, data2[14:], tokslen, bv, reldelta=0, blah=1)
elif rc2 == XL_SHRFMLA:
row1x, rownx, col1x, colnx, nfmlas, tokslen = \
local_unpack("<HHBBxBH", data2[:10])
if blah_formulas:
fprintf(self.logfile, "SHRFMLA (sub): %d %d %d %d %d\n",
row1x, rownx, col1x, colnx, nfmlas)
decompile_formula(bk, data2[10:], tokslen, FMLA_TYPE_SHARED,
blah=1, browx=rowx, bcolx=colx, r1c1=r1c1)
elif rc2 not in XL_SHRFMLA_ETC_ETC:
raise XLRDError(
"Expected SHRFMLA, ARRAY, TABLEOP* or STRING record; found 0x%04x" % rc2)
# if DEBUG: print "gotstring:", gotstring
# now for the STRING record
if not gotstring:
rc2, _unused_len, data2 = bk.get_record_parts()
if rc2 not in (XL_STRING, XL_STRING_B2):
raise XLRDError("Expected STRING record; found 0x%04x" % rc2)
# if DEBUG: print "STRING: data=%r BIFF=%d cp=%d" % (data2, self.biff_version, bk.encoding)
strg = self.string_record_contents(data2)
self.put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
# if DEBUG: print "FORMULA strg %r" % strg
elif first_byte == 1:
# boolean formula result
value = BYTES_ORD(result_str[2])
self_put_cell(rowx, colx, XL_CELL_BOOLEAN, value, xf_index)
elif first_byte == 2:
# Error in cell
value = BYTES_ORD(result_str[2])
self_put_cell(rowx, colx, XL_CELL_ERROR, value, xf_index)
elif first_byte == 3:
# empty ... i.e. empty (zero-length) string, NOT an empty cell.
self_put_cell(rowx, colx, XL_CELL_TEXT, "", xf_index)
else:
raise XLRDError("unexpected special case (0x%02x) in FORMULA" % first_byte)
else:
# it is a number
d = local_unpack('<d', result_str)[0]
self_put_cell(rowx, colx, None, d, xf_index)
elif rc == XL_BOOLERR:
rowx, colx, xf_index, value, is_err = local_unpack('<HHHBB', data[:8])
# Note OOo Calc 2.0 writes 9-byte BOOLERR records.
# OOo docs say 8. Excel writes 8.
cellty = (XL_CELL_BOOLEAN, XL_CELL_ERROR)[is_err]
# if DEBUG: print "XL_BOOLERR", rowx, colx, xf_index, value, is_err
self_put_cell(rowx, colx, cellty, value, xf_index)
elif rc == XL_COLINFO:
if not fmt_info: continue
c = Colinfo()
first_colx, last_colx, c.width, c.xf_index, flags \
= local_unpack("<HHHHH", data[:10])
#### Colinfo.width is denominated in 256ths of a character,
#### *not* in characters.
if not(0 <= first_colx <= last_colx <= 256):
# Note: 256 instead of 255 is a common mistake.
# We silently ignore the non-existing 257th column in that case.
print("*** NOTE: COLINFO record has first col index %d, last %d; " \
"should have 0 <= first <= last <= 255 -- record ignored!" \
% (first_colx, last_colx), file=self.logfile)
del c
continue
upkbits(c, flags, (
( 0, 0x0001, 'hidden'),
( 1, 0x0002, 'bit1_flag'),
# *ALL* colinfos created by Excel in "default" cases are 0x0002!!
# Maybe it's "locked" by analogy with XFProtection data.
( 8, 0x0700, 'outline_level'),
(12, 0x1000, 'collapsed'),
))
for colx in xrange(first_colx, last_colx+1):
if colx > 255: break # Excel does 0 to 256 inclusive
self.colinfo_map[colx] = c
if 0:
fprintf(self.logfile,
"**COL %d %d %d\n",
self.number, colx, c.xf_index)
if blah:
fprintf(
self.logfile,
"COLINFO sheet #%d cols %d-%d: wid=%d xf_index=%d flags=0x%04x\n",
self.number, first_colx, last_colx, c.width, c.xf_index, flags,
)
c.dump(self.logfile, header='===')
elif rc == XL_DEFCOLWIDTH:
self.defcolwidth, = local_unpack("<H", data[:2])
if 0: print('DEFCOLWIDTH', self.defcolwidth, file=self.logfile)
elif rc == XL_STANDARDWIDTH:
if data_len != 2:
print('*** ERROR *** STANDARDWIDTH', data_len, repr(data), file=self.logfile)
self.standardwidth, = local_unpack("<H", data[:2])
if 0: print('STANDARDWIDTH', self.standardwidth, file=self.logfile)
elif rc == XL_GCW:
if not fmt_info: continue # useless w/o COLINFO
assert data_len == 34
assert data[0:2] == b"\x20\x00"
iguff = unpack("<8i", data[2:34])
gcw = []
for bits in iguff:
for j in xrange(32):
gcw.append(bits & 1)
bits >>= 1
self.gcw = tuple(gcw)
if 0:
showgcw = "".join(map(lambda x: "F "[x], gcw)).rstrip().replace(' ', '.')
print("GCW:", showgcw, file=self.logfile)
elif rc == XL_BLANK:
if not fmt_info: continue
rowx, colx, xf_index = local_unpack('<HHH', data[:6])
# if 0: print >> self.logfile, "BLANK", rowx, colx, xf_index
self_put_cell(rowx, colx, XL_CELL_BLANK, '', xf_index)
elif rc == XL_MULBLANK: # 00BE
if not fmt_info: continue
nitems = data_len >> 1
result = local_unpack("<%dH" % nitems, data)
rowx, mul_first = result[:2]
mul_last = result[-1]
# print >> self.logfile, "MULBLANK", rowx, mul_first, mul_last, data_len, nitems, mul_last + 4 - mul_first
assert nitems == mul_last + 4 - mul_first
pos = 2
for colx in xrange(mul_first, mul_last + 1):
self_put_cell(rowx, colx, XL_CELL_BLANK, '', result[pos])
pos += 1
elif rc == XL_DIMENSION or rc == XL_DIMENSION2:
# if data_len == 10:
# Was crashing on BIFF 4.0 file w/o the two trailing unused bytes.
# Reported by Ralph Heimburger.
if bv < 80:
dim_tuple = local_unpack('<HxxH', data[2:8])
else:
dim_tuple = local_unpack('<ixxH', data[4:12])
self.nrows, self.ncols = 0, 0
self._dimnrows, self._dimncols = dim_tuple
if bv in (21, 30, 40) and self.book.xf_list and not self.book._xf_epilogue_done:
self.book.xf_epilogue()
if blah:
fprintf(self.logfile,
"sheet %d(%r) DIMENSIONS: ncols=%d nrows=%d\n",
self.number, self.name, self._dimncols, self._dimnrows
)
elif rc == XL_HLINK:
self.handle_hlink(data)
elif rc == XL_QUICKTIP:
self.handle_quicktip(data)
elif rc == XL_EOF:
DEBUG = 0
if DEBUG: print("SHEET.READ: EOF", file=self.logfile)
eof_found = 1
break
elif rc == XL_OBJ:
# handle SHEET-level objects; note there's a separate Book.handle_obj
saved_obj = self.handle_obj(data)
if saved_obj: saved_obj_id = saved_obj.id
else: saved_obj_id = None
elif rc == XL_MSO_DRAWING:
self.handle_msodrawingetc(rc, data_len, data)
elif rc == XL_TXO:
txo = self.handle_txo(data)
if txo and saved_obj_id:
txos[saved_obj_id] = txo
saved_obj_id = None
elif rc == XL_NOTE:
self.handle_note(data, txos)
elif rc == XL_FEAT11:
self.handle_feat11(data)
elif rc in bofcodes: ##### EMBEDDED BOF #####
version, boftype = local_unpack('<HH', data[0:4])
if boftype != 0x20: # embedded chart
print("*** Unexpected embedded BOF (0x%04x) at offset %d: version=0x%04x type=0x%04x" \
% (rc, bk._position - data_len - 4, version, boftype), file=self.logfile)
while 1:
code, data_len, data = bk.get_record_parts()
if code == XL_EOF:
break
if DEBUG: print("---> found EOF", file=self.logfile)
elif rc == XL_COUNTRY:
bk.handle_country(data)
elif rc == XL_LABELRANGES:
pos = 0
pos = unpack_cell_range_address_list_update_pos(
self.row_label_ranges, data, pos, bv, addr_size=8,
)
pos = unpack_cell_range_address_list_update_pos(
self.col_label_ranges, data, pos, bv, addr_size=8,
)
assert pos == data_len
elif rc == XL_ARRAY:
row1x, rownx, col1x, colnx, array_flags, tokslen = \
local_unpack("<HHBBBxxxxxH", data[:14])
if blah_formulas:
print("ARRAY:", row1x, rownx, col1x, colnx, array_flags, file=self.logfile)
# dump_formula(bk, data[14:], tokslen, bv, reldelta=0, blah=1)
elif rc == XL_SHRFMLA:
row1x, rownx, col1x, colnx, nfmlas, tokslen = \
local_unpack("<HHBBxBH", data[:10])
if blah_formulas:
print("SHRFMLA (main):", row1x, rownx, col1x, colnx, nfmlas, file=self.logfile)
decompile_formula(bk, data[10:], tokslen, FMLA_TYPE_SHARED,
blah=1, browx=rowx, bcolx=colx, r1c1=r1c1)
elif rc == XL_CONDFMT:
if not fmt_info: continue
assert bv >= 80
num_CFs, needs_recalc, browx1, browx2, bcolx1, bcolx2 = \
unpack("<6H", data[0:12])
if self.verbosity >= 1:
fprintf(self.logfile,
"\n*** WARNING: Ignoring CONDFMT (conditional formatting) record\n" \
"*** in Sheet %d (%r).\n" \
"*** %d CF record(s); needs_recalc_or_redraw = %d\n" \
"*** Bounding box is %s\n",
self.number, self.name, num_CFs, needs_recalc,
rangename2d(browx1, browx2+1, bcolx1, bcolx2+1),
)
olist = [] # updated by the function
pos = unpack_cell_range_address_list_update_pos(
olist, data, 12, bv, addr_size=8)
# print >> self.logfile, repr(result), len(result)
if self.verbosity >= 1:
fprintf(self.logfile,
"*** %d individual range(s):\n" \
"*** %s\n",
len(olist),
", ".join([rangename2d(*coords) for coords in olist]),
)
elif rc == XL_CF:
if not fmt_info: continue
cf_type, cmp_op, sz1, sz2, flags = unpack("<BBHHi", data[0:10])
font_block = (flags >> 26) & 1
bord_block = (flags >> 28) & 1
patt_block = (flags >> 29) & 1
if self.verbosity >= 1:
fprintf(self.logfile,
"\n*** WARNING: Ignoring CF (conditional formatting) sub-record.\n" \
"*** cf_type=%d, cmp_op=%d, sz1=%d, sz2=%d, flags=0x%08x\n" \
"*** optional data blocks: font=%d, border=%d, pattern=%d\n",
cf_type, cmp_op, sz1, sz2, flags,
font_block, bord_block, patt_block,
)
# hex_char_dump(data, 0, data_len, fout=self.logfile)
pos = 12
if font_block:
(font_height, font_options, weight, escapement, underline,
font_colour_index, two_bits, font_esc, font_underl) = \
unpack("<64x i i H H B 3x i 4x i i i 18x", data[pos:pos+118])
font_style = (two_bits > 1) & 1
posture = (font_options > 1) & 1
font_canc = (two_bits > 7) & 1
cancellation = (font_options > 7) & 1
if self.verbosity >= 1:
fprintf(self.logfile,
"*** Font info: height=%d, weight=%d, escapement=%d,\n" \
"*** underline=%d, colour_index=%d, esc=%d, underl=%d,\n" \
"*** style=%d, posture=%d, canc=%d, cancellation=%d\n",
font_height, weight, escapement, underline,
font_colour_index, font_esc, font_underl,
font_style, posture, font_canc, cancellation,
)
pos += 118
if bord_block:
pos += 8
if patt_block:
pos += 4
fmla1 = data[pos:pos+sz1]
pos += sz1
if blah and sz1:
fprintf(self.logfile,
"*** formula 1:\n",
)
dump_formula(bk, fmla1, sz1, bv, reldelta=0, blah=1)
fmla2 = data[pos:pos+sz2]
pos += sz2
assert pos == data_len
if blah and sz2:
fprintf(self.logfile,
"*** formula 2:\n",
)
dump_formula(bk, fmla2, sz2, bv, reldelta=0, blah=1)
elif rc == XL_DEFAULTROWHEIGHT:
if data_len == 4:
bits, self.default_row_height = unpack("<HH", data[:4])
elif data_len == 2:
self.default_row_height, = unpack("<H", data)
bits = 0
fprintf(self.logfile,
"*** WARNING: DEFAULTROWHEIGHT record len is 2, " \
"should be 4; assuming BIFF2 format\n")
else:
bits = 0
fprintf(self.logfile,
"*** WARNING: DEFAULTROWHEIGHT record len is %d, " \
"should be 4; ignoring this record\n",
data_len)
self.default_row_height_mismatch = bits & 1
self.default_row_hidden = (bits >> 1) & 1
self.default_additional_space_above = (bits >> 2) & 1
self.default_additional_space_below = (bits >> 3) & 1
elif rc == XL_MERGEDCELLS:
if not fmt_info: continue
pos = unpack_cell_range_address_list_update_pos(
self.merged_cells, data, 0, bv, addr_size=8)
if blah:
fprintf(self.logfile,
"MERGEDCELLS: %d ranges\n", int_floor_div(pos - 2, 8))
assert pos == data_len, \
"MERGEDCELLS: pos=%d data_len=%d" % (pos, data_len)
elif rc == XL_WINDOW2:
if bv >= 80 and data_len >= 14:
(options,
self.first_visible_rowx, self.first_visible_colx,
self.gridline_colour_index,
self.cached_page_break_preview_mag_factor,
self.cached_normal_view_mag_factor
) = unpack("<HHHHxxHH", data[:14])
else:
assert bv >= 30 # BIFF3-7
(options,
self.first_visible_rowx, self.first_visible_colx,
) = unpack("<HHH", data[:6])
self.gridline_colour_rgb = unpack("<BBB", data[6:9])
self.gridline_colour_index = nearest_colour_index(
self.book.colour_map, self.gridline_colour_rgb, debug=0)
self.cached_page_break_preview_mag_factor = 0 # default (60%)
self.cached_normal_view_mag_factor = 0 # default (100%)
# options -- Bit, Mask, Contents:
# 0 0001H 0 = Show formula results 1 = Show formulas
# 1 0002H 0 = Do not show grid lines 1 = Show grid lines
# 2 0004H 0 = Do not show sheet headers 1 = Show sheet headers
# 3 0008H 0 = Panes are not frozen 1 = Panes are frozen (freeze)
# 4 0010H 0 = Show zero values as empty cells 1 = Show zero values
# 5 0020H 0 = Manual grid line colour 1 = Automatic grid line colour
# 6 0040H 0 = Columns from left to right 1 = Columns from right to left
# 7 0080H 0 = Do not show outline symbols 1 = Show outline symbols
# 8 0100H 0 = Keep splits if pane freeze is removed 1 = Remove splits if pane freeze is removed
# 9 0200H 0 = Sheet not selected 1 = Sheet selected (BIFF5-BIFF8)
# 10 0400H 0 = Sheet not visible 1 = Sheet visible (BIFF5-BIFF8)
# 11 0800H 0 = Show in normal view 1 = Show in page break preview (BIFF8)
# The freeze flag specifies, if a following PANE record (6.71) describes unfrozen or frozen panes.
for attr, _unused_defval in _WINDOW2_options:
setattr(self, attr, options & 1)
options >>= 1
elif rc == XL_SCL:
num, den = unpack("<HH", data)
result = 0
if den:
result = int_floor_div(num * 100, den)
if not(10 <= result <= 400):
if DEBUG or self.verbosity >= 0:
print((
"WARNING *** SCL rcd sheet %d: should have 0.1 <= num/den <= 4; got %d/%d"
% (self.number, num, den)
), file=self.logfile)
result = 100
self.scl_mag_factor = result
elif rc == XL_PANE:
(
self.vert_split_pos,
self.horz_split_pos,
self.horz_split_first_visible,
self.vert_split_first_visible,
self.split_active_pane,
) = unpack("<HHHHB", data[:9])
self.has_pane_record = 1
elif rc == XL_HORIZONTALPAGEBREAKS:
if not fmt_info: continue
num_breaks, = local_unpack("<H", data[:2])
assert num_breaks * (2 + 4 * (bv >= 80)) + 2 == data_len
pos = 2
if bv < 80:
while pos < data_len:
self.horizontal_page_breaks.append((local_unpack("<H", data[pos:pos+2])[0], 0, 255))
pos += 2
else:
while pos < data_len:
self.horizontal_page_breaks.append(local_unpack("<HHH", data[pos:pos+6]))
pos += 6
elif rc == XL_VERTICALPAGEBREAKS:
if not fmt_info: continue
num_breaks, = local_unpack("<H", data[:2])
assert num_breaks * (2 + 4 * (bv >= 80)) + 2 == data_len
pos = 2
if bv < 80:
while pos < data_len:
self.vertical_page_breaks.append((local_unpack("<H", data[pos:pos+2])[0], 0, 65535))
pos += 2
else:
while pos < data_len:
self.vertical_page_breaks.append(local_unpack("<HHH", data[pos:pos+6]))
pos += 6
#### all of the following are for BIFF <= 4W
elif bv <= 45:
if rc == XL_FORMAT or rc == XL_FORMAT2:
bk.handle_format(data, rc)
elif rc == XL_FONT or rc == XL_FONT_B3B4:
bk.handle_font(data)
elif rc == XL_STYLE:
if not self.book._xf_epilogue_done:
self.book.xf_epilogue()
bk.handle_style(data)
elif rc == XL_PALETTE:
bk.handle_palette(data)
elif rc == XL_BUILTINFMTCOUNT:
bk.handle_builtinfmtcount(data)
elif rc == XL_XF4 or rc == XL_XF3 or rc == XL_XF2: #### N.B. not XL_XF
bk.handle_xf(data)
elif rc == XL_DATEMODE:
bk.handle_datemode(data)
elif rc == XL_CODEPAGE:
bk.handle_codepage(data)
elif rc == XL_FILEPASS:
bk.handle_filepass(data)
elif rc == XL_WRITEACCESS:
bk.handle_writeaccess(data)
elif rc == XL_IXFE:
self._ixfe = local_unpack('<H', data)[0]
elif rc == XL_NUMBER_B2:
rowx, colx, cell_attr, d = local_unpack('<HH3sd', data)
self_put_cell(rowx, colx, None, d, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_INTEGER:
rowx, colx, cell_attr, d = local_unpack('<HH3sH', data)
self_put_cell(rowx, colx, None, float(d), self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_LABEL_B2:
rowx, colx, cell_attr = local_unpack('<HH3s', data[0:7])
strg = unpack_string(data, 7, bk.encoding or bk.derive_encoding(), lenlen=1)
self_put_cell(rowx, colx, XL_CELL_TEXT, strg, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_BOOLERR_B2:
rowx, colx, cell_attr, value, is_err = local_unpack('<HH3sBB', data)
cellty = (XL_CELL_BOOLEAN, XL_CELL_ERROR)[is_err]
# if DEBUG: print "XL_BOOLERR_B2", rowx, colx, cell_attr, value, is_err
self_put_cell(rowx, colx, cellty, value, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_BLANK_B2:
if not fmt_info: continue
rowx, colx, cell_attr = local_unpack('<HH3s', data[:7])
self_put_cell(rowx, colx, XL_CELL_BLANK, '', self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_EFONT:
bk.handle_efont(data)
elif rc == XL_ROW_B2:
if not fmt_info: continue
rowx, bits1, bits2 = local_unpack('<H4xH2xB', data[0:11])
if not(0 <= rowx < self.utter_max_rows):
print("*** NOTE: ROW_B2 record has row index %d; " \
"should have 0 <= rowx < %d -- record ignored!" \
% (rowx, self.utter_max_rows), file=self.logfile)
continue
if not (bits2 & 1): # has_default_xf_index is false
xf_index = -1
elif data_len == 18:
# Seems the XF index in the cell_attr is dodgy
xfx = local_unpack('<H', data[16:18])[0]
xf_index = self.fixed_BIFF2_xfindex(cell_attr=None, rowx=rowx, colx=-1, true_xfx=xfx)
else:
cell_attr = data[13:16]
xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx, colx=-1)
key = (bits1, bits2, xf_index)
r = rowinfo_sharing_dict.get(key)
if r is None:
rowinfo_sharing_dict[key] = r = Rowinfo()
r.height = bits1 & 0x7fff
r.has_default_height = (bits1 >> 15) & 1
r.has_default_xf_index = bits2 & 1
r.xf_index = xf_index
# r.outline_level = 0 # set in __init__
# r.outline_group_starts_ends = 0 # set in __init__
# r.hidden = 0 # set in __init__
# r.height_mismatch = 0 # set in __init__
# r.additional_space_above = 0 # set in __init__
# r.additional_space_below = 0 # set in __init__
self.rowinfo_map[rowx] = r
if 0 and r.xf_index > -1:
fprintf(self.logfile,
"**ROW %d %d %d\n",
self.number, rowx, r.xf_index)
if blah_rows:
print('ROW_B2', rowx, bits1, has_defaults, file=self.logfile)
r.dump(self.logfile,
header="--- sh #%d, rowx=%d ---" % (self.number, rowx))
elif rc == XL_COLWIDTH: # BIFF2 only
if not fmt_info: continue
first_colx, last_colx, width\
= local_unpack("<BBH", data[:4])
if not(first_colx <= last_colx):
print("*** NOTE: COLWIDTH record has first col index %d, last %d; " \
"should have first <= last -- record ignored!" \
% (first_colx, last_colx), file=self.logfile)
continue
for colx in xrange(first_colx, last_colx+1):
if colx in self.colinfo_map:
c = self.colinfo_map[colx]
else:
c = Colinfo()
self.colinfo_map[colx] = c
c.width = width
if blah:
fprintf(
self.logfile,
"COLWIDTH sheet #%d cols %d-%d: wid=%d\n",
self.number, first_colx, last_colx, width
)
elif rc == XL_COLUMNDEFAULT: # BIFF2 only
if not fmt_info: continue
first_colx, last_colx = local_unpack("<HH", data[:4])
#### Warning OOo docs wrong; first_colx <= colx < last_colx
if blah:
fprintf(
self.logfile,
"COLUMNDEFAULT sheet #%d cols in range(%d, %d)\n",
self.number, first_colx, last_colx
)
if not(0 <= first_colx < last_colx <= 256):
print("*** NOTE: COLUMNDEFAULT record has first col index %d, last %d; " \
"should have 0 <= first < last <= 256" \
% (first_colx, last_colx), file=self.logfile)
last_colx = min(last_colx, 256)
for colx in xrange(first_colx, last_colx):
offset = 4 + 3 * (colx - first_colx)
cell_attr = data[offset:offset+3]
xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx=-1, colx=colx)
if colx in self.colinfo_map:
c = self.colinfo_map[colx]
else:
c = Colinfo()
self.colinfo_map[colx] = c
c.xf_index = xf_index
elif rc == XL_WINDOW2_B2: # BIFF 2 only
attr_names = ("show_formulas", "show_grid_lines", "show_sheet_headers",
"panes_are_frozen", "show_zero_values")
for attr, char in zip(attr_names, data[0:5]):
setattr(self, attr, int(char != b'\0'))
(self.first_visible_rowx, self.first_visible_colx,
self.automatic_grid_line_colour,
) = unpack("<HHB", data[5:10])
self.gridline_colour_rgb = unpack("<BBB", data[10:13])
self.gridline_colour_index = nearest_colour_index(
self.book.colour_map, self.gridline_colour_rgb, debug=0)
self.cached_page_break_preview_mag_factor = 0 # default (60%)
self.cached_normal_view_mag_factor = 0 # default (100%)
else:
# if DEBUG: print "SHEET.READ: Unhandled record type %02x %d bytes %r" % (rc, data_len, data)
pass
if not eof_found:
raise XLRDError("Sheet %d (%r) missing EOF record" \
% (self.number, self.name))
self.tidy_dimensions()
self.update_cooked_mag_factors()
bk._position = oldpos
return 1
def string_record_contents(self, data):
bv = self.biff_version
bk = self.book
lenlen = (bv >= 30) + 1
nchars_expected = unpack("<" + "BH"[lenlen - 1], data[:lenlen])[0]
offset = lenlen
if bv < 80:
enc = bk.encoding or bk.derive_encoding()
nchars_found = 0
result = UNICODE_LITERAL("")
while 1:
if bv >= 80:
flag = BYTES_ORD(data[offset]) & 1
enc = ("latin_1", "utf_16_le")[flag]
offset += 1
chunk = unicode(data[offset:], enc)
result += chunk
nchars_found += len(chunk)
if nchars_found == nchars_expected:
return result
if nchars_found > nchars_expected:
msg = ("STRING/CONTINUE: expected %d chars, found %d"
% (nchars_expected, nchars_found))
raise XLRDError(msg)
rc, _unused_len, data = bk.get_record_parts()
if rc != XL_CONTINUE:
raise XLRDError(
"Expected CONTINUE record; found record-type 0x%04X" % rc)
offset = 0
def update_cooked_mag_factors(self):
# Cached values are used ONLY for the non-active view mode.
# When the user switches to the non-active view mode,
# if the cached value for that mode is not valid,
# Excel pops up a window which says:
# "The number must be between 10 and 400. Try again by entering a number in this range."
# When the user hits OK, it drops into the non-active view mode
# but uses the magn from the active mode.
# NOTE: definition of "valid" depends on mode ... see below
blah = DEBUG or self.verbosity > 0
if self.show_in_page_break_preview:
if self.scl_mag_factor is None: # no SCL record
self.cooked_page_break_preview_mag_factor = 100 # Yes, 100, not 60, NOT a typo
else:
self.cooked_page_break_preview_mag_factor = self.scl_mag_factor
zoom = self.cached_normal_view_mag_factor
if not (10 <= zoom <=400):
if blah:
print((
"WARNING *** WINDOW2 rcd sheet %d: Bad cached_normal_view_mag_factor: %d"
% (self.number, self.cached_normal_view_mag_factor)
), file=self.logfile)
zoom = self.cooked_page_break_preview_mag_factor
self.cooked_normal_view_mag_factor = zoom
else:
# normal view mode
if self.scl_mag_factor is None: # no SCL record
self.cooked_normal_view_mag_factor = 100
else:
self.cooked_normal_view_mag_factor = self.scl_mag_factor
zoom = self.cached_page_break_preview_mag_factor
if zoom == 0:
# VALID, defaults to 60
zoom = 60
elif not (10 <= zoom <= 400):
if blah:
print((
"WARNING *** WINDOW2 rcd sheet %r: Bad cached_page_break_preview_mag_factor: %r"
% (self.number, self.cached_page_break_preview_mag_factor)
), file=self.logfile)
zoom = self.cooked_normal_view_mag_factor
self.cooked_page_break_preview_mag_factor = zoom
def fixed_BIFF2_xfindex(self, cell_attr, rowx, colx, true_xfx=None):
DEBUG = 0
blah = DEBUG or self.verbosity >= 2
if self.biff_version == 21:
if self.book.xf_list:
if true_xfx is not None:
xfx = true_xfx
else:
xfx = BYTES_ORD(cell_attr[0]) & 0x3F
if xfx == 0x3F:
if self._ixfe is None:
raise XLRDError("BIFF2 cell record has XF index 63 but no preceding IXFE record.")
xfx = self._ixfe
# OOo docs are capable of interpretation that each
# cell record is preceded immediately by its own IXFE record.
# Empirical evidence is that (sensibly) an IXFE record applies to all
# following cell records until another IXFE comes along.
return xfx
# Have either Excel 2.0, or broken 2.1 w/o XF records -- same effect.
self.biff_version = self.book.biff_version = 20
#### check that XF slot in cell_attr is zero
xfx_slot = BYTES_ORD(cell_attr[0]) & 0x3F
assert xfx_slot == 0
xfx = self._cell_attr_to_xfx.get(cell_attr)
if xfx is not None:
return xfx
if blah:
fprintf(self.logfile, "New cell_attr %r at (%r, %r)\n", cell_attr, rowx, colx)
if not self.book.xf_list:
for xfx in xrange(16):
self.insert_new_BIFF20_xf(cell_attr=b"\x40\x00\x00", style=xfx < 15)
xfx = self.insert_new_BIFF20_xf(cell_attr=cell_attr)
return xfx
def insert_new_BIFF20_xf(self, cell_attr, style=0):
DEBUG = 0
blah = DEBUG or self.verbosity >= 2
book = self.book
xfx = len(book.xf_list)
xf = self.fake_XF_from_BIFF20_cell_attr(cell_attr, style)
xf.xf_index = xfx
book.xf_list.append(xf)
if blah:
xf.dump(self.logfile, header="=== Faked XF %d ===" % xfx, footer="======")
if xf.format_key not in book.format_map:
if xf.format_key:
msg = "ERROR *** XF[%d] unknown format key (%d, 0x%04x)\n"
fprintf(self.logfile, msg,
xf.xf_index, xf.format_key, xf.format_key)
fmt = Format(xf.format_key, FUN, UNICODE_LITERAL("General"))
book.format_map[xf.format_key] = fmt
book.format_list.append(fmt)
cellty_from_fmtty = {
FNU: XL_CELL_NUMBER,
FUN: XL_CELL_NUMBER,
FGE: XL_CELL_NUMBER,
FDT: XL_CELL_DATE,
FTX: XL_CELL_NUMBER, # Yes, a number can be formatted as text.
}
fmt = book.format_map[xf.format_key]
cellty = cellty_from_fmtty[fmt.type]
self._xf_index_to_xl_type_map[xf.xf_index] = cellty
self._cell_attr_to_xfx[cell_attr] = xfx
return xfx
def fake_XF_from_BIFF20_cell_attr(self, cell_attr, style=0):
from .formatting import XF, XFAlignment, XFBorder, XFBackground, XFProtection
xf = XF()
xf.alignment = XFAlignment()
xf.alignment.indent_level = 0
xf.alignment.shrink_to_fit = 0
xf.alignment.text_direction = 0
xf.border = XFBorder()
xf.border.diag_up = 0
xf.border.diag_down = 0
xf.border.diag_colour_index = 0
xf.border.diag_line_style = 0 # no line
xf.background = XFBackground()
xf.protection = XFProtection()
(prot_bits, font_and_format, halign_etc) = unpack('<BBB', cell_attr)
xf.format_key = font_and_format & 0x3F
xf.font_index = (font_and_format & 0xC0) >> 6
upkbits(xf.protection, prot_bits, (
(6, 0x40, 'cell_locked'),
(7, 0x80, 'formula_hidden'),
))
xf.alignment.hor_align = halign_etc & 0x07
for mask, side in ((0x08, 'left'), (0x10, 'right'), (0x20, 'top'), (0x40, 'bottom')):
if halign_etc & mask:
colour_index, line_style = 8, 1 # black, thin
else:
colour_index, line_style = 0, 0 # none, none
setattr(xf.border, side + '_colour_index', colour_index)
setattr(xf.border, side + '_line_style', line_style)
bg = xf.background
if halign_etc & 0x80:
bg.fill_pattern = 17
else:
bg.fill_pattern = 0
bg.background_colour_index = 9 # white
bg.pattern_colour_index = 8 # black
xf.parent_style_index = (0x0FFF, 0)[style]
xf.alignment.vert_align = 2 # bottom
xf.alignment.rotation = 0
for attr_stem in \
"format font alignment border background protection".split():
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, 1)
return xf
def req_fmt_info(self):
if not self.formatting_info:
raise XLRDError("Feature requires open_workbook(..., formatting_info=True)")
##
# Determine column display width.
# <br /> -- New in version 0.6.1
# <br />
# @param colx Index of the queried column, range 0 to 255.
# Note that it is possible to find out the width that will be used to display
# columns with no cell information e.g. column IV (colx=255).
# @return The column width that will be used for displaying
# the given column by Excel, in units of 1/256th of the width of a
# standard character (the digit zero in the first font).
def computed_column_width(self, colx):
self.req_fmt_info()
if self.biff_version >= 80:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
if self.standardwidth is not None:
return self.standardwidth
elif self.biff_version >= 40:
if self.gcw[colx]:
if self.standardwidth is not None:
return self.standardwidth
else:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
elif self.biff_version == 30:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
# All roads lead to Rome and the DEFCOLWIDTH ...
if self.defcolwidth is not None:
return self.defcolwidth * 256
return 8 * 256 # 8 is what Excel puts in a DEFCOLWIDTH record
    def handle_hlink(self, data):
        """Parse an HLINK record into a Hyperlink object, append it to
        self.hyperlink_list, and index it in self.hyperlink_map for every
        cell in the link's bounding box."""
        # DEBUG = 1
        if DEBUG: print("\n=== hyperlink ===", file=self.logfile)
        record_size = len(data)
        h = Hyperlink()
        # Fixed header: cell bounding box, StdLink GUID, 4 unknown bytes,
        # then the option flags that say which variable parts follow.
        h.frowx, h.lrowx, h.fcolx, h.lcolx, guid0, dummy, options = unpack('<HHHH16s4si', data[:32])
        assert guid0 == b"\xD0\xC9\xEA\x79\xF9\xBA\xCE\x11\x8C\x82\x00\xAA\x00\x4B\xA9\x0B"
        assert dummy == b"\x02\x00\x00\x00"
        if DEBUG: print("options: %08X" % options, file=self.logfile)
        offset = 32
        def get_nul_terminated_unicode(buf, ofs):
            # Reads a 32-bit char count (including the NUL terminator)
            # followed by UTF-16LE text; returns the string without its
            # terminator plus the updated offset.
            nb = unpack('<L', buf[ofs:ofs+4])[0] * 2
            ofs += 4
            uc = unicode(buf[ofs:ofs+nb], 'UTF-16le')[:-1]
            ofs += nb
            return uc, ofs
        if options & 0x14: # has a description
            h.desc, offset = get_nul_terminated_unicode(data, offset)
        if options & 0x80: # has a target
            h.target, offset = get_nul_terminated_unicode(data, offset)
        if (options & 1) and not (options & 0x100): # HasMoniker and not MonikerSavedAsString
            # an OLEMoniker structure
            clsid, = unpack('<16s', data[offset:offset + 16])
            if DEBUG: print("clsid=%r" %clsid, file=self.logfile)
            offset += 16
            if clsid == b"\xE0\xC9\xEA\x79\xF9\xBA\xCE\x11\x8C\x82\x00\xAA\x00\x4B\xA9\x0B":
                # E0H C9H EAH 79H F9H BAH CEH 11H 8CH 82H 00H AAH 00H 4BH A9H 0BH
                # URL Moniker
                h.type = UNICODE_LITERAL('url')
                nbytes = unpack('<L', data[offset:offset + 4])[0]
                offset += 4
                h.url_or_path = unicode(data[offset:offset + nbytes], 'UTF-16le')
                if DEBUG: print("initial url=%r len=%d" % (h.url_or_path, len(h.url_or_path)), file=self.logfile)
                # The URL is NUL-terminated inside the declared byte count;
                # anything after the NUL is extra moniker data (observed to
                # be either 24 bytes or absent -- see assert below).
                endpos = h.url_or_path.find('\x00')
                if DEBUG: print("endpos=%d" % endpos, file=self.logfile)
                h.url_or_path = h.url_or_path[:endpos]
                true_nbytes = 2 * (endpos + 1)
                offset += true_nbytes
                extra_nbytes = nbytes - true_nbytes
                extra_data = data[offset:offset + extra_nbytes]
                offset += extra_nbytes
                if DEBUG: print("url=%r" % h.url_or_path, file=self.logfile)
                if DEBUG: print("extra=%r" % extra_data, file=self.logfile)
                if DEBUG: print("nbytes=%d true_nbytes=%d extra_nbytes=%d" % (nbytes, true_nbytes, extra_nbytes), file=self.logfile)
                assert extra_nbytes in (24, 0)
            elif clsid == b"\x03\x03\x00\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46":
                # file moniker
                h.type = UNICODE_LITERAL('local file')
                uplevels, nbytes = unpack("<Hi", data[offset:offset + 6])
                offset += 6
                shortpath = b"..\\" * uplevels + data[offset:offset + nbytes - 1] #### BYTES, not unicode
                if DEBUG: print("uplevels=%d shortpath=%r" % (uplevels, shortpath), file=self.logfile)
                offset += nbytes
                offset += 24 # OOo: "unknown byte sequence"
                # above is version 0xDEAD + 20 reserved zero bytes
                sz = unpack('<i', data[offset:offset + 4])[0]
                if DEBUG: print("sz=%d" % sz, file=self.logfile)
                offset += 4
                if sz:
                    # An extended (Unicode) path is present; prefer it.
                    xl = unpack('<i', data[offset:offset + 4])[0]
                    offset += 4
                    offset += 2 # "unknown byte sequence" MS: 0x0003
                    extended_path = unicode(data[offset:offset + xl], 'UTF-16le') # not zero-terminated
                    offset += xl
                    h.url_or_path = extended_path
                else:
                    h.url_or_path = shortpath
                    #### MS KLUDGE WARNING ####
                    # The "shortpath" is bytes encoded in the **UNKNOWN** creator's "ANSI" encoding.
            else:
                print("*** unknown clsid %r" % clsid, file=self.logfile)
        elif options & 0x163 == 0x103: # UNC
            h.type = UNICODE_LITERAL('unc')
            h.url_or_path, offset = get_nul_terminated_unicode(data, offset)
        elif options & 0x16B == 8:
            # Link within this workbook: only a textmark can follow.
            h.type = UNICODE_LITERAL('workbook')
        else:
            h.type = UNICODE_LITERAL('unknown')
        if options & 0x8: # has textmark
            h.textmark, offset = get_nul_terminated_unicode(data, offset)
        # Every byte of the record must have been consumed.
        assert offset == record_size
        if DEBUG: h.dump(header="... object dump ...")
        self.hyperlink_list.append(h)
        for rowx in xrange(h.frowx, h.lrowx+1):
            for colx in xrange(h.fcolx, h.lcolx+1):
                self.hyperlink_map[rowx, colx] = h
def handle_quicktip(self, data):
rcx, frowx, lrowx, fcolx, lcolx = unpack('<5H', data[:10])
assert rcx == XL_QUICKTIP
assert self.hyperlink_list
h = self.hyperlink_list[-1]
assert (frowx, lrowx, fcolx, lcolx) == (h.frowx, h.lrowx, h.fcolx, h.lcolx)
assert data[-2:] == b'\x00\x00'
h.quicktip = unicode(data[10:-2], 'utf_16_le')
    def handle_msodrawingetc(self, recid, data_len, data):
        """Walk the Office-drawing atoms in an MSODRAWING-family record and
        dump them for debugging.  A no-op unless OBJ_MSO_DEBUG is set."""
        if not OBJ_MSO_DEBUG:
            return
        DEBUG = 1
        if self.biff_version < 80:
            return
        o = MSODrawing()
        pos = 0
        while pos < data_len:
            # Atom header: 4-bit version, 12-bit instance, 16-bit record
            # type (fbt), 32-bit payload length (cb).
            tmp, fbt, cb = unpack('<HHI', data[pos:pos+8])
            ver = tmp & 0xF
            inst = (tmp >> 4) & 0xFFF
            if ver == 0xF:
                ndb = 0 # container
            else:
                ndb = cb
            if DEBUG:
                hex_char_dump(data, pos, ndb + 8, base=0, fout=self.logfile)
                fprintf(self.logfile,
                    "fbt:0x%04X inst:%d ver:0x%X cb:%d (0x%04X)\n",
                    fbt, inst, ver, cb, cb)
            if fbt == 0xF010: # Client Anchor
                assert ndb == 18
                (o.anchor_unk,
                 o.anchor_colx_lo, o.anchor_rowx_lo,
                 o.anchor_colx_hi, o.anchor_rowx_hi) = unpack('<Hiiii', data[pos+8:pos+8+ndb])
            elif fbt == 0xF011: # Client Data
                # must be followed by an OBJ record
                assert cb == 0
                assert pos + 8 == data_len
            else:
                pass
            pos += ndb + 8
        else:
            # didn't break out of while loop
            assert pos == data_len
        if DEBUG:
            o.dump(self.logfile, header="=== MSODrawing ===", footer= " ")
    def handle_obj(self, data):
        """Parse a sheet-level OBJ record into an MSObj.

        Returns the MSObj, or None for pre-BIFF8 files and for records that
        do not start with a valid ftCmo sub-record (antique/corrupt).
        """
        if self.biff_version < 80:
            return None
        o = MSObj()
        data_len = len(data)
        pos = 0
        if OBJ_MSO_DEBUG:
            fprintf(self.logfile, "... OBJ record len=%d...\n", data_len)
        while pos < data_len:
            # Sub-record header: 16-bit feature type (ft), 16-bit count (cb).
            ft, cb = unpack('<HH', data[pos:pos+4])
            if OBJ_MSO_DEBUG:
                fprintf(self.logfile, "pos=%d ft=0x%04X cb=%d\n", pos, ft, cb)
                hex_char_dump(data, pos, cb + 4, base=0, fout=self.logfile)
            # A well-formed BIFF8 OBJ record must begin with ftCmo (0x15, 18 bytes).
            if pos == 0 and not (ft == 0x15 and cb == 18):
                if self.verbosity:
                    fprintf(self.logfile, "*** WARNING Ignoring antique or corrupt OBJECT record\n")
                return None
            if ft == 0x15: # ftCmo ... s/b first
                assert pos == 0
                o.type, o.id, option_flags = unpack('<HHH', data[pos+4:pos+10])
                upkbits(o, option_flags, (
                    ( 0, 0x0001, 'locked'),
                    ( 4, 0x0010, 'printable'),
                    ( 8, 0x0100, 'autofilter'), # not documented in Excel 97 dev kit
                    ( 9, 0x0200, 'scrollbar_flag'), # not documented in Excel 97 dev kit
                    (13, 0x2000, 'autofill'),
                    (14, 0x4000, 'autoline'),
                    ))
            elif ft == 0x00:
                if data[pos:data_len] == b'\0' * (data_len - pos):
                    # ignore "optional reserved" data at end of record
                    break
                msg = "Unexpected data at end of OBJECT record"
                fprintf(self.logfile, "*** ERROR %s\n" % msg)
                hex_char_dump(data, pos, data_len - pos, base=0, fout=self.logfile)
                raise XLRDError(msg)
            elif ft == 0x0C: # Scrollbar
                values = unpack('<5H', data[pos+8:pos+18])
                for value, tag in zip(values, ('value', 'min', 'max', 'inc', 'page')):
                    setattr(o, 'scrollbar_' + tag, value)
            elif ft == 0x0D: # "Notes structure" [used for cell comments]
                # not documented in Excel 97 dev kit
                if OBJ_MSO_DEBUG: fprintf(self.logfile, "*** OBJ record has ft==0x0D 'notes' structure\n")
            elif ft == 0x13: # list box data
                if o.autofilter: # non standard exit. NOT documented
                    break
            else:
                pass
            pos += cb + 4
        else:
            # didn't break out of while loop
            pass
        if OBJ_MSO_DEBUG:
            o.dump(self.logfile, header="=== MSOBj ===", footer= " ")
        return o
    def handle_note(self, data, txos):
        """Parse a NOTE (cell comment) record and file it in
        self.cell_note_map.

        For BIFF < 8 the text is carried inline (possibly split across
        continuation NOTE records); for BIFF8 the text comes from the TXO
        record previously stashed in *txos* under the note's object id.
        """
        if OBJ_MSO_DEBUG:
            fprintf(self.logfile, '... NOTE record ...\n')
            hex_char_dump(data, 0, len(data), base=0, fout=self.logfile)
        o = Note()
        data_len = len(data)
        if self.biff_version < 80:
            o.rowx, o.colx, expected_bytes = unpack('<HHH', data[:6])
            nb = len(data) - 6
            assert nb <= expected_bytes
            pieces = [data[6:]]
            expected_bytes -= nb
            # Continuation NOTE records carry the rest; they are flagged by
            # a dummy row index of 0xFFFF.
            while expected_bytes > 0:
                rc2, data2_len, data2 = self.book.get_record_parts()
                assert rc2 == XL_NOTE
                dummy_rowx, nb = unpack('<H2xH', data2[:6])
                assert dummy_rowx == 0xFFFF
                assert nb == data2_len - 6
                pieces.append(data2[6:])
                expected_bytes -= nb
            assert expected_bytes == 0
            enc = self.book.encoding or self.book.derive_encoding()
            o.text = unicode(b''.join(pieces), enc)
            o.rich_text_runlist = [(0, 0)]
            o.show = 0
            o.row_hidden = 0
            o.col_hidden = 0
            o.author = UNICODE_LITERAL('')
            o._object_id = None
            self.cell_note_map[o.rowx, o.colx] = o
            return
        # Excel 8.0+
        o.rowx, o.colx, option_flags, o._object_id = unpack('<4H', data[:8])
        o.show = (option_flags >> 1) & 1
        o.row_hidden = (option_flags >> 7) & 1
        o.col_hidden = (option_flags >> 8) & 1
        # XL97 dev kit book says NULL [sic] bytes padding between string count and string data
        # to ensure that string is word-aligned. Appears to be nonsense.
        o.author, endpos = unpack_unicode_update_pos(data, 8, lenlen=2)
        # There is a random/undefined byte after the author string (not counted in the
        # string length).
        # Issue 4 on github: Google Spreadsheet doesn't write the undefined byte.
        assert (data_len - endpos) in (0, 1)
        if OBJ_MSO_DEBUG:
            o.dump(self.logfile, header="=== Note ===", footer= " ")
        # Pull the comment text from the TXO with the matching object id.
        txo = txos.get(o._object_id)
        if txo:
            o.text = txo.text
            o.rich_text_runlist = txo.rich_text_runlist
        self.cell_note_map[o.rowx, o.colx] = o
def handle_txo(self, data):
if self.biff_version < 80:
return
o = MSTxo()
data_len = len(data)
fmt = '<HH6sHHH'
fmtsize = calcsize(fmt)
option_flags, o.rot, controlInfo, cchText, cbRuns, o.ifntEmpty = unpack(fmt, data[:fmtsize])
o.fmla = data[fmtsize:]
upkbits(o, option_flags, (
( 3, 0x000E, 'horz_align'),
( 6, 0x0070, 'vert_align'),
( 9, 0x0200, 'lock_text'),
(14, 0x4000, 'just_last'),
(15, 0x8000, 'secret_edit'),
))
totchars = 0
o.text = UNICODE_LITERAL('')
while totchars < cchText:
rc2, data2_len, data2 = self.book.get_record_parts()
assert rc2 == XL_CONTINUE
if OBJ_MSO_DEBUG:
hex_char_dump(data2, 0, data2_len, base=0, fout=self.logfile)
nb = BYTES_ORD(data2[0]) # 0 means latin1, 1 means utf_16_le
nchars = data2_len - 1
if nb:
assert nchars % 2 == 0
nchars /= 2
utext, endpos = unpack_unicode_update_pos(data2, 0, known_len=nchars)
assert endpos == data2_len
o.text += utext
totchars += nchars
o.rich_text_runlist = []
totruns = 0
while totruns < cbRuns: # counts of BYTES, not runs
rc3, data3_len, data3 = self.book.get_record_parts()
# print totruns, cbRuns, rc3, data3_len, repr(data3)
assert rc3 == XL_CONTINUE
assert data3_len % 8 == 0
for pos in xrange(0, data3_len, 8):
run = unpack('<HH4x', data3[pos:pos+8])
o.rich_text_runlist.append(run)
totruns += 8
# remove trailing entries that point to the end of the string
while o.rich_text_runlist and o.rich_text_runlist[-1][0] == cchText:
del o.rich_text_runlist[-1]
if OBJ_MSO_DEBUG:
o.dump(self.logfile, header="=== MSTxo ===", footer= " ")
print(o.rich_text_runlist, file=self.logfile)
return o
def handle_feat11(self, data):
    """Decode a FEAT11 (shared feature / Table) record for debugging.

    Does nothing unless OBJ_MSO_DEBUG is set; the decoded fields are only
    printed to self.logfile, never stored on the sheet.
    """
    if not OBJ_MSO_DEBUG:
        return
    # rt: Record type; this matches the BIFF rt in the first two bytes of the record; =0872h
    # grbitFrt: FRT cell reference flag (see table below for details)
    # Ref0: Range reference to a worksheet cell region if grbitFrt=1 (bitFrtRef). Otherwise blank.
    # isf: Shared feature type index =5 for Table
    # fHdr: =0 since this is for feat not feat header
    # reserved0: Reserved for future use =0 for Table
    # cref: Count of ref ranges this feature is on
    # cbFeatData: Count of byte for the current feature data.
    # reserved1: =0 currently not used
    # Ref1: Repeat of Ref0. UNDOCUMENTED
    rt, grbitFrt, Ref0, isf, fHdr, reserved0, cref, cbFeatData, reserved1, Ref1 = unpack('<HH8sHBiHiH8s', data[0:35])
    # Sanity checks: this handler only understands the Table flavour.
    assert reserved0 == 0
    assert reserved1 == 0
    assert isf == 5
    assert rt == 0x872
    assert fHdr == 0
    assert Ref1 == Ref0
    print("FEAT11: grbitFrt=%d Ref0=%r cref=%d cbFeatData=%d" % (grbitFrt, Ref0, cref, cbFeatData), file=self.logfile)
    # lt: Table data source type:
    # =0 for Excel Worksheet Table =1 for read-write SharePoint linked List
    # =2 for XML mapper Table =3 for Query Table
    # idList: The ID of the Table (unique per worksheet)
    # crwHeader: How many header/title rows the Table has at the top
    # crwTotals: How many total rows the Table has at the bottom
    # idFieldNext: Next id to try when assigning a unique id to a new field
    # cbFSData: The size of the Fixed Data portion of the Table data structure.
    # rupBuild: the rupBuild that generated the record
    # unusedShort: UNUSED short that can be used later. The value is reserved during round-tripping.
    # listFlags: Collection of bit flags: (see listFlags' bit setting table below for detail.)
    # lPosStmCache: Table data stream position of cached data
    # cbStmCache: Count of bytes of cached data
    # cchStmCache: Count of characters of uncompressed cached data in the stream
    # lem: Table edit mode (see List (Table) Editing Mode (lem) setting table below for details.)
    # rgbHashParam: Hash value for SharePoint Table
    # cchName: Count of characters in the Table name string rgbName
    (lt, idList, crwHeader, crwTotals, idFieldNext, cbFSData,
        rupBuild, unusedShort, listFlags, lPosStmCache, cbStmCache,
        cchStmCache, lem, rgbHashParam, cchName) = unpack('<iiiiiiHHiiiii16sH', data[35:35+66])
    print("lt=%d idList=%d crwHeader=%d crwTotals=%d idFieldNext=%d cbFSData=%d\n"\
        "rupBuild=%d unusedShort=%d listFlags=%04X lPosStmCache=%d cbStmCache=%d\n"\
        "cchStmCache=%d lem=%d rgbHashParam=%r cchName=%d" % (
        lt, idList, crwHeader, crwTotals, idFieldNext, cbFSData,
        rupBuild, unusedShort,listFlags, lPosStmCache, cbStmCache,
        cchStmCache, lem, rgbHashParam, cchName), file=self.logfile)
class MSODrawing(BaseObject):
    # Bare container for data parsed from an MSODRAWING record;
    # attributes are attached dynamically by the record handlers.
    pass
class MSObj(BaseObject):
    # Bare container for data parsed from an OBJ record;
    # attributes are attached dynamically by the record handlers.
    pass
class MSTxo(BaseObject):
    # Bare container for data parsed from a TXO record (see handle_txo);
    # attributes are attached dynamically by the record handlers.
    pass
##
# <p> Represents a user "comment" or "note".
# Note objects are accessible through Sheet.{@link #Sheet.cell_note_map}.
# <br />-- New in version 0.7.2
# </p>
class Note(BaseObject):
    ##
    # Author of note (Unicode string)
    author = UNICODE_LITERAL('')
    ##
    # True if the containing column is hidden
    col_hidden = 0
    ##
    # Column index
    colx = 0
    ##
    # List of (offset_in_string, font_index) tuples.
    # Unlike Sheet.{@link #Sheet.rich_text_runlist_map}, the first offset should always be 0.
    rich_text_runlist = None
    ##
    # True if the containing row is hidden
    row_hidden = 0
    ##
    # Row index
    rowx = 0
    ##
    # True if note is always shown
    show = 0
    ##
    # Text of the note (Unicode string)
    text = UNICODE_LITERAL('')
##
# <p>Contains the attributes of a hyperlink.
# Hyperlink objects are accessible through Sheet.{@link #Sheet.hyperlink_list}
# and Sheet.{@link #Sheet.hyperlink_map}.
# <br />-- New in version 0.7.2
# </p>
class Hyperlink(BaseObject):
    ##
    # Index of first row
    frowx = None
    ##
    # Index of last row
    lrowx = None
    ##
    # Index of first column
    fcolx = None
    ##
    # Index of last column
    lcolx = None
    ##
    # Type of hyperlink. Unicode string, one of 'url', 'unc',
    # 'local file', 'workbook', 'unknown'
    type = None
    ##
    # The URL or file-path, depending in the type. Unicode string, except
    # in the rare case of a local but non-existent file with non-ASCII
    # characters in the name, in which case only the "8.3" filename is available,
    # as a bytes (3.x) or str (2.x) string, <i>with unknown encoding.</i>
    url_or_path = None
    ##
    # Description ... this is displayed in the cell,
    # and should be identical to the cell value. Unicode string, or None. It seems
    # impossible NOT to have a description created by the Excel UI.
    desc = None
    ##
    # Target frame. Unicode string. Note: I have not seen a case of this.
    # It seems impossible to create one in the Excel UI.
    target = None
    ##
    # "Textmark": the piece after the "#" in
    # "http://docs.python.org/library#struct_module", or the Sheet1!A1:Z99
    # part when type is "workbook".
    textmark = None
    ##
    # The text of the "quick tip" displayed when the cursor
    # hovers over the hyperlink.
    quicktip = None
# === helpers ===
def unpack_RK(rk_str):
    """Decode a 4-byte RK value (BIFF's compressed number encoding).

    Bit 1 of the first byte selects integer vs. truncated-double storage;
    bit 0 means the stored value is 100 times the real one.
    Returns a float in every case.
    """
    flags = BYTES_ORD(rk_str[0])
    if flags & 2:
        # A SIGNED 30-bit integer lives in the top bits; shift out the
        # two flag bits to recover it.
        i = unpack('<i', rk_str)[0] >> 2
        value = float(i)
    else:
        # The 30 bits are the most significant bits of an IEEE 754
        # 64-bit float; rebuild the double with the flag bits masked off.
        value = unpack(
            '<d',
            b'\0\0\0\0' + BYTES_LITERAL(chr(flags & 252)) + rk_str[1:4],
        )[0]
    if flags & 1:
        value /= 100.0
    return value
##### =============== Cell ======================================== #####

# Maps a format type (F??) constant to the cell type used for a number
# carrying that format.
cellty_from_fmtty = {
    FNU: XL_CELL_NUMBER,
    FUN: XL_CELL_NUMBER,
    FGE: XL_CELL_NUMBER,
    FDT: XL_CELL_DATE,
    FTX: XL_CELL_NUMBER, # Yes, a number can be formatted as text.
    }

# Human-readable names for the XL_CELL_* type constants (used by
# Cell.__repr__ and available to callers).
ctype_text = {
    XL_CELL_EMPTY: 'empty',
    XL_CELL_TEXT: 'text',
    XL_CELL_NUMBER: 'number',
    XL_CELL_DATE: 'xldate',
    XL_CELL_BOOLEAN: 'bool',
    XL_CELL_ERROR: 'error',
    XL_CELL_BLANK: 'blank',
    }
##
# <p>Contains the data for one cell.</p>
#
# <p>WARNING: You don't call this class yourself. You access Cell objects
# via methods of the {@link #Sheet} object(s) that you found in the {@link #Book} object that
# was returned when you called xlrd.open_workbook("myfile.xls").</p>
# <p> Cell objects have three attributes: <i>ctype</i> is an int, <i>value</i>
# (which depends on <i>ctype</i>) and <i>xf_index</i>.
# If "formatting_info" is not enabled when the workbook is opened, xf_index will be None.
# The following table describes the types of cells and how their values
# are represented in Python.</p>
#
# <table border="1" cellpadding="7">
# <tr>
# <th>Type symbol</th>
# <th>Type number</th>
# <th>Python value</th>
# </tr>
# <tr>
# <td>XL_CELL_EMPTY</td>
# <td align="center">0</td>
# <td>empty string u''</td>
# </tr>
# <tr>
# <td>XL_CELL_TEXT</td>
# <td align="center">1</td>
# <td>a Unicode string</td>
# </tr>
# <tr>
# <td>XL_CELL_NUMBER</td>
# <td align="center">2</td>
# <td>float</td>
# </tr>
# <tr>
# <td>XL_CELL_DATE</td>
# <td align="center">3</td>
# <td>float</td>
# </tr>
# <tr>
# <td>XL_CELL_BOOLEAN</td>
# <td align="center">4</td>
# <td>int; 1 means TRUE, 0 means FALSE</td>
# </tr>
# <tr>
# <td>XL_CELL_ERROR</td>
# <td align="center">5</td>
# <td>int representing internal Excel codes; for a text representation,
# refer to the supplied dictionary error_text_from_code</td>
# </tr>
# <tr>
# <td>XL_CELL_BLANK</td>
# <td align="center">6</td>
# <td>empty string u''. Note: this type will appear only when
# open_workbook(..., formatting_info=True) is used.</td>
# </tr>
# </table>
#<p></p>
class Cell(BaseObject):
    """Holds the type, value and (optional) XF index of a single cell."""

    __slots__ = ['ctype', 'value', 'xf_index']

    def __init__(self, ctype, value, xf_index=None):
        self.ctype = ctype
        self.value = value
        self.xf_index = xf_index

    def __repr__(self):
        type_name = ctype_text[self.ctype]
        if self.xf_index is None:
            return "%s:%r" % (type_name, self.value)
        return "%s:%r (XF:%r)" % (type_name, self.value, self.xf_index)
##
# There is one and only one instance of an empty cell -- it's a singleton. This is it.
# You may use a test like "acell is empty_cell".
empty_cell = Cell(XL_CELL_EMPTY, '')
##### =============== Colinfo and Rowinfo ============================== #####
##
# Width and default formatting information that applies to one or
# more columns in a sheet. Derived from COLINFO records.
#
# <p> Here is the default hierarchy for width, according to the OOo docs:
#
# <br />"""In BIFF3, if a COLINFO record is missing for a column,
# the width specified in the record DEFCOLWIDTH is used instead.
#
# <br />In BIFF4-BIFF7, the width set in this [COLINFO] record is only used,
# if the corresponding bit for this column is cleared in the GCW
# record, otherwise the column width set in the DEFCOLWIDTH record
# is used (the STANDARDWIDTH record is always ignored in this case [see footnote!]).
#
# <br />In BIFF8, if a COLINFO record is missing for a column,
# the width specified in the record STANDARDWIDTH is used.
# If this [STANDARDWIDTH] record is also missing,
# the column width of the record DEFCOLWIDTH is used instead."""
# <br />
#
# Footnote: The docs on the GCW record say this:
# """<br />
# If a bit is set, the corresponding column uses the width set in the STANDARDWIDTH
# record. If a bit is cleared, the corresponding column uses the width set in the
# COLINFO record for this column.
# <br />If a bit is set, and the worksheet does not contain the STANDARDWIDTH record, or if
# the bit is cleared, and the worksheet does not contain the COLINFO record, the DEFCOLWIDTH
# record of the worksheet will be used instead.
# <br />"""<br />
# At the moment (2007-01-17) xlrd is going with the GCW version of the story.
# Reference to the source may be useful: see the computed_column_width(colx) method
# of the Sheet class.
# <br />-- New in version 0.6.1
# </p>
class Colinfo(BaseObject):
    ##
    # Width of the column in 1/256 of the width of the zero character,
    # using default font (first FONT record in the file).
    width = 0
    ##
    # XF index to be used for formatting empty cells.
    xf_index = -1
    ##
    # 1 = column is hidden
    hidden = 0
    ##
    # Value of a 1-bit flag whose purpose is unknown
    # but is often seen set to 1
    bit1_flag = 0
    ##
    # Outline level of the column, in range(7).
    # (0 = no outline)
    outline_level = 0
    ##
    # 1 = column is collapsed
    collapsed = 0

# Module-level switch: when true, Rowinfo declares __slots__ (see below).
_USE_SLOTS = 1
##
# <p>Height and default formatting information that applies to a row in a sheet.
# Derived from ROW records.
# <br /> -- New in version 0.6.1</p>
#
# <p><b>height</b>: Height of the row, in twips. One twip == 1/20 of a point.</p>
#
# <p><b>has_default_height</b>: 0 = Row has custom height; 1 = Row has default height.</p>
#
# <p><b>outline_level</b>: Outline level of the row (0 to 7) </p>
#
# <p><b>outline_group_starts_ends</b>: 1 = Outline group starts or ends here (depending on where the
# outline buttons are located, see WSBOOL record [TODO ??]),
# <i>and</i> is collapsed </p>
#
# <p><b>hidden</b>: 1 = Row is hidden (manually, or by a filter or outline group) </p>
#
# <p><b>height_mismatch</b>: 1 = Row height and default font height do not match </p>
#
# <p><b>has_default_xf_index</b>: 1 = the xf_index attribute is usable; 0 = ignore it </p>
#
# <p><b>xf_index</b>: Index to default XF record for empty cells in this row.
# Don't use this if has_default_xf_index == 0. </p>
#
# <p><b>additional_space_above</b>: This flag is set, if the upper border of at least one cell in this row
# or if the lower border of at least one cell in the row above is
# formatted with a thick line style. Thin and medium line styles are not
# taken into account. </p>
#
# <p><b>additional_space_below</b>: This flag is set, if the lower border of at least one cell in this row
# or if the upper border of at least one cell in the row below is
# formatted with a medium or thick line style. Thin line styles are not
# taken into account. </p>
class Rowinfo(BaseObject):
    # __slots__ keeps per-instance memory low; a sheet may hold one
    # Rowinfo per row.
    if _USE_SLOTS:
        __slots__ = (
            "height",
            "has_default_height",
            "outline_level",
            "outline_group_starts_ends",
            "hidden",
            "height_mismatch",
            "has_default_xf_index",
            "xf_index",
            "additional_space_above",
            "additional_space_below",
            )

    def __init__(self):
        # All attributes start as None; the ROW record handler fills them in.
        self.height = None
        self.has_default_height = None
        self.outline_level = None
        self.outline_group_starts_ends = None
        self.hidden = None
        self.height_mismatch = None
        self.has_default_xf_index = None
        self.xf_index = None
        self.additional_space_above = None
        self.additional_space_below = None

    def __getstate__(self):
        # Explicit pickle support: __slots__ removes __dict__, so state is
        # a fixed tuple of the attributes in declaration order.
        return (
            self.height,
            self.has_default_height,
            self.outline_level,
            self.outline_group_starts_ends,
            self.hidden,
            self.height_mismatch,
            self.has_default_xf_index,
            self.xf_index,
            self.additional_space_above,
            self.additional_space_below,
            )

    def __setstate__(self, state):
        # Inverse of __getstate__; order must match exactly.
        (
            self.height,
            self.has_default_height,
            self.outline_level,
            self.outline_group_starts_ends,
            self.hidden,
            self.height_mismatch,
            self.has_default_xf_index,
            self.xf_index,
            self.additional_space_above,
            self.additional_space_below,
            ) = state
| femmerling/DirMaker | box/lib/python2.7/site-packages/xlrd/sheet.py | Python | mit | 106,931 |
"""
This file supports the XBlock service that returns data about users.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from xblock.reference.plugins import Service
class UserService(Service):
    """
    UserService returns information about users. Initially only data about the currently-logged-in user.

    This service returns personally-identifiable information (PII). If a runtime needed to control exposure to a
    user's PII, the runtime would deny access to this XBlock service.
    """
    def get_current_user(self):
        """
        Return an :class:`XBlockUser` describing the current user.

        This is the default, example implementation; anything real needs to
        override it.

        Raises:
            NotImplementedError: always, in this base implementation.
        """
        raise NotImplementedError()
class XBlockUser(object):
    """
    A model representation of user data returned by the `UserService`.

    Standard user profile fields are first-class attributes of instances.
    Each may be None, except where a different "Falsey" default makes sense
    (noted below):

    - ``is_current_user``: whether this user is the current user of the
      xblock.  Always True for instances returned by ``get_current_user()``.
    - ``emails``: list of email addresses for the user; ``[]`` in place of None.
    - ``full_name``: the user's full name (e.g. used for generated certificates).
    - ``display_name``: the name to show in a display context -- to other
      users (as within a social-oriented xblock) or to the user themselves
      (as in the top navigation bar of edx-platform).

    Optional user attributes live in ``opt_attrs``, which supports a
    dict-like interface.  They are optional because they may be
    platform-specific or simply unavailable: ``opt_attrs`` always exists,
    but none of its keys/values are guaranteed, so use ``.get()`` or handle
    KeyError.  For example, if the runtime is edx-platform:

    - ``opt_attrs['edx-platform.is_authenticated']``: whether the user is
      authenticated (e.g. not django's AnonymousUser)
    - ``opt_attrs['edx-platform.user_id']``: the edx-platform user id
    - ``opt_attrs['edx-platform.username']``: the edx-platform username,
      used as the "handle" in discussion forums, for example

    All of this data should be considered personally-identifiable
    information (PII).
    """

    def __init__(self, is_current_user=False, emails=None, full_name=None):
        # Standardized attributes; platform-specific extras belong in
        # opt_attrs, which starts empty.
        self.is_current_user = is_current_user
        self.emails = emails if emails else []
        self.full_name = full_name
        self.opt_attrs = {}
| mitodl/XBlock | xblock/reference/user_service.py | Python | apache-2.0 | 3,093 |
# -*- coding: utf-8 -*-
from pylab import *
from lammpsreader import *
from sys import argv
from generators import *
# Build a LAMMPS data file for a "wall crash" setup: a fixed plane of
# atoms plus a small cube of moving atoms fired at it.
fileName = argv[1]

# Simulation box bounds, written into the LAMMPS header.
lammps_header = zeros(1, dtype=lammps_header_dtype)
Lx,Ly,Lz = 50.,18.,18.
lammps_header["bounds"][0][0] = 0
lammps_header["bounds"][0][1] = Lx
lammps_header["bounds"][0][2] = 0
lammps_header["bounds"][0][3] = Ly
lammps_header["bounds"][0][4] = 0
lammps_header["bounds"][0][5] = Lz

# Grid resolution used to place wall atoms (only one x-layer is used below).
nx,ny,nz = 10,20,20
atoms = zeros(nx*ny*nz, dtype=atoms_dtype)
index = 0
# First wall
# A single y-z plane of type-0 atoms at x = Lx/2; atoms are centred in
# their grid cells via the +0.5 offsets.
for i in range(1):
    for j in range(ny):
        for k in range(nz):
            x = Lx * 0.5
            y = float(j + 0.5) / ny * Ly
            z = float(k + 0.5) / nz * Lz
            atoms[index]["position"][0] = x
            atoms[index]["position"][1] = y
            atoms[index]["position"][2] = z
            atoms[index]["type"] = 0
            index += 1
# Drop the unused tail of the preallocated array.
atoms = resize(atoms, index)

# A 2x2x2 cube of type-18 atoms, centred in y/z, given velocity along +x.
atoms_live = zeros(8, dtype=atoms_dtype)
#atoms_live = create_fcc(2, 2, 2, 3, 3, 3, atom_type=18, T=0.05)
index = 0
for i in range(2):
    for j in range(2):
        for k in range(2):
            atom = atoms_live[index]
            atom["position"][0] += Lx * 0.05 + i * 1
            atom["position"][1] += 0.5 * Ly - 0.5 + j * 1
            atom["position"][2] += 0.5 * Lz - 0.5 + k * 1
            atom["velocity"][0] = 5.0
            atom["type"] = 18
            index += 1
atoms = concatenate((atoms, atoms_live))

# Assign 1-based atom ids, as expected by the LAMMPS data format.
index = 0
for atom in atoms:
    atom["id"] = index + 1
    index += 1

save_atoms(lammps_header, atoms, fileName)
| ComputationalPhysics/molecules | tools/wallcrash.py | Python | gpl-3.0 | 1,556 |
from __future__ import absolute_import
from pprint import pprint
# pylint: disable=import-error
from twisted.internet import defer, threads, reactor
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
@defer.inlineCallbacks
def create_node(name):
    """Create a cloud node in a worker thread, pretty-print the result,
    then stop the twisted reactor."""
    # deferToThread runs the blocking libcloud call off the reactor thread.
    node = yield threads.deferToThread(_thread_create_node,
                                       name=name)
    pprint(node)
    reactor.stop()
def _thread_create_node(name):
    """Blocking libcloud call that creates a Rackspace node using the
    first listed image and size.  Runs in a thread via deferToThread."""
    Driver = get_driver(Provider.RACKSPACE)
    # NOTE: placeholder credentials -- replace with real ones.
    conn = Driver('username', 'api key')
    image = conn.list_images()[0]
    size = conn.list_sizes()[0]
    node = conn.create_node(name=name, image=image, size=size)
    return node
def stop(*args, **kwargs):
    """Stop the reactor; usable as both a callback and an errback."""
    reactor.stop()
# Kick off the node creation and make sure the reactor is stopped whether
# the deferred succeeds or fails.
d = create_node(name='my-lc-node')
d.addCallback(stop) # pylint: disable=no-member
d.addErrback(stop) # pylint: disable=no-member

reactor.run()
| StackPointCloud/libcloud | docs/examples/misc/twisted_create_node.py | Python | apache-2.0 | 898 |
import sys
from eventlet import greenio
from eventlet import greenthread
from eventlet import greenpool
from eventlet.green import socket
from eventlet.support import greenlets as greenlet
def connect(addr, family=socket.AF_INET, bind=None):
    """Convenience function for opening client sockets.

    :param addr: Address of the server to connect to.  For TCP sockets, this is a (host, port) tuple.
    :param family: Socket family, optional.  See :mod:`socket` documentation for available families.
    :param bind: Local address to bind to, optional.
    :return: The connected green socket object.
    """
    sock = socket.socket(family, socket.SOCK_STREAM)
    try:
        if bind is not None:
            sock.bind(bind)
        sock.connect(addr)
    except Exception:
        # BUGFIX: don't leak the file descriptor when bind()/connect()
        # fails -- close the half-constructed socket before re-raising.
        sock.close()
        raise
    return sock
def listen(addr, family=socket.AF_INET, backlog=50):
    """Convenience function for opening server sockets.  This
    socket can be used in :func:`~eventlet.serve` or a custom ``accept()`` loop.

    Sets SO_REUSEADDR on the socket to save on annoyance.

    :param addr: Address to listen on.  For TCP sockets, this is a (host, port) tuple.
    :param family: Socket family, optional.  See :mod:`socket` documentation for available families.
    :param backlog: The maximum number of queued connections. Should be at least 1; the maximum value is system-dependent.

    :return: The listening green socket object.
    """
    sock = socket.socket(family, socket.SOCK_STREAM)
    # BUGFIX: the platform test was inverted.  SO_REUSEADDR should be set
    # everywhere EXCEPT Windows: on POSIX it merely allows rebinding a
    # port stuck in TIME_WAIT, but on Windows the same flag lets another
    # process bind (hijack) an in-use port, so it is deliberately skipped.
    if sys.platform[:3] != "win":
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(addr)
    sock.listen(backlog)
    return sock
class StopServe(Exception):
    """Exception class used for quitting :func:`~eventlet.serve` gracefully."""
    pass
def _stop_checker(t, server_gt, conn):
    """Link callback for handler greenthreads spawned by serve().

    Waits for the handler greenthread *t*, always closes the client
    connection, and propagates any unexpected handler exception into the
    server greenthread (killing the server).  A GreenletExit from the
    handler is swallowed -- it is a normal way for a handler to end.
    """
    try:
        try:
            t.wait()
        finally:
            conn.close()
    except greenlet.GreenletExit:
        pass
    except Exception:
        # Re-raise the handler's exception inside the server greenthread.
        greenthread.kill(server_gt, *sys.exc_info())
def serve(sock, handle, concurrency=1000):
    """Runs a server on the supplied socket.  Calls the function *handle* in a
    separate greenthread for every incoming client connection.  *handle* takes
    two arguments: the client socket object, and the client address::

        def myhandle(client_sock, client_addr):
            print "client connected", client_addr

        eventlet.serve(eventlet.listen(('127.0.0.1', 9999)), myhandle)

    Returning from *handle* closes the client socket.

    :func:`serve` blocks the calling greenthread; it won't return until
    the server completes.  If you desire an immediate return,
    spawn a new greenthread for :func:`serve`.

    Any uncaught exceptions raised in *handle* are raised as exceptions
    from :func:`serve`, terminating the server, so be sure to be aware of the
    exceptions your application can raise.  The return value of *handle* is
    ignored.

    Raise a :class:`~eventlet.StopServe` exception to gracefully terminate the
    server -- that's the only way to get the server() function to return rather
    than raise.

    The value in *concurrency* controls the maximum number of
    greenthreads that will be open at any time handling requests.  When
    the server hits the concurrency limit, it stops accepting new
    connections until the existing ones complete.
    """
    pool = greenpool.GreenPool(concurrency)
    server_gt = greenthread.getcurrent()

    while True:
        try:
            conn, addr = sock.accept()
            gt = pool.spawn(handle, conn, addr)
            # _stop_checker closes conn when the handler finishes and
            # propagates handler errors into this (server) greenthread.
            gt.link(_stop_checker, server_gt, conn)
            # Drop local references so the connection can be reclaimed as
            # soon as the handler is done with it.
            conn, addr, gt = None, None, None
        except StopServe:
            return
def wrap_ssl(sock, *a, **kw):
    """Convenience function for converting a regular socket into an
    SSL socket.  Has the same interface as :func:`ssl.wrap_socket`,
    but works on 2.5 or earlier, using PyOpenSSL (though note that it
    ignores the *cert_reqs*, *ssl_version*, *ca_certs*,
    *do_handshake_on_connect*, and *suppress_ragged_eofs* arguments
    when using PyOpenSSL).

    The preferred idiom is to call wrap_ssl directly on the creation
    method, e.g., ``wrap_ssl(connect(addr))`` or
    ``wrap_ssl(listen(addr), server_side=True)``. This way there is
    no "naked" socket sitting around to accidentally corrupt the SSL
    session.

    :return Green SSL object.
    """
    # wrap_ssl_impl is chosen at import time (ssl module vs. PyOpenSSL
    # fallback) -- see the try/except block below.
    return wrap_ssl_impl(sock, *a, **kw)
# Select the wrap_ssl implementation at import time: prefer the green ssl
# module; fall back to PyOpenSSL; otherwise install a stub that raises.
try:
    from eventlet.green import ssl
    wrap_ssl_impl = ssl.wrap_socket
except ImportError:
    # < 2.6, trying PyOpenSSL
    try:
        from eventlet.green.OpenSSL import SSL

        def wrap_ssl_impl(sock, keyfile=None, certfile=None, server_side=False,
                          cert_reqs=None, ssl_version=None, ca_certs=None,
                          do_handshake_on_connect=True,
                          suppress_ragged_eofs=True, ciphers=None):
            # PyOpenSSL fallback: only keyfile, certfile and server_side
            # are honoured; the remaining ssl.wrap_socket-compatible
            # arguments are accepted but ignored.
            # theoretically the ssl_version could be respected in this
            # next line
            context = SSL.Context(SSL.SSLv23_METHOD)
            if certfile is not None:
                context.use_certificate_file(certfile)
            if keyfile is not None:
                context.use_privatekey_file(keyfile)
            # No peer certificate verification in this fallback.
            context.set_verify(SSL.VERIFY_NONE, lambda *x: True)
            connection = SSL.Connection(context, sock)
            if server_side:
                connection.set_accept_state()
            else:
                connection.set_connect_state()
            return connection
    except ImportError:
        def wrap_ssl_impl(*a, **kw):
            raise ImportError("To use SSL with Eventlet, "
                              "you must install PyOpenSSL or use Python 2.6 or later.")
| vimmaniac/iemfbl | eventlet/convenience.py | Python | lgpl-3.0 | 5,751 |
import subprocess
class RpmInstaller:
    """Installs RPM packages through yum, skipping those the cache reports
    as already installed."""

    def __init__(self, packages, cache):
        # packages: iterable of dicts with 'name' and 'version' keys.
        # cache: object exposing is_package_installed(name, version).
        self._packages = packages
        self._cache = cache

    def install_packages(self):
        """Install every package not already present.

        Returns the list of packages that were installed, each as
        ``{name: {'version': ..., 'type': 'system'}}``.
        """
        installed = []
        for pkg in self._packages:
            name, version = pkg['name'], pkg['version']
            if self._cache.is_package_installed(name, version):
                print('[dem] {}-{} already installed'.format(name, version))
                continue
            print('[dem] installing {}-{}'.format(name, version))
            # 'latest' means no version pin on the yum command line.
            target = name if version == 'latest' else '{}-{}'.format(name, version)
            self._execute_yum(target)
            installed.append({name: {'version': version, 'type': 'system'}})
        return installed

    @staticmethod
    def _execute_yum(package):
        subprocess.call(['sudo', 'yum', 'install', package, '-y'])
| nitehawck/dem | dem/dependency/yum.py | Python | mit | 1,040 |
#
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from builtins import str
from builtins import object
import logging
from re import compile as re_compile
from bq_data_access.v1.errors import FeatureNotFoundException
from bq_data_access.v1.feature_value_types import ValueType, DataTypes
from bq_data_access.v1.feature_data_provider import FeatureDataProvider
from bq_data_access.v1.utils import DurationLogged
from django.conf import settings
# BigQuery table descriptors for miRNA expression data.  'feature_id' is
# the token used inside MIRN feature identifiers; 'value_field' is the
# column holding the expression measurement.
TABLES = [
    {
        'name': 'miRNA_BCGSC_GA_mirna',
        'info': 'miRNA (GA, BCGSC RPM)',
        'platform': 'IlluminaGA',
        'feature_id': 'mirna_illumina_ga_rpm',
        'value_field': 'reads_per_million_miRNA_mapped'
    },
    {
        'name': 'miRNA_BCGSC_HiSeq_mirna',
        'info': 'miRNA (HiSeq, BCGSC RPM)',
        'platform': 'IlluminaHiSeq',
        'feature_id': 'mirna_illumina_hiseq_rpm',
        'value_field': 'reads_per_million_miRNA_mapped'
    },
    {
        'name': 'miRNA_Expression',
        'platform': 'both',
        'info': 'miRNA',
        'feature_id': 'expression',
        'value_field': 'normalized_count'
    }
]

# Index in TABLES of the combined-platform expression table.
TABLE_IDX_MIRNA_EXPRESSION = 2

VALUE_READS_PER_MILLION = 'RPM'
VALUE_NORMALIZED_COUNT = 'normalized_count'

# Feature-type tag used in feature identifiers ("MIRN:...").
MIRN_FEATURE_TYPE = 'MIRN'
COHORT_FIELD_NAME = 'sample_id'
def get_feature_type():
    """Return the feature-type tag ('MIRN') for this data module."""
    return MIRN_FEATURE_TYPE
def get_mirna_expression_table_info():
    """Return the descriptor of the combined miRNA expression table."""
    return TABLES[TABLE_IDX_MIRNA_EXPRESSION]
def get_table_info(platform, value):
    """Return the table descriptor to use for *platform* and *value*.

    The combined expression table is returned whenever *value* is the
    normalized-count field; otherwise the descriptor whose 'platform'
    matches is returned (None when nothing matches).
    """
    if value == VALUE_NORMALIZED_COUNT:
        return get_mirna_expression_table_info()
    table_info = None
    for entry in TABLES:
        if entry['platform'] == platform:
            table_info = entry
    return table_info
class MIRNFeatureDef(object):
    """Parsed representation of a MIRN feature identifier.

    Identifiers have the form ``MIRN:<mirna name>:<table feature_id>``,
    e.g. ``MIRN:hsa-mir-1244-1:mirna_illumina_ga_rpm``.
    """
    # Regular expression for parsing the feature definition.
    # The table alternation is generated from the TABLES descriptors.
    #
    # Example ID: MIRN:hsa-mir-1244-1:mirna_illumina_ga_rpm
    regex = re_compile("^MIRN:"
                       # mirna name
                       "([a-zA-Z0-9._\-]+):"
                       # table
                       "(" + "|".join([table['feature_id'] for table in TABLES]) +
                       ")$")

    def __init__(self, mirna_name, platform, value_field, table_id):
        self.mirna_name = mirna_name
        self.platform = platform
        self.value_field = value_field
        self.table_id = table_id

    @classmethod
    def get_table_info(cls, table_id):
        # Return the descriptor whose 'feature_id' equals table_id
        # (last match wins), or None when unknown.
        table_info = None
        for table_entry in TABLES:
            if table_id == table_entry['feature_id']:
                table_info = table_entry
        return table_info

    @classmethod
    def from_feature_id(cls, feature_id):
        """Build a MIRNFeatureDef from a feature id string.

        Raises:
            FeatureNotFoundException: if the id does not match the regex.
        """
        feature_fields = cls.regex.findall(feature_id)
        if len(feature_fields) == 0:
            raise FeatureNotFoundException(feature_id)

        mirna_name, table_id = feature_fields[0]
        table_info = cls.get_table_info(table_id)
        platform = table_info['platform']
        value_field = table_info['value_field']

        return cls(mirna_name, platform, value_field, table_id)
class MIRNFeatureProvider(FeatureDataProvider):
    """Feature data provider that fetches miRNA expression values from
    BigQuery for a given MIRN feature identifier."""
    TABLES = TABLES

    def __init__(self, feature_id, **kwargs):
        self.feature_def = None
        self.table_info = None
        self.table_name = ''
        # Populates the three attributes above (raises on a bad id).
        self.parse_internal_feature_id(feature_id)
        super(MIRNFeatureProvider, self).__init__(**kwargs)

    def get_value_type(self):
        # miRNA expression values are floats.
        return ValueType.FLOAT

    def get_feature_type(self):
        return DataTypes.MIRN

    @classmethod
    def process_data_point(cls, data_point):
        # Reduce an unpacked row dict to just the measurement value.
        return data_point['value']

    def build_query(self, project_name, dataset_name, table_name, feature_def, cohort_dataset, cohort_table, cohort_id_array, project_id_array):
        """Assemble the BigQuery SQL selecting this feature's values for
        the samples belonging to the given cohorts (and, optionally,
        projects)."""
        cohort_project_name = settings.GCLOUD_PROJECT_ID
        # Generate the 'IN' statement string: (%s, %s, ..., %s)
        cohort_id_stmt = ', '.join([str(cohort_id) for cohort_id in cohort_id_array])
        project_id_stmt = ''
        if project_id_array is not None:
            project_id_stmt = ', '.join([str(project_id) for project_id in project_id_array])

        query_template = \
            ("SELECT ParticipantBarcode, SampleBarcode, AliquotBarcode, {value_field} AS value, {mirna_name_field} "
             "FROM [{project_name}:{dataset_name}.{table_name}] "
             "WHERE {mirna_name_field}='{mirna_name}' ")

        # The combined expression table names its id column differently
        # and needs an explicit platform restriction.
        if table_name == get_mirna_expression_table_info()['name']:
            mirna_name_field = 'mirna_id'
            query_template += " AND Platform='{platform}' "
        else:
            mirna_name_field = 'miRNA_ID'

        query_template += \
            ("AND SampleBarcode IN ( "
             " SELECT sample_barcode "
             " FROM [{cohort_project_name}:{cohort_dataset}.{cohort_table}] "
             " WHERE cohort_id IN ({cohort_id_list})"
             " AND (project_id IS NULL")

        query_template += (" OR project_id IN ({project_id_list})))" if project_id_array is not None else "))")

        query = query_template.format(dataset_name=dataset_name, project_name=project_name, table_name=table_name,
                                      mirna_name_field=mirna_name_field, mirna_name=feature_def.mirna_name,
                                      platform=feature_def.platform, cohort_project_name=cohort_project_name,
                                      value_field=feature_def.value_field,
                                      cohort_dataset=cohort_dataset, cohort_table=cohort_table,
                                      cohort_id_list=cohort_id_stmt, project_id_list=project_id_stmt)

        logging.debug("BQ_QUERY_MIRN: " + query)
        return query

    @DurationLogged('MIRN', 'UNPACK')
    def unpack_query_response(self, query_result_array):
        """
        Unpacks values from a BigQuery response object into a flat array. The array will contain dicts with
        the following fields:
        - 'case_id': Patient barcode
        - 'sample_id': Sample barcode
        - 'aliquot_id': Aliquot barcode
        - 'value': Value of the selected column from the miRNA data table

        Args:
            query_result_array: A BigQuery query response object

        Returns:
            Array of dict objects.
        """
        result = []

        # Each BigQuery row is {'f': [{'v': col0}, {'v': col1}, ...]} in
        # the SELECT's column order.
        for row in query_result_array:
            result.append({
                'case_id': row['f'][0]['v'],
                'sample_id': row['f'][1]['v'],
                'aliquot_id': row['f'][2]['v'],
                'value': float(row['f'][3]['v'])
            })

        return result

    def get_table_info(self, table_id):
        # Return the descriptor whose 'feature_id' equals table_id
        # (last match wins), or None when unknown.
        table_info = None
        for table_entry in self.TABLES:
            if table_id == table_entry['feature_id']:
                table_info = table_entry
        return table_info

    def parse_internal_feature_id(self, feature_id):
        # Raises FeatureNotFoundException (via MIRNFeatureDef) on bad ids.
        self.feature_def = MIRNFeatureDef.from_feature_id(feature_id)
        self.table_info = self.get_table_info(self.feature_def.table_id)
        self.table_name = self.table_info['name']

    @classmethod
    def is_valid_feature_id(cls, feature_id):
        is_valid = False
        try:
            MIRNFeatureDef.from_feature_id(feature_id)
            is_valid = True
        except Exception:
            # MIRNFeatureDef.from_feature_id raises Exception if the feature identifier
            # is not valid. Nothing needs to be done here, since is_valid is already False.
            pass
        finally:
            return is_valid
| isb-cgc/ISB-CGC-Webapp | bq_data_access/v1/mirna_data.py | Python | apache-2.0 | 8,089 |
# -*- coding: utf-8 -*-
# This code is part of Amoco
# Copyright (C) 2006-2019 Axel Tillequin (bdcht3@gmail.com)
# published under GPLv2 license
"""
system/elf.py
=============
The system elf module implements Elf classes for both 32/64bits executable format.
"""
from amoco.system.core import BinFormat
from amoco.system.structs import Consts, StructDefine, StructureError
from amoco.system.structs import StructFormatter, token_constant_fmt, token_address_fmt
from amoco.logger import Log
logger = Log(__name__)
logger.debug("loading module")
class ElfError(Exception):
    """
    ElfError is raised whenever Elf object instance fails
    to decode required structures.
    """

    def __init__(self, message):
        # BUGFIX: chain to Exception.__init__ so that e.args is populated
        # and repr()/pickling behave like a normal exception; previously
        # args was always empty.
        super(ElfError, self).__init__(message)
        self.message = message

    def __str__(self):
        return str(self.message)
# ------------------------------------------------------------------------------
class Elf(BinFormat):
"""
This class takes a DataIO object (ie an opened file of BytesIO instance)
and decodes all ELF structures found in it.
Attributes:
entrypoints (list of int): list of entrypoint addresses.
filename (str): binary file name.
Ehdr (Ehdr): the ELF header structure.
Phdr (list of Phdr): the list of ELF Program header structures.
Shdr (list of Shdr): the list of ELF Section header structures.
dynamic (Bool): True if the binary wants to load dynamic libs.
basemap (int): base address for this ELF image.
functions (list): a list of function names gathered from internal
definitions (if not stripped) and import names.
variables (list): a list of global variables' names (if found.)
"""
    # format tag checked by generic BinFormat clients to recognize ELF images:
    is_ELF = True

    @property
    def entrypoints(self):
        # ELF defines a single entry address in its file header:
        return [self.Ehdr.e_entry]

    @property
    def filename(self):
        # name attribute of the underlying DataIO/file object
        return self.__file.name

    @property
    def header(self):
        # generic accessor mapped onto the decoded ELF header:
        return self.Ehdr

    @property
    def dataio(self):
        # underlying DataIO (file-like) object
        return self.__file

    def __init__(self, f):
        # f: DataIO object (opened file or BytesIO instance) to decode.
        self.__file = f
        self.Ehdr = Ehdr(f)
        # word width and byte order come from the e_ident prefix:
        x64 = self.Ehdr.e_ident.EI_CLASS == ELFCLASS64
        lbe = ">" if (self.Ehdr.e_ident.EI_DATA == ELFDATA2MSB) else None
        self.dynamic = False
        # read program header table: should not raise any errors
        self.Phdr = []
        if self.Ehdr.e_phoff:
            offset = self.Ehdr.e_phoff
            n, l = self.Ehdr.e_phnum, self.Ehdr.e_phentsize
            for pht in range(n):
                P = Phdr(f, offset, lbe, x64)
                offset += l
                if P.p_type == PT_LOAD:
                    # first PT_LOAD segment provides the image base address
                    # (basemap presumably initialized by BinFormat -- confirm):
                    if not self.basemap:
                        self.basemap = P.p_vaddr
                    self.Phdr.append(P)
                elif P.p_type == PT_INTERP:
                    # an interpreter request implies dynamic linking:
                    self.dynamic = True
                    self.Phdr.append(P)
                elif not P.p_type in Consts.All["p_type"].keys():
                    logger.verbose("invalid segment detected (removed)")
                else:
                    self.Phdr.append(P)
        # read section header table: unused by loader, can raise error
        self.Shdr = []
        if self.Ehdr.e_shoff:
            try:
                offset = self.Ehdr.e_shoff
                n, l = self.Ehdr.e_shnum, self.Ehdr.e_shentsize
                for sht in range(n):
                    S = Shdr(f, offset, lbe, x64)
                    offset += l
                    if S.sh_type in Consts.All["sh_type"].keys():
                        self.Shdr.append(S)
                    else:
                        logger.verbose("unknown sh_type: %d" % S.sh_type)
            except Exception:
                logger.verbose("exception raised while parsing section(s)")
        # read section's name string table:
        for i, s in enumerate(self.Shdr):
            # synthetic default name, replaced below if a name table exists:
            s.name = ".s%d" % i
        n = self.Ehdr.e_shstrndx
        if n != SHN_UNDEF and n in range(len(self.Shdr)):
            S = self.Shdr[self.Ehdr.e_shstrndx]
            if S.sh_type != SHT_STRTAB:
                logger.verbose("section names not a string table")
            else:
                from codecs import decode
                offset = S.sh_offset
                data = f[offset : offset + S.sh_size]
                for s in self.Shdr:
                    # each sh_name is an offset to a NUL-terminated string:
                    name = data[s.sh_name :].split(b"\0")[0]
                    s.name = decode(name)
        self.__sections = {}  # lazily-filled cache used by readsection()
        self.functions = self.__functions()
        self.variables = self.__variables()
def getsize(self):
"total file size of all the Program headers"
total = sum([s.p_filesz for s in self.Phdr])
return total
    def getinfo(self, target):
        """
        target is either an address provided as str or int,
        or a symbol str searched in the functions dictionary.
        Returns a triplet with:
        - section index (0 is error, -1 is a dynamic call)
        - offset into section (idem)
        - base virtual address (0 for dynamic calls)
        """
        addr = None
        if isinstance(target, str):
            try:
                addr = int(target, 16)
            except ValueError:
                # not a hex address: look the name up in the functions map
                for a, f in iter(self.functions.items()):
                    if f[0] == target:
                        addr = int(a, 16)
                        break
        elif isinstance(target, int):
            addr = target
        if addr is None:
            # target is probably a symbol not found in functions
            return None, 0, 0
        # now we have addr so we can see in which section/segment it is...
        # sections are smaller than segments so we try first with Shdr
        # but this may lead to errors because what really matters are segments
        # loaded by the kernel binfmt_elf.c loader.
        if self.Shdr:
            for s in reversed(self.Shdr):
                # only code/data carrying sections are considered:
                if s.sh_type != SHT_PROGBITS:
                    continue
                if s.sh_addr <= addr < s.sh_addr + s.sh_size:
                    return s, addr - s.sh_addr, s.sh_addr
        elif self.Phdr:
            for s in reversed(self.Phdr):
                if s.p_type != PT_LOAD:
                    continue
                if s.p_vaddr <= addr < s.p_vaddr + s.p_filesz:
                    return s, addr - s.p_vaddr, s.p_vaddr
        return None, 0, 0
def data(self, target, size):
"returns 'size' bytes located at target virtual address"
return self._readcode(target, size)[0]
def _readcode(self, target, size=None):
s, offset, base = self.getinfo(target)
data = b""
if s:
if isinstance(s, Phdr):
c = self.readsegment(s)
else:
c = self.readsection(s)
if c:
if size != None:
if isinstance(c, Str):
c = c.data
data = c[offset : offset + size]
else:
data = c[offset:]
return data, 0, base + offset
def getfileoffset(self, target):
"converts given target virtual address back to offset in file"
s, offset, base = self.getinfo(target)
if s != None:
result = s.p_offset + offset
else:
result = None
return result
def readsegment(self, S):
"returns segment S data padded to S.p_memsz"
self.__file.seek(S.p_offset)
return self.__file.read(S.p_filesz).ljust(S.p_memsz, b"\x00")
    def loadsegment(self, S, pagesize=None):
        """
        If S is of type PT_LOAD, returns a dict {base: bytes}
        indicating that segment data bytes (extended to pagesize boundary)
        need to be mapped at virtual base address.
        (Returns None if not a PT_LOAD segment.)
        """
        if S.p_type == PT_LOAD:
            self.__file.seek(S.p_offset)
            # sanity check of the ELF congruence constraint between
            # p_offset and p_vaddr modulo p_align:
            if S.p_align > 1 and (S.p_offset != (S.p_vaddr % S.p_align)):
                logger.verbose(
                    "wrong p_vaddr/p_align [%08x/%0d]" % (S.p_vaddr, S.p_align)
                )
            base = S.p_vaddr
            bytes_ = self.__file.read(S.p_filesz).ljust(S.p_memsz, b"\x00")
            if pagesize:
                # note: bytes are not truncated, only extended if needed...
                bytes_ = bytes_.ljust(pagesize, b"\x00")
            return {base: bytes_}
        else:
            logger.error("segment not a PT_LOAD [%08x/%0d]" % (S.p_vaddr, S.p_align))
            return None

    def readsection(self, sect):
        "returns the given section data bytes from file."
        # sect may be a section name, an index into Shdr, or a Shdr object:
        S = None
        if isinstance(sect, str):
            for st in self.Shdr:
                if st.name == sect:
                    S = st
                    break
        elif isinstance(sect, int):
            S = self.Shdr[sect]
        else:
            S = sect
        if S:
            # serve from the decoded-sections cache when possible:
            if S.name in self.__sections:
                return self.__sections[S.name]
            # decode known section types into structured objects,
            # fall back to raw bytes for anything else:
            if S.sh_type in (SHT_SYMTAB, SHT_DYNSYM):
                s = self.__read_symtab(S)
            elif S.sh_type == SHT_STRTAB:
                s = self.__read_strtab(S)
            elif S.sh_type in (SHT_REL, SHT_RELA):
                s = self.__read_relocs(S)
            elif S.sh_type == SHT_DYNAMIC:
                s = self.__read_dynamic(S)
            else:
                self.__file.seek(S.sh_offset)
                s = self.__file.read(S.sh_size)
            self.__sections[S.name] = s
            return s
    def __read_symtab(self, section):
        # decode a SHT_SYMTAB/SHT_DYNSYM section into a list of Sym entries.
        if section.sh_type not in (SHT_SYMTAB, SHT_DYNSYM):
            logger.warning("not a symbol table section")
            return None
        x64 = self.Ehdr.e_ident.EI_CLASS == ELFCLASS64
        lbe = ">" if (self.Ehdr.e_ident.EI_DATA == ELFDATA2MSB) else None
        # read the section:
        self.__file.seek(section.sh_offset)
        data = self.__file.read(section.sh_size)
        # and parse it into Sym objects:
        l = section.sh_entsize
        if (section.sh_size % l) != 0:
            raise ElfError("symbol table size mismatch")
        else:
            n = section.sh_size // l
        symtab = []
        offset = 0
        for i in range(n):
            symtab.append(Sym(data, offset, lbe, x64))
            offset += l
        return symtab

    def __read_strtab(self, section):
        # wrap a SHT_STRTAB section into a StrTable accessor.
        if section.sh_type != SHT_STRTAB:
            raise ElfError("not a string table section")
        self.__file.seek(section.sh_offset)
        data = self.__file.read(section.sh_size)
        x64 = self.Ehdr.e_ident.EI_CLASS == ELFCLASS64
        strtab = StrTable(data, x64)
        return strtab

    def __read_relocs(self, section):
        # decode a SHT_REL/SHT_RELA section into a list of Rel/Rela entries.
        if section.sh_type not in (SHT_REL, SHT_RELA):
            logger.warning("not a relocation table section")
            return None
        self.__file.seek(section.sh_offset)
        data = self.__file.read(section.sh_size)
        l = section.sh_entsize
        if (section.sh_size % l) != 0:
            raise ElfError("relocation table size mismatch")
        else:
            n = section.sh_size // l
        reltab = []
        x64 = self.Ehdr.e_ident.EI_CLASS == ELFCLASS64
        lbe = ">" if (self.Ehdr.e_ident.EI_DATA == ELFDATA2MSB) else None
        offset = 0
        # entry class depends on whether addends are explicit (RELA):
        if section.sh_type == SHT_REL:
            rcls = Rel
        elif section.sh_type == SHT_RELA:
            rcls = Rela
        for i in range(n):
            reltab.append(rcls(data, offset, lbe, x64))
            offset += l
        return reltab

    def __read_dynamic(self, section):
        # decode a SHT_DYNAMIC section into a list of Dyn entries.
        if section.sh_type != SHT_DYNAMIC:
            logger.warning("not a dynamic linking section")
            return None
        # read the section:
        self.__file.seek(section.sh_offset)
        data = self.__file.read(section.sh_size)
        # and parse it into Dyn objects:
        l = section.sh_entsize
        if (section.sh_size % l) != 0:
            raise ElfError("dynamic linking size mismatch")
        else:
            n = section.sh_size // l
        dyntab = []
        x64 = self.Ehdr.e_ident.EI_CLASS == ELFCLASS64
        lbe = ">" if (self.Ehdr.e_ident.EI_DATA == ELFDATA2MSB) else None
        offset = 0
        for i in range(n):
            dyntab.append(Dyn(data, offset, lbe, x64))
            offset += l
        return dyntab

    def __read_note(self, section):
        # decode a SHT_NOTE section into a Note structure.
        if section.sh_type != SHT_NOTE:
            logger.warning("not a note section")
            return None
        self.__file.seek(section.sh_offset)
        data = self.__file.read(section.sh_size)
        x64 = self.Ehdr.e_ident.EI_CLASS == ELFCLASS64
        lbe = ">" if (self.Ehdr.e_ident.EI_DATA == ELFDATA2MSB) else None
        note = Note(data, lbe, x64)
        return note
def __functions(self, fltr=None):
D = self.__symbols(STT_FUNC)
# fltr applies to section name only :
if fltr:
for k, v in iter(D.items()):
if self.Shdr[v[2]].name != fltr:
D.pop(k)
if self.dynamic:
D.update(self.__dynamic(STT_FUNC))
return D
def __variables(self, fltr=None):
D = self.__symbols(STT_OBJECT)
# fltr applies also to section name :
if fltr:
for k, v in iter(D.items()):
if self.Shdr[v[2]].name != fltr:
D.pop(k)
return D
    def __symbols(self, t):
        # map st_value -> (name, size, info, shndx) for every .symtab entry
        # of symbol type t (STT_FUNC, STT_OBJECT, ...); empty dict if the
        # binary is stripped (no .symtab/.strtab).
        D = {}
        symtab = self.readsection(".symtab") or []
        strtab = self.readsection(".strtab")
        if strtab:
            for sym in symtab:
                if sym.st_type == t and sym.st_value:
                    D[sym.st_value] = (
                        str(strtab[sym.st_name].decode()),
                        sym.st_size,
                        sym.st_info,
                        sym.st_shndx,
                    )
        return D

    def __dynamic(self, type=None):
        # map relocation offsets -> imported symbol names, resolved through
        # the dynamic symbol/string tables. (The 'type' argument is unused.)
        D = {}
        self.readsection(".dynamic")
        dynsym = self.readsection(".dynsym") or []
        dynstr = self.readsection(".dynstr")
        if dynstr:
            for s in self.Shdr:
                if s.sh_type in (SHT_REL, SHT_RELA):
                    for r in self.readsection(s):
                        if r.r_offset:
                            sym = dynsym[r.r_sym]
                            D[r.r_offset] = str(dynstr[sym.st_name].decode())
        return D

    def checksec(self):
        "check for usual security features."
        R = {}
        R["Canary"] = 0
        R["Fortify"] = 0
        for f in iter(self.functions.values()):
            # functions values are (name, ...) tuples or plain name strings:
            if isinstance(f, tuple):
                f = f[0]
            if f.startswith("__stack_chk_fail"):
                R["Canary"] = 1
            elif f.endswith("_chk@GLIBC"):
                R["Fortify"] = 1
        R["NX"] = 0
        R["Partial RelRO"] = 0
        for p in self.Phdr:
            if p.p_type == PT_GNU_STACK:
                # stack is non-executable when PF_X is absent:
                if not (p.p_flags & PF_X):
                    R["NX"] = 1
            elif p.p_type == PT_GNU_RELRO:
                R["Partial RelRO"] = 1
        R["PIE"] = 0
        if self.Ehdr.e_type != ET_EXEC:
            # position-independent executables are not ET_EXEC:
            R["PIE"] = 1
        R["Full RelRO"] = 0
        for d in self.readsection(".dynamic") or []:
            if d.d_tag == DT_BIND_NOW or\
               (d.d_tag == DT_FLAGS and d.d_un==DF_BIND_NOW):
                R["Full RelRO"] = 1
                break
        return R

    def __str__(self):
        # human-readable dump of the header, then sections, then segments;
        # pfx is temporarily set to indent each structure's own dump.
        ss = ["ELF header:"]
        tmp = self.Ehdr.pfx
        self.Ehdr.pfx = "\t"
        ss.append(self.Ehdr.__str__())
        self.Ehdr.pfx = tmp
        ss += ["\nSections:"]
        for s in self.Shdr:
            tmp = s.pfx
            s.pfx = "\t"
            ss.append(s.__str__())
            ss.append("---")
            s.pfx = tmp
        ss += ["\nSegments:"]
        for s in self.Phdr:
            tmp = s.pfx
            s.pfx = "\t"
            ss.append(s.__str__())
            ss.append("---")
            s.pfx = tmp
        return "\n".join(ss)
# ------------------------------------------------------------------------------
@StructDefine(
    """
    B : ELFMAG0
    c*3: ELFMAG
    B : EI_CLASS
    B : EI_DATA
    B : EI_VERSION
    B : EI_OSABI
    B : EI_ABIVERSION
    b*7: unused
    """
)
class IDENT(StructFormatter):
    # e_ident prefix of the ELF header: magic bytes, word size,
    # endianness and ABI identification.
    def __init__(self, data=None):
        self.name_formatter("EI_CLASS", "EI_DATA", "EI_OSABI")
        if data:
            self.unpack(data)

    def unpack(self, data, offset=0):
        StructFormatter.unpack(self, data, offset)
        # verify the \x7fELF magic before trusting anything else:
        if self.ELFMAG0 != 0x7F or self.ELFMAG != b"ELF":
            raise ElfError("Wrong magic number, not an ELF file ?")
        if self.EI_DATA not in (ELFDATA2LSB, ELFDATA2MSB):
            logger.info("No endianess specified in ELF header.")
        return self
# EI_CLASS values:
with Consts("EI_CLASS"):
ELFCLASSNONE = 0
ELFCLASS32 = 1
ELFCLASS64 = 2
ELFCLASSNUM = 3
# EI_DATA values:
with Consts("EI_DATA"):
ELFDATANONE = 0
ELFDATA2LSB = 1
ELFDATA2MSB = 2
ELFDATANUM = 3
# EI_OSABI values:
with Consts("EI_OSABI"):
ELFOSABI_NONE = 0
ELFOSABI_SYSV = 0
ELFOSABI_HPUX = 1
ELFOSABI_NETBSD = 2
ELFOSABI_LINUX = 3
ELFOSABI_SOLARIS = 6
ELFOSABI_AIX = 7
ELFOSABI_IRIX = 8
ELFOSABI_FREEBSD = 9
ELFOSABI_TRU64 = 10
ELFOSABI_MODESTO = 11
ELFOSABI_OPENBSD = 12
ELFOSABI_ARM = 97
ELFOSABI_STANDALONE = 255
@StructDefine(
    """
    IDENT :< e_ident
    H : e_type
    H : e_machine
    I : e_version
    I : e_entry
    I : e_phoff
    I : e_shoff
    I : e_flags
    H : e_ehsize
    H : e_phentsize
    H : e_phnum
    H : e_shentsize
    H : e_shnum
    H : e_shstrndx
    """
)
class Ehdr(StructFormatter):
    # ELF file header (Elf32_Ehdr / Elf64_Ehdr).
    def __init__(self, data=None):
        self.name_formatter("e_type", "e_machine", "e_version")
        self.address_formatter("e_entry")
        self.flag_formatter("e_flags")
        if data:
            self.unpack(data)

    def unpack(self, data, offset=0):
        # decode the e_ident prefix first: it drives how the rest of the
        # header is decoded (endianness and 32/64-bit field widths).
        f0 = self.fields[0]
        self._v.e_ident = f0.unpack(data, offset)
        offset += f0.size()
        # change endianness if necessary:
        if self._v.e_ident.EI_DATA == ELFDATA2MSB:
            for f in self.fields[1:]:
                f.order = ">"
        # change pointers format if necessary:
        if self._v.e_ident.EI_CLASS == ELFCLASS64:
            # e_entry / e_phoff / e_shoff widen to 64-bit:
            self.fields[4].typename = "Q"
            self.fields[5].typename = "Q"
            self.fields[6].typename = "Q"
        for f in self.fields[1:]:
            setattr(self._v, f.name, f.unpack(data, offset))
            offset += f.size()
        return self
# legal values for e_type (object file type):
with Consts("e_type"):
ET_NONE = 0
ET_REL = 1
ET_EXEC = 2
ET_DYN = 3
ET_CORE = 4
ET_NUM = 5
ET_LOOS = 0xFE00
ET_HIOS = 0xFEFF
ET_LOPROC = 0xFF00
ET_HIPROC = 0xFFFF
# legal values for e_machine (architecture):
with Consts("e_machine"):
EM_NONE = 0
EM_M32 = 1
EM_SPARC = 2
EM_386 = 3
EM_68K = 4
EM_88K = 5
EM_860 = 7
EM_MIPS = 8
EM_S370 = 9
EM_MIPS_RS3_LE = 10
EM_PARISC = 15
EM_VPP500 = 17
EM_SPARC32PLUS = 18
EM_960 = 19
EM_PPC = 20
EM_PPC64 = 21
EM_S390 = 22
EM_V800 = 36
EM_FR20 = 37
EM_RH32 = 38
EM_RCE = 39
EM_ARM = 40
EM_FAKE_ALPHA = 41
EM_SH = 42
EM_SPARCV9 = 43
EM_TRICORE = 44
EM_ARC = 45
EM_H8_300 = 46
EM_H8_300H = 47
EM_H8S = 48
EM_H8_500 = 49
EM_IA_64 = 50
EM_MIPS_X = 51
EM_COLDFIRE = 52
EM_68HC12 = 53
EM_MMA = 54
EM_PCP = 55
EM_NCPU = 56
EM_NDR1 = 57
EM_STARCORE = 58
EM_ME16 = 59
EM_ST100 = 60
EM_TINYJ = 61
EM_X86_64 = 62
EM_PDSP = 63
EM_FX66 = 66
EM_ST9PLUS = 67
EM_ST7 = 68
EM_68HC16 = 69
EM_68HC11 = 70
EM_68HC08 = 71
EM_68HC05 = 72
EM_SVX = 73
EM_ST19 = 74
EM_VAX = 75
EM_CRIS = 76
EM_JAVELIN = 77
EM_FIREPATH = 78
EM_ZSP = 79
EM_MMIX = 80
EM_HUANY = 81
EM_PRISM = 82
EM_AVR = 83
EM_FR30 = 84
EM_D10V = 85
EM_D30V = 86
EM_V850 = 87
EM_M32R = 88
EM_MN10300 = 89
EM_MN10200 = 90
EM_PJ = 91
EM_OPENRISC = 92
EM_ARC_A5 = 93
EM_XTENSA = 94
EM_NUM = 95
EM_ST200 = 100
EM_MSP430 = 105
EM_SEP = 108
EM_M16C = 117
EM_DSPIC30F = 118
EM_CE = 119
EM_M32C = 120
EM_R32C = 162
EM_QDSP6 = 164
EM_AARCH64 = 183
EM_AVR32 = 185
EM_STM8 = 186
EM_CUDA = 190
EM_Z80 = 220
EM_AMDGPU = 224
EM_RISCV = 243
EM_BPF = 247
# unofficial values should pick large index:
EM_ALPHA = 0x9026
EM_WEBASSEMBLY = 0x4157
# legal values for e_version (version):
with Consts("e_version"):
EV_NONE = 0
EV_CURRENT = 1
EV_NUM = 2
@StructDefine(
    """
    I : sh_name
    I : sh_type
    I : sh_flags
    I : sh_addr
    I : sh_offset
    I : sh_size
    I : sh_link
    I : sh_info
    I : sh_addralign
    I : sh_entsize
    """
)
class Shdr(StructFormatter):
    # ELF section header (Elf32_Shdr / Elf64_Shdr).
    def __init__(self, data=None, offset=0, order=None, x64=False):
        # order: explicit struct byte-order character (">" for big-endian).
        if order:
            for f in self.fields:
                f.order = order
        if x64:
            # sh_flags, sh_addr, sh_offset, sh_size, sh_addralign and
            # sh_entsize widen to 64-bit:
            for i in (2, 3, 4, 5, 8, 9):
                self.fields[i].typename = "Q"
        self.name_formatter("sh_name", "sh_type")
        self.address_formatter("sh_addr")
        self.flag_formatter("sh_flags")
        self.func_formatter(sh_addralign=token_constant_fmt)
        if data:
            self.unpack(data, offset)
with Consts("sh_name"):
SHN_UNDEF = 0
SHN_LORESERVE = 0xFF00
SHN_LOPROC = 0xFF00
SHN_BEFORE = 0xFF00
SHN_AFTER = 0xFF01
SHN_HIPROC = 0xFF1F
SHN_LOOS = 0xFF20
SHN_HIOS = 0xFF3F
SHN_ABS = 0xFFF1
SHN_COMMON = 0xFFF2
SHN_XINDEX = 0xFFFF
SHN_HIRESERVE = 0xFFFF
# legal values for sh_type (section type):
with Consts("sh_type"):
SHT_NULL = 0
SHT_PROGBITS = 1
SHT_SYMTAB = 2
SHT_STRTAB = 3
SHT_RELA = 4
SHT_HASH = 5
SHT_DYNAMIC = 6
SHT_NOTE = 7
SHT_NOBITS = 8
SHT_REL = 9
SHT_SHLIB = 10
SHT_DYNSYM = 11
SHT_INIT_ARRAY = 14
SHT_FINI_ARRAY = 15
SHT_PREINIT_ARRAY = 16
SHT_GROUP = 17
SHT_SYMTAB_SHNDX = 18
SHT_NUM = 19
SHT_LOOS = 0x60000000
SHT_GNU_HASH = 0x6FFFFFF6
SHT_GNU_LIBLIST = 0x6FFFFFF7
SHT_CHECKSUM = 0x6FFFFFF8
SHT_LOSUNW = 0x6FFFFFFA
SHT_SUNW_move = 0x6FFFFFFA
SHT_SUNW_COMDAT = 0x6FFFFFFB
SHT_SUNW_syminfo = 0x6FFFFFFC
SHT_GNU_verdef = 0x6FFFFFFD
SHT_GNU_verneed = 0x6FFFFFFE
SHT_GNU_versym = 0x6FFFFFFF
SHT_HISUNW = 0x6FFFFFFF
SHT_HIOS = 0x6FFFFFFF
SHT_LOPROC = 0x70000000
SHT_HIPROC = 0x7FFFFFFF
SHT_LOUSER = 0x80000000
SHT_HIUSER = 0x8FFFFFFF
SHT_ARM_EXIDX = SHT_LOPROC + 1
SHT_ARM_PREEMPTMAP = SHT_LOPROC + 2
SHT_ARM_ATTRIBUTES = SHT_LOPROC + 3
# legal values for sh_flags (section flags):
with Consts("sh_flags"):
SHF_WRITE = 1 << 0
SHF_ALLOC = 1 << 1
SHF_EXECINSTR = 1 << 2
SHF_MERGE = 1 << 4
SHF_STRINGS = 1 << 5
SHF_INFO_LINK = 1 << 6
SHF_LINK_ORDER = 1 << 7
SHF_OS_NONCONFORMING = 1 << 8
SHF_GROUP = 1 << 9
SHF_TLS = 1 << 10
SHF_MASKOS = 0x0FF00000
SHF_MASKPROC = 0xF0000000
SHF_ORDERED = 1 << 30
SHF_EXCLUDE = 1 << 31
# section group handling:
GRP_COMDAT = 0x1
@StructDefine(
    """
    I : st_name
    I : st_value
    I : st_size
    B : st_info
    B : st_other
    H : st_shndx
    """
)
class Sym(StructFormatter):
    # ELF symbol table entry (Elf32_Sym / Elf64_Sym).
    def __init__(self, data=None, offset=0, order=None, x64=False):
        if order:
            for f in self.fields:
                f.order = order
        if x64:  # 64-bit layout moves st_value/st_size after st_shndx:
            fvalue = self.fields.pop(1)
            fsize = self.fields.pop(1)
            fvalue.typename = fsize.typename = "Q"
            self.fields.append(fvalue)
            self.fields.append(fsize)
        self.name_formatter("st_name", "st_bind", "st_type", "st_visibility")
        if data:
            self.unpack(data, offset)

    def ELF32_ST_BIND(self):
        # binding attribute: high nibble of st_info.
        return self.st_info >> 4
    st_bind = property(ELF32_ST_BIND)

    def ELF32_ST_TYPE(self):
        # symbol type: low nibble of st_info.
        return self.st_info & 0xF
    st_type = property(ELF32_ST_TYPE)

    def ELF32_ST_INFO(self, bind, type):
        # pack bind/type into st_info. Parentheses are required: '+' binds
        # tighter than '<<', so the original computed bind << (4 + type)
        # instead of (bind << 4) | type.
        self._v.st_info = (bind << 4) + (type & 0xF)

    def ELF32_ST_VISIBILITY(self):
        # visibility: low 2 bits of st_other.
        return self.st_other & 0x03
    st_visibility = property(ELF32_ST_VISIBILITY)

    def __str__(self):
        s = super().__str__() + "\n"
        cname = self.__class__.__name__
        s += self.strkey("st_bind", cname) + "\n"
        s += self.strkey("st_type", cname) + "\n"
        s += self.strkey("st_visibility", cname)
        return s
# legal values for st_bind:
with Consts("st_bind"):
STB_LOCAL = 0
STB_GLOBAL = 1
STB_WEAK = 2
STB_NUM = 3
STB_LOOS = 10
STB_HIOS = 12
STB_LOPROC = 13
STB_HIPROC = 15
# legal values for st_type:
with Consts("st_type"):
STT_NOTYPE = 0
STT_OBJECT = 1
STT_FUNC = 2
STT_SECTION = 3
STT_FILE = 4
STT_COMMON = 5
STT_TLS = 6
STT_NUM = 7
STT_LOOS = 10
STT_HIOS = 12
STT_LOPROC = 13
STT_HIPROC = 15
# special index indicating the end end of a chain:
STN_UNDEF = 0
with Consts("st_visibility"):
STV_DEFAULT = 0
STV_INTERNAL = 1
STV_HIDDEN = 2
STV_PROTECTED = 3
@StructDefine(
    """
    I : r_offset
    I : r_info
    """
)
class Rel(StructFormatter):
    # ELF relocation entry without addend (Elf32_Rel / Elf64_Rel).
    def __init__(self, data=None, offset=0, order=None, x64=False):
        if order:
            for f in self.fields:
                f.order = order
        if x64:
            for f in self.fields:
                f.typename = "Q"
        self.name_formatter("r_type")
        self.func_formatter(r_sym=token_address_fmt)
        if data:
            self.unpack(data, offset)

    def ELF_R_SYM(self):
        # symbol index: r_info >> 8 (32-bit) or >> 32 (64-bit).
        return self.r_info >> (8 if self.fields[1].typename == "I" else 32)
    r_sym = property(ELF_R_SYM)

    def ELF_R_TYPE(self):
        # relocation type: low 8 bits (32-bit) or low 32 bits (64-bit).
        return self.r_info & (0xFF if self.fields[1].typename == "I" else 0xFFFFFFFF)
    r_type = property(ELF_R_TYPE)

    def ELF_R_INFO(self, sym, type):
        # pack sym/type into r_info. Parentheses are required: '+' binds
        # tighter than '<<', so the original computed sym << (8 + type)
        # (resp. sym << (32 + type)) instead of packing the two fields.
        if self.fields[1].typename == "I":
            self._v.r_info = (sym << 8) + (type & 0xFF)
        else:
            self._v.r_info = (sym << 32) + (type & 0xFFFFFFFF)

    def __str__(self):
        s = StructFormatter.__str__(self) + "\n"
        cname = self.__class__.__name__
        s += self.strkey("r_type", cname)
        return s
@StructDefine(
    """
    I : r_offset
    I : r_info
    I : r_addend
    """
)
class Rela(Rel):
    # ELF relocation entry with explicit addend; r_sym/r_type accessors
    # are inherited from Rel.
    def __init__(self, data=None, offset=0, order=None, x64=False):
        if order:
            for f in self.fields:
                f.order = order
        if x64:
            for f in self.fields:
                f.typename = "Q"
        if data:
            self.unpack(data, offset)
@StructDefine(
    """
    I : p_type
    I : p_offset
    I : p_vaddr
    I : p_paddr
    I : p_filesz
    I : p_memsz
    I : p_flags
    I : p_align
    """
)
class Phdr(StructFormatter):
    # ELF program (segment) header (Elf32_Phdr / Elf64_Phdr).
    def __init__(self, data=None, offset=0, order=None, x64=False):
        if order:
            for f in self.fields:
                f.order = order
        if x64:
            # the 64-bit layout moves p_flags right after p_type and
            # widens every following field:
            pflags = self.fields.pop(6)
            self.fields.insert(1, pflags)
            for f in self.fields[2:]:
                f.typename = "Q"
        self.name_formatter("p_type")
        self.address_formatter("p_vaddr", "p_paddr")
        self.flag_formatter("p_flags")
        if data:
            self.unpack(data, offset)
# legal values for p_type (segment type):
with Consts("p_type"):
PT_NULL = 0
PT_LOAD = 1
PT_DYNAMIC = 2
PT_INTERP = 3
PT_NOTE = 4
PT_SHLIB = 5
PT_PHDR = 6
PT_TLS = 7
PT_NUM = 8
PT_LOOS = 0x60000000
PT_GNU_EH_FRAME = 0x6474E550
PT_GNU_STACK = 0x6474E551
PT_GNU_RELRO = 0x6474E552
PT_LOSUNW = 0x6FFFFFFA
PT_SUNWBSS = 0x6FFFFFFA
PT_SUNWSTACK = 0x6FFFFFFB
PT_HISUNW = 0x6FFFFFFF
PT_HIOS = 0x6FFFFFFF
PT_LOPROC = 0x70000000
PT_HIPROC = 0x7FFFFFFF
PT_ARM_EXIDX = PT_LOPROC + 1
# legal values for p_flags (segment flags):
with Consts("p_flags"):
PF_X = 1 << 0
PF_W = 1 << 1
PF_R = 1 << 2
PF_MASKOS = 0x0FF00000
PF_MASKPROC = 0xF0000000
PF_ARM_SB = 0x10000000
PF_ARM_PI = 0x20000000
PF_ARM_ABS = 0x40000000
@StructDefine(
    """
    I : namesz
    I : descsz
    I : n_type
    """
)
class Note(StructFormatter):
    # ELF note: fixed header followed by a name string and a descriptor
    # blob, each padded to a 4-byte boundary.
    def __init__(self, data=None, offset=0, order=None, x64=False):
        if order:
            for f in self.fields:
                f.order = order
        if x64:
            for f in self.fields:
                f.typename = "Q"
        self.name_formatter("n_type")
        if data:
            self.unpack(data, offset)
            offset += self.size()
            self.name = data[offset : offset + self.namesz]
            offset += self.namesz
            # descriptor starts at the next 4-byte boundary:
            if offset % 4 != 0:
                offset = ((offset + 4) // 4) * 4
            self.desc = data[offset : offset + self.descsz]
# legal values for note segment descriptor types for core files:
with Consts("n_type"):
NT_PRSTATUS = 1
NT_FPREGSET = 2
NT_PRPSINFO = 3
NT_PRXREG = 4
NT_TASKSTRUCT = 4
NT_PLATFORM = 5
NT_AUXV = 6
NT_GWINDOWS = 7
NT_ASRS = 8
NT_PSTATUS = 10
NT_PSINFO = 13
NT_PRCRED = 14
NT_UTSNAME = 15
NT_LWPSTATUS = 16
NT_LWPSINFO = 17
NT_PRFPXREG = 20
NT_VERSION = 1
@StructDefine(
    """
    I : d_tag
    I : d_un
    """
)
class Dyn(StructFormatter):
    # ELF dynamic-linking table entry: a tag plus a value/pointer union.
    def __init__(self, data=None, offset=0, order=None, x64=False):
        if order:
            for f in self.fields:
                f.order = order
        if x64:
            for f in self.fields:
                f.typename = "Q"
        self.name_formatter("d_tag")
        self.address_formatter("d_un")
        if data:
            self.unpack(data, offset)

    def DT_VALTAGIDX(self, tag):
        # index of *tag* relative to the top of the DT_VALRNG* value range:
        self.d_un = DT_VALRNGHI - tag

    def DT_ADDRTAGIDX(self, tag):
        # index of *tag* relative to the top of the DT_ADDRRNG* range:
        self.d_un = DT_ADDRRNGHI - tag
# legal values for d_tag (dynamic entry type):
with Consts("d_tag"):
DT_NULL = 0
DT_NEEDED = 1
DT_PLTRELSZ = 2
DT_PLTGOT = 3
DT_HASH = 4
DT_STRTAB = 5
DT_SYMTAB = 6
DT_RELA = 7
DT_RELASZ = 8
DT_RELAENT = 9
DT_STRSZ = 10
DT_SYMENT = 11
DT_INIT = 12
DT_FINI = 13
DT_SONAME = 14
DT_RPATH = 15
DT_SYMBOLIC = 16
DT_REL = 17
DT_RELSZ = 18
DT_RELENT = 19
DT_PLTREL = 20
DT_DEBUG = 21
DT_TEXTREL = 22
DT_JMPREL = 23
DT_BIND_NOW = 24
DT_INIT_ARRAY = 25
DT_FINI_ARRAY = 26
DT_INIT_ARRAYSZ = 27
DT_FINI_ARRAYSZ = 28
DT_RUNPATH = 29
DT_FLAGS = 30
DT_ENCODING = 32
DT_PREINIT_ARRAY = 32
DT_PREINIT_ARRAYSZ = 33
DT_NUM = 34
DT_LOOS = 0x6000000D
DT_HIOS = 0x6FFFF000
DT_LOPROC = 0x70000000
DT_HIPROC = 0x7FFFFFFF
DT_GNU_HASH = 0x6FFFFEF5
DT_VERDEF = 0x6FFFFFFC
DT_VERDEFNUM = 0x6FFFFFFD
DT_VERNEED = 0x6FFFFFFE
DT_VERNEEDNUM = 0x6FFFFFFF
DT_VERSYM = 0x6FFFFFF0
DT_RELACOUNT = 0x6FFFFFF9
DT_RELCOUNT = 0x6FFFFFFA
DT_FLAGS_1 = 0x6FFFFFFB
# legal values for d_un (union type use here value):
with Consts("d_un"):
DT_VALRNGLO = 0x6FFFFD00
DT_GNU_PRELINKED = 0x6FFFFDF5
DT_GNU_CONFLICTSZ = 0x6FFFFDF6
DT_GNU_LIBLISTSZ = 0x6FFFFDF7
DT_CHECKSUM = 0x6FFFFDF8
DT_PLTPADSZ = 0x6FFFFDF9
DT_MOVEENT = 0x6FFFFDFA
DT_MOVESZ = 0x6FFFFDFB
DT_FEATURE_1 = 0x6FFFFDFC
DT_POSFLAG_1 = 0x6FFFFDFD
DT_SYMINSZ = 0x6FFFFDFE
DT_SYMINENT = 0x6FFFFDFF
DT_VALRNGHI = 0x6FFFFDFF
DT_VALNUM = 12
# legal values for d_un (union type use here address):
DT_ADDRRNGLO = 0x6FFFFE00
DT_TLSDESC_PLT = 0x6FFFFEF6
DT_TLSDESC_GOT = 0x6FFFFEF7
DT_GNU_CONFLICT = 0x6FFFFEF8
DT_GNU_LIBLIST = 0x6FFFFEF9
DT_CONFIG = 0x6FFFFEFA
DT_DEPAUDIT = 0x6FFFFEFB
DT_AUDIT = 0x6FFFFEFC
DT_PLTPAD = 0x6FFFFEFD
DT_MOVETAB = 0x6FFFFEFE
DT_SYMINFO = 0x6FFFFEFF
DT_ADDRRNGHI = 0x6FFFFEFF
DT_ADDRNUM = 10
DF_ORIGIN = 0x1
DF_SYMBOLIC = 0x2
DF_TEXTREL = 0x4
DF_BIND_NOW = 0x8
DF_STATIC_TLS = 0x10
@StructDefine(
    """
    I: l_name
    I: l_time_stamp
    I: l_checksum
    I: l_version
    I: l_flags
    """
)
class Lib(StructFormatter):
    # ELF library list entry (used by .liblist-style sections).
    def __init__(self, data=None, offset=0, order=None, x64=False):
        if order:
            for f in self.fields:
                f.order = order
        self.flag_formatter("l_flags")
        if data:
            self.unpack(data, offset)
with Consts("l_flags"):
LL_IGNORE_INT_VER = 1 << 1
LL_EXACT_MATCH = 1 << 0
LL_REQUIRE_MINOR = 1 << 2
LL_NONE = 0x0
LL_DELTA = 1 << 5
LL_EXPORTS = 1 << 3
LL_DELAY_LOAD = 1 << 4
# String Table entry; provided to deal with C strings and char indexed
# string table sections. This is not a standard structure, it is more
# like a C-string Array class for python.
# ------------------------------------------------------------------------------
class StrTable(object):
    """C-string array helper over the raw bytes of a string-table section.

    Indexing with a byte offset returns the NUL-terminated string starting
    there; as_dict() maps every string's start offset to its bytes.
    """

    def __init__(self, data, x64=False):
        self.data = data
        self.x64 = x64

    def __getitem__(self, i):
        # locate the terminating NUL and return the bytes before it:
        end = self.data[i:].index(b"\0")
        return self.data[i : i + end]

    def as_dict(self):
        table = {}
        position = 0
        for entry in self.data.split(b"\0"):
            table[position] = entry
            position += len(entry) + 1
        return table

    def __str__(self):
        # offsets are shown as 16-digit hex on 64-bit tables, 8 otherwise:
        fmt = "0x%" + "%02dx: %%s" % (16 if self.x64 else 8)
        return "\n".join(fmt % kv for kv in iter(self.as_dict().items()))
| bdcht/amoco | amoco/system/elf.py | Python | gpl-2.0 | 33,586 |
from bs4 import BeautifulSoup
import requests
import re
import urllib2
import os
import cookielib
import json
def main():
    """Walk every keyword folder under car_photos/ and scrape images for
    the folders that have none yet."""
    i = 0
    total = len(os.listdir('car_photos'))
    for d in os.listdir('car_photos'):
        if d == '.DS_Store':
            continue
        td = 'car_photos/' + d
        kw = d
        #print('Keyword: %s\nNum Images: %d\nProgress: %d of %d' % (kw,len(os.listdir(td)), i, total))
        print('Num Images: %d' % len(os.listdir(td)))
        i += 1
        # fix: the original tested `len(...) < 0`, which can never be true
        # (a listing length is >= 0), so image_search was never called.
        # Trigger the scrape when the keyword folder is still empty.
        if len(os.listdir(td)) == 0:
            image_search(kw)
def get_soup(url, header):
    """Fetch *url* sending the given request headers and return the parsed
    BeautifulSoup document."""
    request = urllib2.Request(url, headers=header)
    return BeautifulSoup(urllib2.urlopen(request), 'html.parser')
def image_search(keyword):
    """Scrape Google Images for *keyword* and save every result under
    car_photos/<first query token>/ ."""
    query = keyword  # you can change the query for the image here
    image_type = "ActiOn"
    query = query.split()
    query = '+'.join(query)
    url = "https://www.google.com/search?q=" + query + "&source=lnms&tbm=isch"
    # add the directory for your image here
    DIR = "car_photos"
    header = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"
              }
    soup = get_soup(url, header)
    ActualImages = []  # contains the link for Large original images, type of image
    for a in soup.find_all("div", {"class": "rg_meta"}):
        # parse the metadata blob once instead of twice:
        meta = json.loads(a.text)
        ActualImages.append((meta["ou"], meta["ity"]))
    if not os.path.exists(DIR):
        os.mkdir(DIR)
    DIR = os.path.join(DIR, query.split()[0])
    if not os.path.exists(DIR):
        os.mkdir(DIR)
    for i, (img, Type) in enumerate(ActualImages):
        try:
            # fix: pass the header dict itself; the original wrapped it in
            # another dict ({'User-Agent': header}), so a bogus header value
            # (the whole dict) was sent with every image request.
            req = urllib2.Request(img, headers=header)
            raw_img = urllib2.urlopen(req).read()
            # next free counter for this image_type prefix:
            cntr = len([f for f in os.listdir(DIR) if image_type in f]) + 1
            ext = Type if len(Type) else "jpg"
            path = os.path.join(DIR, image_type + "_" + str(cntr) + "." + ext)
            out = open(path, 'wb')
            out.write(raw_img)
            out.close()
        except Exception:
            # best-effort scrape: skip images that fail to download.
            pass
if __name__ == '__main__':
    # run the scrape only when executed as a script, not on import.
    main()
| gioGats/FGIC | yang_et_al/misc/image_search.py | Python | gpl-3.0 | 2,356 |
class Solution(object):
    def addDigits(self, num):
        """
        Return the digital root of num (repeatedly sum digits to one digit).
        :type num: int
        :rtype: int
        """
        # closed-form digital root; avoids the explicit divisible-by-9 case.
        return 0 if num == 0 else 1 + (num - 1) % 9
| Mlieou/leetcode_python | leetcode/python/ex_258.py | Python | mit | 221 |
# Import submodules so that we can expose their __all__
from mongoengine import connection
from mongoengine import document
from mongoengine import errors
from mongoengine import fields
from mongoengine import queryset
from mongoengine import signals
# Import everything from each submodule so that it can be accessed via
# mongoengine, e.g. instead of `from mongoengine.connection import connect`,
# users can simply use `from mongoengine import connect`, or even
# `from mongoengine import *` and then `connect('testdb')`.
from mongoengine.connection import *
from mongoengine.document import *
from mongoengine.errors import *
from mongoengine.fields import *
from mongoengine.queryset import *
from mongoengine.signals import *
__all__ = (list(document.__all__) + list(fields.__all__) +
list(connection.__all__) + list(queryset.__all__) +
list(signals.__all__) + list(errors.__all__))
VERSION = (0, 13, 0)
def get_version():
"""Return the VERSION as a string, e.g. for VERSION == (0, 10, 7),
return '0.10.7'.
"""
return '.'.join(map(str, VERSION))
__version__ = get_version()
| Pablo126/SSBW | Entrega1/lib/python3.5/site-packages/mongoengine/__init__.py | Python | gpl-3.0 | 1,127 |
from time import time
t= time()
def spiral(n, a, gid):
    """Sum one family of corner values of an odd clockwise number spiral.

    n   -- corner value of this family on the innermost 3x3 ring
    a   -- gap between the previous corner and n
    gid -- side length of the grid (odd); rings beyond 3x3 are accumulated
    """
    total = 0
    corner, step = n, a
    for _ in range(1, gid // 2):
        step += 8  # each ring widens the corner-to-corner gap by 8
        corner += step
        total += corner
    return total
# Project Euler 28: sum of the numbers on both diagonals of a 1001x1001
# clockwise number spiral. Each spiral() call accumulates one family of
# corners; the center and the inner 3x3 ring are added explicitly below.
# bottom right
n = 1001
br = spiral(3,2,n)
#bottom left
bl = spiral(5,4,n)
#top right
tr = spiral(9,8,n)
#top left
tl = spiral(7,6,n)
# add the center (1) and the 3x3 ring corners (3, 5, 7, 9):
s= br+bl+tl+tr + 1 + 3 + 5 + 7 + 9
# elapsed wall-clock time since the initial time() call:
t = time()-t
print(s)
print("{0:.6}s total time.".format(t))
| potato16/pythonl | projecteuler/p028.py | Python | apache-2.0 | 350 |
from django import forms
from models import UserVote
class UserVoteForm(forms.ModelForm):
    """ModelForm for creating a UserVote."""
    class Meta:
        model = UserVote
        # 'user' and 'date' are excluded from user input --
        # NOTE(review): presumably filled in server-side by the view; confirm.
        exclude = ('user', 'date')
| django-stars/dash2011 | presence/apps/vote/forms.py | Python | bsd-3-clause | 169 |
import pandas as pd
from larray.core.array import Array
from larray.inout.pandas import from_frame
__all__ = ['read_stata']
def read_stata(filepath_or_buffer, index_col=None, sort_rows=False, sort_columns=False, **kwargs) -> Array:
    r"""Load a Stata .dta file and return its contents as an Array.

    Parameters
    ----------
    filepath_or_buffer : str or file-like object
        Path to the .dta file, or an open file handle.
    index_col : str or None, optional
        Name of the column to use as index. Defaults to None.
    sort_rows : bool, optional
        Sort the rows alphabetically (only meaningful together with
        index_col; sorting is more efficient than not sorting). Defaults to False.
    sort_columns : bool, optional
        Sort the columns alphabetically. Defaults to False.
    **kwargs
        Extra keyword arguments forwarded to :func:`pandas.read_stata`.

    Returns
    -------
    Array

    See Also
    --------
    Array.to_stata

    Notes
    -----
    A round trip through Stata (Array.to_stata followed by read_stata)
    loses the name of the "column" axis.

    Examples
    --------
    >>> read_stata('test.dta')                      # doctest: +SKIP
    {0}\{1}  row  country  sex
          0    0       BE    F
          1    1       FR    M
          2    2       FR    F
    >>> read_stata('test.dta', index_col='row')     # doctest: +SKIP
    row\{1}  country  sex
          0       BE    F
          1       FR    M
          2       FR    F
    """
    frame = pd.read_stata(filepath_or_buffer, index_col=index_col, **kwargs)
    return from_frame(frame, sort_rows=sort_rows, sort_columns=sort_columns)
| gdementen/larray | larray/inout/stata.py | Python | gpl-3.0 | 1,657 |
from __future__ import with_statement
from itertools import chain
import datetime
import sys
import warnings
import time
import threading
import time as mod_time
from redis._compat import (b, basestring, bytes, imap, iteritems, iterkeys,
itervalues, izip, long, nativestr, unicode,
safe_unicode)
from redis.connection import (ConnectionPool, UnixDomainSocketConnection,
SSLConnection, Token)
from redis.lock import Lock, LuaLock
from redis.exceptions import (
ConnectionError,
DataError,
ExecAbortError,
NoScriptError,
PubSubError,
RedisError,
ResponseError,
TimeoutError,
WatchError,
)
SYM_EMPTY = b('')
def list_or_args(keys, args):
    # returns a single list combining keys and args
    try:
        iter(keys)
    except TypeError:
        # non-iterable scalar: wrap it
        keys = [keys]
    else:
        # strings/bytes are iterable but stand for a single key
        if isinstance(keys, (basestring, bytes)):
            keys = [keys]
    if args:
        keys.extend(args)
    return keys
def timestamp_to_datetime(response):
    "Converts a unix timestamp to a Python datetime object"
    if not response:
        return None
    try:
        ts = int(response)
    except ValueError:
        # non-numeric payload: nothing to convert
        return None
    return datetime.datetime.fromtimestamp(ts)
def string_keys_to_dict(key_string, callback):
    # map every whitespace-separated key name to the same callback
    return {key: callback for key in key_string.split()}
def dict_merge(*dicts):
    "Merge the given mappings left to right; later keys override earlier ones."
    combined = {}
    for mapping in dicts:
        combined.update(mapping)
    return combined
def parse_debug_object(response):
    "Parse the results of Redis's DEBUG OBJECT command into a Python dict"
    # The 'type' of the object is the first item in the response, but isn't
    # prefixed with a name
    response = 'type:' + nativestr(response)
    info = dict(kv.split(':') for kv in response.split())
    # parse some expected int values from the string response
    # note: this cmd isn't spec'd so these may not appear in all redis versions
    for field in ('refcount', 'serializedlength', 'lru', 'lru_seconds_idle'):
        if field in info:
            info[field] = int(info[field])
    return info
def parse_object(response, infotype):
    """Parse the reply of an OBJECT subcommand."""
    # Only the numeric subcommands need coercion; ENCODING et al. pass
    # straight through.
    numeric = infotype in ('idletime', 'refcount')
    return int_or_none(response) if numeric else response
def parse_info(response):
    "Parse the result of Redis's INFO command into a Python dict"
    info = {}
    response = nativestr(response)
    def get_value(value):
        # Scalar field: coerce to float (if it has a '.') or int, falling
        # back to the raw string when it isn't numeric.
        if ',' not in value or '=' not in value:
            try:
                if '.' in value:
                    return float(value)
                else:
                    return int(value)
            except ValueError:
                return value
        else:
            # Composite field such as "keys=1,expires=0": recurse on each
            # k=v item to build a nested dict. rsplit guards against '='
            # appearing inside the key.
            sub_dict = {}
            for item in value.split(','):
                k, v = item.rsplit('=', 1)
                sub_dict[k] = get_value(v)
            return sub_dict
    for line in response.splitlines():
        # Skip blank lines and '#'-prefixed section headers.
        if line and not line.startswith('#'):
            if line.find(':') != -1:
                key, value = line.split(':', 1)
                info[key] = get_value(value)
            else:
                # if the line isn't splittable, append it to the "__raw__" key
                info.setdefault('__raw__', []).append(line)
    return info
# Fields of a Sentinel state reply that must be coerced from strings to
# ints by pairs_to_dict_typed(); every field not listed here stays a string.
SENTINEL_STATE_TYPES = {
    'can-failover-its-master': int,
    'config-epoch': int,
    'down-after-milliseconds': int,
    'failover-timeout': int,
    'info-refresh': int,
    'last-hello-message': int,
    'last-ok-ping-reply': int,
    'last-ping-reply': int,
    'last-ping-sent': int,
    'master-link-down-time': int,
    'master-port': int,
    'num-other-sentinels': int,
    'num-slaves': int,
    'o-down-time': int,
    'pending-commands': int,
    'parallel-syncs': int,
    'port': int,
    'quorum': int,
    'role-reported-time': int,
    's-down-time': int,
    'slave-priority': int,
    'slave-repl-offset': int,
    'voted-leader-epoch': int
}
def parse_sentinel_state(item):
    """Build a Sentinel state dict, expanding the comma-separated ``flags``
    field into individual ``is_*`` booleans."""
    state = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES)
    flags = set(state['flags'].split(','))
    flag_map = {
        'is_master': 'master',
        'is_slave': 'slave',
        'is_sdown': 's_down',
        'is_odown': 'o_down',
        'is_sentinel': 'sentinel',
        'is_disconnected': 'disconnected',
        'is_master_down': 'master_down',
    }
    for key, flag in flag_map.items():
        state[key] = flag in flags
    return state
def parse_sentinel_master(response):
    """Parse a SENTINEL MASTER reply into a state dict."""
    decoded = imap(nativestr, response)
    return parse_sentinel_state(decoded)
def parse_sentinel_masters(response):
    """Parse a SENTINEL MASTERS reply into a dict keyed by master name."""
    states = (parse_sentinel_state(imap(nativestr, item)) for item in response)
    return dict((state['name'], state) for state in states)
def parse_sentinel_slaves_and_sentinels(response):
    """Parse SENTINEL SLAVES / SENTINEL SENTINELS replies into state dicts."""
    states = []
    for item in response:
        states.append(parse_sentinel_state(imap(nativestr, item)))
    return states
def parse_sentinel_get_master(response):
    """Return a ``(host, port)`` tuple, or None for an empty reply."""
    if not response:
        return None
    return response[0], int(response[1])
def pairs_to_dict(response):
    """Build a dict from a flat ``[k1, v1, k2, v2, ...]`` reply."""
    # Zipping an iterator with itself consumes the flat list two items
    # at a time.
    pairs = iter(response)
    return dict(izip(pairs, pairs))
def pairs_to_dict_typed(response, type_info):
    """Build a dict from a flat ``[k1, v1, ...]`` reply, coercing values
    whose key appears in ``type_info`` with the mapped callable.

    Values that fail coercion are kept in their original string form.
    """
    it = iter(response)
    result = {}
    for key, value in izip(it, it):
        if key in type_info:
            # A bare ``except:`` here would also swallow KeyboardInterrupt
            # and SystemExit; only genuine coercion failures should be
            # ignored.
            try:
                value = type_info[key](value)
            except Exception:
                # if for some reason the value can't be coerced, just use
                # the string value
                pass
        result[key] = value
    return result
def zset_score_pairs(response, **options):
    """
    Return ``(member, score)`` tuples when WITHSCORES was requested,
    otherwise pass the response through untouched.
    """
    if not (response and options['withscores']):
        return response
    cast = options.get('score_cast_func', float)
    members = iter(response)
    return list(izip(members, imap(cast, members)))
def sort_return_tuples(response, **options):
    """
    Regroup a flat SORT reply into n-element tuples when the GROUPS
    option was used (n comes from ``options['groups']``).
    """
    if not response or not options['groups']:
        return response
    size = options['groups']
    # Slice the flat reply into n interleaved columns, then zip them back
    # together row by row.
    columns = [response[offset::size] for offset in range(size)]
    return list(izip(*columns))
def int_or_none(response):
    """Coerce ``response`` to int, passing None through unchanged."""
    return None if response is None else int(response)
def float_or_none(response):
    """Coerce ``response`` to float, passing None through unchanged."""
    return None if response is None else float(response)
def bool_ok(response):
    """True when the server replied with a literal ``OK``."""
    decoded = nativestr(response)
    return decoded == 'OK'
def parse_client_list(response, **options):
    """Parse CLIENT LIST output into a list of dicts, one per client.

    Each line of the reply is a space-separated run of ``key=value``
    tokens describing one connection.
    """
    clients = []
    for c in nativestr(response).splitlines():
        # split('=', 1) rather than split('='): a value (e.g. a client
        # name set via CLIENT SETNAME) may itself contain '=' characters,
        # which would make the 2-tuple unpacking inside dict() blow up.
        clients.append(dict(pair.split('=', 1) for pair in c.split(' ')))
    return clients
def parse_config_get(response, **options):
    """Turn a CONFIG GET reply into a dict ({} when empty)."""
    decoded = [nativestr(i) if i is not None else None for i in response]
    if not decoded:
        return {}
    return pairs_to_dict(decoded)
def parse_scan(response, **options):
    """Split a SCAN reply into ``(cursor, results)``."""
    cursor, results = response
    return long(cursor), results
def parse_hscan(response, **options):
    """Split an HSCAN reply into ``(cursor, {field: value})``."""
    cursor, raw = response
    fields = pairs_to_dict(raw) if raw else {}
    return long(cursor), fields
def parse_zscan(response, **options):
    """Split a ZSCAN reply into ``(cursor, [(member, score), ...])``."""
    cast = options.get('score_cast_func', float)
    cursor, raw = response
    members = iter(raw)
    return long(cursor), list(izip(members, imap(cast, members)))
def parse_slowlog_get(response, **options):
    """Convert SLOWLOG GET entries into dicts."""
    entries = []
    for item in response:
        entries.append({
            'id': item[0],
            'start_time': int(item[1]),
            'duration': int(item[2]),
            'command': b(' ').join(item[3]),
        })
    return entries
class StrictRedis(object):
    """
    Implementation of the Redis protocol.
    This abstract class provides a Python interface to all Redis commands
    and an implementation of the Redis protocol.
    Connection and Pipeline derive from this, implementing how
    the commands are sent and received to the Redis server
    """
    # Maps a command name to a callable that converts the raw wire reply
    # into the value handed back to the caller. Commands sharing a reply
    # shape are grouped via string_keys_to_dict(); dict_merge() flattens
    # the groups into one lookup table. __init__ copies this per instance
    # so set_response_callback() never mutates the class-level table.
    RESPONSE_CALLBACKS = dict_merge(
        string_keys_to_dict(
            'AUTH EXISTS EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST '
            'PSETEX RENAMENX SISMEMBER SMOVE SETEX SETNX',
            bool
        ),
        string_keys_to_dict(
            'BITCOUNT BITPOS DECRBY DEL GETBIT HDEL HLEN INCRBY LINSERT LLEN '
            'LPUSHX PFADD PFCOUNT RPUSHX SADD SCARD SDIFFSTORE SETBIT '
            'SETRANGE SINTERSTORE SREM STRLEN SUNIONSTORE ZADD ZCARD '
            'ZLEXCOUNT ZREM ZREMRANGEBYLEX ZREMRANGEBYRANK ZREMRANGEBYSCORE',
            int
        ),
        string_keys_to_dict('INCRBYFLOAT HINCRBYFLOAT', float),
        string_keys_to_dict(
            # these return OK, or int if redis-server is >=1.3.4
            'LPUSH RPUSH',
            lambda r: isinstance(r, long) and r or nativestr(r) == 'OK'
        ),
        string_keys_to_dict('SORT', sort_return_tuples),
        string_keys_to_dict('ZSCORE ZINCRBY', float_or_none),
        string_keys_to_dict(
            'FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE RENAME '
            'SAVE SELECT SHUTDOWN SLAVEOF WATCH UNWATCH',
            bool_ok
        ),
        string_keys_to_dict('BLPOP BRPOP', lambda r: r and tuple(r) or None),
        string_keys_to_dict(
            'SDIFF SINTER SMEMBERS SUNION',
            lambda r: r and set(r) or set()
        ),
        string_keys_to_dict(
            'ZRANGE ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE',
            zset_score_pairs
        ),
        string_keys_to_dict('ZRANK ZREVRANK', int_or_none),
        string_keys_to_dict('BGREWRITEAOF BGSAVE', lambda r: True),
        # One-off commands whose replies need dedicated parsing.
        {
            'CLIENT GETNAME': lambda r: r and nativestr(r),
            'CLIENT KILL': bool_ok,
            'CLIENT LIST': parse_client_list,
            'CLIENT SETNAME': bool_ok,
            'CONFIG GET': parse_config_get,
            'CONFIG RESETSTAT': bool_ok,
            'CONFIG SET': bool_ok,
            'DEBUG OBJECT': parse_debug_object,
            'HGETALL': lambda r: r and pairs_to_dict(r) or {},
            'HSCAN': parse_hscan,
            'INFO': parse_info,
            'LASTSAVE': timestamp_to_datetime,
            'OBJECT': parse_object,
            'PING': lambda r: nativestr(r) == 'PONG',
            'RANDOMKEY': lambda r: r and r or None,
            'SCAN': parse_scan,
            'SCRIPT EXISTS': lambda r: list(imap(bool, r)),
            'SCRIPT FLUSH': bool_ok,
            'SCRIPT KILL': bool_ok,
            'SCRIPT LOAD': nativestr,
            'SENTINEL GET-MASTER-ADDR-BY-NAME': parse_sentinel_get_master,
            'SENTINEL MASTER': parse_sentinel_master,
            'SENTINEL MASTERS': parse_sentinel_masters,
            'SENTINEL MONITOR': bool_ok,
            'SENTINEL REMOVE': bool_ok,
            'SENTINEL SENTINELS': parse_sentinel_slaves_and_sentinels,
            'SENTINEL SET': bool_ok,
            'SENTINEL SLAVES': parse_sentinel_slaves_and_sentinels,
            'SET': lambda r: r and nativestr(r) == 'OK',
            'SLOWLOG GET': parse_slowlog_get,
            'SLOWLOG LEN': int,
            'SLOWLOG RESET': bool_ok,
            'SSCAN': parse_scan,
            'TIME': lambda x: (int(x[0]), int(x[1])),
            'ZSCAN': parse_zscan
        }
    )
@classmethod
def from_url(cls, url, db=None, **kwargs):
"""
Return a Redis client object configured from the given URL.
For example::
redis://[:password]@localhost:6379/0
unix://[:password]@/path/to/socket.sock?db=0
There are several ways to specify a database number. The parse function
will return the first specified option:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// scheme, the path argument of the url, e.g.
redis://localhost/0
3. The ``db`` argument to this function.
If none of these options are specified, db=0 is used.
Any additional querystring arguments and keyword arguments will be
passed along to the ConnectionPool class's initializer. In the case
of conflicting arguments, querystring arguments always win.
"""
connection_pool = ConnectionPool.from_url(url, db=db, **kwargs)
return cls(connection_pool=connection_pool)
    def __init__(self, host='localhost', port=6379,
                 db=0, password=None, socket_timeout=None,
                 socket_connect_timeout=None,
                 socket_keepalive=None, socket_keepalive_options=None,
                 connection_pool=None, unix_socket_path=None,
                 encoding='utf-8', encoding_errors='strict',
                 charset=None, errors=None,
                 decode_responses=False, retry_on_timeout=False,
                 ssl=False, ssl_keyfile=None, ssl_certfile=None,
                 ssl_cert_reqs=None, ssl_ca_certs=None,
                 max_connections=None):
        # When no pool is supplied, build one from the individual settings
        # below; when a pool IS supplied, every connection-level argument
        # here is ignored.
        if not connection_pool:
            # Legacy aliases: charset -> encoding, errors -> encoding_errors.
            if charset is not None:
                warnings.warn(DeprecationWarning(
                    '"charset" is deprecated. Use "encoding" instead'))
                encoding = charset
            if errors is not None:
                warnings.warn(DeprecationWarning(
                    '"errors" is deprecated. Use "encoding_errors" instead'))
                encoding_errors = errors
            kwargs = {
                'db': db,
                'password': password,
                'socket_timeout': socket_timeout,
                'encoding': encoding,
                'encoding_errors': encoding_errors,
                'decode_responses': decode_responses,
                'retry_on_timeout': retry_on_timeout,
                'max_connections': max_connections
            }
            # based on input, setup appropriate connection args
            if unix_socket_path is not None:
                kwargs.update({
                    'path': unix_socket_path,
                    'connection_class': UnixDomainSocketConnection
                })
            else:
                # TCP specific options
                kwargs.update({
                    'host': host,
                    'port': port,
                    'socket_connect_timeout': socket_connect_timeout,
                    'socket_keepalive': socket_keepalive,
                    'socket_keepalive_options': socket_keepalive_options,
                })
                # SSL is only meaningful for TCP connections.
                if ssl:
                    kwargs.update({
                        'connection_class': SSLConnection,
                        'ssl_keyfile': ssl_keyfile,
                        'ssl_certfile': ssl_certfile,
                        'ssl_cert_reqs': ssl_cert_reqs,
                        'ssl_ca_certs': ssl_ca_certs,
                    })
            connection_pool = ConnectionPool(**kwargs)
        self.connection_pool = connection_pool
        # None means "not probed yet": lock() lazily decides between the
        # Lua-script lock and the plain Lock on first use.
        self._use_lua_lock = None
        # Per-instance copy so set_response_callback() can't mutate the
        # class-level table shared by all clients.
        self.response_callbacks = self.__class__.RESPONSE_CALLBACKS.copy()
def __repr__(self):
return "%s<%s>" % (type(self).__name__, repr(self.connection_pool))
def set_response_callback(self, command, callback):
"Set a custom Response Callback"
self.response_callbacks[command] = callback
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
"""
return StrictPipeline(
self.connection_pool,
self.response_callbacks,
transaction,
shard_hint)
def transaction(self, func, *watches, **kwargs):
"""
Convenience method for executing the callable `func` as a transaction
while watching all keys specified in `watches`. The 'func' callable
should expect a single argument which is a Pipeline object.
"""
shard_hint = kwargs.pop('shard_hint', None)
value_from_callable = kwargs.pop('value_from_callable', False)
watch_delay = kwargs.pop('watch_delay', None)
with self.pipeline(True, shard_hint) as pipe:
while 1:
try:
if watches:
pipe.watch(*watches)
func_value = func(pipe)
exec_value = pipe.execute()
return func_value if value_from_callable else exec_value
except WatchError:
if watch_delay is not None and watch_delay > 0:
time.sleep(watch_delay)
continue
    def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None,
             lock_class=None, thread_local=True):
        """
        Return a new Lock object using key ``name`` that mimics
        the behavior of threading.Lock.
        If specified, ``timeout`` indicates a maximum life for the lock.
        By default, it will remain locked until release() is called.
        ``sleep`` indicates the amount of time to sleep per loop iteration
        when the lock is in blocking mode and another client is currently
        holding the lock.
        ``blocking_timeout`` indicates the maximum amount of time in seconds to
        spend trying to acquire the lock. A value of ``None`` indicates
        continue trying forever. ``blocking_timeout`` can be specified as a
        float or integer, both representing the number of seconds to wait.
        ``lock_class`` forces the specified lock implementation.
        ``thread_local`` indicates whether the lock token is placed in
        thread-local storage. By default, the token is placed in thread local
        storage so that a thread only sees its token, not a token set by
        another thread. Consider the following timeline:
        time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
        thread-1 sets the token to "abc"
        time: 1, thread-2 blocks trying to acquire `my-lock` using the
        Lock instance.
        time: 5, thread-1 has not yet completed. redis expires the lock
        key.
        time: 5, thread-2 acquired `my-lock` now that it's available.
        thread-2 sets the token to "xyz"
        time: 6, thread-1 finishes its work and calls release(). if the
        token is *not* stored in thread local storage, then
        thread-1 would see the token value as "xyz" and would be
        able to successfully release the thread-2's lock.
        In some use cases it's necessary to disable thread local storage. For
        example, if you have code where one thread acquires a lock and passes
        that lock instance to a worker thread to release later. If thread
        local storage isn't disabled in this case, the worker thread won't see
        the token set by the thread that acquired the lock. Our assumption
        is that these cases aren't common and as such default to using
        thread local storage. """
        if lock_class is None:
            if self._use_lua_lock is None:
                # the first time .lock() is called, determine if we can use
                # Lua by attempting to register the necessary scripts
                try:
                    LuaLock.register_scripts(self)
                    self._use_lua_lock = True
                except ResponseError:
                    # Script registration refused: fall back to the plain
                    # SETNX-based Lock for the lifetime of this client.
                    self._use_lua_lock = False
            lock_class = self._use_lua_lock and LuaLock or Lock
        return lock_class(self, name, timeout=timeout, sleep=sleep,
                          blocking_timeout=blocking_timeout,
                          thread_local=thread_local)
def pubsub(self, **kwargs):
"""
Return a Publish/Subscribe object. With this object, you can
subscribe to channels and listen for messages that get published to
them.
"""
return PubSub(self.connection_pool, **kwargs)
    # COMMAND EXECUTION AND PROTOCOL PARSING
    def execute_command(self, *args, **options):
        "Execute a command and return a parsed response"
        pool = self.connection_pool
        command_name = args[0]
        connection = pool.get_connection(command_name, **options)
        try:
            connection.send_command(*args)
            return self.parse_response(connection, command_name, **options)
        except (ConnectionError, TimeoutError) as e:
            # Drop the (possibly broken) socket, then retry the command
            # exactly once on a fresh connection. Timeouts are only
            # retried when the connection opts in via retry_on_timeout.
            connection.disconnect()
            if not connection.retry_on_timeout and isinstance(e, TimeoutError):
                raise
            connection.send_command(*args)
            return self.parse_response(connection, command_name, **options)
        finally:
            # Always hand the connection back to the pool, even on failure.
            pool.release(connection)
def parse_response(self, connection, command_name, **options):
"Parses a response from the Redis server"
response = connection.read_response()
if command_name in self.response_callbacks:
return self.response_callbacks[command_name](response, **options)
return response
    # SERVER INFORMATION
    # Thin wrappers: each issues one command via execute_command() and
    # relies on RESPONSE_CALLBACKS for reply shaping.
    def bgrewriteaof(self):
        "Tell the Redis server to rewrite the AOF file from data in memory."
        return self.execute_command('BGREWRITEAOF')
    def bgsave(self):
        """
        Tell the Redis server to save its data to disk. Unlike save(),
        this method is asynchronous and returns immediately.
        """
        return self.execute_command('BGSAVE')
    def client_kill(self, address):
        "Disconnects the client at ``address`` (ip:port)"
        return self.execute_command('CLIENT KILL', address)
    def client_list(self):
        "Returns a list of currently connected clients"
        return self.execute_command('CLIENT LIST')
    def client_getname(self):
        "Returns the current connection name"
        return self.execute_command('CLIENT GETNAME')
    def client_setname(self, name):
        "Sets the current connection name"
        return self.execute_command('CLIENT SETNAME', name)
    def config_get(self, pattern="*"):
        "Return a dictionary of configuration based on the ``pattern``"
        # ``pattern`` is a glob matched server-side against setting names.
        return self.execute_command('CONFIG GET', pattern)
    def config_set(self, name, value):
        "Set config item ``name`` with ``value``"
        return self.execute_command('CONFIG SET', name, value)
    def config_resetstat(self):
        "Reset runtime statistics"
        return self.execute_command('CONFIG RESETSTAT')
    def config_rewrite(self):
        "Rewrite config file with the minimal change to reflect running config"
        return self.execute_command('CONFIG REWRITE')
    def dbsize(self):
        "Returns the number of keys in the current database"
        return self.execute_command('DBSIZE')
    def debug_object(self, key):
        "Returns version specific meta information about a given key"
        return self.execute_command('DEBUG OBJECT', key)
    def echo(self, value):
        "Echo the string back from the server"
        return self.execute_command('ECHO', value)
    def flushall(self):
        "Delete all keys in all databases on the current host"
        return self.execute_command('FLUSHALL')
    def flushdb(self):
        "Delete all keys in the current database"
        return self.execute_command('FLUSHDB')
def info(self, section=None):
"""
Returns a dictionary containing information about the Redis server
The ``section`` option can be used to select a specific section
of information
The section option is not supported by older versions of Redis Server,
and will generate ResponseError
"""
if section is None:
return self.execute_command('INFO')
else:
return self.execute_command('INFO', section)
    def lastsave(self):
        """
        Return a Python datetime object representing the last time the
        Redis database was saved to disk
        """
        return self.execute_command('LASTSAVE')
    def object(self, infotype, key):
        "Return the encoding, idletime, or refcount about the key"
        # ``infotype`` is also passed as a response option so parse_object()
        # knows whether the reply must be coerced to an int.
        return self.execute_command('OBJECT', infotype, key, infotype=infotype)
    def ping(self):
        "Ping the Redis server"
        return self.execute_command('PING')
    def save(self):
        """
        Tell the Redis server to save its data to disk,
        blocking until the save is complete
        """
        return self.execute_command('SAVE')
    def sentinel(self, *args):
        "Redis Sentinel's SENTINEL command."
        # Deprecated stub: only warns and returns None; no command is sent.
        warnings.warn(
            DeprecationWarning('Use the individual sentinel_* methods'))
    def sentinel_get_master_addr_by_name(self, service_name):
        "Returns a (host, port) pair for the given ``service_name``"
        return self.execute_command('SENTINEL GET-MASTER-ADDR-BY-NAME',
                                    service_name)
    def sentinel_master(self, service_name):
        "Returns a dictionary containing the specified masters state."
        return self.execute_command('SENTINEL MASTER', service_name)
    def sentinel_masters(self):
        "Returns a list of dictionaries containing each master's state."
        return self.execute_command('SENTINEL MASTERS')
    def sentinel_monitor(self, name, ip, port, quorum):
        "Add a new master to Sentinel to be monitored"
        return self.execute_command('SENTINEL MONITOR', name, ip, port, quorum)
    def sentinel_remove(self, name):
        "Remove a master from Sentinel's monitoring"
        return self.execute_command('SENTINEL REMOVE', name)
    def sentinel_sentinels(self, service_name):
        "Returns a list of sentinels for ``service_name``"
        return self.execute_command('SENTINEL SENTINELS', service_name)
    def sentinel_set(self, name, option, value):
        "Set Sentinel monitoring parameters for a given master"
        return self.execute_command('SENTINEL SET', name, option, value)
    def sentinel_slaves(self, service_name):
        "Returns a list of slaves for ``service_name``"
        return self.execute_command('SENTINEL SLAVES', service_name)
def shutdown(self):
"Shutdown the server"
try:
self.execute_command('SHUTDOWN')
except ConnectionError:
# a ConnectionError here is expected
return
raise RedisError("SHUTDOWN seems to have failed.")
def slaveof(self, host=None, port=None):
"""
Set the server to be a replicated slave of the instance identified
by the ``host`` and ``port``. If called without arguments, the
instance is promoted to a master instead.
"""
if host is None and port is None:
return self.execute_command('SLAVEOF', Token('NO'), Token('ONE'))
return self.execute_command('SLAVEOF', host, port)
    def slowlog_get(self, num=None):
        """
        Get the entries from the slowlog. If ``num`` is specified, get the
        most recent ``num`` items.
        """
        args = ['SLOWLOG GET']
        if num is not None:
            args.append(num)
        return self.execute_command(*args)
    def slowlog_len(self):
        "Get the number of items in the slowlog"
        return self.execute_command('SLOWLOG LEN')
    def slowlog_reset(self):
        "Remove all items in the slowlog"
        return self.execute_command('SLOWLOG RESET')
    def time(self):
        """
        Returns the server time as a 2-item tuple of ints:
        (seconds since epoch, microseconds into this second).
        """
        return self.execute_command('TIME')
    def wait(self, num_replicas, timeout):
        """
        Redis synchronous replication
        That returns the number of replicas that processed the query when
        we finally have at least ``num_replicas``, or when the ``timeout`` was
        reached.
        """
        return self.execute_command('WAIT', num_replicas, timeout)
    # BASIC KEY COMMANDS
    def append(self, key, value):
        """
        Appends the string ``value`` to the value at ``key``. If ``key``
        doesn't already exist, create it with a value of ``value``.
        Returns the new length of the value at ``key``.
        """
        return self.execute_command('APPEND', key, value)
def bitcount(self, key, start=None, end=None):
"""
Returns the count of set bits in the value of ``key``. Optional
``start`` and ``end`` paramaters indicate which bytes to consider
"""
params = [key]
if start is not None and end is not None:
params.append(start)
params.append(end)
elif (start is not None and end is None) or \
(end is not None and start is None):
raise RedisError("Both start and end must be specified")
return self.execute_command('BITCOUNT', *params)
def bitop(self, operation, dest, *keys):
"""
Perform a bitwise operation using ``operation`` between ``keys`` and
store the result in ``dest``.
"""
return self.execute_command('BITOP', operation, dest, *keys)
def bitpos(self, key, bit, start=None, end=None):
"""
Return the position of the first bit set to 1 or 0 in a string.
``start`` and ``end`` difines search range. The range is interpreted
as a range of bytes and not a range of bits, so start=0 and end=2
means to look at the first three bytes.
"""
if bit not in (0, 1):
raise RedisError('bit must be 0 or 1')
params = [key, bit]
start is not None and params.append(start)
if start is not None and end is not None:
params.append(end)
elif start is None and end is not None:
raise RedisError("start argument is not set, "
"when end is specified")
return self.execute_command('BITPOS', *params)
    def decr(self, name, amount=1):
        """
        Decrements the value of ``key`` by ``amount``. If no key exists,
        the value will be initialized as 0 - ``amount``
        """
        return self.execute_command('DECRBY', name, amount)
    def delete(self, *names):
        "Delete one or more keys specified by ``names``"
        return self.execute_command('DEL', *names)
    def __delitem__(self, name):
        # Supports ``del client[name]``.
        self.delete(name)
    def dump(self, name):
        """
        Return a serialized version of the value stored at the specified key.
        If key does not exist a nil bulk reply is returned.
        """
        return self.execute_command('DUMP', name)
    def exists(self, name):
        "Returns a boolean indicating whether key ``name`` exists"
        return self.execute_command('EXISTS', name)
    # Supports ``name in client``.
    __contains__ = exists
    def expire(self, name, time):
        """
        Set an expire flag on key ``name`` for ``time`` seconds. ``time``
        can be represented by an integer or a Python timedelta object.
        """
        if isinstance(time, datetime.timedelta):
            # Whole seconds only: microseconds are deliberately truncated.
            time = time.seconds + time.days * 24 * 3600
        return self.execute_command('EXPIRE', name, time)
    def expireat(self, name, when):
        """
        Set an expire flag on key ``name``. ``when`` can be represented
        as an integer indicating unix time or a Python datetime object.
        """
        if isinstance(when, datetime.datetime):
            # mktime() interprets the datetime in local time.
            when = int(mod_time.mktime(when.timetuple()))
        return self.execute_command('EXPIREAT', name, when)
    def get(self, name):
        """
        Return the value at key ``name``, or None if the key doesn't exist
        """
        return self.execute_command('GET', name)
    def __getitem__(self, name):
        """
        Return the value at key ``name``, raises a KeyError if the key
        doesn't exist.
        """
        value = self.get(name)
        if value is not None:
            return value
        raise KeyError(name)
    def getbit(self, name, offset):
        "Returns a boolean indicating the value of ``offset`` in ``name``"
        return self.execute_command('GETBIT', name, offset)
    def getrange(self, key, start, end):
        """
        Returns the substring of the string value stored at ``key``,
        determined by the offsets ``start`` and ``end`` (both are inclusive)
        """
        return self.execute_command('GETRANGE', key, start, end)
    def getset(self, name, value):
        """
        Sets the value at key ``name`` to ``value``
        and returns the old value at key ``name`` atomically.
        """
        return self.execute_command('GETSET', name, value)
    def incr(self, name, amount=1):
        """
        Increments the value of ``key`` by ``amount``. If no key exists,
        the value will be initialized as ``amount``
        """
        return self.execute_command('INCRBY', name, amount)
    def incrby(self, name, amount=1):
        """
        Increments the value of ``key`` by ``amount``. If no key exists,
        the value will be initialized as ``amount``
        """
        # An alias for ``incr()``, because it is already implemented
        # as INCRBY redis command.
        return self.incr(name, amount)
    def incrbyfloat(self, name, amount=1.0):
        """
        Increments the value at key ``name`` by floating ``amount``.
        If no key exists, the value will be initialized as ``amount``
        """
        return self.execute_command('INCRBYFLOAT', name, amount)
    def keys(self, pattern='*'):
        "Returns a list of keys matching ``pattern``"
        return self.execute_command('KEYS', pattern)
    def mget(self, keys, *args):
        """
        Returns a list of values ordered identically to ``keys``
        """
        # Extra positional args are folded into ``keys`` (see list_or_args).
        args = list_or_args(keys, args)
        return self.execute_command('MGET', *args)
def mset(self, *args, **kwargs):
"""
Sets key/values based on a mapping. Mapping can be supplied as a single
dictionary argument or as kwargs.
"""
if args:
if len(args) != 1 or not isinstance(args[0], dict):
raise RedisError('MSET requires **kwargs or a single dict arg')
kwargs.update(args[0])
items = []
for pair in iteritems(kwargs):
items.extend(pair)
return self.execute_command('MSET', *items)
def msetnx(self, *args, **kwargs):
"""
Sets key/values based on a mapping if none of the keys are already set.
Mapping can be supplied as a single dictionary argument or as kwargs.
Returns a boolean indicating if the operation was successful.
"""
if args:
if len(args) != 1 or not isinstance(args[0], dict):
raise RedisError('MSETNX requires **kwargs or a single '
'dict arg')
kwargs.update(args[0])
items = []
for pair in iteritems(kwargs):
items.extend(pair)
return self.execute_command('MSETNX', *items)
    def move(self, name, db):
        "Moves the key ``name`` to a different Redis database ``db``"
        return self.execute_command('MOVE', name, db)
    def persist(self, name):
        "Removes an expiration on ``name``"
        return self.execute_command('PERSIST', name)
    def pexpire(self, name, time):
        """
        Set an expire flag on key ``name`` for ``time`` milliseconds.
        ``time`` can be represented by an integer or a Python timedelta
        object.
        """
        if isinstance(time, datetime.timedelta):
            # Whole milliseconds: sub-millisecond precision is truncated.
            ms = int(time.microseconds / 1000)
            time = (time.seconds + time.days * 24 * 3600) * 1000 + ms
        return self.execute_command('PEXPIRE', name, time)
    def pexpireat(self, name, when):
        """
        Set an expire flag on key ``name``. ``when`` can be represented
        as an integer representing unix time in milliseconds (unix time * 1000)
        or a Python datetime object.
        """
        if isinstance(when, datetime.datetime):
            # mktime() interprets the datetime in local time.
            ms = int(when.microsecond / 1000)
            when = int(mod_time.mktime(when.timetuple())) * 1000 + ms
        return self.execute_command('PEXPIREAT', name, when)
    def psetex(self, name, time_ms, value):
        """
        Set the value of key ``name`` to ``value`` that expires in ``time_ms``
        milliseconds. ``time_ms`` can be represented by an integer or a Python
        timedelta object
        """
        if isinstance(time_ms, datetime.timedelta):
            ms = int(time_ms.microseconds / 1000)
            time_ms = (time_ms.seconds + time_ms.days * 24 * 3600) * 1000 + ms
        return self.execute_command('PSETEX', name, time_ms, value)
    def pttl(self, name):
        "Returns the number of milliseconds until the key ``name`` will expire"
        return self.execute_command('PTTL', name)
    def randomkey(self):
        "Returns the name of a random key"
        return self.execute_command('RANDOMKEY')
    def rename(self, src, dst):
        """
        Rename key ``src`` to ``dst``
        """
        return self.execute_command('RENAME', src, dst)
    def renamenx(self, src, dst):
        "Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist"
        return self.execute_command('RENAMENX', src, dst)
    def restore(self, name, ttl, value):
        """
        Create a key using the provided serialized value, previously obtained
        using DUMP.
        """
        return self.execute_command('RESTORE', name, ttl, value)
    def set(self, name, value, ex=None, px=None, nx=False, xx=False):
        """
        Set the value at key ``name`` to ``value``
        ``ex`` sets an expire flag on key ``name`` for ``ex`` seconds.
        ``px`` sets an expire flag on key ``name`` for ``px`` milliseconds.
        ``nx`` if set to True, set the value at key ``name`` to ``value`` if it
        does not already exist.
        ``xx`` if set to True, set the value at key ``name`` to ``value`` if it
        already exists.
        """
        pieces = [name, value]
        # NOTE(review): truthiness tests mean ex=0 / px=0 are silently
        # ignored rather than rejected -- confirm this is intended.
        if ex:
            pieces.append('EX')
            if isinstance(ex, datetime.timedelta):
                ex = ex.seconds + ex.days * 24 * 3600
            pieces.append(ex)
        if px:
            pieces.append('PX')
            if isinstance(px, datetime.timedelta):
                ms = int(px.microseconds / 1000)
                px = (px.seconds + px.days * 24 * 3600) * 1000 + ms
            pieces.append(px)
        if nx:
            pieces.append('NX')
        if xx:
            pieces.append('XX')
        return self.execute_command('SET', *pieces)
    def __setitem__(self, name, value):
        # Supports dict-style assignment: ``client[name] = value`` -> SET.
        self.set(name, value)
def setbit(self, name, offset, value):
"""
Flag the ``offset`` in ``name`` as ``value``. Returns a boolean
indicating the previous value of ``offset``.
"""
value = value and 1 or 0
return self.execute_command('SETBIT', name, offset, value)
    def setex(self, name, time, value):
        """
        Set the value of key ``name`` to ``value`` that expires in ``time``
        seconds. ``time`` can be represented by an integer or a Python
        timedelta object.
        """
        if isinstance(time, datetime.timedelta):
            # Whole seconds only: microseconds are deliberately truncated.
            time = time.seconds + time.days * 24 * 3600
        return self.execute_command('SETEX', name, time, value)
    def setnx(self, name, value):
        "Set the value of key ``name`` to ``value`` if key doesn't exist"
        return self.execute_command('SETNX', name, value)
    def setrange(self, name, offset, value):
        """
        Overwrite bytes in the value of ``name`` starting at ``offset`` with
        ``value``. If ``offset`` plus the length of ``value`` exceeds the
        length of the original value, the new value will be larger than before.
        If ``offset`` exceeds the length of the original value, null bytes
        will be used to pad between the end of the previous value and the start
        of what's being injected.
        Returns the length of the new string.
        """
        return self.execute_command('SETRANGE', name, offset, value)
    def strlen(self, name):
        "Return the number of bytes stored in the value of ``name``"
        return self.execute_command('STRLEN', name)
    def substr(self, name, start, end=-1):
        """
        Return a substring of the string at key ``name``. ``start`` and ``end``
        are 0-based integers specifying the portion of the string to return.
        """
        return self.execute_command('SUBSTR', name, start, end)
    def ttl(self, name):
        "Returns the number of seconds until the key ``name`` will expire"
        return self.execute_command('TTL', name)
    def type(self, name):
        "Returns the type of key ``name``"
        return self.execute_command('TYPE', name)
    def watch(self, *names):
        """
        Watches the values at keys ``names``, or None if the key doesn't exist
        """
        # Deprecated warning-only stub: WATCH is only meaningful inside a
        # pipeline, so no command is issued here.
        warnings.warn(DeprecationWarning('Call WATCH from a Pipeline object'))
    def unwatch(self):
        """
        Unwatches the value at key ``name``, or None of the key doesn't exist
        """
        # Deprecated warning-only stub; see watch().
        warnings.warn(
            DeprecationWarning('Call UNWATCH from a Pipeline object'))
# LIST COMMANDS
def blpop(self, keys, timeout=0):
"""
LPOP a value off of the first non-empty list
named in the ``keys`` list.
If none of the lists in ``keys`` has a value to LPOP, then block
for ``timeout`` seconds, or until a value gets pushed on to one
of the lists.
If timeout is 0, then block indefinitely.
"""
if timeout is None:
timeout = 0
if isinstance(keys, basestring):
keys = [keys]
else:
keys = list(keys)
keys.append(timeout)
return self.execute_command('BLPOP', *keys)
def brpop(self, keys, timeout=0):
    """
    RPOP a value off of the first non-empty list
    named in the ``keys`` list.

    If none of the lists in ``keys`` has a value to RPOP, then block
    for ``timeout`` seconds, or until a value gets pushed on to one
    of the lists.

    If timeout is 0, then block indefinitely.
    """
    # None is treated the same as 0: block forever.
    if timeout is None:
        timeout = 0
    # Accept a single key or an iterable of keys; the timeout must be
    # the last argument of the BRPOP command.
    if isinstance(keys, basestring):
        keys = [keys]
    else:
        keys = list(keys)
    keys.append(timeout)
    return self.execute_command('BRPOP', *keys)
def brpoplpush(self, src, dst, timeout=0):
    """
    Pop a value off the tail of ``src``, push it on the head of ``dst``
    and then return it.

    This command blocks until a value is present in ``src`` or until
    ``timeout`` seconds elapse, whichever comes first. A ``timeout`` of
    0 (or None) blocks forever.
    """
    # None is normalized to 0, i.e. block indefinitely.
    timeout = 0 if timeout is None else timeout
    return self.execute_command('BRPOPLPUSH', src, dst, timeout)
def lindex(self, name, index):
    """
    Return the item from list ``name`` at position ``index``

    Negative indexes are supported and will return an item at the
    end of the list
    """
    return self.execute_command('LINDEX', name, index)

def linsert(self, name, where, refvalue, value):
    """
    Insert ``value`` in list ``name`` either immediately before or after
    [``where``] ``refvalue``

    Returns the new length of the list on success or -1 if ``refvalue``
    is not in the list.
    """
    return self.execute_command('LINSERT', name, where, refvalue, value)

def llen(self, name):
    "Return the length of the list ``name``"
    return self.execute_command('LLEN', name)

def lpop(self, name):
    "Remove and return the first item of the list ``name``"
    return self.execute_command('LPOP', name)

def lpush(self, name, *values):
    "Push ``values`` onto the head of the list ``name``"
    return self.execute_command('LPUSH', name, *values)

def lpushx(self, name, value):
    "Push ``value`` onto the head of the list ``name`` if ``name`` exists"
    return self.execute_command('LPUSHX', name, value)

def lrange(self, name, start, end):
    """
    Return a slice of the list ``name`` between
    position ``start`` and ``end``

    ``start`` and ``end`` can be negative numbers just like
    Python slicing notation
    """
    return self.execute_command('LRANGE', name, start, end)
def lrem(self, name, count, value):
    """
    Remove the first ``count`` occurrences of elements equal to ``value``
    from the list stored at ``name``.

    The count argument influences the operation in the following ways:
        count > 0: Remove elements equal to value moving from head to tail.
        count < 0: Remove elements equal to value moving from tail to head.
        count = 0: Remove all elements equal to value.

    NOTE: the argument order here (count before value) follows the Redis
    protocol; the ``Redis`` compatibility subclass reverses it.
    """
    return self.execute_command('LREM', name, count, value)

def lset(self, name, index, value):
    "Set element at ``index`` of list ``name`` to ``value``"
    return self.execute_command('LSET', name, index, value)

def ltrim(self, name, start, end):
    """
    Trim the list ``name``, removing all values not within the slice
    between ``start`` and ``end``

    ``start`` and ``end`` can be negative numbers just like
    Python slicing notation
    """
    return self.execute_command('LTRIM', name, start, end)

def rpop(self, name):
    "Remove and return the last item of the list ``name``"
    return self.execute_command('RPOP', name)

def rpoplpush(self, src, dst):
    """
    RPOP a value off of the ``src`` list and atomically LPUSH it
    on to the ``dst`` list. Returns the value.
    """
    return self.execute_command('RPOPLPUSH', src, dst)

def rpush(self, name, *values):
    "Push ``values`` onto the tail of the list ``name``"
    return self.execute_command('RPUSH', name, *values)

def rpushx(self, name, value):
    "Push ``value`` onto the tail of the list ``name`` if ``name`` exists"
    return self.execute_command('RPUSHX', name, value)
def sort(self, name, start=None, num=None, by=None, get=None,
         desc=False, alpha=False, store=None, groups=False):
    """
    Sort and return the list, set or sorted set at ``name``.

    ``start`` and ``num`` allow for paging through the sorted data

    ``by`` allows using an external key to weight and sort the items.
        Use an "*" to indicate where in the key the item value is located

    ``get`` allows for returning items from external keys rather than the
        sorted data itself. Use an "*" to indicate where in the key
        the item value is located

    ``desc`` allows for reversing the sort

    ``alpha`` allows for sorting lexicographically rather than numerically

    ``store`` allows for storing the result of the sort into
        the key ``store``

    ``groups`` if set to True and if ``get`` contains at least two
        elements, sort will return a list of tuples, each containing the
        values fetched from the arguments to ``get``.
    """
    # start/num map to SORT's LIMIT clause; both or neither must be given.
    if (start is not None and num is None) or \
            (num is not None and start is None):
        raise RedisError("``start`` and ``num`` must both be specified")
    # NOTE: the SORT command is order-sensitive; pieces are appended in
    # the order the Redis protocol expects:
    # key [BY ...] [LIMIT ...] [GET ...] [DESC] [ALPHA] [STORE ...]
    pieces = [name]
    if by is not None:
        pieces.append(Token('BY'))
        pieces.append(by)
    if start is not None and num is not None:
        pieces.append(Token('LIMIT'))
        pieces.append(start)
        pieces.append(num)
    if get is not None:
        # If get is a string assume we want to get a single value.
        # Otherwise assume it's an iterable and we want to get multiple
        # values. We can't just iterate blindly because strings are
        # iterable.
        if isinstance(get, basestring):
            pieces.append(Token('GET'))
            pieces.append(get)
        else:
            for g in get:
                pieces.append(Token('GET'))
                pieces.append(g)
    if desc:
        pieces.append(Token('DESC'))
    if alpha:
        pieces.append(Token('ALPHA'))
    if store is not None:
        pieces.append(Token('STORE'))
        pieces.append(store)
    if groups:
        # Grouping only makes sense with two or more explicit GET keys.
        if not get or isinstance(get, basestring) or len(get) < 2:
            raise DataError('when using "groups" the "get" argument '
                            'must be specified and contain at least '
                            'two keys')
    # The response callback uses 'groups' to chunk the flat reply into
    # tuples of len(get) items.
    options = {'groups': len(get) if groups else None}
    return self.execute_command('SORT', *pieces, **options)
# SCAN COMMANDS
def scan(self, cursor=0, match=None, count=None):
    """
    Incrementally return lists of key names. Also return a cursor
    indicating the scan position.

    ``match`` allows for filtering the keys by pattern

    ``count`` provides a hint to Redis about the number of keys to
    return per batch
    """
    args = [cursor]
    if match is not None:
        args += [Token('MATCH'), match]
    if count is not None:
        args += [Token('COUNT'), count]
    return self.execute_command('SCAN', *args)
def scan_iter(self, match=None, count=None):
    """
    Make an iterator using the SCAN command so that the client doesn't
    need to remember the cursor position.

    ``match`` allows for filtering the keys by pattern

    ``count`` provides a hint to Redis about the number of keys to
    return per batch
    """
    # Start with the string '0' so the first comparison against the
    # integer 0 fails and the loop body runs at least once; the server
    # signals completion by returning cursor 0.
    cursor = '0'
    while cursor != 0:
        cursor, keys = self.scan(cursor=cursor, match=match, count=count)
        for key in keys:
            yield key
def sscan(self, name, cursor=0, match=None, count=None):
    """
    Incrementally return lists of elements in a set. Also return a cursor
    indicating the scan position.

    ``match`` allows for filtering the keys by pattern

    ``count`` provides a hint to Redis about the number of elements to
    return per batch
    """
    pieces = [name, cursor]
    if match is not None:
        pieces.extend([Token('MATCH'), match])
    if count is not None:
        pieces.extend([Token('COUNT'), count])
    return self.execute_command('SSCAN', *pieces)

def sscan_iter(self, name, match=None, count=None):
    """
    Make an iterator using the SSCAN command so that the client doesn't
    need to remember the cursor position.

    ``match`` allows for filtering the keys by pattern

    ``count`` provides a hint to Redis about the number of elements to
    return per batch
    """
    # '0' != 0 guarantees one pass; the server returns cursor 0 when done.
    cursor = '0'
    while cursor != 0:
        cursor, data = self.sscan(name, cursor=cursor,
                                  match=match, count=count)
        for item in data:
            yield item
def hscan(self, name, cursor=0, match=None, count=None):
    """
    Incrementally return key/value slices in a hash. Also return a cursor
    indicating the scan position.

    ``match`` allows for filtering the keys by pattern

    ``count`` provides a hint to Redis about the number of entries to
    return per batch
    """
    pieces = [name, cursor]
    if match is not None:
        pieces.extend([Token('MATCH'), match])
    if count is not None:
        pieces.extend([Token('COUNT'), count])
    return self.execute_command('HSCAN', *pieces)

def hscan_iter(self, name, match=None, count=None):
    """
    Make an iterator using the HSCAN command so that the client doesn't
    need to remember the cursor position.

    ``match`` allows for filtering the keys by pattern

    ``count`` provides a hint to Redis about the number of entries to
    return per batch
    """
    cursor = '0'
    while cursor != 0:
        cursor, data = self.hscan(name, cursor=cursor,
                                  match=match, count=count)
        # HSCAN replies are parsed into a dict; yield (field, value) pairs.
        for item in data.items():
            yield item
def zscan(self, name, cursor=0, match=None, count=None,
          score_cast_func=float):
    """
    Incrementally return lists of elements in a sorted set. Also return a
    cursor indicating the scan position.

    ``match`` allows for filtering the keys by pattern

    ``count`` provides a hint to Redis about the number of elements to
    return per batch

    ``score_cast_func`` a callable used to cast the score return value
    """
    pieces = [name, cursor]
    if match is not None:
        pieces.extend([Token('MATCH'), match])
    if count is not None:
        pieces.extend([Token('COUNT'), count])
    # score_cast_func is consumed by the response callback, not sent
    # to the server.
    options = {'score_cast_func': score_cast_func}
    return self.execute_command('ZSCAN', *pieces, **options)

def zscan_iter(self, name, match=None, count=None,
               score_cast_func=float):
    """
    Make an iterator using the ZSCAN command so that the client doesn't
    need to remember the cursor position.

    ``match`` allows for filtering the keys by pattern

    ``count`` provides a hint to Redis about the number of elements to
    return per batch

    ``score_cast_func`` a callable used to cast the score return value
    """
    cursor = '0'
    while cursor != 0:
        cursor, data = self.zscan(name, cursor=cursor, match=match,
                                  count=count,
                                  score_cast_func=score_cast_func)
        for item in data:
            yield item
# SET COMMANDS
def sadd(self, name, *values):
    "Add ``value(s)`` to set ``name``"
    return self.execute_command('SADD', name, *values)

def scard(self, name):
    "Return the number of elements in set ``name``"
    return self.execute_command('SCARD', name)

def sdiff(self, keys, *args):
    "Return the difference of sets specified by ``keys``"
    args = list_or_args(keys, args)
    return self.execute_command('SDIFF', *args)

def sdiffstore(self, dest, keys, *args):
    """
    Store the difference of sets specified by ``keys`` into a new
    set named ``dest``. Returns the number of keys in the new set.
    """
    args = list_or_args(keys, args)
    return self.execute_command('SDIFFSTORE', dest, *args)

def sinter(self, keys, *args):
    "Return the intersection of sets specified by ``keys``"
    args = list_or_args(keys, args)
    return self.execute_command('SINTER', *args)

def sinterstore(self, dest, keys, *args):
    """
    Store the intersection of sets specified by ``keys`` into a new
    set named ``dest``. Returns the number of keys in the new set.
    """
    args = list_or_args(keys, args)
    return self.execute_command('SINTERSTORE', dest, *args)

def sismember(self, name, value):
    "Return a boolean indicating if ``value`` is a member of set ``name``"
    return self.execute_command('SISMEMBER', name, value)

def smembers(self, name):
    "Return all members of the set ``name``"
    return self.execute_command('SMEMBERS', name)

def smove(self, src, dst, value):
    "Move ``value`` from set ``src`` to set ``dst`` atomically"
    return self.execute_command('SMOVE', src, dst, value)

def spop(self, name):
    "Remove and return a random member of set ``name``"
    return self.execute_command('SPOP', name)
def srandmember(self, name, number=None):
    """
    If ``number`` is None, returns a random member of set ``name``.

    If ``number`` is supplied, returns a list of ``number`` random
    members of set ``name``. Note this is only available when running
    Redis 2.6+.
    """
    # BUG FIX: the previous ``number and [number] or []`` treated a
    # count of 0 as "no count supplied" (0 is falsy), so
    # srandmember(name, 0) returned a single random member instead of
    # the empty list Redis defines for a count of 0. Test explicitly
    # against None instead.
    args = [number] if number is not None else []
    return self.execute_command('SRANDMEMBER', name, *args)
def srem(self, name, *values):
    "Remove ``values`` from set ``name``"
    return self.execute_command('SREM', name, *values)

def sunion(self, keys, *args):
    "Return the union of sets specified by ``keys``"
    args = list_or_args(keys, args)
    return self.execute_command('SUNION', *args)

def sunionstore(self, dest, keys, *args):
    """
    Store the union of sets specified by ``keys`` into a new
    set named ``dest``. Returns the number of keys in the new set.
    """
    args = list_or_args(keys, args)
    return self.execute_command('SUNIONSTORE', dest, *args)
# SORTED SET COMMANDS
def zadd(self, name, *args, **kwargs):
    """
    Set any number of score, element-name pairs to the key ``name``. Pairs
    can be specified in two ways:

    As *args, in the form of: score1, name1, score2, name2, ...
    or as **kwargs, in the form of: name1=score1, name2=score2, ...

    The following example would add four values to the 'my-key' key:
    redis.zadd('my-key', 1.1, 'name1', 2.2, 'name2', name3=3.3, name4=4.4)
    """
    pieces = []
    if args:
        if len(args) % 2 != 0:
            raise RedisError("ZADD requires an equal number of "
                             "values and scores")
        pieces.extend(args)
    # kwargs arrive as (member, score); ZADD wants score before member,
    # hence pair[1] (score) is appended before pair[0] (member).
    for pair in iteritems(kwargs):
        pieces.append(pair[1])
        pieces.append(pair[0])
    return self.execute_command('ZADD', name, *pieces)
def zcard(self, name):
    "Return the number of elements in the sorted set ``name``"
    return self.execute_command('ZCARD', name)

def zcount(self, name, min, max):
    """
    Returns the number of elements in the sorted set at key ``name`` with
    a score between ``min`` and ``max``.
    """
    return self.execute_command('ZCOUNT', name, min, max)

def zincrby(self, name, value, amount=1):
    "Increment the score of ``value`` in sorted set ``name`` by ``amount``"
    # NOTE: ZINCRBY takes the increment before the member.
    return self.execute_command('ZINCRBY', name, amount, value)

def zinterstore(self, dest, keys, aggregate=None):
    """
    Intersect multiple sorted sets specified by ``keys`` into
    a new sorted set, ``dest``. Scores in the destination will be
    aggregated based on the ``aggregate``, or SUM if none is provided.
    """
    return self._zaggregate('ZINTERSTORE', dest, keys, aggregate)

def zlexcount(self, name, min, max):
    """
    Return the number of items in the sorted set ``name`` between the
    lexicographical range ``min`` and ``max``.
    """
    return self.execute_command('ZLEXCOUNT', name, min, max)
def zrange(self, name, start, end, desc=False, withscores=False,
           score_cast_func=float):
    """
    Return a range of values from sorted set ``name`` between
    ``start`` and ``end`` sorted in ascending order.

    ``start`` and ``end`` can be negative, indicating the end of the range.

    ``desc`` a boolean indicating whether to sort the results descendingly

    ``withscores`` indicates to return the scores along with the values.
    The return type is a list of (value, score) pairs

    ``score_cast_func`` a callable used to cast the score return value
    """
    # Descending order is simply delegated to ZREVRANGE.
    if desc:
        return self.zrevrange(name, start, end, withscores,
                              score_cast_func)
    cmd = ['ZRANGE', name, start, end]
    if withscores:
        cmd.append(Token('WITHSCORES'))
    # These options drive response parsing only; they are not sent
    # to the server.
    opts = {
        'withscores': withscores,
        'score_cast_func': score_cast_func,
    }
    return self.execute_command(*cmd, **opts)
def zrangebylex(self, name, min, max, start=None, num=None):
    """
    Return the lexicographical range of values from sorted set ``name``
    between ``min`` and ``max``.

    If ``start`` and ``num`` are specified, then return a slice of the
    range.
    """
    # start/num form ZRANGEBYLEX's LIMIT clause; they must be supplied
    # together or not at all.
    if (start is None) != (num is None):
        raise RedisError("``start`` and ``num`` must both be specified")
    cmd = ['ZRANGEBYLEX', name, min, max]
    if start is not None:
        cmd.extend([Token('LIMIT'), start, num])
    return self.execute_command(*cmd)
def zrevrangebylex(self, name, max, min, start=None, num=None):
    """
    Return the reversed lexicographical range of values from sorted set
    ``name`` between ``max`` and ``min``.

    If ``start`` and ``num`` are specified, then return a slice of the
    range.
    """
    # start/num form the LIMIT clause; both or neither must be given.
    if (start is not None and num is None) or \
            (num is not None and start is None):
        raise RedisError("``start`` and ``num`` must both be specified")
    # NOTE: for the reversed command the protocol order is max then min.
    pieces = ['ZREVRANGEBYLEX', name, max, min]
    if start is not None and num is not None:
        pieces.extend([Token('LIMIT'), start, num])
    return self.execute_command(*pieces)
def zrangebyscore(self, name, min, max, start=None, num=None,
                  withscores=False, score_cast_func=float):
    """
    Return a range of values from the sorted set ``name`` with scores
    between ``min`` and ``max``.

    If ``start`` and ``num`` are specified, then return a slice
    of the range.

    ``withscores`` indicates to return the scores along with the values.
    The return type is a list of (value, score) pairs

    ``score_cast_func`` a callable used to cast the score return value
    """
    if (start is not None and num is None) or \
            (num is not None and start is None):
        raise RedisError("``start`` and ``num`` must both be specified")
    pieces = ['ZRANGEBYSCORE', name, min, max]
    if start is not None and num is not None:
        pieces.extend([Token('LIMIT'), start, num])
    if withscores:
        pieces.append(Token('WITHSCORES'))
    # Parsing options only; not part of the wire command.
    options = {
        'withscores': withscores,
        'score_cast_func': score_cast_func
    }
    return self.execute_command(*pieces, **options)
def zrank(self, name, value):
    """
    Returns a 0-based value indicating the rank of ``value`` in sorted set
    ``name``
    """
    return self.execute_command('ZRANK', name, value)

def zrem(self, name, *values):
    "Remove member ``values`` from sorted set ``name``"
    return self.execute_command('ZREM', name, *values)

def zremrangebylex(self, name, min, max):
    """
    Remove all elements in the sorted set ``name`` between the
    lexicographical range specified by ``min`` and ``max``.

    Returns the number of elements removed.
    """
    return self.execute_command('ZREMRANGEBYLEX', name, min, max)

def zremrangebyrank(self, name, min, max):
    """
    Remove all elements in the sorted set ``name`` with ranks between
    ``min`` and ``max``. Values are 0-based, ordered from smallest score
    to largest. Values can be negative indicating the highest scores.

    Returns the number of elements removed
    """
    return self.execute_command('ZREMRANGEBYRANK', name, min, max)

def zremrangebyscore(self, name, min, max):
    """
    Remove all elements in the sorted set ``name`` with scores
    between ``min`` and ``max``. Returns the number of elements removed.
    """
    return self.execute_command('ZREMRANGEBYSCORE', name, min, max)
def zrevrange(self, name, start, end, withscores=False,
              score_cast_func=float):
    """
    Return a range of values from sorted set ``name`` between
    ``start`` and ``end`` sorted in descending order.

    ``start`` and ``end`` can be negative, indicating the end of the range.

    ``withscores`` indicates to return the scores along with the values
    The return type is a list of (value, score) pairs

    ``score_cast_func`` a callable used to cast the score return value
    """
    cmd = ['ZREVRANGE', name, start, end]
    if withscores:
        cmd.append(Token('WITHSCORES'))
    # Parsing options consumed by the response callback.
    opts = {
        'withscores': withscores,
        'score_cast_func': score_cast_func,
    }
    return self.execute_command(*cmd, **opts)
def zrevrangebyscore(self, name, max, min, start=None, num=None,
                     withscores=False, score_cast_func=float):
    """
    Return a range of values from the sorted set ``name`` with scores
    between ``min`` and ``max`` in descending order.

    If ``start`` and ``num`` are specified, then return a slice
    of the range.

    ``withscores`` indicates to return the scores along with the values.
    The return type is a list of (value, score) pairs

    ``score_cast_func`` a callable used to cast the score return value
    """
    if (start is not None and num is None) or \
            (num is not None and start is None):
        raise RedisError("``start`` and ``num`` must both be specified")
    # NOTE: the reversed command takes max before min on the wire.
    pieces = ['ZREVRANGEBYSCORE', name, max, min]
    if start is not None and num is not None:
        pieces.extend([Token('LIMIT'), start, num])
    if withscores:
        pieces.append(Token('WITHSCORES'))
    options = {
        'withscores': withscores,
        'score_cast_func': score_cast_func
    }
    return self.execute_command(*pieces, **options)
def zrevrank(self, name, value):
    """
    Returns a 0-based value indicating the descending rank of
    ``value`` in sorted set ``name``
    """
    return self.execute_command('ZREVRANK', name, value)

def zscore(self, name, value):
    "Return the score of element ``value`` in sorted set ``name``"
    return self.execute_command('ZSCORE', name, value)

def zunionstore(self, dest, keys, aggregate=None):
    """
    Union multiple sorted sets specified by ``keys`` into
    a new sorted set, ``dest``. Scores in the destination will be
    aggregated based on the ``aggregate``, or SUM if none is provided.
    """
    return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate)
def _zaggregate(self, command, dest, keys, aggregate=None):
    """
    Shared implementation of ZUNIONSTORE / ZINTERSTORE.

    ``keys`` may be a sequence of key names, or a dict mapping key
    names to weights (which becomes the WEIGHTS clause). ``aggregate``
    is passed through as the AGGREGATE clause if given.
    """
    # Protocol order: command, destination, numkeys, then the keys.
    pieces = [command, dest, len(keys)]
    if isinstance(keys, dict):
        # iterkeys/itervalues iterate in matching order, so each weight
        # stays aligned with its key.
        keys, weights = iterkeys(keys), itervalues(keys)
    else:
        weights = None
    pieces.extend(keys)
    if weights:
        pieces.append(Token('WEIGHTS'))
        pieces.extend(weights)
    if aggregate:
        pieces.append(Token('AGGREGATE'))
        pieces.append(aggregate)
    return self.execute_command(*pieces)
# HYPERLOGLOG COMMANDS
def pfadd(self, name, *values):
"Adds the specified elements to the specified HyperLogLog."
return self.execute_command('PFADD', name, *values)
def pfcount(self, *sources):
"""
Return the approximated cardinality of
the set observed by the HyperLogLog at key(s).
"""
return self.execute_command('PFCOUNT', *sources)
def pfmerge(self, dest, *sources):
"Merge N different HyperLogLogs into a single one."
return self.execute_command('PFMERGE', dest, *sources)
# HASH COMMANDS
def hdel(self, name, *keys):
"Delete ``keys`` from hash ``name``"
return self.execute_command('HDEL', name, *keys)
def hexists(self, name, key):
"Returns a boolean indicating if ``key`` exists within hash ``name``"
return self.execute_command('HEXISTS', name, key)
def hget(self, name, key):
"Return the value of ``key`` within the hash ``name``"
return self.execute_command('HGET', name, key)
def hgetall(self, name):
"Return a Python dict of the hash's name/value pairs"
return self.execute_command('HGETALL', name)
def hincrby(self, name, key, amount=1):
"Increment the value of ``key`` in hash ``name`` by ``amount``"
return self.execute_command('HINCRBY', name, key, amount)
def hincrbyfloat(self, name, key, amount=1.0):
"""
Increment the value of ``key`` in hash ``name`` by floating ``amount``
"""
return self.execute_command('HINCRBYFLOAT', name, key, amount)
def hkeys(self, name):
"Return the list of keys within hash ``name``"
return self.execute_command('HKEYS', name)
def hlen(self, name):
"Return the number of elements in hash ``name``"
return self.execute_command('HLEN', name)
def hset(self, name, key, value):
"""
Set ``key`` to ``value`` within hash ``name``
Returns 1 if HSET created a new field, otherwise 0
"""
return self.execute_command('HSET', name, key, value)
def hsetnx(self, name, key, value):
"""
Set ``key`` to ``value`` within hash ``name`` if ``key`` does not
exist. Returns 1 if HSETNX created a field, otherwise 0.
"""
return self.execute_command('HSETNX', name, key, value)
def hmset(self, name, mapping):
    """
    Set key to value within hash ``name`` for each corresponding
    key and value from the ``mapping`` dict.
    """
    if not mapping:
        raise DataError("'hmset' with 'mapping' of length 0")
    # Flatten {k1: v1, k2: v2} into [k1, v1, k2, v2] for the wire format.
    items = [piece for pair in iteritems(mapping) for piece in pair]
    return self.execute_command('HMSET', name, *items)
def hmget(self, name, keys, *args):
    "Returns a list of values ordered identically to ``keys``"
    args = list_or_args(keys, args)
    return self.execute_command('HMGET', name, *args)

def hvals(self, name):
    "Return the list of values within hash ``name``"
    return self.execute_command('HVALS', name)

def publish(self, channel, message):
    """
    Publish ``message`` on ``channel``.

    Returns the number of subscribers the message was delivered to.
    """
    return self.execute_command('PUBLISH', channel, message)
def eval(self, script, numkeys, *keys_and_args):
    """
    Execute the Lua ``script``, specifying the ``numkeys`` the script
    will touch and the key names and argument values in ``keys_and_args``.
    Returns the result of the script.

    In practice, use the object returned by ``register_script``. This
    function exists purely for Redis API completion.
    """
    return self.execute_command('EVAL', script, numkeys, *keys_and_args)

def evalsha(self, sha, numkeys, *keys_and_args):
    """
    Use the ``sha`` to execute a Lua script already registered via EVAL
    or SCRIPT LOAD. Specify the ``numkeys`` the script will touch and the
    key names and argument values in ``keys_and_args``. Returns the result
    of the script.

    In practice, use the object returned by ``register_script``. This
    function exists purely for Redis API completion.
    """
    return self.execute_command('EVALSHA', sha, numkeys, *keys_and_args)

def script_exists(self, *args):
    """
    Check if a script exists in the script cache by specifying the SHAs of
    each script as ``args``. Returns a list of boolean values indicating
    whether each script already exists in the cache.
    """
    return self.execute_command('SCRIPT EXISTS', *args)

def script_flush(self):
    "Flush all scripts from the script cache"
    return self.execute_command('SCRIPT FLUSH')

def script_kill(self):
    "Kill the currently executing Lua script"
    return self.execute_command('SCRIPT KILL')

def script_load(self, script):
    "Load a Lua ``script`` into the script cache. Returns the SHA."
    return self.execute_command('SCRIPT LOAD', script)

def register_script(self, script):
    """
    Register a Lua ``script`` specifying the ``keys`` it will touch.
    Returns a Script object that is callable and hides the complexity of
    dealing with scripts, keys, and shas. This is the preferred way to
    work with Lua scripts.
    """
    return Script(self, script)
class Redis(StrictRedis):
    """
    Provides backwards compatibility with older versions of redis-py that
    changed arguments to some commands to be more Pythonic, sane, or by
    accident.
    """

    # Overridden callbacks.
    # NOTE(review): these legacy callbacks map any non-positive TTL reply
    # (including a key with 0 seconds remaining, or the -1/-2 sentinels)
    # to None, so callers cannot distinguish "no expiry" from "missing
    # key". This is intentional backwards-compatible behavior.
    RESPONSE_CALLBACKS = dict_merge(
        StrictRedis.RESPONSE_CALLBACKS,
        {
            'TTL': lambda r: r >= 0 and r or None,
            'PTTL': lambda r: r >= 0 and r or None,
        }
    )

    def pipeline(self, transaction=True, shard_hint=None):
        """
        Return a new pipeline object that can queue multiple commands for
        later execution. ``transaction`` indicates whether all commands
        should be executed atomically. Apart from making a group of operations
        atomic, pipelines are useful for reducing the back-and-forth overhead
        between the client and server.
        """
        return Pipeline(
            self.connection_pool,
            self.response_callbacks,
            transaction,
            shard_hint)

    def setex(self, name, value, time):
        """
        Set the value of key ``name`` to ``value`` that expires in ``time``
        seconds. ``time`` can be represented by an integer or a Python
        timedelta object.

        NOTE: argument order (value before time) is the legacy redis-py
        order; StrictRedis uses the protocol order (time before value).
        """
        if isinstance(time, datetime.timedelta):
            time = time.seconds + time.days * 24 * 3600
        return self.execute_command('SETEX', name, time, value)

    def lrem(self, name, value, num=0):
        """
        Remove the first ``num`` occurrences of elements equal to ``value``
        from the list stored at ``name``.

        The ``num`` argument influences the operation in the following ways:
            num > 0: Remove elements equal to value moving from head to tail.
            num < 0: Remove elements equal to value moving from tail to head.
            num = 0: Remove all elements equal to value.

        NOTE: argument order (value before num) is the legacy redis-py
        order; the wire command still sends num before value.
        """
        return self.execute_command('LREM', name, num, value)

    def zadd(self, name, *args, **kwargs):
        """
        NOTE: The order of arguments differs from that of the official ZADD
        command. For backwards compatibility, this method accepts arguments
        in the form of name1, score1, name2, score2, while the official Redis
        documents expects score1, name1, score2, name2.

        If you're looking to use the standard syntax, consider using the
        StrictRedis class. See the API Reference section of the docs for more
        information.

        Set any number of element-name, score pairs to the key ``name``. Pairs
        can be specified in two ways:

        As *args, in the form of: name1, score1, name2, score2, ...
        or as **kwargs, in the form of: name1=score1, name2=score2, ...

        The following example would add four values to the 'my-key' key:
        redis.zadd('my-key', 'name1', 1.1, 'name2', 2.2, name3=3.3, name4=4.4)
        """
        pieces = []
        if args:
            if len(args) % 2 != 0:
                raise RedisError("ZADD requires an equal number of "
                                 "values and scores")
            # reversed() flips (name, score, ...) into the protocol's
            # (score, name, ...) order.
            pieces.extend(reversed(args))
        for pair in iteritems(kwargs):
            pieces.append(pair[1])
            pieces.append(pair[0])
        return self.execute_command('ZADD', name, *pieces)
class PubSub(object):
    """
    PubSub provides publish, subscribe and listen support to Redis channels.

    After subscribing to one or more channels, the listen() method will block
    until a message arrives on one of the subscribed channels. That message
    will be returned and it's safe to start listening again.
    """
    # Message types produced by actual published data vs. by
    # (p)unsubscribe confirmations.
    PUBLISH_MESSAGE_TYPES = ('message', 'pmessage')
    UNSUBSCRIBE_MESSAGE_TYPES = ('unsubscribe', 'punsubscribe')
def __init__(self, connection_pool, shard_hint=None,
             ignore_subscribe_messages=False):
    """
    Create a PubSub object bound to ``connection_pool``.

    ``ignore_subscribe_messages`` suppresses subscribe/unsubscribe
    confirmation messages from being returned to the caller.
    """
    self.connection_pool = connection_pool
    self.shard_hint = shard_hint
    self.ignore_subscribe_messages = ignore_subscribe_messages
    # The dedicated pubsub connection; acquired lazily in
    # execute_command().
    self.connection = None
    # we need to know the encoding options for this connection in order
    # to lookup channel and pattern names for callback handlers.
    conn = connection_pool.get_connection('pubsub', shard_hint)
    try:
        self.encoding = conn.encoding
        self.encoding_errors = conn.encoding_errors
        self.decode_responses = conn.decode_responses
    finally:
        # Only borrowed to read its encoding settings; return it.
        connection_pool.release(conn)
    self.reset()
def __del__(self):
    """Best-effort cleanup when the object is garbage collected."""
    try:
        # if this object went out of scope prior to shutting down
        # subscriptions, close the connection manually before
        # returning it to the connection pool
        self.reset()
    except Exception:
        # Never raise from __del__; interpreter shutdown may have
        # already torn down state reset() depends on.
        pass
def reset(self):
    """
    Drop the pubsub connection (if any) and forget all channel and
    pattern subscriptions.
    """
    if self.connection:
        # Disconnect first so the server-side subscription state dies
        # with the socket, then return the connection to the pool.
        self.connection.disconnect()
        self.connection.clear_connect_callbacks()
        self.connection_pool.release(self.connection)
        self.connection = None
    # channel/pattern name -> optional handler callable
    self.channels = {}
    self.patterns = {}

def close(self):
    """Alias for reset(): tear down the pubsub connection and state."""
    self.reset()
def on_connect(self, connection):
    "Re-subscribe to any channels and patterns previously subscribed to"
    # NOTE: for python3, we can't pass bytestrings as keyword arguments
    # so we need to decode channel/pattern names back to unicode strings
    # before passing them to [p]subscribe.
    if self.channels:
        channels = {}
        for k, v in iteritems(self.channels):
            if not self.decode_responses:
                k = k.decode(self.encoding, self.encoding_errors)
            channels[k] = v
        self.subscribe(**channels)
    if self.patterns:
        patterns = {}
        for k, v in iteritems(self.patterns):
            if not self.decode_responses:
                k = k.decode(self.encoding, self.encoding_errors)
            patterns[k] = v
        self.psubscribe(**patterns)
def encode(self, value):
    """
    Encode the value so that it's identical to what we'll
    read off the connection
    """
    # Convert toward whichever representation this connection's
    # responses use: text when decode_responses is set, bytes otherwise.
    if self.decode_responses and isinstance(value, bytes):
        return value.decode(self.encoding, self.encoding_errors)
    if not self.decode_responses and isinstance(value, unicode):
        return value.encode(self.encoding, self.encoding_errors)
    return value
@property
def subscribed(self):
    "Indicates if there are subscriptions to any channels or patterns"
    return bool(self.channels or self.patterns)
def execute_command(self, *args, **kwargs):
    "Execute a publish/subscribe command"
    # NOTE: don't parse the response in this function -- it could pull a
    # legitimate message off the stack if the connection is already
    # subscribed to one or more channels
    if self.connection is None:
        # Lazily acquire a dedicated connection for this pubsub object.
        self.connection = self.connection_pool.get_connection(
            'pubsub',
            self.shard_hint
        )
        # register a callback that re-subscribes to any channels we
        # were listening to when we were disconnected
        self.connection.register_connect_callback(self.on_connect)
    connection = self.connection
    self._execute(connection, connection.send_command, *args)
def _execute(self, connection, command, *args):
    """
    Run ``command(*args)`` on ``connection``, reconnecting and retrying
    once on connection loss (or timeout, when retry_on_timeout is set).
    """
    try:
        return command(*args)
    except (ConnectionError, TimeoutError) as e:
        connection.disconnect()
        if not connection.retry_on_timeout and isinstance(e, TimeoutError):
            raise
        # Connect manually here. If the Redis server is down, this will
        # fail and raise a ConnectionError as desired.
        connection.connect()
        # the ``on_connect`` callback should have been called by the
        # connection to resubscribe us to any channels and patterns we were
        # previously listening to
        return command(*args)
def parse_response(self, block=True, timeout=0):
    "Parse the response from a publish/subscribe command"
    connection = self.connection
    # In non-blocking mode, bail out early if nothing is readable.
    if not block and not connection.can_read(timeout=timeout):
        return None
    return self._execute(connection, connection.read_response)
def psubscribe(self, *args, **kwargs):
"""
Subscribe to channel patterns. Patterns supplied as keyword arguments
expect a pattern name as the key and a callable as the value. A
pattern's callable will be invoked automatically when a message is
received on that pattern rather than producing a message via
``listen()``.
"""
if args:
args = list_or_args(args[0], args[1:])
new_patterns = {}
new_patterns.update(dict.fromkeys(imap(self.encode, args)))
for pattern, handler in iteritems(kwargs):
new_patterns[self.encode(pattern)] = handler
ret_val = self.execute_command('PSUBSCRIBE', *iterkeys(new_patterns))
# update the patterns dict AFTER we send the command. we don't want to
# subscribe twice to these patterns, once for the command and again
# for the reconnection.
self.patterns.update(new_patterns)
return ret_val
def punsubscribe(self, *args):
"""
Unsubscribe from the supplied patterns. If empy, unsubscribe from
all patterns.
"""
if args:
args = list_or_args(args[0], args[1:])
return self.execute_command('PUNSUBSCRIBE', *args)
def subscribe(self, *args, **kwargs):
"""
Subscribe to channels. Channels supplied as keyword arguments expect
a channel name as the key and a callable as the value. A channel's
callable will be invoked automatically when a message is received on
that channel rather than producing a message via ``listen()`` or
``get_message()``.
"""
if args:
args = list_or_args(args[0], args[1:])
new_channels = {}
new_channels.update(dict.fromkeys(imap(self.encode, args)))
for channel, handler in iteritems(kwargs):
new_channels[self.encode(channel)] = handler
ret_val = self.execute_command('SUBSCRIBE', *iterkeys(new_channels))
# update the channels dict AFTER we send the command. we don't want to
# subscribe twice to these channels, once for the command and again
# for the reconnection.
self.channels.update(new_channels)
return ret_val
def unsubscribe(self, *args):
"""
Unsubscribe from the supplied channels. If empty, unsubscribe from
all channels
"""
if args:
args = list_or_args(args[0], args[1:])
return self.execute_command('UNSUBSCRIBE', *args)
    def listen(self):
        "Listen for messages on channels this client has been subscribed to"
        # Generator: blocks on the socket and yields parsed messages until
        # every channel and pattern has been unsubscribed.
        while self.subscribed:
            response = self.handle_message(self.parse_response(block=True))
            # handled or ignored messages come back as None; skip them
            if response is not None:
                yield response
def get_message(self, ignore_subscribe_messages=False, timeout=0):
"""
Get the next message if one is available, otherwise None.
If timeout is specified, the system will wait for `timeout` seconds
before returning. Timeout should be specified as a floating point
number.
"""
response = self.parse_response(block=False, timeout=timeout)
if response:
return self.handle_message(response, ignore_subscribe_messages)
return None
    def handle_message(self, response, ignore_subscribe_messages=False):
        """
        Parses a pub/sub message. If the channel or pattern was subscribed to
        with a message handler, the handler is invoked instead of a parsed
        message being returned.
        """
        message_type = nativestr(response[0])
        if message_type == 'pmessage':
            # pattern matches carry one extra element: the pattern itself
            message = {
                'type': message_type,
                'pattern': response[1],
                'channel': response[2],
                'data': response[3]
            }
        else:
            message = {
                'type': message_type,
                'pattern': None,
                'channel': response[1],
                'data': response[2]
            }
        # if this is an unsubscribe message, remove it from memory
        if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES:
            subscribed_dict = None
            if message_type == 'punsubscribe':
                subscribed_dict = self.patterns
            else:
                subscribed_dict = self.channels
            try:
                del subscribed_dict[message['channel']]
            except KeyError:
                # already gone; nothing to clean up
                pass
        if message_type in self.PUBLISH_MESSAGE_TYPES:
            # if there's a message handler, invoke it
            handler = None
            if message_type == 'pmessage':
                handler = self.patterns.get(message['pattern'], None)
            else:
                handler = self.channels.get(message['channel'], None)
            if handler:
                # handled messages are consumed here, not returned
                handler(message)
                return None
        else:
            # this is a subscribe/unsubscribe message. ignore if we don't
            # want them
            if ignore_subscribe_messages or self.ignore_subscribe_messages:
                return None
        return message
def run_in_thread(self, sleep_time=0):
for channel, handler in iteritems(self.channels):
if handler is None:
raise PubSubError("Channel: '%s' has no handler registered")
for pattern, handler in iteritems(self.patterns):
if handler is None:
raise PubSubError("Pattern: '%s' has no handler registered")
thread = PubSubWorkerThread(self, sleep_time)
thread.start()
return thread
class PubSubWorkerThread(threading.Thread):
    # Background thread that pumps ``pubsub.get_message`` until every
    # subscription is gone, then closes the pub/sub connection.
    def __init__(self, pubsub, sleep_time):
        super(PubSubWorkerThread, self).__init__()
        self.pubsub = pubsub
        # per-iteration timeout handed to get_message()
        self.sleep_time = sleep_time
        # guards against run() being entered twice on the same thread object
        self._running = False

    def run(self):
        # Main loop: drain messages (handlers fire inside get_message via
        # handle_message) until all channels/patterns are unsubscribed.
        if self._running:
            return
        self._running = True
        pubsub = self.pubsub
        sleep_time = self.sleep_time
        while pubsub.subscribed:
            pubsub.get_message(ignore_subscribe_messages=True,
                               timeout=sleep_time)
        pubsub.close()
        self._running = False

    def stop(self):
        # stopping simply unsubscribes from all channels and patterns.
        # the unsubscribe responses that are generated will short circuit
        # the loop in run(), calling pubsub.close() to clean up the connection
        self.pubsub.unsubscribe()
        self.pubsub.punsubscribe()
class BasePipeline(object):
    """
    Pipelines provide a way to transmit multiple commands to the Redis server
    in one transmission. This is convenient for batch processing, such as
    saving all the values in a list to Redis.
    All commands executed within a pipeline are wrapped with MULTI and EXEC
    calls. This guarantees all commands executed in the pipeline will be
    executed atomically.
    Any command raising an exception does *not* halt the execution of
    subsequent commands in the pipeline. Instead, the exception is caught
    and its instance is placed into the response list returned by execute().
    Code iterating over the response list should be able to deal with an
    instance of an exception as a potential value. In general, these will be
    ResponseError exceptions, such as those raised when issuing a command
    on a key of a different datatype.
    """
    # commands that implicitly clear any WATCH state on the server
    UNWATCH_COMMANDS = set(('DISCARD', 'EXEC', 'UNWATCH'))

    def __init__(self, connection_pool, response_callbacks, transaction,
                 shard_hint):
        self.connection_pool = connection_pool
        self.connection = None
        self.response_callbacks = response_callbacks
        # transaction=True wraps execute() in MULTI/EXEC
        self.transaction = transaction
        self.shard_hint = shard_hint
        self.watching = False
        self.reset()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.reset()

    def __del__(self):
        try:
            self.reset()
        except Exception:
            pass

    def __len__(self):
        # number of commands currently buffered
        return len(self.command_stack)

    def reset(self):
        # Clear buffered commands and return this pipeline to a pristine
        # state, releasing the connection back to the pool.
        self.command_stack = []
        self.scripts = set()
        # make sure to reset the connection state in the event that we were
        # watching something
        if self.watching and self.connection:
            try:
                # call this manually since our unwatch or
                # immediate_execute_command methods can call reset()
                self.connection.send_command('UNWATCH')
                self.connection.read_response()
            except ConnectionError:
                # disconnect will also remove any previous WATCHes
                self.connection.disconnect()
        # clean up the other instance attributes
        self.watching = False
        self.explicit_transaction = False
        # we can safely return the connection to the pool here since we're
        # sure we're no longer WATCHing anything
        if self.connection:
            self.connection_pool.release(self.connection)
            self.connection = None

    def multi(self):
        """
        Start a transactional block of the pipeline after WATCH commands
        are issued. End the transactional block with `execute`.
        """
        if self.explicit_transaction:
            raise RedisError('Cannot issue nested calls to MULTI')
        if self.command_stack:
            raise RedisError('Commands without an initial WATCH have already '
                             'been issued')
        self.explicit_transaction = True

    def execute_command(self, *args, **kwargs):
        # WATCH (and anything issued while WATCHing, before MULTI) must run
        # immediately; everything else is buffered until execute().
        if (self.watching or args[0] == 'WATCH') and \
                not self.explicit_transaction:
            return self.immediate_execute_command(*args, **kwargs)
        return self.pipeline_execute_command(*args, **kwargs)

    def immediate_execute_command(self, *args, **options):
        """
        Execute a command immediately, but don't auto-retry on a
        ConnectionError if we're already WATCHing a variable. Used when
        issuing WATCH or subsequent commands retrieving their values but before
        MULTI is called.
        """
        command_name = args[0]
        conn = self.connection
        # if this is the first call, we need a connection
        if not conn:
            conn = self.connection_pool.get_connection(command_name,
                                                       self.shard_hint)
            self.connection = conn
        try:
            conn.send_command(*args)
            return self.parse_response(conn, command_name, **options)
        except (ConnectionError, TimeoutError) as e:
            conn.disconnect()
            # only retry timeouts when the connection opts in
            if not conn.retry_on_timeout and isinstance(e, TimeoutError):
                raise
            # if we're not already watching, we can safely retry the command
            try:
                if not self.watching:
                    conn.send_command(*args)
                    return self.parse_response(conn, command_name, **options)
            except ConnectionError:
                # the retry failed so cleanup.
                conn.disconnect()
                self.reset()
                raise

    def pipeline_execute_command(self, *args, **options):
        """
        Stage a command to be executed when execute() is next called
        Returns the current Pipeline object back so commands can be
        chained together, such as:
        pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')
        At some other point, you can then run: pipe.execute(),
        which will execute all commands queued in the pipe.
        """
        self.command_stack.append((args, options))
        return self

    def _execute_transaction(self, connection, commands, raise_on_error):
        # Send MULTI, every buffered command, then EXEC as one packed write,
        # then parse all the replies.
        cmds = chain([(('MULTI', ), {})], commands, [(('EXEC', ), {})])
        all_cmds = connection.pack_commands([args for args, _ in cmds])
        connection.send_packed_command(all_cmds)
        errors = []
        # parse off the response for MULTI
        # NOTE: we need to handle ResponseErrors here and continue
        # so that we read all the additional command messages from
        # the socket
        try:
            self.parse_response(connection, '_')
        except ResponseError:
            errors.append((0, sys.exc_info()[1]))
        # and all the other commands
        for i, command in enumerate(commands):
            try:
                self.parse_response(connection, '_')
            except ResponseError:
                ex = sys.exc_info()[1]
                self.annotate_exception(ex, i + 1, command[0])
                errors.append((i, ex))
        # parse the EXEC.
        try:
            response = self.parse_response(connection, '_')
        except ExecAbortError:
            if self.explicit_transaction:
                self.immediate_execute_command('DISCARD')
            if errors:
                raise errors[0][1]
            raise sys.exc_info()[1]
        # a null EXEC reply means a WATCHed key changed under us
        if response is None:
            raise WatchError("Watched variable changed.")
        # put any parse errors into the response
        for i, e in errors:
            response.insert(i, e)
        if len(response) != len(commands):
            self.connection.disconnect()
            raise ResponseError("Wrong number of response items from "
                                "pipeline execution")
        # find any errors in the response and raise if necessary
        if raise_on_error:
            self.raise_first_error(commands, response)
        # We have to run response callbacks manually
        data = []
        for r, cmd in izip(response, commands):
            if not isinstance(r, Exception):
                args, options = cmd
                command_name = args[0]
                if command_name in self.response_callbacks:
                    r = self.response_callbacks[command_name](r, **options)
            data.append(r)
        return data

    def _execute_pipeline(self, connection, commands, raise_on_error):
        # build up all commands into a single request to increase network perf
        all_cmds = connection.pack_commands([args for args, _ in commands])
        connection.send_packed_command(all_cmds)
        response = []
        for args, options in commands:
            try:
                response.append(
                    self.parse_response(connection, args[0], **options))
            except ResponseError:
                # keep reading; errors are surfaced as values in the list
                response.append(sys.exc_info()[1])
        if raise_on_error:
            self.raise_first_error(commands, response)
        return response

    def raise_first_error(self, commands, response):
        # raise the earliest ResponseError, annotated with its position
        for i, r in enumerate(response):
            if isinstance(r, ResponseError):
                self.annotate_exception(r, i + 1, commands[i][0])
                raise r

    def annotate_exception(self, exception, number, command):
        # prefix the exception message with the 1-based command position
        cmd = safe_unicode(' ').join(imap(safe_unicode, command))
        msg = unicode('Command # %d (%s) of pipeline caused error: %s') % (
            number, cmd, safe_unicode(exception.args[0]))
        exception.args = (msg,) + exception.args[1:]

    def parse_response(self, connection, command_name, **options):
        # delegate to the regular parser, tracking WATCH/UNWATCH state
        result = StrictRedis.parse_response(
            self, connection, command_name, **options)
        if command_name in self.UNWATCH_COMMANDS:
            self.watching = False
        elif command_name == 'WATCH':
            self.watching = True
        return result

    def load_scripts(self):
        # make sure all scripts that are about to be run on this pipeline exist
        scripts = list(self.scripts)
        immediate = self.immediate_execute_command
        shas = [s.sha for s in scripts]
        # we can't use the normal script_* methods because they would just
        # get buffered in the pipeline.
        exists = immediate('SCRIPT', 'EXISTS', *shas, **{'parse': 'EXISTS'})
        if not all(exists):
            for s, exist in izip(scripts, exists):
                if not exist:
                    s.sha = immediate('SCRIPT', 'LOAD', s.script,
                                      **{'parse': 'LOAD'})

    def execute(self, raise_on_error=True):
        "Execute all the commands in the current pipeline"
        stack = self.command_stack
        if not stack:
            return []
        if self.scripts:
            self.load_scripts()
        if self.transaction or self.explicit_transaction:
            execute = self._execute_transaction
        else:
            execute = self._execute_pipeline
        conn = self.connection
        if not conn:
            conn = self.connection_pool.get_connection('MULTI',
                                                       self.shard_hint)
            # assign to self.connection so reset() releases the connection
            # back to the pool after we're done
            self.connection = conn
        try:
            return execute(conn, stack, raise_on_error)
        except (ConnectionError, TimeoutError) as e:
            conn.disconnect()
            if not conn.retry_on_timeout and isinstance(e, TimeoutError):
                raise
            # if we were watching a variable, the watch is no longer valid
            # since this connection has died. raise a WatchError, which
            # indicates the user should retry his transaction. If this is more
            # than a temporary failure, the WATCH that the user next issues
            # will fail, propagating the real ConnectionError
            if self.watching:
                raise WatchError("A ConnectionError occured on while watching "
                                 "one or more keys")
            # otherwise, it's safe to retry since the transaction isn't
            # predicated on any state
            return execute(conn, stack, raise_on_error)
        finally:
            self.reset()

    def watch(self, *names):
        "Watches the values at keys ``names``"
        if self.explicit_transaction:
            raise RedisError('Cannot issue a WATCH after a MULTI')
        return self.execute_command('WATCH', *names)

    def unwatch(self):
        "Unwatches all previously specified keys"
        # no-op (truthy) when nothing is being watched
        return self.watching and self.execute_command('UNWATCH') or True

    def script_load_for_pipeline(self, script):
        "Make sure scripts are loaded prior to pipeline execution"
        # we need the sha now so that Script.__call__ can use it to run
        # evalsha.
        if not script.sha:
            script.sha = self.immediate_execute_command('SCRIPT', 'LOAD',
                                                        script.script,
                                                        **{'parse': 'LOAD'})
        self.scripts.add(script)
# Concrete pipeline: BasePipeline buffering plus the StrictRedis command set.
class StrictPipeline(BasePipeline, StrictRedis):
    "Pipeline for the StrictRedis class"
    pass
# Concrete pipeline: BasePipeline buffering plus the legacy Redis command set.
class Pipeline(BasePipeline, Redis):
    "Pipeline for the Redis class"
    pass
class Script(object):
    "An executable Lua script object returned by ``register_script``"

    def __init__(self, registered_client, script):
        # client used when __call__ is invoked without an explicit client
        self.registered_client = registered_client
        self.script = script
        # server-side SHA1 of the script; filled in lazily on first use
        self.sha = ''

    def __call__(self, keys=(), args=(), client=None):
        """Execute the script with ``keys`` and ``args``.

        ``keys`` and ``args`` may be any iterables; immutable tuple
        defaults replace the original mutable ``[]`` defaults, which are a
        classic shared-state footgun. Falls back to ``registered_client``
        when ``client`` is None, and transparently re-loads the script if
        the server does not know its SHA yet.
        """
        if client is None:
            client = self.registered_client
        args = tuple(keys) + tuple(args)
        # make sure the Redis server knows about the script
        if isinstance(client, BasePipeline):
            # make sure this script is good to go on pipeline
            client.script_load_for_pipeline(self)
        try:
            return client.evalsha(self.sha, len(keys), *args)
        except NoScriptError:
            # Maybe the client is pointed to a different server than the
            # client that created this instance? Load the source and retry.
            self.sha = client.script_load(self.script)
            return client.evalsha(self.sha, len(keys), *args)
| fengsp/redis-py | redis/client.py | Python | mit | 101,789 |
#!/usr/bin/env python
"""
No app built with Seshat does much without controllers. This module provides a
base controller class which can be used right away in its current state, or can
be inherited from to create more advanced or custom controllers.
Basic use is like so::
from seshat.controller import BaseController
class index(BaseController):
def GET(self):
return "<h1>WAT</h1>"
If you see something along the lines of 'Content Generating Request Method' it
will usually mean ``GET()``, ``POST()``, or any other HTTP method verb which
might be given to the controller.
"""
"""
For more information and licensing, see: https://github.com/JoshAshby/seshat
http://xkcd.com/353/
Josh Ashby
2014
http://joshashby.com
joshuaashby@joshashby.com
"""
import traceback
import actions
import logging
from head import Head
logger = logging.getLogger("seshat.controller")
class BaseController(object):
    """
    The parent of all controllers which Seshat will serve.
    To use this to make a controller, override or add the request method (in
    all caps) which will be called for this controller. Eg, with the
    controller::
        from seshat.controller import BaseController
        class index(BaseController):
            def GET(self):
                return "<h1>WAT</h1>"
    then all GET method requests to this controller will return with the text
    `<h1>WAT</h1>` however all POST, PUT, DELETE calls will return as a blank
    page, since those methods are not overridden.
    .. note::
        Support for `Not Supported` status codes may be added later, ironically.
    """
    # last error captured for this controller, if any
    error = None

    def __init__(self, request):
        self.head = Head()
        self.request = request
        self.post_init_hook()

    def post_init_hook(self):
        """
        Called at the end of `__init__` this allows you to customize the
        creation process of your controller, without having to override
        `__init__ itself`.
        This should accept nothing and return nothing.
        """
        pass

    def _build(self):
        # Render this controller: run the pre-hook (which may short-circuit),
        # dispatch to the HTTP-method handler, then run the post-hook.
        content = u""
        try:
            c = self.pre_content_hook()
            if c is not None:
                # the pre-hook short-circuited the request
                if isinstance(c, actions.BaseAction):
                    return u"", c.head
                elif isinstance(c, Head):
                    return u"", c
            content = getattr(self, self.request.method)()
            if isinstance(content, actions.BaseAction):
                self.head = content.head
            # BUG FIX: the hook's documented contract is to return the
            # original or modified content, but its return value used to be
            # discarded here.
            content = self.post_content_hook(content)
        except Exception as e:
            # any handler failure becomes a 500 with the traceback attached
            tb = str(traceback.format_exc())
            logger.exception(e)
            logger.error(tb)
            self.head = Head("500 INTERNAL SERVER ERROR", errors=[e, tb])
        return content, self.head

    def pre_content_hook(self):
        """
        Called before the generating request method is called and should return either
        `None` or :py:class:`.Head` or :py:class:`.BaseAction` object.
        If there is a returned value other than None, this will skip calling
        the content generating request method and simply return directly to
        dispatch.
        A good example of the use for this hook would be for authentication.
        You could for example, check the id set through the cookie and compare
        it to a database entry. If the cookie is not currently in use (ie, user
        not logged in, or similar) then you could do::
            return Head("401")
        or perhaps::
            return actions.Unauthorized()
        :rtype: :py:class:`.Head` or :py:class:`.BaseAction` or `None`
        """
        return None

    def post_content_hook(self, content):
        """
        Gets called after the content generating request method has been
        called. This can be to further modify the content which is returned, or
        perform some other action after each request.
        :param content: the content from the content generating request method
            that was called.
        :type content: `str`
        :return: The original or modified content
        :rtype: `str`
        """
        return content

    def HEAD(self):
        """
        Will be called if the request method is HEAD
        By default this will call `GET()` but return nothing, so that only the
        Headers are returned to the client.
        """
        self.GET()

    def GET(self):
        """
        Will be called if the request method is GET
        """
        pass
| JoshAshby/seshat | seshat/controller.py | Python | gpl-3.0 | 4,488 |
from __future__ import division
from sympy import (Abs, Catalan, cos, Derivative, E, EulerGamma, exp,
factorial, factorial2, Function, GoldenRatio, I, Integer, Integral,
Interval, Lambda, Limit, Matrix, nan, O, oo, pi, Rational, Float, Rel,
S, sin, SparseMatrix, sqrt, summation, Sum, Symbol, symbols, Wild,
WildFunction, zeta, zoo, Dummy, Dict, Tuple, FiniteSet, factor,
subfactorial, true, false, Equivalent, Xor, Complement, SymmetricDifference)
from sympy.core import Expr
from sympy.physics.units import second, joule
from sympy.polys import Poly, RootOf, RootSum, groebner, ring, field, ZZ, QQ, lex, grlex
from sympy.geometry import Point, Circle
from sympy.utilities.pytest import raises
from sympy.core.compatibility import range
from sympy.printing import sstr, sstrrepr, StrPrinter
from sympy.core.trace import Tr
# Shared fixtures for the printing tests below; ``d`` is a Dummy, whose
# printed form carries a leading underscore ("_d").
x, y, z, w = symbols('x,y,z,w')
d = Dummy('d')
def test_printmethod():
    """A class-level ``_sympystr`` hook overrides the default str printer."""
    class R(Abs):
        def _sympystr(self, printer):
            return "foo(%s)" % printer._print(self.args[0])
    assert sstr(R(x)) == "foo(x)"

    class R(Abs):
        def _sympystr(self, printer):
            return "foo"
    assert sstr(R(x)) == "foo"
def test_Abs():
    """str(Abs): symbolic stays wrapped; rational arguments evaluate."""
    assert str(Abs(x)) == "Abs(x)"
    assert str(Abs(Rational(1, 6))) == "1/6"
    assert str(Abs(Rational(-1, 6))) == "1/6"
def test_Add():
    """str(Add): term ordering, sign handling, and series output."""
    assert str(x + y) == "x + y"
    assert str(x + 1) == "x + 1"
    assert str(x + x**2) == "x**2 + x"
    assert str(5 + x + y + x*y + x**2 + y**2) == "x**2 + x*y + x + y**2 + y + 5"
    assert str(1 + x + x**2/2 + x**3/3) == "x**3/3 + x**2/2 + x + 1"
    assert str(2*x - 7*x**2 + 2 + 3*y) == "-7*x**2 + 2*x + 3*y + 2"
    assert str(x - y) == "x - y"
    assert str(2 - x) == "-x + 2"
    assert str(x - 2) == "x - 2"
    assert str(x - y - z - w) == "-w + x - y - z"
    assert str(x - z*y**2*z*w) == "-w*y**2*z**2 + x"
    assert str(x - 1*y*x*y) == "-x*y**2 + x"
    assert str(sin(x).series(x, 0, 15)) == "x - x**3/6 + x**5/120 - x**7/5040 + x**9/362880 - x**11/39916800 + x**13/6227020800 + O(x**15)"
def test_Catalan():
    """str of the Catalan constant singleton."""
    assert str(Catalan) == "Catalan"
def test_ComplexInfinity():
    """str of complex infinity (zoo)."""
    assert str(zoo) == "zoo"
def test_Derivative():
    """str of unevaluated Derivative objects."""
    assert str(Derivative(x, y)) == "Derivative(x, y)"
    assert str(Derivative(x**2, x, evaluate=False)) == "Derivative(x**2, x)"
    assert str(Derivative(
        x**2/y, x, y, evaluate=False)) == "Derivative(x**2/y, x, y)"
def test_dict():
    """Printing of builtin dicts; sstr sorts keys, str may not."""
    assert str({1: 1 + x}) == sstr({1: 1 + x}) == "{1: x + 1}"
    assert str({1: x**2, 2: y*x}) in ("{1: x**2, 2: x*y}", "{2: x*y, 1: x**2}")
    assert sstr({1: x**2, 2: y*x}) == "{1: x**2, 2: x*y}"
def test_Dict():
    """Printing of sympy Dict mirrors the builtin-dict behaviour."""
    assert str(Dict({1: 1 + x})) == sstr({1: 1 + x}) == "{1: x + 1}"
    assert str(Dict({1: x**2, 2: y*x})) in (
        "{1: x**2, 2: x*y}", "{2: x*y, 1: x**2}")
    assert sstr(Dict({1: x**2, 2: y*x})) == "{1: x**2, 2: x*y}"
def test_Dummy():
    """Dummy symbols print with a leading underscore."""
    assert str(d) == "_d"
    assert str(d + x) == "_d + x"
def test_EulerGamma():
    """str of the Euler-Mascheroni constant singleton."""
    assert str(EulerGamma) == "EulerGamma"
def test_Exp():
    """str of the base of natural logarithms."""
    assert str(E) == "E"
def test_factorial():
    """str of factorial/factorial2/subfactorial, evaluated and symbolic."""
    n = Symbol('n', integer=True)
    assert str(factorial(-2)) == "zoo"
    assert str(factorial(0)) == "1"
    assert str(factorial(7)) == "5040"
    assert str(factorial(n)) == "factorial(n)"
    assert str(factorial(2*n)) == "factorial(2*n)"
    assert str(factorial(factorial(n))) == 'factorial(factorial(n))'
    assert str(factorial(factorial2(n))) == 'factorial(factorial2(n))'
    assert str(factorial2(factorial(n))) == 'factorial2(factorial(n))'
    assert str(factorial2(factorial2(n))) == 'factorial2(factorial2(n))'
    assert str(subfactorial(3)) == "2"
    assert str(subfactorial(n)) == "subfactorial(n)"
    assert str(subfactorial(2*n)) == "subfactorial(2*n)"
def test_Function():
    """str of undefined functions and WildFunction."""
    f = Function('f')
    fx = f(x)
    # NOTE(review): this rebinds the module-level ``w`` symbol locally
    w = WildFunction('w')
    assert str(f) == "f"
    assert str(fx) == "f(x)"
    assert str(w) == "w_"
def test_Geometry():
    """str of basic geometry entities."""
    assert sstr(Point(0, 0)) == 'Point2D(0, 0)'
    assert sstr(Circle(Point(0, 0), 3)) == 'Circle(Point2D(0, 0), 3)'
    # TODO test other Geometry entities
def test_GoldenRatio():
    """str of the golden ratio singleton."""
    assert str(GoldenRatio) == "GoldenRatio"
def test_ImaginaryUnit():
    """str of the imaginary unit."""
    assert str(I) == "I"
def test_Infinity():
    """str of positive infinity, alone and in a product."""
    assert str(oo) == "oo"
    assert str(oo*I) == "oo*I"
def test_Integer():
    """str of Integer values including zero and negatives."""
    assert str(Integer(-1)) == "-1"
    assert str(Integer(1)) == "1"
    assert str(Integer(-3)) == "-3"
    assert str(Integer(0)) == "0"
    assert str(Integer(25)) == "25"
def test_Integral():
    """str of unevaluated integrals, with and without limits."""
    assert str(Integral(sin(x), y)) == "Integral(sin(x), y)"
    assert str(Integral(sin(x), (y, 0, 1))) == "Integral(sin(x), (y, 0, 1))"
def test_Interval():
    """str of intervals for every open/closed endpoint combination."""
    a = Symbol('a', real=True)
    assert str(Interval(0, a)) == "[0, a]"
    assert str(Interval(0, a, False, False)) == "[0, a]"
    assert str(Interval(0, a, True, False)) == "(0, a]"
    assert str(Interval(0, a, False, True)) == "[0, a)"
    assert str(Interval(0, a, True, True)) == "(0, a)"
def test_Lambda():
    """str of Lambda, including zero-argument lambdas (issue 2908)."""
    assert str(Lambda(d, d**2)) == "Lambda(_d, _d**2)"
    # issue 2908
    assert str(Lambda((), 1)) == "Lambda((), 1)"
    assert str(Lambda((), x)) == "Lambda((), x)"
def test_Limit():
    """str of unevaluated limits, including direction argument."""
    assert str(Limit(sin(x)/x, x, y)) == "Limit(sin(x)/x, x, y)"
    assert str(Limit(1/x, x, 0)) == "Limit(1/x, x, 0)"
    assert str(
        Limit(sin(x)/x, x, y, dir="-")) == "Limit(sin(x)/x, x, y, dir='-')"
def test_list():
    """Printing of builtin lists, including nesting."""
    assert str([x]) == sstr([x]) == "[x]"
    assert str([x**2, x*y + 1]) == sstr([x**2, x*y + 1]) == "[x**2, x*y + 1]"
    assert str([x**2, [y + x]]) == sstr([x**2, [y + x]]) == "[x**2, [x + y]]"
def test_Matrix_str():
    """str vs sstr of matrices; sstr uses multi-line layout for 2-D."""
    M = Matrix([[x**+1, 1], [y, x + y]])
    assert str(M) == "Matrix([[x, 1], [y, x + y]])"
    assert sstr(M) == "Matrix([\n[x, 1],\n[y, x + y]])"
    M = Matrix([[1]])
    assert str(M) == sstr(M) == "Matrix([[1]])"
    M = Matrix([[1, 2]])
    assert str(M) == sstr(M) == "Matrix([[1, 2]])"
    M = Matrix()
    assert str(M) == sstr(M) == "Matrix(0, 0, [])"
    M = Matrix(0, 1, lambda i, j: 0)
    assert str(M) == sstr(M) == "Matrix(0, 1, [])"
def test_Mul():
    """str of products: division layout, signs, floats, custom Expr order."""
    assert str(x/y) == "x/y"
    assert str(y/x) == "y/x"
    assert str(x/y/z) == "x/(y*z)"
    assert str((x + 1)/(y + 2)) == "(x + 1)/(y + 2)"
    assert str(2*x/3) == '2*x/3'
    assert str(-2*x/3) == '-2*x/3'
    assert str(-1.0*x) == '-1.0*x'
    assert str(1.0*x) == '1.0*x'

    # custom Expr subclasses should sort after numeric coefficients
    class CustomClass1(Expr):
        is_commutative = True

    class CustomClass2(Expr):
        is_commutative = True
    cc1 = CustomClass1()
    cc2 = CustomClass2()
    assert str(Rational(2)*cc1) == '2*CustomClass1()'
    assert str(cc1*Rational(2)) == '2*CustomClass1()'
    assert str(cc1*Float("1.5")) == '1.5*CustomClass1()'
    assert str(cc2*Rational(2)) == '2*CustomClass2()'
    assert str(cc2*Rational(2)*cc1) == '2*CustomClass1()*CustomClass2()'
    assert str(cc1*Rational(2)*cc2) == '2*CustomClass1()*CustomClass2()'
def test_NaN():
    """str of not-a-number."""
    assert str(nan) == "nan"
def test_NegativeInfinity():
    """str of negative infinity."""
    assert str(-oo) == "-oo"
def test_Order():
    """str of Order terms with various variables and limit points."""
    assert str(O(x)) == "O(x)"
    assert str(O(x**2)) == "O(x**2)"
    assert str(O(x*y)) == "O(x*y, x, y)"
    assert str(O(x, x)) == "O(x)"
    assert str(O(x, (x, 0))) == "O(x)"
    assert str(O(x, (x, oo))) == "O(x, (x, oo))"
    assert str(O(x, x, y)) == "O(x, x, y)"
    # NOTE(review): duplicate of the previous assertion
    assert str(O(x, x, y)) == "O(x, x, y)"
    assert str(O(x, (x, oo), (y, oo))) == "O(x, (x, oo), (y, oo))"
def test_Permutation_Cycle():
    """str of Cycle and Permutation in both array and cyclic notation."""
    from sympy.combinatorics import Permutation, Cycle

    # general principle: economically, canonically show all moved elements
    # and the size of the permutation.

    for p, s in [
        (Cycle(),
        'Cycle()'),
        (Cycle(2),
        'Cycle(2)'),
        (Cycle(2, 1),
        'Cycle(1, 2)'),
        (Cycle(1, 2)(5)(6, 7)(10),
        'Cycle(1, 2)(6, 7)(10)'),
        (Cycle(3, 4)(1, 2)(3, 4),
        'Cycle(1, 2)(4)'),
    ]:
        assert str(p) == s

    # array-form printing (print_cyclic toggled off globally below)
    Permutation.print_cyclic = False
    for p, s in [
        (Permutation([]),
        'Permutation([])'),
        (Permutation([], size=1),
        'Permutation([0])'),
        (Permutation([], size=2),
        'Permutation([0, 1])'),
        (Permutation([], size=10),
        'Permutation([], size=10)'),
        (Permutation([1, 0, 2]),
        'Permutation([1, 0, 2])'),
        (Permutation([1, 0, 2, 3, 4, 5]),
        'Permutation([1, 0], size=6)'),
        (Permutation([1, 0, 2, 3, 4, 5], size=10),
        'Permutation([1, 0], size=10)'),
    ]:
        assert str(p) == s

    # cyclic-form printing
    Permutation.print_cyclic = True
    for p, s in [
        (Permutation([]),
        'Permutation()'),
        (Permutation([], size=1),
        'Permutation(0)'),
        (Permutation([], size=2),
        'Permutation(1)'),
        (Permutation([], size=10),
        'Permutation(9)'),
        (Permutation([1, 0, 2]),
        'Permutation(2)(0, 1)'),
        (Permutation([1, 0, 2, 3, 4, 5]),
        'Permutation(5)(0, 1)'),
        (Permutation([1, 0, 2, 3, 4, 5], size=10),
        'Permutation(9)(0, 1)'),
        (Permutation([0, 1, 3, 2, 4, 5], size=10),
        'Permutation(9)(2, 3)'),
    ]:
        assert str(p) == s
def test_Pi():
    """str of pi."""
    assert str(pi) == "pi"
def test_Poly():
    """str of Poly including domain and modulus annotations."""
    assert str(Poly(0, x)) == "Poly(0, x, domain='ZZ')"
    assert str(Poly(1, x)) == "Poly(1, x, domain='ZZ')"
    assert str(Poly(x, x)) == "Poly(x, x, domain='ZZ')"
    assert str(Poly(2*x + 1, x)) == "Poly(2*x + 1, x, domain='ZZ')"
    assert str(Poly(2*x - 1, x)) == "Poly(2*x - 1, x, domain='ZZ')"
    assert str(Poly(-1, x)) == "Poly(-1, x, domain='ZZ')"
    assert str(Poly(-x, x)) == "Poly(-x, x, domain='ZZ')"
    assert str(Poly(-2*x + 1, x)) == "Poly(-2*x + 1, x, domain='ZZ')"
    assert str(Poly(-2*x - 1, x)) == "Poly(-2*x - 1, x, domain='ZZ')"
    assert str(Poly(x - 1, x)) == "Poly(x - 1, x, domain='ZZ')"
    assert str(
        Poly(x**2 + 1 + y, x)) == "Poly(x**2 + y + 1, x, domain='ZZ[y]')"
    assert str(
        Poly(x**2 - 1 + y, x)) == "Poly(x**2 + y - 1, x, domain='ZZ[y]')"
    assert str(Poly(x**2 + I*x, x)) == "Poly(x**2 + I*x, x, domain='EX')"
    assert str(Poly(x**2 - I*x, x)) == "Poly(x**2 - I*x, x, domain='EX')"
    assert str(Poly(-x*y*z + x*y - 1, x, y, z)
               ) == "Poly(-x*y*z + x*y - 1, x, y, z, domain='ZZ')"
    assert str(Poly(-w*x**21*y**7*z + (1 + w)*z**3 - 2*x*z + 1, x, y, z)) == \
        "Poly(-w*x**21*y**7*z - 2*x*z + (w + 1)*z**3 + 1, x, y, z, domain='ZZ[w]')"
    assert str(Poly(x**2 + 1, x, modulus=2)) == "Poly(x**2 + 1, x, modulus=2)"
    assert str(Poly(2*x**2 + 3*x + 4, x, modulus=17)) == "Poly(2*x**2 + 3*x + 4, x, modulus=17)"
def test_PolyRing():
    """str of polynomial rings over various domains and orders."""
    assert str(ring("x", ZZ, lex)[0]) == "Polynomial ring in x over ZZ with lex order"
    assert str(ring("x,y", QQ, grlex)[0]) == "Polynomial ring in x, y over QQ with grlex order"
    assert str(ring("x,y,z", ZZ["t"], lex)[0]) == "Polynomial ring in x, y, z over ZZ[t] with lex order"
def test_FracField():
    """str of rational function fields over various domains and orders."""
    assert str(field("x", ZZ, lex)[0]) == "Rational function field in x over ZZ with lex order"
    assert str(field("x,y", QQ, grlex)[0]) == "Rational function field in x, y over QQ with grlex order"
    assert str(field("x,y,z", ZZ["t"], lex)[0]) == "Rational function field in x, y, z over ZZ[t] with lex order"
def test_PolyElement():
    """str of ring elements, including parenthesized coefficient polys."""
    Ruv, u,v = ring("u,v", ZZ)
    Rxyz, x,y,z = ring("x,y,z", Ruv)

    assert str(x - x) == "0"
    assert str(x - 1) == "x - 1"
    assert str(x + 1) == "x + 1"

    assert str((u**2 + 3*u*v + 1)*x**2*y + u + 1) == "(u**2 + 3*u*v + 1)*x**2*y + u + 1"
    assert str((u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x) == "(u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x"
    assert str((u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x + 1) == "(u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x + 1"
    assert str((-u**2 + 3*u*v - 1)*x**2*y - (u + 1)*x - 1) == "-(u**2 - 3*u*v + 1)*x**2*y - (u + 1)*x - 1"

    assert str(-(v**2 + v + 1)*x + 3*u*v + 1) == "-(v**2 + v + 1)*x + 3*u*v + 1"
    assert str(-(v**2 + v + 1)*x - 3*u*v + 1) == "-(v**2 + v + 1)*x - 3*u*v + 1"
def test_FracElement():
    """str of field elements: numerator/denominator layout and signs."""
    Fuv, u,v = field("u,v", ZZ)
    Fxyzt, x,y,z,t = field("x,y,z,t", Fuv)

    assert str(x - x) == "0"
    assert str(x - 1) == "x - 1"
    assert str(x + 1) == "x + 1"

    assert str(x/3) == "x/3"
    assert str(x/z) == "x/z"
    assert str(x*y/z) == "x*y/z"
    assert str(x/(z*t)) == "x/(z*t)"
    assert str(x*y/(z*t)) == "x*y/(z*t)"

    assert str((x - 1)/y) == "(x - 1)/y"
    assert str((x + 1)/y) == "(x + 1)/y"
    assert str((-x - 1)/y) == "(-x - 1)/y"
    assert str((x + 1)/(y*z)) == "(x + 1)/(y*z)"
    assert str(-y/(x + 1)) == "-y/(x + 1)"
    assert str(y*z/(x + 1)) == "y*z/(x + 1)"

    assert str(((u + 1)*x*y + 1)/((v - 1)*z - 1)) == "((u + 1)*x*y + 1)/((v - 1)*z - 1)"
    assert str(((u + 1)*x*y + 1)/((v - 1)*z - t*u*v - 1)) == "((u + 1)*x*y + 1)/((v - 1)*z - u*v*t - 1)"
def test_Pow():
    """str of powers: negative/rational exponents and float exponents."""
    assert str(x**-1) == "1/x"
    assert str(x**-2) == "x**(-2)"
    assert str(x**2) == "x**2"
    assert str((x + y)**-1) == "1/(x + y)"
    assert str((x + y)**-2) == "(x + y)**(-2)"
    assert str((x + y)**2) == "(x + y)**2"
    assert str((x + y)**(1 + x)) == "(x + y)**(x + 1)"
    assert str(x**Rational(1, 3)) == "x**(1/3)"
    assert str(1/x**Rational(1, 3)) == "x**(-1/3)"
    assert str(sqrt(sqrt(x))) == "x**(1/4)"
    # not the same as x**-1
    assert str(x**-1.0) == 'x**(-1.0)'
    # see issue #2860
    assert str(S(2)**-1.0) == '2**(-1.0)'
def test_sqrt():
    """str of square roots; float exponent 1/2 does NOT print as sqrt."""
    assert str(sqrt(x)) == "sqrt(x)"
    assert str(sqrt(x**2)) == "sqrt(x**2)"
    assert str(1/sqrt(x)) == "1/sqrt(x)"
    assert str(1/sqrt(x**2)) == "1/sqrt(x**2)"
    assert str(y/sqrt(x)) == "y/sqrt(x)"
    # true-division 1/2 is a float, so no sqrt form
    assert str(x**(1/2)) == "x**0.5"
    assert str(1/x**(1/2)) == "x**(-0.5)"
def test_Rational():
n1 = Rational(1, 4)
n2 = Rational(1, 3)
n3 = Rational(2, 4)
n4 = Rational(2, -4)
n5 = Rational(0)
n7 = Rational(3)
n8 = Rational(-3)
assert str(n1*n2) == "1/12"
assert str(n1*n2) == "1/12"
assert str(n3) == "1/2"
assert str(n1*n3) == "1/8"
assert str(n1 + n3) == "3/4"
assert str(n1 + n2) == "7/12"
assert str(n1 + n4) == "-1/4"
assert str(n4*n4) == "1/4"
assert str(n4 + n2) == "-1/6"
assert str(n4 + n5) == "-1/2"
assert str(n4*n5) == "0"
assert str(n3 + n4) == "0"
assert str(n1**n7) == "1/64"
assert str(n2**n7) == "1/27"
assert str(n2**n8) == "27"
assert str(n7**n8) == "1/27"
assert str(Rational("-25")) == "-25"
assert str(Rational("1.25")) == "5/4"
assert str(Rational("-2.6e-2")) == "-13/500"
assert str(S("25/7")) == "25/7"
assert str(S("-123/569")) == "-123/569"
assert str(S("0.1[23]", rational=1)) == "61/495"
assert str(S("5.1[666]", rational=1)) == "31/6"
assert str(S("-5.1[666]", rational=1)) == "-31/6"
assert str(S("0.[9]", rational=1)) == "1"
assert str(S("-0.[9]", rational=1)) == "-1"
assert str(sqrt(Rational(1, 4))) == "1/2"
assert str(sqrt(Rational(1, 36))) == "1/6"
assert str((123**25) ** Rational(1, 25)) == "123"
assert str((123**25 + 1)**Rational(1, 25)) != "123"
assert str((123**25 - 1)**Rational(1, 25)) != "123"
assert str((123**25 - 1)**Rational(1, 25)) != "122"
assert str(sqrt(Rational(81, 36))**3) == "27/8"
assert str(1/sqrt(Rational(81, 36))**3) == "8/27"
assert str(sqrt(-4)) == str(2*I)
assert str(2**Rational(1, 10**10)) == "2**(1/10000000000)"
def test_Float():
# NOTE prec is the whole number of decimal digits
assert str(Float('1.23', prec=1 + 2)) == '1.23'
assert str(Float('1.23456789', prec=1 + 8)) == '1.23456789'
assert str(
Float('1.234567890123456789', prec=1 + 18)) == '1.234567890123456789'
assert str(pi.evalf(1 + 2)) == '3.14'
assert str(pi.evalf(1 + 14)) == '3.14159265358979'
assert str(pi.evalf(1 + 64)) == ('3.141592653589793238462643383279'
'5028841971693993751058209749445923')
assert str(pi.round(-1)) == '0.'
assert str((pi**400 - (pi**400).round(1)).n(2)) == '-0.e+88'
def test_Relational():
assert str(Rel(x, y, "<")) == "x < y"
assert str(Rel(x + y, y, "==")) == "Eq(x + y, y)"
assert str(Rel(x, y, "!=")) == "Ne(x, y)"
def test_RootOf():
assert str(RootOf(x**5 + 2*x - 1, 0)) == "RootOf(x**5 + 2*x - 1, 0)"
def test_RootSum():
f = x**5 + 2*x - 1
assert str(
RootSum(f, Lambda(z, z), auto=False)) == "RootSum(x**5 + 2*x - 1)"
assert str(RootSum(f, Lambda(
z, z**2), auto=False)) == "RootSum(x**5 + 2*x - 1, Lambda(z, z**2))"
def test_GroebnerBasis():
assert str(groebner(
[], x, y)) == "GroebnerBasis([], x, y, domain='ZZ', order='lex')"
F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
assert str(groebner(F, order='grlex')) == \
"GroebnerBasis([x**2 - x - 3*y + 1, y**2 - 2*x + y - 1], x, y, domain='ZZ', order='grlex')"
assert str(groebner(F, order='lex')) == \
"GroebnerBasis([2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7], x, y, domain='ZZ', order='lex')"
def test_set():
assert sstr(set()) == 'set()'
assert sstr(frozenset()) == 'frozenset()'
assert sstr(set([1, 2, 3])) == 'set([1, 2, 3])'
assert sstr(
set([1, x, x**2, x**3, x**4])) == 'set([1, x, x**2, x**3, x**4])'
def test_SparseMatrix():
M = SparseMatrix([[x**+1, 1], [y, x + y]])
assert str(M) == "Matrix([[x, 1], [y, x + y]])"
assert sstr(M) == "Matrix([\n[x, 1],\n[y, x + y]])"
def test_Sum():
assert str(summation(cos(3*z), (z, x, y))) == "Sum(cos(3*z), (z, x, y))"
assert str(Sum(x*y**2, (x, -2, 2), (y, -5, 5))) == \
"Sum(x*y**2, (x, -2, 2), (y, -5, 5))"
def test_Symbol():
assert str(y) == "y"
assert str(x) == "x"
e = x
assert str(e) == "x"
def test_tuple():
assert str((x,)) == sstr((x,)) == "(x,)"
assert str((x + y, 1 + x)) == sstr((x + y, 1 + x)) == "(x + y, x + 1)"
assert str((x + y, (
1 + x, x**2))) == sstr((x + y, (1 + x, x**2))) == "(x + y, (x + 1, x**2))"
def test_Unit():
assert str(second) == "s"
assert str(joule) == "kg*m**2/s**2" # issue 5560
def test_wild_str():
# Check expressions containing Wild not causing infinite recursion
w = Wild('x')
assert str(w + 1) == 'x_ + 1'
assert str(exp(2**w) + 5) == 'exp(2**x_) + 5'
assert str(3*w + 1) == '3*x_ + 1'
assert str(1/w + 1) == '1 + 1/x_'
assert str(w**2 + 1) == 'x_**2 + 1'
assert str(1/(1 - w)) == '1/(-x_ + 1)'
def test_zeta():
assert str(zeta(3)) == "zeta(3)"
def test_issue_3101():
e = x - y
a = str(e)
b = str(e)
assert a == b
def test_issue_3103():
e = -2*sqrt(x) - y/sqrt(x)/2
assert str(e) not in ["(-2)*x**1/2(-1/2)*x**(-1/2)*y",
"-2*x**1/2(-1/2)*x**(-1/2)*y", "-2*x**1/2-1/2*x**-1/2*w"]
assert str(e) == "-2*sqrt(x) - y/(2*sqrt(x))"
def test_issue_4021():
e = Integral(x, x) + 1
assert str(e) == 'Integral(x, x) + 1'
def test_sstrrepr():
assert sstr('abc') == 'abc'
assert sstrrepr('abc') == "'abc'"
e = ['a', 'b', 'c', x]
assert sstr(e) == "[a, b, c, x]"
assert sstrrepr(e) == "['a', 'b', 'c', x]"
def test_infinity():
assert sstr(oo*I) == "oo*I"
def test_full_prec():
assert sstr(S("0.3"), full_prec=True) == "0.300000000000000"
assert sstr(S("0.3"), full_prec="auto") == "0.300000000000000"
assert sstr(S("0.3"), full_prec=False) == "0.3"
assert sstr(S("0.3")*x, full_prec=True) in [
"0.300000000000000*x",
"x*0.300000000000000"
]
assert sstr(S("0.3")*x, full_prec="auto") in [
"0.3*x",
"x*0.3"
]
assert sstr(S("0.3")*x, full_prec=False) in [
"0.3*x",
"x*0.3"
]
def test_noncommutative():
A, B, C = symbols('A,B,C', commutative=False)
assert sstr(A*B*C**-1) == "A*B*C**(-1)"
assert sstr(C**-1*A*B) == "C**(-1)*A*B"
assert sstr(A*C**-1*B) == "A*C**(-1)*B"
assert sstr(sqrt(A)) == "sqrt(A)"
assert sstr(1/sqrt(A)) == "A**(-1/2)"
def test_empty_printer():
str_printer = StrPrinter()
assert str_printer.emptyPrinter("foo") == "foo"
assert str_printer.emptyPrinter(x*y) == "x*y"
assert str_printer.emptyPrinter(32) == "32"
def test_settings():
raises(TypeError, lambda: sstr(S(4), method="garbage"))
def test_RandomDomain():
from sympy.stats import Normal, Die, Exponential, pspace, where
X = Normal('x1', 0, 1)
assert str(where(X > 0)) == "Domain: And(0 < x1, x1 < oo)"
D = Die('d1', 6)
assert str(where(D > 4)) == "Domain: Or(Eq(d1, 5), Eq(d1, 6))"
A = Exponential('a', 1)
B = Exponential('b', 1)
assert str(pspace(Tuple(A, B)).domain) == "Domain: And(0 <= a, 0 <= b, a < oo, b < oo)"
def test_FiniteSet():
assert str(FiniteSet(*range(1, 51))) == '{1, 2, 3, ..., 48, 49, 50}'
assert str(FiniteSet(*range(1, 6))) == '{1, 2, 3, 4, 5}'
def test_PrettyPoly():
from sympy.polys.domains import QQ
F = QQ.frac_field(x, y)
R = QQ[x, y]
assert sstr(F.convert(x/(x + y))) == sstr(x/(x + y))
assert sstr(R.convert(x + y)) == sstr(x + y)
def test_categories():
from sympy.categories import (Object, NamedMorphism,
IdentityMorphism, Category)
A = Object("A")
B = Object("B")
f = NamedMorphism(A, B, "f")
id_A = IdentityMorphism(A)
K = Category("K")
assert str(A) == 'Object("A")'
assert str(f) == 'NamedMorphism(Object("A"), Object("B"), "f")'
assert str(id_A) == 'IdentityMorphism(Object("A"))'
assert str(K) == 'Category("K")'
def test_Tr():
A, B = symbols('A B', commutative=False)
t = Tr(A*B)
assert str(t) == 'Tr(A*B)'
def test_issue_6387():
assert str(factor(-3.0*z + 3)) == '-3.0*(1.0*z - 1.0)'
def test_MatMul_MatAdd():
from sympy import MatrixSymbol
assert str(2*(MatrixSymbol("X", 2, 2) + MatrixSymbol("Y", 2, 2))) == \
"2*(X + Y)"
def test_MatrixSlice():
from sympy.matrices.expressions import MatrixSymbol
assert str(MatrixSymbol('X', 10, 10)[:5, 1:9:2]) == 'X[:5, 1:9:2]'
assert str(MatrixSymbol('X', 10, 10)[5, :5:2]) == 'X[5, :5:2]'
def test_true_false():
assert str(true) == repr(true) == sstr(true) == "True"
assert str(false) == repr(false) == sstr(false) == "False"
def test_Equivalent():
assert str(Equivalent(y, x)) == "Equivalent(x, y)"
def test_Xor():
assert str(Xor(y, x, evaluate=False)) == "Xor(x, y)"
def test_Complement():
    # Raw string: the expected output contains a literal backslash; a plain
    # string made "\ " an invalid escape sequence (SyntaxWarning on modern
    # Python) even though the runtime value was the same.
    assert str(Complement(S.Reals, S.Naturals)) == r'(-oo, oo) \ Naturals()'
def test_SymmetricDifference():
    # evaluate=False keeps the unevaluated SymmetricDifference form
    assert str(SymmetricDifference(Interval(2,3), Interval(3,4),evaluate=False)) == \
        'SymmetricDifference([2, 3], [3, 4])'
| grevutiu-gabriel/sympy | sympy/printing/tests/test_str.py | Python | bsd-3-clause | 22,751 |
import pprint
from yum_metadata_diff.metadata import MetadataItem
from yum_metadata_diff.diff_objects import PackageDiff
class Package(MetadataItem):
    """A single package record from repository metadata, diffable against
    another Package via DIFF_CLASS/DIFF_ATTR (consumed by MetadataItem)."""

    DIFF_CLASS = PackageDiff
    # NOTE(review): ('checksum') is just the string 'checksum', NOT a
    # one-element tuple; if MetadataItem iterates DIFF_ATTR it will see
    # single characters. Use ('checksum',) if a tuple is intended --
    # confirm against MetadataItem before changing.
    DIFF_ATTR = ('checksum')

    # Attributes for which a missing value and an empty value compare equal.
    NONE_AND_EMPTY_ARE_SAME = ("location_base",
                               "vendor",
                               "description",
                               "sourcerpm")

    def __init__(self):
        MetadataItem.__init__(self)
        self.checksum = ''  # pkgid
        self.epoch = ''
        self.name = ''
        self.version = ''
        self.release = ''
        self.arch = ''

    def __repr__(self):
        # "<checksum>-<nevra>" when the package has a name, else the checksum
        str_repr = self.checksum
        if self.nevra():
            str_repr += "-%s" % self.nevra()
        return str_repr

    def nevra(self):
        """Return the NEVRA string: name[-[epoch:]version-release][.arch]."""
        nevra = self.name
        if self.version:
            nevra += "-"
            # epoch "0" is the implicit default and is omitted
            if self.epoch and self.epoch != "0":
                nevra += self.epoch + ":"
            nevra += self.version
            nevra += "-" + self.release
        if self.arch:
            nevra += "." + self.arch
        return nevra

    def pprint(self):
        """Print a human-readable "NEVRA (checksum)" line."""
        msg = "%s (%s)\n" % (self.nevra(), self.checksum)
        # print(msg) is valid on both Python 2 and 3 for a single argument;
        # the previous Python-2-only statement form broke on Python 3.
        print(msg)
| Tojaj/yum-metadata-diff | yum_metadata_diff/package.py | Python | lgpl-2.1 | 1,204 |
import math
import time
# Pt100 (Callendar-Van Dusen) coefficients: R(t) = R_0 * (1 + a*t + b*t**2)
R_0 = 100
a = 3.9083 * (10**(-3))
b = -5.775 * (10**(-7))


def space():
    """Print two blank lines to visually separate console sections."""
    print()
    print()


def temperature_from_resistance(R):
    """Return the temperature in degrees C for a Pt100 resistance R (ohms).

    Inverts R = R_0*(1 + a*t + b*t**2) with the quadratic formula, taking
    the physically meaningful root (valid for t >= 0 C, where the
    Callendar-Van Dusen c-term is zero).
    """
    return ((((-R_0) * a) + math.sqrt((R_0 ** 2) * (a ** 2)
                                      - 4 * R_0 * b * (R_0 - R)))
            / (2 * R_0 * b))


def main():
    """Prompt for a resistance, print the temperature, then repeat."""
    space()
    R = float(input("R = "))
    space()
    t = temperature_from_resistance(R)
    space()
    if t == -0.0:  # avoid printing a negative zero
        print("t = 0 C")
    else:
        print("t =", t, "C")
    input("")
    # NOTE(review): recursion instead of a loop -- long sessions will
    # eventually hit the Python recursion limit.
    main()
if __name__ == "__main__":
    # The statements that previously followed this call referenced an
    # undefined name (`R = result` -> NameError) and were unreachable while
    # main() looped via recursion; they have been removed.
    main()
| Tekbear99/MetInstitutt | Server/calc.py | Python | mit | 552 |
def prepare_dataset(dataset_path):
    '''
    Load a comma separated text file and split it into features and labels.

    NOTE(review): the original description claimed 'B'/'M' class labels
    re-coded to 0/1, but this implementation does no re-coding: it simply
    takes columns 0-8 as X and column 9 as y. Verify the CSV column layout
    against this assumption.

    @param dataset_path: full path of the dataset text file
    @return
        X, y : numpy arrays. X[i, :] is the ith example (columns 0-8);
        y is the 10th column, kept two-dimensional with shape (n, 1).
    '''
    import numpy as np
    import pandas as pd
    path = dataset_path
    df = pd.read_csv(path)  # read a comma separated text file
    # NOTE(review): fillna(df) fills each NaN with the value at the same
    # position in df itself, which is a no-op -- it does NOT drop or fill
    # blank rows as the original comment claimed. dropna() may be intended.
    df = df.fillna(df)
    dataset1 = df.values[:, 9:10]   # label column (index 9), shape (n, 1)
    dataset2 = df.values[:, 0:9]    # feature columns 0-8
    y = np.array(dataset1)  # set numpy array for y
    X = np.array(dataset2)  # set numpy array for X
    return X, y
    # print(X)
    # raise NotImplementedError()
def build_tain_data(X_data, y_data):
    """Split features/labels into train and test subsets.

    Uses sklearn's default split proportions with a fixed random_state of 4
    so the partition is reproducible.

    @return (X_train, X_test, y_train, y_test)
    """
    from sklearn.model_selection import train_test_split
    # train_test_split returns [X_train, X_test, y_train, y_test]
    split = train_test_split(X_data, y_data, random_state=4)
    return tuple(split)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def build_NB_classifier(X_training, y_training):
    '''
    Build a Naive Bayes (Gaussian) classifier fitted on X_training, y_training.
    @param
        X_training: X_training[i,:] is the ith example
        y_training: y_training[i] is the class label of X_training[i,:]
    @return
        clf : the fitted GaussianNB classifier
    '''
    from sklearn.naive_bayes import GaussianNB
    X = X_training
    y = y_training
    clf = GaussianNB()
    clf.fit(X, y)
    return clf
    # raise NotImplementedError()


# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def build_DT_classifier(X_training, y_training):
    '''
    Build a Decision Tree classifier fitted on X_training, y_training.
    @param
        X_training: X_training[i,:] is the ith example
        y_training: y_training[i] is the class label of X_training[i,:]
    @return
        clf : the fitted DecisionTreeClassifier
    '''
    from sklearn import tree
    X = X_training
    y = y_training
    clf = tree.DecisionTreeClassifier()
    clf.fit(X, y)
    return clf
    # raise NotImplementedError()


# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def build_NN_classifier(X_training, y_training):
    '''
    Build a Nearest Neighbours model fitted on X_training.

    NOTE(review): sklearn's NearestNeighbors is an *unsupervised* neighbour
    search -- fit(X, y) ignores y and the returned object has no predict().
    If a classifier is wanted, KNeighborsClassifier (see
    build_KNC_classifier) is likely the intended class; confirm with callers.

    @param
        X_training: X_training[i,:] is the ith example
        y_training: y_training[i] is the class label of X_training[i,:]
    @return
        clf : the fitted NearestNeighbors object
    '''
    from sklearn.neighbors import NearestNeighbors
    X = X_training
    y = y_training
    clf = NearestNeighbors()
    clf.fit(X, y)
    return clf
    # raise NotImplementedError()


# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def build_KNC_classifier(X_training,y_training):
    '''
    Build a K-Nearest Neighbors classifier fitted on X_training, y_training.
    @param
        X_training: X_training[i,:] is the ith example
        y_training: y_training[i] is the class label of X_training[i,:]
    @return
        clf : the fitted KNeighborsClassifier
    '''
    from sklearn.neighbors import KNeighborsClassifier
    X = X_training
    y = y_training
    clf = KNeighborsClassifier()
    clf.fit(X, y)
    return clf


def build_KNR_classifier(X_training,y_training):
    '''
    Build a K-Nearest Neighbors *regressor* fitted on X_training, y_training.
    @param
        X_training: X_training[i,:] is the ith example
        y_training: y_training[i] is the target value of X_training[i,:]
    @return
        clf : the fitted KNeighborsRegressor
    '''
    from sklearn.neighbors import KNeighborsRegressor
    X = X_training
    y = y_training
    clf = KNeighborsRegressor()
    clf.fit(X, y)
    return clf


def build_SVM_classifier(X_training, y_training):
    '''
    Build a Support Vector Machine classifier fitted on X_training, y_training.
    @param
        X_training: X_training[i,:] is the ith example
        y_training: y_training[i] is the class label of X_training[i,:]
    @return
        clf : the fitted SVC classifier
    '''
    from sklearn import svm
    X = X_training
    y = y_training
    clf = svm.SVC()
    clf.fit(X, y)
    return clf
    # raise NotImplementedError()
def accuracy_test(clf, X_testing, Y_testing):
    '''
    Report the accuracy of a fitted classifier on a held-out test set.

    The original version was an unfinished stub (it imported metrics and
    silently returned None).

    @param
        clf: a fitted classifier exposing predict()
        X_testing: X_testing[i,:] is the ith test example
        Y_testing: Y_testing[i] is the true label of X_testing[i,:]
    @return
        float : fraction of correctly classified test examples
    '''
    from sklearn import metrics
    y_pred = clf.predict(X_testing)
    return metrics.accuracy_score(Y_testing, y_pred)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == "__main__":
    pass  # NOTE(review): redundant -- the following statements execute regardless
    # call your functions here
    # Load the CSV, split it, and fit one model of each kind.
    X_data_set, y_data_set = prepare_dataset("newdata.csv")
    X_training, X_testing, y_training, y_testing = build_tain_data(X_data_set, y_data_set)
    NBClf=build_NB_classifier(X_training, y_training)
    DTClf=build_DT_classifier(X_training, y_training)
    NNClf=build_NN_classifier(X_training, y_training)
    SVMClf=build_SVM_classifier(X_training, y_training)
    KNCClf=build_KNC_classifier(X_training,y_training)
    KNRClf=build_KNR_classifier(X_training,y_training)
    # NOTE(review): the fitted models are never evaluated; accuracy_test is unused here.
| Dishcreate/MachineLearning | performance.py | Python | apache-2.0 | 5,608 |
r"""
Logging objects (:mod: `qiita_db.logger`)
====================================
..currentmodule:: qiita_db.logger
This module provides objects for recording log information
Classes
-------
..autosummary::
:toctree: generated/
LogEntry
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from json import loads, dumps
from qiita_db.util import convert_to_id
from .sql_connection import TRN
from .base import QiitaObject
class LogEntry(QiitaObject):
    """A row in the qiita.logging table.

    Attributes
    ----------
    severity
    time
    info
    msg

    Methods
    -------
    clear_info
    add_info
    """

    # All SQL below targets this table in the qiita schema.
    _table = 'logging'

    @classmethod
    def newest_records(cls, numrecords=100):
        """Return a list of the newest records in the logging table

        Parameters
        ----------
        numrecords : int, optional
            The number of records to return. Default 100

        Returns
        -------
        list of LogEntry objects
            list of the log entries
        """
        with TRN:
            # newest first: logging_id is monotonically increasing
            sql = """SELECT logging_id
                     FROM qiita.{0}
                     ORDER BY logging_id DESC LIMIT %s""".format(cls._table)
            TRN.add(sql, [numrecords])
            return [cls(i) for i in TRN.execute_fetchflatten()]

    @classmethod
    def create(cls, severity, msg, info=None):
        """Creates a new LogEntry object

        Parameters
        ----------
        severity : str  {Warning, Runtime, Fatal}
            The level of severity to use for the LogEntry. Refers to an entry
            in the SEVERITY table.
        msg : str
            The message text
        info : dict, optional
            Defaults to ``None``. If supplied, the information will be added
            as the first entry in a list of information dicts. If ``None``,
            an empty dict will be added.

        Notes
        -----
        - When `info` is added, keys can be of any type, but upon retrieval,
          they will be of type str
        """
        if info is None:
            info = {}

        # `information` is stored as a JSON list of dicts; start it with
        # the provided (or empty) dict.
        info = dumps([info])

        with TRN:
            sql = """INSERT INTO qiita.{} (time, severity_id, msg, information)
                     VALUES (NOW(), %s, %s, %s)
                     RETURNING logging_id""".format(cls._table)
            severity_id = convert_to_id(severity, "severity")
            TRN.add(sql, [severity_id, msg, info])

            return cls(TRN.execute_fetchlast())

    @property
    def severity(self):
        """Returns the severity_id associated with this LogEntry

        Returns
        -------
        int
            This is a key to the SEVERITY table
        """
        with TRN:
            sql = """SELECT severity_id FROM qiita.{}
                     WHERE logging_id = %s""".format(self._table)
            TRN.add(sql, [self.id])
            return TRN.execute_fetchlast()

    @property
    def time(self):
        """Returns the time that this LogEntry was created

        Returns
        -------
        datetime
        """
        with TRN:
            sql = "SELECT time FROM qiita.{} WHERE logging_id = %s".format(
                self._table)
            TRN.add(sql, [self.id])
            return TRN.execute_fetchlast()

    @property
    def info(self):
        """Returns the info associated with this LogEntry

        Returns
        -------
        list of dict
            Each entry in the list is information that was added (the info
            added upon creation will be index 0, and if additional info
            was supplied subsequently, those entries will occupy subsequent
            indices)

        Notes
        -----
        - When `info` is added, keys can be of any type, but upon retrieval,
          they will be of type str
        """
        with TRN:
            sql = """SELECT information FROM qiita.{} WHERE
                     logging_id = %s""".format(self._table)
            TRN.add(sql, [self.id])
            # stored as JSON text; decode back into a list of dicts
            return loads(TRN.execute_fetchlast())

    @property
    def msg(self):
        """Gets the message text for this LogEntry

        Returns
        -------
        str
        """
        with TRN:
            sql = "SELECT msg FROM qiita.{0} WHERE logging_id = %s".format(
                self._table)
            TRN.add(sql, [self.id])
            return TRN.execute_fetchlast()

    def clear_info(self):
        """Resets the list of info dicts to be an empty list
        """
        with TRN:
            sql = """UPDATE qiita.{} SET information = %s
                     WHERE logging_id = %s""".format(self._table)
            TRN.add(sql, [dumps([]), self.id])
            TRN.execute()

    def add_info(self, info):
        """Adds new information to the info associated with this LogEntry

        Parameters
        ----------
        info : dict
            The information to add.

        Notes
        -----
        - When `info` is added, keys can be of any type, but upon retrieval,
          they will be of type str
        """
        with TRN:
            # read-modify-write of the JSON list inside one transaction
            current_info = self.info
            current_info.append(info)
            new_info = dumps(current_info)

            sql = """UPDATE qiita.{} SET information = %s
                     WHERE logging_id = %s""".format(self._table)
            TRN.add(sql, [new_info, self.id])
            TRN.execute()
| adamrp/qiita | qiita_db/logger.py | Python | bsd-3-clause | 5,700 |
import json
import webapp2
import time
import model
import server.seed
import logging
logging.info('Main')
def AsDict(aire):
    """Serialize an Aire entity into a plain dict suitable for json.dumps.

    Flattens the datastore key to its integer id, stringifies the timestamp,
    and nests the station's coordinates and name under 'stationObject'.
    """
    station = aire.station
    payload = {
        'id': aire.key.id(),
        'timestamp': str(aire.timestamp),
        'parameter': aire.parameter,
        'tecnic': aire.tecnic,
        'period': aire.period,
        'value': aire.value,
        'ce01': aire.ce01,
        'ce02': aire.ce02,
        'ce03': aire.ce03,
        'stationObject': {
            'Latitud_D': station.Latitud_D,
            'Longitud_D': station.Longitud_D,
            'Name': station.Name,
        },
    }
    return payload
class RestHandler(webapp2.RequestHandler):
    """Base handler providing a shared dispatch hook and a JSON reply helper."""

    def dispatch(self):
        # time.sleep(1)  # (debug) artificial latency, left disabled
        super(RestHandler, self).dispatch()

    def SendJson(self, r):
        """Serialize `r` as JSON and write it to the response body.

        NOTE(review): the content type is text/plain rather than
        application/json -- presumably intentional (easier in-browser
        inspection); confirm before changing.
        """
        self.response.headers['content-type'] = 'text/plain'
        self.response.write(json.dumps(r))
class QueryHandler(RestHandler):
    """GET /api/aires: return air-quality measurements as JSON.

    Query parameters: 'parameter' (comma-separated integer parameter ids,
    required by the original code) plus optional year/month/day/hour filters.
    """

    def get(self):
        raw = self.request.get('parameter')
        # Guard against a missing or empty 'parameter' value: int('') used
        # to raise ValueError and return a 500. Empty entries are skipped.
        parameter = [int(x) for x in raw.split(",") if x.strip()]
        year = self.request.GET.get('year')
        month = self.request.GET.get('month')
        day = self.request.GET.get('day')
        hour = self.request.GET.get('hour')
        aires = model.AllAire(parameter, year, month, day, hour)
        r = [AsDict(aire) for aire in aires]
        self.SendJson(r)
class UpdateHandler(RestHandler):
    """POST {id, first, last}: update a guest and echo the serialized result.

    NOTE(review): AsDict serializes Aire entities (key/station/etc.), so
    calling it on a guest looks wrong -- these guest handlers appear to be
    leftovers from a guestbook demo and are not routed in APP below.
    """

    def post(self):
        r = json.loads(self.request.body)
        guest = model.UpdateGuest(r['id'], r['first'], r['last'])
        r = AsDict(guest)
        self.SendJson(r)


class InsertHandler(RestHandler):
    # POST {first, last}: create a guest (unrouted; see note on UpdateHandler)
    def post(self):
        r = json.loads(self.request.body)
        guest = model.InsertGuest(r['first'], r['last'])
        r = AsDict(guest)
        self.SendJson(r)


class DeleteHandler(RestHandler):
    # POST {id}: delete a guest (unrouted; returns no body)
    def post(self):
        r = json.loads(self.request.body)
        model.DeleteGuest(r['id'])


class SeedHandler(RestHandler):
    # GET /seed: populate the datastore from the seed module
    def get(self):
        seed = server.seed.Inflate()


# WSGI entry point: only the aires query and the seed endpoint are routed.
APP = webapp2.WSGIApplication([
    ('/api/aires', QueryHandler),
    ('/seed', SeedHandler)
    #('/rest/insert', InsertHandler),
    #('/rest/delete', DeleteHandler),
    #('/rest/update', UpdateHandler),
], debug=True)
| emarinizquierdo/breathe-better | main.py | Python | apache-2.0 | 2,019 |
"""Demo of several ways to work with Amara's Atom tools
Some configurations can be changed in akara.conf. The default settings are:
class atomtools:
entries = "/path/to/entry/files/*.atom"
feed_envelope = '''<feed xmlns="http://www.w3.org/2005/Atom">
<title>This is my feed</title><id>http://example.com/my_feed</id>
</feed>'''
'entries' is the glob path to a set of Atom entries, where the root
element to each XML document must be "entry" in the Atom
namespace). The "feed_envelope" goes around the entries to make the
full Atom feed. The entries are listed after the <title>.
"""
# "Make Emacs happy with a close quote. Otherwise it gets confused.
from datetime import datetime, timedelta
import glob
from itertools import dropwhile
import amara
from amara import bindery
from amara.tools import atomtools
from amara.thirdparty import httplib2
from amara.lib.util import first_item
from amara.thirdparty import json
from akara.services import simple_service
from akara import request, response
from akara import logger, module_config
# These come from the akara.demos.atomtools section of the Akara configuration file
ENTRIES = module_config().warn("entries", "/path/to/entry/files/*.atom",
"glob path to Atom entries")
FEED_ENVELOPE = module_config().warn("feed_envelope",
'''<feed xmlns="http://www.w3.org/2005/Atom">
<title>This is my feed</title><id>http://example.com/my_feed</id>
</feed>''', "XML envelope around the Atom entries")
#text/uri-list from RFC 2483
SERVICE_ID = 'http://purl.org/akara/services/demo/atom.json'
@simple_service('GET', SERVICE_ID, 'akara.atom.json', 'application/json')
def atom_json(url):
    '''
    Convert Atom syntax to Exhibit JSON
    (see: http://www.ibm.com/developerworks/web/library/wa-realweb6/ ; this is based on listing 3)

    Sample requests:
    * curl "http://localhost:8880/akara.atom.json?url=url=http://zepheira.com/feeds/news.atom"
    * curl "http://localhost:8880/akara.atom.json?url=http://picasaweb.google.com/data/feed/base/user/dysryi/albumid/5342439351589940049"
    * curl "http://localhost:8880/akara.atom.json?url=http://earthquake.usgs.gov/eqcenter/catalogs/7day-M2.5.xml"
    '''
    # ejsonize fetches the Atom document at `url` and returns plain-dict
    # entries; wrap them in Exhibit's {'items': [...]} envelope.
    entries = atomtools.ejsonize(url)
    return json.dumps({'items': entries}, indent=4)
# This uses a simple caching mechanism.
# If the cache is over 15 minutes old then rebuild the cache.
DOC_CACHE = None
def _need_refresh():
if DOC_CACHE is None:
return True
if datetime.now() > DOC_CACHE[1]: # check for expiration
return True
return False
SERVICE_ID = 'http://purl.org/akara/services/demo/aggregate.atom'
@simple_service('GET', SERVICE_ID, 'akara.aggregate.atom', str(atomtools.ATOM_IMT))
def aggregate_atom():
    """Aggregate a set of Atom entries and return as an Atom feed

    Sample request:
    * curl "http://localhost:8880/akara.aggregate.atom"
    """
    global DOC_CACHE
    # Rebuild the cached feed at most once per 15 minutes (see _need_refresh).
    if _need_refresh():
        filenames = glob.glob(ENTRIES)
        doc, metadata = atomtools.aggregate_entries(FEED_ENVELOPE, filenames)
        # Cache the serialized feed together with its expiry timestamp.
        DOC_CACHE = doc.xml_encode('xml-indent'), datetime.now() + timedelta(minutes=15)
    return DOC_CACHE[0]
# We love Atom, but for sake of practicality (and JSON fans), here is
# a transform for general feeds
SERVICE_ID = 'http://purl.org/akara/services/demo/webfeed.json'
@simple_service('GET', SERVICE_ID, 'akara.webfeed.json', 'application/json')
def webfeed_json(url):
    """Convert an Atom feed to Exhibit JSON

    Sample request:
    * curl "http://localhost:8880/akara.webfeed.json?url=http://feeds.delicious.com/v2/rss/recent%3Fmin=1%26count=15"
    * curl http://localhost:8880/akara.webfeed.json?url=http://localhost:8880/akara.aggregate.atom
    """
    import feedparser  # From http://www.feedparser.org/
    feed = feedparser.parse(url)
    # Note: bad URLs might mean the feed doesn't have headers

    def process_entry(e):
        data = {
            u'id': e.link,
            u'label': e.link,
            u'title': e.title,
            u'link': e.link,
            u'updated': e.updated,
        }
        # Optional bits. BUG FIX: these must be tested on the *feed entry*
        # `e`, not on the dict being built -- the old `'content' in data`
        # checks were always False, so the optional fields were never copied.
        if 'content' in e:
            data[u'content'] = e.content
        if 'description' in e:
            data[u'description'] = e.description
        if 'author_detail' in e:
            data[u'author_name'] = e.author_detail.name
        return data

    entries = [process_entry(e) for e in feed.entries]
    return json.dumps({'items': entries}, indent=4)
RDF_IMT = 'application/rdf+xml'
ATOM_IMT = 'application/atom+xml'

# Read RSS2, and generate Atom or other format
SERVICE_ID = 'http://purl.org/akara/services/demo/rss2translate'
@simple_service('GET', SERVICE_ID, 'akara.rss2translate')
def rss2translate(url=None, format=None):
    """Convert RSS 2.0 feed to Atom or RSS 1.0

    Sample request:
    * curl "http://localhost:8880/akara.rss2translate?url=http://feeds.delicious.com/v2/rss/recent"

    This is a demo and is not meant as an industrial-strength converter.
    """
    # Support connection-negotiation in addition to query parameter
    if not format:
        accepted_imts = request.environ.get('HTTP_ACCEPT', '').split(',')
        imt = first_item(dropwhile(lambda x: '*' in x, accepted_imts))
        # BUG FIX: compare against the RDF_IMT constant, not the literal
        # string 'RDF_IMT' -- the old test could never be true, so RSS 1.0
        # output was unreachable via content negotiation.
        if imt == RDF_IMT:
            format = 'rss1'
        else:
            format = 'atom'

    if not url:
        raise AssertionError("The 'url' query parameter is mandatory.")

    import feedparser  # From http://www.feedparser.org/
    feed = feedparser.parse(url)
    # Note: bad URLs might mean the feed doesn't have headers
    logger.debug('Feed info: ' + repr((url, feed.version, feed.encoding, feed.headers.get('Content-type'))))

    updated = getattr(feed.feed, 'updated_parsed', None)
    if updated:
        # struct_time[:6] is (year, month, day, hour, minute, second).
        # BUG FIX: the old [:7] slice also passed tm_wday, which datetime()
        # interprets as microseconds.
        updated = datetime(*updated[:6]).isoformat()

    f = atomtools.feed(title=feed.feed.title, updated=updated, id=feed.feed.link)
    for e in feed.entries:
        updated = getattr(e, 'updated_parsed', None)
        if updated:
            updated = datetime(*updated[:6]).isoformat()
        links = [
            #FIXME: self?
            (e.link, u'alternate'),
        ]
        f.append(
            e.link,
            e.title,
            updated=updated,
            summary=e.description,
            #e.author_detail.name
            #authors=authors,
            links=links,
        )

    # Serialize in the negotiated/selected format and set the matching
    # Content-Type header.
    if format == 'atom':
        result = f.xml_encode()
        response.add_header("Content-Type", ATOM_IMT)
    else:
        result = f.rss1format()
        response.add_header("Content-Type", RDF_IMT)
    return result
| uogbuji/akara | lib/demo/atomtools.py | Python | apache-2.0 | 6,737 |
# -*- coding: ascii -*-
# $Id$
#
# Author: vvlachoudis@gmail.com
# Date: 18-Jun-2015
__author__ = "Vasilis Vlachoudis"
__email__ = "vvlachoudis@gmail.com"
try:
from Tkinter import *
except ImportError:
from tkinter import *
import tkExtra
import Utils
import Ribbon
import CNCList
import CNCRibbon
from CNCCanvas import ACTION_MOVE, ACTION_ORIGIN
#===============================================================================
# Clipboard Group
#===============================================================================
class ClipboardGroup(CNCRibbon.ButtonGroup):
    """Ribbon group with the clipboard actions: Paste, Cut and Copy.

    Each button fires a virtual event (<<Paste>>, <<Cut>>, <<Copy>>) that
    the application binds elsewhere.
    """

    def __init__(self, master, app):
        CNCRibbon.ButtonGroup.__init__(self, master, N_("Clipboard"), app)
        self.grid2rows()

        # --- Paste: large button spanning both rows ---
        b = Ribbon.LabelButton(self.frame, self, "<<Paste>>",
                image=Utils.icons["paste32"],
                text=_("Paste"),
                compound=TOP,
                takefocus=FALSE,
                background=Ribbon._BACKGROUND)
        b.grid(row=0, column=0, rowspan=2, padx=0, pady=0, sticky=NSEW)
        tkExtra.Balloon.set(b, _("Paste [Ctrl-V]"))
        self.addWidget(b)

        # --- Cut ---
        b = Ribbon.LabelButton(self.frame, self, "<<Cut>>",
                image=Utils.icons["cut"],
                text=_("Cut"),
                compound=LEFT,
                anchor=W,
                takefocus=FALSE,
                background=Ribbon._BACKGROUND)
        tkExtra.Balloon.set(b, _("Cut [Ctrl-X]"))
        b.grid(row=0, column=1, padx=0, pady=1, sticky=NSEW)
        self.addWidget(b)

        # --- Copy ---
        b = Ribbon.LabelButton(self.frame, self, "<<Copy>>",
                image=Utils.icons["copy"],
                text=_("Copy"),
                compound=LEFT,
                anchor=W,
                takefocus=FALSE,
                background=Ribbon._BACKGROUND)
        tkExtra.Balloon.set(b, _("Copy [Ctrl-C]"))
        b.grid(row=1, column=1, padx=0, pady=1, sticky=NSEW)
        self.addWidget(b)
#===============================================================================
# Select Group
#===============================================================================
class SelectGroup(CNCRibbon.ButtonGroup):
    """Ribbon group for block selection: All/None/Invert/Layer plus a
    free-text filter entry that issues a FILTER command."""

    def __init__(self, master, app):
        CNCRibbon.ButtonGroup.__init__(self, master, N_("Select"), app)
        self.grid3rows()

        # --- Select all ---
        col,row=0,0
        b = Ribbon.LabelButton(self.frame, app, "<<SelectAll>>",
                image=Utils.icons["select_all"],
                text=_("All"),
                compound=LEFT,
                anchor=W,
                background=Ribbon._BACKGROUND)
        b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
        tkExtra.Balloon.set(b, _("Select all blocks [Ctrl-A]"))
        self.addWidget(b)

        # --- Select none ---
        col += 1
        b = Ribbon.LabelButton(self.frame, app, "<<SelectNone>>",
                image=Utils.icons["select_none"],
                text=_("None"),
                compound=LEFT,
                anchor=W,
                background=Ribbon._BACKGROUND)
        b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
        tkExtra.Balloon.set(b, _("Unselect all blocks [Ctrl-Shift-A]"))
        self.addWidget(b)

        # --- Invert selection ---
        col,row=0,1
        b = Ribbon.LabelButton(self.frame, app, "<<SelectInvert>>",
                image=Utils.icons["select_invert"],
                text=_("Invert"),
                compound=LEFT,
                anchor=W,
                background=Ribbon._BACKGROUND)
        b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
        tkExtra.Balloon.set(b, _("Invert selection [Ctrl-I]"))
        self.addWidget(b)

        # --- Select current layer ---
        col += 1
        b = Ribbon.LabelButton(self.frame, app, "<<SelectLayer>>",
                image=Utils.icons["select_layer"],
                text=_("Layer"),
                compound=LEFT,
                anchor=W,
                background=Ribbon._BACKGROUND)
        b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
        tkExtra.Balloon.set(b, _("Select all blocks from current layer"))
        self.addWidget(b)

        # --- Filter entry: Enter triggers self.filter ---
        col, row = 0,2
        self.filterString = tkExtra.LabelEntry(self.frame,
                "Filter",
                "DarkGray",
                background="White",
                width=16)
        self.filterString.grid(row=row, column=col, columnspan=2, padx=0, pady=0, sticky=NSEW)
        tkExtra.Balloon.set(self.filterString, _("Filter blocks"))
        self.addWidget(self.filterString)
        self.filterString.bind("<Return>", self.filter)
        self.filterString.bind("<KP_Enter>", self.filter)

    #-----------------------------------------------------------------------
    def filter(self, event=None):
        """Send the entry's text to the application as a FILTER command."""
        txt = self.filterString.get()
        self.app.insertCommand("FILTER %s"%(txt), True)
#===============================================================================
# Edit Group
#===============================================================================
class EditGroup(CNCRibbon.ButtonMenuGroup):
    """Ribbon group with editing actions (Add/Clone/Delete, enable toggles,
    expand/collapse) plus an overflow menu (Color/Import/Inkscape/Round)."""

    def __init__(self, master, app):
        # Overflow menu entries: (label, icon, callback)
        CNCRibbon.ButtonMenuGroup.__init__(self, master, N_("Edit"), app,
            [(_("Color"), "color", lambda a=app:a.event_generate("<<ChangeColor>>")),
             (_("Import"), "load", lambda a=app:a.insertCommand("IMPORT",True)),
             (_("Inkscape"), "inkscape", lambda a=app:a.insertCommand("INKSCAPE all",True)),
             (_("Round"), "digits", lambda s=app:s.insertCommand("ROUND", True))
            ])
        self.grid3rows()

        # --- Add ---
        col,row=0,0
        b = Ribbon.LabelButton(self.frame, app, "<<Add>>",
                image=Utils.icons["add"],
                text=_("Add"),
                compound=LEFT,
                anchor=W,
                background=Ribbon._BACKGROUND)
        b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
        tkExtra.Balloon.set(b, _("Insert a new block or line of code [Ins or Ctrl-Enter]"))
        self.addWidget(b)

        # --- Clone ---
        row += 1
        b = Ribbon.LabelButton(self.frame, app, "<<Clone>>",
                image=Utils.icons["clone"],
                text=_("Clone"),
                compound=LEFT,
                anchor=W,
                background=Ribbon._BACKGROUND)
        b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
        tkExtra.Balloon.set(b, _("Clone selected lines or blocks [Ctrl-D]"))
        self.addWidget(b)

        # --- Delete ---
        row += 1
        b = Ribbon.LabelButton(self.frame, app, "<<Delete>>",
                image=Utils.icons["x"],
                text=_("Delete"),
                compound=LEFT,
                anchor=W,
                background=Ribbon._BACKGROUND)
        b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
        tkExtra.Balloon.set(b, _("Delete selected lines or blocks [Del]"))
        self.addWidget(b)

        # --- Enable/disable toggle (icon only) ---
        col,row=1,0
        b = Ribbon.LabelButton(self.frame, self.app, "<<EnableToggle>>",
                image=Utils.icons["toggle"],
                #text=_("Toggle"),
                #compound=LEFT,
                anchor=W,
                background=Ribbon._BACKGROUND)
        b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
        tkExtra.Balloon.set(b, _("Toggle enable/disable block of g-code [Ctrl-L]"))
        self.addWidget(b)

        # Drop-down with explicit Enable/Disable actions
        menulist = [ (_("Enable"), "enable",
                      lambda a=self.app : a.event_generate("<<Enable>>")),
                     (_("Disable"), "disable",
                      lambda a=self.app : a.event_generate("<<Disable>>"))]
        b = Ribbon.MenuButton(self.frame, menulist,
                text=_("Active"),
                image=Utils.icons["triangle_down"],
                compound=RIGHT,
                anchor=W,
                background=Ribbon._BACKGROUND)
        b.grid(row=row, column=col+1, padx=0, pady=0, sticky=NSEW)
        tkExtra.Balloon.set(b, _("Enable or disable blocks of gcode"))

        # --- Expand/collapse ---
        row += 1
        b = Ribbon.LabelButton(self.frame, self.app, "<<Expand>>",
                image=Utils.icons["expand"],
                text=_("Expand"),
                compound=LEFT,
                anchor=W,
                background=Ribbon._BACKGROUND)
        b.grid(row=row, column=col, columnspan=2, padx=0, pady=0, sticky=NSEW)
        tkExtra.Balloon.set(b, _("Toggle expand/collapse blocks of gcode [Ctrl-E]"))
        self.addWidget(b)

        # --- Comment (placeholder: permanently disabled, no event bound) ---
        row += 1
        b = Ribbon.LabelButton(self.frame,
                image=Utils.icons["comment"],
                text=_("Comment"),
                compound=LEFT,
                anchor=W,
                state=DISABLED,
                background=Ribbon._BACKGROUND)
        b.grid(row=row, column=col, columnspan=2, padx=0, pady=0, sticky=NSEW)
        tkExtra.Balloon.set(b, _("(Un)Comment selected lines"))
        self.addWidget(b)
#===============================================================================
# Move Group
#===============================================================================
class MoveGroup(CNCRibbon.ButtonMenuGroup):
	"""Ribbon group selecting the canvas mouse action (Move / Origin)."""
	def __init__(self, master, app):
		CNCRibbon.ButtonMenuGroup.__init__(self, master, N_("Move"), app)
		self.grid3rows()
		# Two mutually exclusive radio buttons bound to the same canvas
		# action variable: (icon, label, action value, callback, tooltip).
		specs = (
			("move32", _("Move"), ACTION_MOVE, app.canvas.setActionMove,
				_("Move objects [M]")),
			("origin32", _("Origin"), ACTION_ORIGIN, app.canvas.setActionOrigin,
				_("Move all gcode such as origin is on mouse location [O]")),
		)
		for column, (icon, label, action, callback, tip) in enumerate(specs):
			btn = Ribbon.LabelRadiobutton(self.frame,
					image=Utils.icons[icon],
					text=label,
					compound=TOP,
					anchor=W,
					variable=app.canvas.actionVar,
					value=action,
					command=callback,
					background=Ribbon._BACKGROUND)
			btn.grid(row=0, column=column, rowspan=3, padx=0, pady=0, sticky=NSEW)
			tkExtra.Balloon.set(btn, tip)
			self.addWidget(btn)

	#----------------------------------------------------------------------
	def createMenu(self):
		"""Build the drop-down menu with the nine MOVE-to-anchor commands."""
		menu = Menu(self, tearoff=0)
		entries = (
			("TL", _("Top-Left"), "MOVE TL"),
			("LC", _("Left"), "MOVE LC"),
			("BL", _("Bottom-Left"), "MOVE BL"),
			("TC", _("Top"), "MOVE TC"),
			("center", _("Center"), "MOVE CENTER"),
			("BC", _("Bottom"), "MOVE BC"),
			("TR", _("Top-Right"), "MOVE TR"),
			("RC", _("Right"), "MOVE RC"),
			("BR", _("Bottom-Right"), "MOVE BR"))
		for icon, label, command in entries:
			# bind `command` as a lambda default so each entry keeps its own
			menu.add_command(label=label,
					image=Utils.icons[icon], compound=LEFT,
					command=lambda a=self.app, c=command: a.insertCommand(c, True))
		return menu
#===============================================================================
# Order Group
#===============================================================================
class OrderGroup(CNCRibbon.ButtonMenuGroup):
	"""Ribbon group re-ordering blocks: move Up/Down, Invert, Optimize."""
	def __init__(self, master, app):
		# The Optimize action lives only in the group's drop-down menu.
		CNCRibbon.ButtonMenuGroup.__init__(self, master, N_("Order"), app,
			[(_("Optimize"), "optimize", lambda a=app:a.insertCommand("OPTIMIZE",True)),
			])
		self.grid2rows()
		# NOTE(review): three rows are populated although grid2rows() was
		# requested — confirm the intended layout.
		# (event, icon, label, tooltip) for each button in column 0.
		buttons = (
			("<Control-Key-Prior>", "up", _("Up"),
				_("Move selected g-code up [Ctrl-Up, Ctrl-PgUp]")),
			("<Control-Key-Next>", "down", _("Down"),
				_("Move selected g-code down [Ctrl-Down, Ctrl-PgDn]")),
			("<<Invert>>", "swap", _("Invert"),
				_("Invert cutting order of selected blocks")),
		)
		for row, (event, icon, label, tip) in enumerate(buttons):
			btn = Ribbon.LabelButton(self.frame, self, event,
					image=Utils.icons[icon],
					text=label,
					compound=LEFT,
					anchor=W,
					background=Ribbon._BACKGROUND)
			btn.grid(row=row, column=0, padx=0, pady=0, sticky=NSEW)
			tkExtra.Balloon.set(btn, tip)
			self.addWidget(btn)
#===============================================================================
# Transform Group
#===============================================================================
class TransformGroup(CNCRibbon.ButtonGroup):
	"""Ribbon group applying geometric transforms to the selection.

	Every button only queues the corresponding editor command through
	app.insertCommand(cmd, True); the actual geometry work is done by the
	command processor.
	"""
	def __init__(self, master, app):
		CNCRibbon.ButtonGroup.__init__(self, master, N_("Transform"), app)
		self.grid3rows()
		# One entry per button: (column, row, icon, label, command, tooltip).
		# Column 0 holds the rotations, column 1 the mirrors.
		buttons = (
			(0, 0, "rotate90", _("CW"), "ROTATE CW",
				_("Rotate selected gcode clock-wise (-90deg)")),
			(0, 1, "rotate180", _("Flip"), "ROTATE FLIP",
				_("Rotate selected gcode by 180deg")),
			(0, 2, "rotate270", _("CCW"), "ROTATE CCW",
				_("Rotate selected gcode counter-clock-wise (90deg)")),
			(1, 0, "flip-horizontal", _("Horizontal"), "MIRROR horizontal",
				_("Mirror horizontally X=-X selected gcode")),
			(1, 1, "flip-vertical", _("Vertical"), "MIRROR vertical",
				_("Mirror vertically Y=-Y selected gcode")),
		)
		for col, row, icon, label, cmd, tip in buttons:
			# Bind cmd as a default argument so each lambda keeps its own
			# command (avoids the late-binding closure pitfall).
			b = Ribbon.LabelButton(self.frame,
					image=Utils.icons[icon],
					text=label,
					compound=LEFT,
					anchor=W,
					command=lambda s=app, c=cmd: s.insertCommand(c, True),
					background=Ribbon._BACKGROUND)
			b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
			tkExtra.Balloon.set(b, tip)
			self.addWidget(b)
#		submenu.add_command(label=_("Rotate command"), underline=0,
#				command=lambda s=self:s.insertCommand("ROTATE ang x0 y0", False))
#===============================================================================
# Route Group
#===============================================================================
class RouteGroup(CNCRibbon.ButtonGroup):
	"""Ribbon group controlling the cut direction of selected blocks."""
	def __init__(self, master, app):
		CNCRibbon.ButtonGroup.__init__(self, master, N_("Route"), app)
		self.grid3rows()
		# (icon, label, editor command, tooltip) for the three buttons.
		actions = (
			("conventional", _("Conventional"), "DIRECTION CONVENTIONAL",
				_("Change cut direction to conventional for selected gcode blocks")),
			("climb", _("Climb"), "DIRECTION CLIMB",
				_("Change cut direction to climb for selected gcode blocks")),
			("reverse", _("Reverse"), "REVERSE",
				_("Reverse cut direction for selected gcode blocks")),
		)
		for row, (icon, label, cmd, tip) in enumerate(actions):
			# cmd is bound as a default argument to avoid late binding.
			btn = Ribbon.LabelButton(self.frame,
					image=Utils.icons[icon],
					text=label,
					compound=LEFT,
					anchor=W,
					command=lambda s=app, c=cmd: s.insertCommand(c, True),
					background=Ribbon._BACKGROUND)
			btn.grid(row=row, column=0, padx=0, pady=0, sticky=NSEW)
			tkExtra.Balloon.set(btn, tip)
			self.addWidget(btn)
#===============================================================================
# Info Group
#===============================================================================
class InfoGroup(CNCRibbon.ButtonGroup):
	"""Ribbon group with informational actions (statistics / cut info)."""
	def __init__(self, master, app):
		CNCRibbon.ButtonGroup.__init__(self, master, N_("Info"), app)
		self.grid2rows()
		# (icon, label, callback, tooltip) for each button in column 0.
		entries = (
			("stats", _("Statistics"), app.showStats,
				_("Show statistics for enabled gcode")),
			("info", _("Info"), app.showInfo,
				_("Show cutting information on selected blocks [Ctrl-n]")),
		)
		for row, (icon, label, callback, tip) in enumerate(entries):
			btn = Ribbon.LabelButton(self.frame,
					image=Utils.icons[icon],
					text=label,
					compound=LEFT,
					anchor=W,
					command=callback,
					background=Ribbon._BACKGROUND)
			btn.grid(row=row, column=0, padx=0, pady=0, sticky=NSEW)
			tkExtra.Balloon.set(btn, tip)
			self.addWidget(btn)
#===============================================================================
# Main Frame of Editor
#===============================================================================
class EditorFrame(CNCRibbon.PageFrame):
	"""Main page frame: the g-code listbox plus its vertical scrollbar."""
	def __init__(self, master, app):
		CNCRibbon.PageFrame.__init__(self, master, "Editor", app)
		self.editor = CNCList.CNCListbox(self, app,
				selectmode=EXTENDED,
				exportselection=0,
				background="White")
		self.addWidget(self.editor)
		scrollbar = Scrollbar(self, orient=VERTICAL, command=self.editor.yview)
		self.editor.config(yscrollcommand=scrollbar.set)
		# pack the listbox first so it claims the expandable space
		self.editor.pack(side=LEFT, expand=TRUE, fill=BOTH)
		scrollbar.pack(side=RIGHT, fill=Y)
#===============================================================================
# Editor Page
#===============================================================================
class EditorPage(CNCRibbon.Page):
	"""Ribbon page that hosts the g-code editor groups and frame."""
	__doc__ = _("GCode editor")	# translated description shown by the framework
	_name_ = N_("Editor")		# page name (marked for translation)
	_icon_ = "edit"			# icon name looked up by the ribbon framework
	#----------------------------------------------------------------------
	# Add a widget in the widgets list to enable disable during the run
	#----------------------------------------------------------------------
	def register(self):
		# Register the ribbon groups and the page frame with the framework.
		self._register((ClipboardGroup, SelectGroup, EditGroup, MoveGroup,
				OrderGroup, TransformGroup, RouteGroup, InfoGroup),
				(EditorFrame,))
| carlosgs/bCNC | EditorPage.py | Python | gpl-2.0 | 17,227 |
import os
from imgurpython import ImgurClient
import markov
import keys
import image_maker
class ImageMakerGlue():
    """Glue object: generates comic dialog with a Markov chain and uploads
    the rendered image to Imgur (Python 2 code).
    """
    def __init__(self, file):
        # NOTE(review): parameter name `file` shadows the Python 2 builtin.
        # Order-2 Markov chain built from the given corpus file.
        self.m = markov.Markov(2, file)
        print "Loaded corpus"
        self.client = ImgurClient(keys.imgur_client_id, keys.imgur_client_secret)
        print "Connected to Imgur"
    def gen(self, msg, max=None):
        """Generate a comic seeded with `msg`; return the Imgur link.

        NOTE(review): `max` is accepted but never used (and shadows the
        builtin) — confirm whether callers still pass it before removing.
        """
        # pick() returns a comic template and the per-bubble text lengths.
        comic, lengths = image_maker.pick()
        dialog = []
        for l in lengths:
            dialog.append(self.m.gen(msg, l))
            # Only the first bubble is seeded with the caller's text.
            msg = ''
        print dialog
        file_name = image_maker.make(comic, dialog)
        link = str(self.client.upload_from_path(file_name)["link"])
        # The rendered image is temporary; delete it once uploaded.
        os.unlink(file_name)
        return link
# Manual smoke test: build a chain from the bundled corpus and upload one comic.
if __name__ == "__main__":
    i = ImageMakerGlue("lines.txt")
    print i.gen("")
| BenjaminFair/hackohio2015 | image_maker_glue.py | Python | gpl-2.0 | 732 |
from main import normaliseAndScale, readData, binariseLabels, getTrueLabels, buildDataMatrix, removePacketsAfterChange, printMetrics
from voting import Voting
import pickle
from sklearn import metrics
from matplotlib import pyplot as plt
import matplotlib2tikz
# models = {
# "boost": dict(),
# "logistic": dict(),
# "SVM_linear": dict(),
# "SVM_RBF": dict(),
# "SVM_poly": dict(),
# "SVM_sigmoid": dict()
# }
# Which of the three recorded datasets is held out for testing (0-based).
test_set = 2
# How many pickled models of each kind were trained per dataset/target.
number_of_models = {
    "boost": 4,
    "logistic": 4,
}
print "Test set:", (test_set+1)
print "Used models:", number_of_models
# Load and normalise the feature data and the target labels (Python 2 script).
test_data = normaliseAndScale(readData("..\\data\\test5_results_" + str(test_set + 1) + "_all.csv"))
test_labels = readData("..\\data\\test5_targets_" + str(test_set + 1) + ".csv")
labels = getTrueLabels(test_labels)
# One binary label vector and one data matrix per target (targets 1..3).
binarised_labels = [binariseLabels(labels, target) for target in [1,2,3]]
data_matrix = [buildDataMatrix(test_data, target) for target in [0,1,2]]
for target in [0,1,2]:
    data_matrix[target], binarised_labels[target] =\
        removePacketsAfterChange(data_matrix[target], binarised_labels[target], test_labels, 256)
print "The data has been read in!"
# Load every pickled model trained on the two datasets that are not held out.
estimators = {
    1: [],
    2: [],
    3: []
}
for target in [1, 2, 3]:
    for dataset in [0, 1, 2]:
        if dataset != test_set:
            for model_name in number_of_models:
                for k in range(number_of_models[model_name]):
                    file_name = model_name + "_dataset_" + str(dataset) + "_target_" + str(target) + "_" + str(k) + ".pkl"
                    # Python 2 `file()` builtin opens the pickle for reading.
                    estimators[target].append((file_name, pickle.load(file('../pickle/models/' + file_name))))
# NOTE(review): `type` shadows the builtin; it selects soft/hard voting below.
type = "soft"
voters = {
    1: Voting(estimators[1], voting=type), #, weights={True: 0.8, False: 0.2}
    2: Voting(estimators[2], voting=type),
    3: Voting(estimators[3], voting=type)
}
# voters = { # Estimators used in DM project
#     1: Voting(estimators[1][8:10]+estimators[1][23:24], voting=type), #, weights={True: 0.8, False: 0.2}
#     2: Voting(estimators[2][0:8]+estimators[2][21:22], voting=type),
#     3: Voting(estimators[3][0:8]+estimators[3][23:24], voting=type)
# }
print "Models have been read in!"
# Evaluate each target: combine the member outputs, then draw ROC and
# prediction-vs-label plots into a 2x2 figure.
for target in [1, 2, 3]:
    decision = voters[target].transform(data_matrix[target-1])
    if type == "soft":
        # soft voting: sum the per-model score arrays
        decision = sum(decision).transpose()[0]
    elif type == "hard":
        # hard voting: sum the per-model votes
        decision = sum(decision.transpose())
    fpr, tpr, threshold = metrics.roc_curve(binarised_labels[target-1], decision, pos_label=True)
    # printMetrics(fpr, tpr, threshold, 0.99, decision[0], binarised_labels[target-1])
    # printMetrics(fpr, tpr, threshold, 1, decision[0], binarised_labels[target-1])
    prediction = printMetrics(fpr, tpr, threshold, 0.01, decision, binarised_labels[target-1])
    printMetrics(fpr, tpr, threshold, 0, decision, binarised_labels[target-1])
    plt.subplot(2, 2, 1)
    plt.plot(fpr, tpr)
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.plot((0, 1), (0, 1))
    plt.subplot(2, 2, target+1)
    axes = plt.gca()
    axes.set_ylim([-0.1, 1.1])
    plt.plot(map(lambda x: x, prediction))
    plt.plot(binarised_labels[target-1], "--")
matplotlib2tikz.save("roc.tex")
plt.show()
| kahvel/MAProject | src/test_models.py | Python | mit | 3,209 |
#!/usr/bin/python3
import json
import logger
import os.path
DATA_FILE_NAME = 'users.json'
def save_to_file(data):
    """Saves python data to JSON file named :py:const:`DATA_FILE_NAME`.

    Args:
        data (dict): Python dict to save
    """
    # `with` guarantees the handle is closed even if json.dump raises.
    with open(DATA_FILE_NAME, 'w') as f:
        json.dump(data, f)
def is_user_registered(user_id):
    """Checks if user registered in system

    Args:
        user_id (int): Telegram id of user to check

    Returns:
        bool: Check result
    """
    # Keys of the module-level `data` dict are string user ids.
    return str(user_id) in data
def register_user(user_id):
    """Registers user with id `user_id` in system. By default, `admin` is False and there are no groups

    Args:
        user_id (int): Telegram id of user to register

    Note:
        NOTE(review): unlike the other mutators in this module, this does
        not call save_to_file(); direct callers must persist themselves —
        confirm this is intended.
    """
    global data
    data[str(user_id)] = {'admin': False, 'groups': []}
def is_user_admin(user_id):
    """Checks if user admin or not

    Args:
        user_id (int): Telegram id of user to check

    Returns:
        bool: Check result
    """
    # Missing users are simply not admins.
    entry = data.get(str(user_id))
    return entry is not None and entry['admin']
def set_user_admin(user_id, admin_flag):
    """Sets user admin status

    Args:
        user_id (int): Telegram id of user to change
        admin_flag (bool): New state of admin flag
    """
    global data
    uid = str(user_id)
    # Unknown users are registered on the fly before flipping the flag.
    if uid not in data:
        register_user(uid)
    data[uid]['admin'] = admin_flag
    save_to_file(data)
def is_user_in_group(user_id, group):
    """Checks whether user in group

    Args:
        user_id (int): Telegram id of user to check
        group (str): Name of group

    Returns:
        bool: Check result
    """
    entry = data.get(str(user_id))
    return entry is not None and group in entry['groups']
def list_groups(user_id):
    """List groups user members in

    Args:
        user_id (int): Telegram id of user to check

    Returns:
        list: List of groups
    """
    # Unknown users have no groups; known users return their live list.
    entry = data.get(str(user_id))
    return entry['groups'] if entry is not None else []
def add_user_to_group(user_id, group):
    """Add user to specified group

    Args:
        user_id (int): Telegram id of user to add to group
        group (str): The name of group
    """
    global data
    uid = str(user_id)
    if uid not in data:
        register_user(uid)
    # Only append and persist when the membership is actually new.
    if not is_user_in_group(uid, group):
        data[uid]['groups'].append(group)
        save_to_file(data)
def delete_user_from_group(user_id, group):
    """Delete user from specified group

    Args:
        user_id (int): Telegram id of user to delete from group
        group (str): The name of group
    """
    global data
    uid = str(user_id)
    # Removing a non-member is a no-op; nothing is persisted either.
    if is_user_in_group(uid, group):
        data[uid]['groups'].remove(group)
        save_to_file(data)
# Load the persisted user database at import time, falling back to an empty
# one whenever the file is missing or its content does not match the schema:
#   {"<user_id>": {"admin": bool, "groups": list}, ...}
try:
    # `with` closes the handle (the original `json.load(open(...))` leaked it).
    with open(DATA_FILE_NAME, 'r') as _f:
        data = json.load(_f)
    if not isinstance(data, dict):
        raise ValueError
    for u in data.values():
        # Every entry must be a dict with a boolean `admin` flag and a
        # `groups` list; isinstance also guards against non-dict entries
        # (the old `'admin' not in u` could raise TypeError on those).
        if (not isinstance(u, dict)
                or not isinstance(u.get('admin'), bool)
                or not isinstance(u.get('groups'), list)):
            raise ValueError
except FileNotFoundError:
    logger.info('File with users not found and will be created')
    data = {}
except ValueError:
    logger.warning('Invalid users file, creating new')
    data = {}
except Exception:
    # Was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit escape.
    logger.warning('Some error occured with users file, creating new')
    data = {}
| TCP-Joker-BOT/TCPJoker | extensions/users.py | Python | mit | 3,380 |
import sys
path = sys.argv[1]
file = open(path,'r')
for line in file:
#strip our line from everything that is not an integer
#we know the content of the line so it doesn't actually strip everything, but just the needed chars
j = line.translate(None,'abcehisourVZHWmpt,:')
k = j.strip()
#turn what's left to a list of integers
k = [int(v) for v in k.split()]
i = 0
total = 0
#do the math to calulate the total number of candies
total = (k[0] * 3 + k[1] * 4 + k[2] * 5) * k[3]
#print the total number of candies devided by the number of childern
print int(total/(k[0]+k[1]+k[2]))
| Da5hes/CodeEvalChallenges | Trick-or-Treat/Solution.py | Python | gpl-2.0 | 597 |
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import argparse
import multiprocessing
import platform
import android.adb.commands
from swift_build_support.swift_build_support import arguments
from swift_build_support.swift_build_support import host
from swift_build_support.swift_build_support import targets
from swift_build_support.swift_build_support import workspace
from swift_build_support.swift_build_support.targets import \
StdlibDeploymentTarget
from . import defaults
# Public API of this module: only the parser factory is exported.
__all__ = [
    'create_argument_parser',
]
class _ApplyDefaultsArgumentParser(argparse.ArgumentParser):
    """Wrapper class around the default ArgumentParser that allows for
    post-processing the parsed argument namespace to apply default argument
    transformations.

    The callable passed as `apply_defaults` is invoked on every namespace
    produced by parse_known_args (and therefore also by parse_args).
    """

    def __init__(self, apply_defaults=None, *args, **kwargs):
        self._apply_defaults = apply_defaults
        super(_ApplyDefaultsArgumentParser, self).__init__(*args, **kwargs)

    def parse_known_args(self, args=None, namespace=None):
        parsed_namespace, unparsed = \
            super(_ApplyDefaultsArgumentParser, self).parse_known_args(
                args, namespace)
        # Apply the registered post-processing hook before handing back.
        self._apply_defaults(parsed_namespace)
        return parsed_namespace, unparsed
def _apply_default_arguments(args):
    """Preprocess argument namespace to apply default behaviors.

    Mutates `args` in place: fills in unspecified build variants and
    assertion settings, expands the platform shorthand flags into their
    per-device/simulator flags, and infers cross-compilation platforms
    from explicitly configured stdlib deployment targets.
    """
    # Build cmark if any cmark-related options were specified.
    if (args.cmark_build_variant is not None):
        args.build_cmark = True

    # Build LLDB if any LLDB-related options were specified.
    if args.lldb_build_variant is not None or \
       args.lldb_assertions is not None:
        args.build_lldb = True

    # Set the default build variant.
    if args.build_variant is None:
        args.build_variant = "Debug"

    # Set the default stdlib-deployment-targets, if none were provided.
    if args.stdlib_deployment_targets is None:
        stdlib_targets = \
            StdlibDeploymentTarget.default_stdlib_deployment_targets()
        args.stdlib_deployment_targets = [
            target.name for target in stdlib_targets]

    # Each project's build variant falls back to the global one; cmark
    # additionally falls back through the Swift variant.
    if args.llvm_build_variant is None:
        args.llvm_build_variant = args.build_variant

    if args.swift_build_variant is None:
        args.swift_build_variant = args.build_variant

    if args.swift_stdlib_build_variant is None:
        args.swift_stdlib_build_variant = args.build_variant

    if args.cmark_build_variant is None:
        args.cmark_build_variant = args.swift_build_variant

    if args.lldb_build_variant is None:
        args.lldb_build_variant = args.build_variant

    if args.foundation_build_variant is None:
        args.foundation_build_variant = args.build_variant

    if args.libdispatch_build_variant is None:
        args.libdispatch_build_variant = args.build_variant

    if args.libicu_build_variant is None:
        args.libicu_build_variant = args.build_variant

    # Assertions are enabled by default.
    if args.assertions is None:
        args.assertions = True

    # Propagate the default assertions setting.
    if args.cmark_assertions is None:
        args.cmark_assertions = args.assertions

    if args.llvm_assertions is None:
        args.llvm_assertions = args.assertions

    if args.swift_assertions is None:
        args.swift_assertions = args.assertions

    if args.swift_stdlib_assertions is None:
        args.swift_stdlib_assertions = args.assertions

    # Set the default CMake generator.
    if args.cmake_generator is None:
        args.cmake_generator = "Ninja"

    # --ios-all etc are not supported by open-source Swift.
    if args.ios_all:
        raise ValueError("error: --ios-all is unavailable in open-source "
                         "Swift.\nUse --ios to skip iOS device tests.")

    if args.tvos_all:
        raise ValueError("error: --tvos-all is unavailable in open-source "
                         "Swift.\nUse --tvos to skip tvOS device tests.")

    if args.watchos_all:
        raise ValueError("error: --watchos-all is unavailable in open-source "
                         "Swift.\nUse --watchos to skip watchOS device tests.")

    # SwiftPM and XCTest have a dependency on Foundation.
    # On OS X, Foundation is built automatically using xcodebuild.
    # On Linux, we must ensure that it is built manually.
    if ((args.build_swiftpm or args.build_xctest) and
            platform.system() != "Darwin"):
        args.build_foundation = True

    # Foundation has a dependency on libdispatch.
    # On OS X, libdispatch is provided by the OS.
    # On Linux, we must ensure that it is built manually.
    if (args.build_foundation and
            platform.system() != "Darwin"):
        args.build_libdispatch = True

    # Propagate global --skip-build
    if args.skip_build:
        args.build_linux = False
        args.build_freebsd = False
        args.build_cygwin = False
        args.build_osx = False
        args.build_ios = False
        args.build_tvos = False
        args.build_watchos = False
        args.build_android = False
        args.build_benchmarks = False
        args.build_lldb = False
        args.build_llbuild = False
        args.build_swiftpm = False
        args.build_xctest = False
        args.build_foundation = False
        args.build_libdispatch = False
        args.build_libicu = False
        args.build_playgroundlogger = False
        args.build_playgroundsupport = False

    # --skip-{ios,tvos,watchos} or --skip-build-{ios,tvos,watchos} are
    # merely shorthands for --skip-build-{**os}-{device,simulator}
    if not args.ios or not args.build_ios:
        args.build_ios_device = False
        args.build_ios_simulator = False

    if not args.tvos or not args.build_tvos:
        args.build_tvos_device = False
        args.build_tvos_simulator = False

    if not args.watchos or not args.build_watchos:
        args.build_watchos_device = False
        args.build_watchos_simulator = False

    if not args.android or not args.build_android:
        args.build_android = False

    # --validation-test implies --test.
    if args.validation_test:
        args.test = True

    # --test-optimized implies --test.
    if args.test_optimized:
        args.test = True

    # --test-optimize-size implies --test.
    if args.test_optimize_for_size:
        args.test = True

    # --test-paths implies --test and/or --validation-test
    # depending on what directories/files have been specified.
    if args.test_paths:
        for path in args.test_paths:
            if path.startswith('test'):
                args.test = True
            elif path.startswith('validation-test'):
                args.test = True
                args.validation_test = True

    # If none of tests specified skip swift stdlib test on all platforms
    if not args.test and not args.validation_test and not args.long_test:
        args.test_linux = False
        args.test_freebsd = False
        args.test_cygwin = False
        args.test_osx = False
        args.test_ios = False
        args.test_tvos = False
        args.test_watchos = False

    # --skip-test-ios is merely a shorthand for host and simulator tests.
    if not args.test_ios:
        args.test_ios_device = False
        args.test_ios_simulator = False
    # --skip-test-tvos is merely a shorthand for host and simulator tests.
    if not args.test_tvos:
        args.test_tvos_device = False
        args.test_tvos_simulator = False
    # --skip-test-watchos is merely a shorthand for host and simulator
    # --tests.
    if not args.test_watchos:
        args.test_watchos_device = False
        args.test_watchos_simulator = False

    # --skip-build-{ios,tvos,watchos}-{device,simulator} implies
    # --skip-test-{ios,tvos,watchos}-{host,simulator}
    if not args.build_ios_device:
        args.test_ios_device = False
    if not args.build_ios_simulator:
        args.test_ios_simulator = False

    if not args.build_tvos_device:
        args.test_tvos_device = False
    if not args.build_tvos_simulator:
        args.test_tvos_simulator = False

    if not args.build_watchos_device:
        args.test_watchos_device = False
    if not args.build_watchos_simulator:
        args.test_watchos_simulator = False

    if not args.build_android:
        args.test_android_device = False

    # Without --host-test, no device testing happens at all.
    if not args.host_test:
        args.test_ios_device = False
        args.test_tvos_device = False
        args.test_watchos_device = False
        args.test_android_device = False

    if args.build_subdir is None:
        args.build_subdir = \
            workspace.compute_build_subdir(args)

    # Add optional stdlib-deployment-targets
    if args.android:
        args.stdlib_deployment_targets.append(
            StdlibDeploymentTarget.Android.armv7.name)

    # Infer platform flags from manually-specified configure targets.
    # This doesn't apply to Darwin platforms, as they are
    # already configured. No building without the platform flag, though.

    android_tgts = [tgt for tgt in args.stdlib_deployment_targets
                    if StdlibDeploymentTarget.Android.contains(tgt)]

    if not args.android and len(android_tgts) > 0:
        args.android = True
        args.build_android = False
def create_argument_parser():
"""Return a configured argument parser."""
parser = _ApplyDefaultsArgumentParser(
apply_defaults=_apply_default_arguments,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG)
parser.add_argument(
"-n", "--dry-run",
help="print the commands that would be executed, but do not execute "
"them",
action="store_true",
default=False)
parser.add_argument(
"--no-legacy-impl", dest="legacy_impl",
help="avoid legacy implementation",
action="store_false",
default=True)
targets_group = parser.add_argument_group(
title="Host and cross-compilation targets")
targets_group.add_argument(
"--host-target",
help="The host target. LLVM, Clang, and Swift will be built for this "
"target. The built LLVM and Clang will be used to compile Swift "
"for the cross-compilation targets.",
default=StdlibDeploymentTarget.host_target().name)
targets_group.add_argument(
"--cross-compile-hosts",
help="A space separated list of targets to cross-compile host Swift "
"tools for. Can be used multiple times.",
action=arguments.action.concat, type=arguments.type.shell_split,
default=[])
targets_group.add_argument(
"--stdlib-deployment-targets",
help="list of targets to compile or cross-compile the Swift standard "
"library for. %(default)s by default.",
action=arguments.action.concat, type=arguments.type.shell_split,
default=None)
targets_group.add_argument(
"--build-stdlib-deployment-targets",
help="A space-separated list that filters which of the configured "
"targets to build the Swift standard library for, or 'all'.",
type=arguments.type.shell_split, default=["all"])
projects_group = parser.add_argument_group(
title="Options to select projects")
projects_group.add_argument(
"-l", "--lldb",
help="build LLDB",
action="store_true",
dest="build_lldb")
projects_group.add_argument(
"-b", "--llbuild",
help="build llbuild",
action="store_true",
dest="build_llbuild")
projects_group.add_argument(
"-p", "--swiftpm",
help="build swiftpm",
action="store_true",
dest="build_swiftpm")
projects_group.add_argument(
"--xctest",
help="build xctest",
action=arguments.action.optional_bool,
dest="build_xctest")
projects_group.add_argument(
"--foundation",
help="build foundation",
action=arguments.action.optional_bool,
dest="build_foundation")
projects_group.add_argument(
"--libdispatch",
help="build libdispatch",
action=arguments.action.optional_bool,
dest="build_libdispatch")
projects_group.add_argument(
"--libicu",
help="build libicu",
action=arguments.action.optional_bool,
dest="build_libicu")
projects_group.add_argument(
"--playgroundlogger",
help="build playgroundlogger",
action="store_true",
dest="build_playgroundlogger")
projects_group.add_argument(
"--playgroundsupport",
help="build PlaygroundSupport",
action="store_true",
dest="build_playgroundsupport")
projects_group.add_argument(
"--build-ninja",
help="build the Ninja tool",
action=arguments.action.optional_bool)
extra_actions_group = parser.add_argument_group(
title="Extra actions to perform before or in addition to building")
extra_actions_group.add_argument(
"-c", "--clean",
help="do a clean build",
action="store_true")
extra_actions_group.add_argument(
"--export-compile-commands",
help="generate compilation databases in addition to building",
action=arguments.action.optional_bool)
extra_actions_group.add_argument(
"--symbols-package",
metavar="PATH",
help="if provided, an archive of the symbols directory will be "
"generated at this path")
build_variant_group = parser.add_mutually_exclusive_group(required=False)
build_variant_group.add_argument(
"-d", "--debug",
help="build the Debug variant of everything (LLVM, Clang, Swift host "
"tools, target Swift standard libraries, LLDB (if enabled) "
"(default)",
action="store_const",
const="Debug",
dest="build_variant")
build_variant_group.add_argument(
"-r", "--release-debuginfo",
help="build the RelWithDebInfo variant of everything (default is "
"Debug)",
action="store_const",
const="RelWithDebInfo",
dest="build_variant")
build_variant_group.add_argument(
"-R", "--release",
help="build the Release variant of everything (default is Debug)",
action="store_const",
const="Release",
dest="build_variant")
build_variant_override_group = parser.add_argument_group(
title="Override build variant for a specific project")
build_variant_override_group.add_argument(
"--debug-llvm",
help="build the Debug variant of LLVM",
action="store_const",
const="Debug",
dest="llvm_build_variant")
build_variant_override_group.add_argument(
"--debug-swift",
help="build the Debug variant of Swift host tools",
action="store_const",
const="Debug",
dest="swift_build_variant")
build_variant_override_group.add_argument(
"--debug-swift-stdlib",
help="build the Debug variant of the Swift standard library and SDK "
"overlay",
action="store_const",
const="Debug",
dest="swift_stdlib_build_variant")
build_variant_override_group.add_argument(
"--debug-lldb",
help="build the Debug variant of LLDB",
action="store_const",
const="Debug",
dest="lldb_build_variant")
build_variant_override_group.add_argument(
"--debug-cmark",
help="build the Debug variant of CommonMark",
action="store_const",
const="Debug",
dest="cmark_build_variant")
build_variant_override_group.add_argument(
"--debug-foundation",
help="build the Debug variant of Foundation",
action="store_const",
const="Debug",
dest="foundation_build_variant")
build_variant_override_group.add_argument(
"--debug-libdispatch",
help="build the Debug variant of libdispatch",
action="store_const",
const="Debug",
dest="libdispatch_build_variant")
build_variant_override_group.add_argument(
"--debug-libicu",
help="build the Debug variant of libicu",
action="store_const",
const="Debug",
dest="libicu_build_variant")
assertions_group = parser.add_mutually_exclusive_group(required=False)
assertions_group.add_argument(
"--assertions",
help="enable assertions in all projects",
action="store_const",
const=True,
dest="assertions")
assertions_group.add_argument(
"--no-assertions",
help="disable assertions in all projects",
action="store_const",
const=False,
dest="assertions")
assertions_override_group = parser.add_argument_group(
title="Control assertions in a specific project")
assertions_override_group.add_argument(
"--cmark-assertions",
help="enable assertions in CommonMark",
action="store_const",
const=True,
dest="cmark_assertions")
assertions_override_group.add_argument(
"--llvm-assertions",
help="enable assertions in LLVM",
action="store_const",
const=True,
dest="llvm_assertions")
assertions_override_group.add_argument(
"--no-llvm-assertions",
help="disable assertions in LLVM",
action="store_const",
const=False,
dest="llvm_assertions")
assertions_override_group.add_argument(
"--swift-assertions",
help="enable assertions in Swift",
action="store_const",
const=True,
dest="swift_assertions")
assertions_override_group.add_argument(
"--no-swift-assertions",
help="disable assertions in Swift",
action="store_const",
const=False,
dest="swift_assertions")
assertions_override_group.add_argument(
"--swift-stdlib-assertions",
help="enable assertions in the Swift standard library",
action="store_const",
const=True,
dest="swift_stdlib_assertions")
assertions_override_group.add_argument(
"--no-swift-stdlib-assertions",
help="disable assertions in the Swift standard library",
action="store_const",
const=False,
dest="swift_stdlib_assertions")
assertions_override_group.add_argument(
"--lldb-assertions",
help="enable assertions in LLDB",
action="store_const",
const=True,
dest="lldb_assertions")
assertions_override_group.add_argument(
"--no-lldb-assertions",
help="disable assertions in LLDB",
action="store_const",
const=False,
dest="lldb_assertions")
# FIXME: This should be one option using choices=[...]
cmake_generator_group = parser.add_argument_group(
title="Select the CMake generator")
cmake_generator_group.add_argument(
"-x", "--xcode",
help="use CMake's Xcode generator (default is Ninja)",
action="store_const",
const="Xcode",
dest="cmake_generator")
cmake_generator_group.add_argument(
"-m", "--make",
help="use CMake's Makefile generator (default is Ninja)",
action="store_const",
const="Unix Makefiles",
dest="cmake_generator")
cmake_generator_group.add_argument(
"-e", "--eclipse",
help="use CMake's Eclipse generator (default is Ninja)",
action="store_const",
const="Eclipse CDT4 - Ninja",
dest="cmake_generator")
run_tests_group = parser.add_argument_group(
title="Run tests")
# NOTE: We can't merge -t and --test, because nargs='?' makes
# `-ti` to be treated as `-t=i`.
run_tests_group.add_argument(
"-t",
help="test Swift after building",
action="store_const",
const=True,
dest="test")
run_tests_group.add_argument(
"--test",
help="test Swift after building",
action=arguments.action.optional_bool)
run_tests_group.add_argument(
"-T",
help="run the validation test suite (implies --test)",
action="store_const",
const=True,
dest="validation_test")
run_tests_group.add_argument(
"--validation-test",
help="run the validation test suite (implies --test)",
action=arguments.action.optional_bool)
run_tests_group.add_argument(
"--test-paths",
help="run tests located in specific directories and/or files \
(implies --test and/or --validation-test)",
action=arguments.action.concat, type=arguments.type.shell_split,
default=[])
run_tests_group.add_argument(
"-o",
help="run the test suite in optimized mode too (implies --test)",
action="store_const",
const=True,
dest="test_optimized")
run_tests_group.add_argument(
"--test-optimized",
help="run the test suite in optimized mode too (implies --test)",
action=arguments.action.optional_bool)
run_tests_group.add_argument(
"-s",
help="run the test suite in optimize for size mode too \
(implies --test)",
action="store_const",
const=True,
dest="test_optimize_for_size")
run_tests_group.add_argument(
"--test-optimize-for-size",
help="run the test suite in optimize for size mode too \
(implies --test)",
action=arguments.action.optional_bool)
run_tests_group.add_argument(
"--long-test",
help="run the long test suite",
action=arguments.action.optional_bool)
run_tests_group.add_argument(
"--host-test",
help="run executable tests on host devices (such as iOS or tvOS)",
action=arguments.action.optional_bool)
run_tests_group.add_argument(
"-B", "--benchmark",
help="run the Swift Benchmark Suite after building",
action="store_true")
run_tests_group.add_argument(
"--benchmark-num-o-iterations",
help="if the Swift Benchmark Suite is run after building, run N \
iterations with -O",
metavar='N', type=int, default=3)
run_tests_group.add_argument(
"--benchmark-num-onone-iterations",
help="if the Swift Benchmark Suite is run after building, run N \
iterations with -Onone", metavar='N', type=int, default=3)
run_tests_group.add_argument(
"--skip-test-osx",
dest='test_osx',
action=arguments.action.optional_false,
help="skip testing Swift stdlibs for Mac OS X")
run_tests_group.add_argument(
"--skip-test-linux",
dest='test_linux',
action=arguments.action.optional_false,
help="skip testing Swift stdlibs for Linux")
run_tests_group.add_argument(
"--skip-test-freebsd",
dest='test_freebsd',
action=arguments.action.optional_false,
help="skip testing Swift stdlibs for FreeBSD")
run_tests_group.add_argument(
"--skip-test-cygwin",
dest='test_cygwin',
action=arguments.action.optional_false,
help="skip testing Swift stdlibs for Cygwin")
parser.add_argument(
"--build-runtime-with-host-compiler",
help="Use the host compiler, not the self-built one to compile the "
"Swift runtime",
action=arguments.action.optional_bool)
run_build_group = parser.add_argument_group(
title="Run build")
run_build_group.add_argument(
"--build-swift-dynamic-stdlib",
help="build dynamic variants of the Swift standard library",
action=arguments.action.optional_bool,
default=True)
run_build_group.add_argument(
"--build-swift-static-stdlib",
help="build static variants of the Swift standard library",
action=arguments.action.optional_bool)
run_build_group.add_argument(
"--build-swift-dynamic-sdk-overlay",
help="build dynamic variants of the Swift SDK overlay",
action=arguments.action.optional_bool,
default=True)
run_build_group.add_argument(
"--build-swift-static-sdk-overlay",
help="build static variants of the Swift SDK overlay",
action=arguments.action.optional_bool)
run_build_group.add_argument(
"--build-swift-stdlib-unittest-extra",
help="Build optional StdlibUnittest components",
action=arguments.action.optional_bool)
run_build_group.add_argument(
"-S", "--skip-build",
help="generate build directory only without building",
action="store_true")
run_build_group.add_argument(
"--skip-build-linux",
dest='build_linux',
action=arguments.action.optional_false,
help="skip building Swift stdlibs for Linux")
run_build_group.add_argument(
"--skip-build-freebsd",
dest='build_freebsd',
action=arguments.action.optional_false,
help="skip building Swift stdlibs for FreeBSD")
run_build_group.add_argument(
"--skip-build-cygwin",
dest='build_cygwin',
action=arguments.action.optional_false,
help="skip building Swift stdlibs for Cygwin")
run_build_group.add_argument(
"--skip-build-osx",
dest='build_osx',
action=arguments.action.optional_false,
help="skip building Swift stdlibs for MacOSX")
run_build_group.add_argument(
"--skip-build-ios",
dest='build_ios',
action=arguments.action.optional_false,
help="skip building Swift stdlibs for iOS")
run_build_group.add_argument(
"--skip-build-ios-device",
dest='build_ios_device',
action=arguments.action.optional_false,
help="skip building Swift stdlibs for iOS devices "
"(i.e. build simulators only)")
run_build_group.add_argument(
"--skip-build-ios-simulator",
dest='build_ios_simulator',
action=arguments.action.optional_false,
help="skip building Swift stdlibs for iOS simulator "
"(i.e. build devices only)")
run_build_group.add_argument(
"--skip-build-tvos",
dest='build_tvos',
action=arguments.action.optional_false,
help="skip building Swift stdlibs for tvOS")
run_build_group.add_argument(
"--skip-build-tvos-device",
dest='build_tvos_device',
action=arguments.action.optional_false,
help="skip building Swift stdlibs for tvOS devices "
"(i.e. build simulators only)")
run_build_group.add_argument(
"--skip-build-tvos-simulator",
dest='build_tvos_simulator',
action=arguments.action.optional_false,
help="skip building Swift stdlibs for tvOS simulator "
"(i.e. build devices only)")
run_build_group.add_argument(
"--skip-build-watchos",
dest='build_watchos',
action=arguments.action.optional_false,
help="skip building Swift stdlibs for watchOS")
run_build_group.add_argument(
"--skip-build-watchos-device",
dest='build_watchos_device',
action=arguments.action.optional_false,
help="skip building Swift stdlibs for watchOS devices "
"(i.e. build simulators only)")
run_build_group.add_argument(
"--skip-build-watchos-simulator",
dest='build_watchos_simulator',
action=arguments.action.optional_false,
help="skip building Swift stdlibs for watchOS simulator "
"(i.e. build devices only)")
run_build_group.add_argument(
"--skip-build-android",
dest='build_android',
action=arguments.action.optional_false,
help="skip building Swift stdlibs for Android")
run_build_group.add_argument(
"--skip-build-benchmarks",
dest='build_benchmarks',
action=arguments.action.optional_false,
help="skip building Swift Benchmark Suite")
skip_test_group = parser.add_argument_group(
title="Skip testing specified targets")
skip_test_group.add_argument(
"--skip-test-ios",
dest='test_ios',
action=arguments.action.optional_false,
help="skip testing all iOS targets. Equivalent to specifying both "
"--skip-test-ios-simulator and --skip-test-ios-host")
skip_test_group.add_argument(
"--skip-test-ios-simulator",
dest='test_ios_simulator',
action=arguments.action.optional_false,
help="skip testing iOS simulator targets")
skip_test_group.add_argument(
"--skip-test-ios-32bit-simulator",
dest='test_ios_32bit_simulator',
action=arguments.action.optional_false,
help="skip testing iOS 32 bit simulator targets")
skip_test_group.add_argument(
"--skip-test-ios-host",
dest='test_ios_device',
action=arguments.action.optional_false,
help="skip testing iOS device targets on the host machine (the phone "
"itself)")
skip_test_group.add_argument(
"--skip-test-tvos",
dest='test_tvos',
action=arguments.action.optional_false,
help="skip testing all tvOS targets. Equivalent to specifying both "
"--skip-test-tvos-simulator and --skip-test-tvos-host")
skip_test_group.add_argument(
"--skip-test-tvos-simulator",
dest='test_tvos_simulator',
action=arguments.action.optional_false,
help="skip testing tvOS simulator targets")
skip_test_group.add_argument(
"--skip-test-tvos-host",
dest='test_tvos_device',
action=arguments.action.optional_false,
help="skip testing tvOS device targets on the host machine (the TV "
"itself)")
skip_test_group.add_argument(
"--skip-test-watchos",
dest='test_watchos',
action=arguments.action.optional_false,
help="skip testing all tvOS targets. Equivalent to specifying both "
"--skip-test-watchos-simulator and --skip-test-watchos-host")
skip_test_group.add_argument(
"--skip-test-watchos-simulator",
dest='test_watchos_simulator',
action=arguments.action.optional_false,
help="skip testing watchOS simulator targets")
skip_test_group.add_argument(
"--skip-test-watchos-host",
dest='test_watchos_device',
action=arguments.action.optional_false,
help="skip testing watchOS device targets on the host machine (the "
"watch itself)")
skip_test_group.add_argument(
"--skip-test-android-host",
dest='test_android_device',
action=arguments.action.optional_false,
help="skip testing Android device targets on the host machine (the "
"phone itself)")
parser.add_argument(
"-i", "--ios",
help="also build for iOS, but disallow tests that require an iOS "
"device",
action="store_true")
parser.add_argument(
"-I", "--ios-all",
help="also build for iOS, and allow all iOS tests",
action="store_true",
dest="ios_all")
parser.add_argument(
"--skip-ios",
help="set to skip everything iOS-related",
dest="ios",
action="store_false")
parser.add_argument(
"--tvos",
help="also build for tvOS, but disallow tests that require a tvos "
"device",
action=arguments.action.optional_bool)
parser.add_argument(
"--tvos-all",
help="also build for tvOS, and allow all tvOS tests",
action=arguments.action.optional_bool,
dest="tvos_all")
parser.add_argument(
"--skip-tvos",
help="set to skip everything tvOS-related",
dest="tvos",
action="store_false")
parser.add_argument(
"--watchos",
help="also build for watchOS, but disallow tests that require an "
"watchOS device",
action=arguments.action.optional_bool)
parser.add_argument(
"--watchos-all",
help="also build for Apple watchOS, and allow all Apple watchOS tests",
action=arguments.action.optional_bool,
dest="watchos_all")
parser.add_argument(
"--skip-watchos",
help="set to skip everything watchOS-related",
dest="watchos",
action="store_false")
parser.add_argument(
"--android",
help="also build for Android",
action=arguments.action.optional_bool)
parser.add_argument(
"--swift-analyze-code-coverage",
help="enable code coverage analysis in Swift (false, not-merged, "
"merged).",
choices=["false", "not-merged", "merged"],
# so CMake can see the inert mode as a false value
default=defaults.SWIFT_ANALYZE_CODE_COVERAGE,
dest="swift_analyze_code_coverage")
parser.add_argument(
"--build-subdir",
help="name of the directory under $SWIFT_BUILD_ROOT where the build "
"products will be placed",
metavar="PATH")
parser.add_argument(
"--install-prefix",
help="The installation prefix. This is where built Swift products "
"(like bin, lib, and include) will be installed.",
metavar="PATH",
default=targets.install_prefix())
parser.add_argument(
"--install-symroot",
help="the path to install debug symbols into",
metavar="PATH")
parser.add_argument(
"-j", "--jobs",
help="the number of parallel build jobs to use",
type=int,
dest="build_jobs",
default=multiprocessing.cpu_count())
parser.add_argument(
"--darwin-xcrun-toolchain",
help="the name of the toolchain to use on Darwin",
default=defaults.DARWIN_XCRUN_TOOLCHAIN)
parser.add_argument(
"--cmake",
help="the path to a CMake executable that will be used to build "
"Swift",
type=arguments.type.executable,
metavar="PATH")
parser.add_argument(
"--show-sdks",
help="print installed Xcode and SDK versions",
action=arguments.action.optional_bool)
parser.add_argument(
"--extra-swift-args",
help="Pass through extra flags to swift in the form of a cmake list "
"'module_regexp;flag'. Can be called multiple times to add "
"multiple such module_regexp flag pairs. All semicolons in flags "
"must be escaped with a '\\'",
action="append", dest="extra_swift_args", default=[])
llvm_group = parser.add_argument_group(
title="Build settings specific for LLVM")
llvm_group.add_argument(
'--llvm-targets-to-build',
help='LLVM target generators to build',
default="X86;ARM;AArch64;PowerPC;SystemZ;Mips")
android_group = parser.add_argument_group(
title="Build settings for Android")
android_group.add_argument(
"--android-ndk",
help="An absolute path to the NDK that will be used as a libc "
"implementation for Android builds",
metavar="PATH")
android_group.add_argument(
"--android-api-level",
help="The Android API level to target when building for Android. "
"Currently only 21 or above is supported",
default="21")
android_group.add_argument(
"--android-ndk-gcc-version",
help="The GCC version to use when building for Android. Currently "
"only 4.9 is supported. %(default)s is also the default value. "
"This option may be used when experimenting with versions "
"of the Android NDK not officially supported by Swift",
choices=["4.8", "4.9"],
default="4.9")
android_group.add_argument(
"--android-icu-uc",
help="Path to a directory containing libicuuc.so",
metavar="PATH")
android_group.add_argument(
"--android-icu-uc-include",
help="Path to a directory containing headers for libicuuc",
metavar="PATH")
android_group.add_argument(
"--android-icu-i18n",
help="Path to a directory containing libicui18n.so",
metavar="PATH")
android_group.add_argument(
"--android-icu-i18n-include",
help="Path to a directory containing headers libicui18n",
metavar="PATH")
android_group.add_argument(
"--android-deploy-device-path",
help="Path on an Android device to which built Swift stdlib products "
"will be deployed. If running host tests, specify the '{}' "
"directory.".format(android.adb.commands.DEVICE_TEMP_DIR),
default=android.adb.commands.DEVICE_TEMP_DIR,
metavar="PATH")
parser.add_argument(
"--host-cc",
help="the absolute path to CC, the 'clang' compiler for the host "
"platform. Default is auto detected.",
type=arguments.type.executable,
metavar="PATH")
parser.add_argument(
"--host-cxx",
help="the absolute path to CXX, the 'clang++' compiler for the host "
"platform. Default is auto detected.",
type=arguments.type.executable,
metavar="PATH")
parser.add_argument(
"--host-lipo",
help="the absolute path to lipo. Default is auto detected.",
type=arguments.type.executable,
metavar="PATH")
parser.add_argument(
"--host-libtool",
help="the absolute path to libtool. Default is auto detected.",
type=arguments.type.executable,
metavar="PATH")
parser.add_argument(
"--distcc",
help="use distcc in pump mode",
action=arguments.action.optional_bool)
parser.add_argument(
"--enable-asan",
help="enable Address Sanitizer",
action=arguments.action.optional_bool)
parser.add_argument(
"--enable-ubsan",
help="enable Undefined Behavior Sanitizer",
action=arguments.action.optional_bool)
parser.add_argument(
"--enable-tsan",
help="enable Thread Sanitizer for swift tools",
action=arguments.action.optional_bool)
parser.add_argument(
"--enable-tsan-runtime",
help="enable Thread Sanitizer on the swift runtime")
parser.add_argument(
"--enable-lsan",
help="enable Leak Sanitizer for swift tools",
action=arguments.action.optional_bool)
parser.add_argument(
"--compiler-vendor",
choices=["none", "apple"],
default=defaults.COMPILER_VENDOR,
help="Compiler vendor name")
parser.add_argument(
"--clang-compiler-version",
help="string that indicates a compiler version for Clang",
type=arguments.type.clang_compiler_version,
metavar="MAJOR.MINOR.PATCH")
parser.add_argument(
"--clang-user-visible-version",
help="User-visible version of the embedded Clang and LLVM compilers",
type=arguments.type.clang_compiler_version,
default=defaults.CLANG_USER_VISIBLE_VERSION,
metavar="MAJOR.MINOR.PATCH")
parser.add_argument(
"--swift-compiler-version",
help="string that indicates a compiler version for Swift",
type=arguments.type.swift_compiler_version,
metavar="MAJOR.MINOR")
parser.add_argument(
"--swift-user-visible-version",
help="User-visible version of the embedded Swift compiler",
type=arguments.type.swift_compiler_version,
default=defaults.SWIFT_USER_VISIBLE_VERSION,
metavar="MAJOR.MINOR")
parser.add_argument(
"--darwin-deployment-version-osx",
help="minimum deployment target version for OS X",
metavar="MAJOR.MINOR",
default=defaults.DARWIN_DEPLOYMENT_VERSION_OSX)
parser.add_argument(
"--darwin-deployment-version-ios",
help="minimum deployment target version for iOS",
metavar="MAJOR.MINOR",
default=defaults.DARWIN_DEPLOYMENT_VERSION_IOS)
parser.add_argument(
"--darwin-deployment-version-tvos",
help="minimum deployment target version for tvOS",
metavar="MAJOR.MINOR",
default=defaults.DARWIN_DEPLOYMENT_VERSION_TVOS)
parser.add_argument(
"--darwin-deployment-version-watchos",
help="minimum deployment target version for watchOS",
metavar="MAJOR.MINOR",
default=defaults.DARWIN_DEPLOYMENT_VERSION_WATCHOS)
parser.add_argument(
"--extra-cmake-options",
help="Pass through extra options to CMake in the form of comma "
"separated options '-DCMAKE_VAR1=YES,-DCMAKE_VAR2=/tmp'. Can be "
"called multiple times to add multiple such options.",
action=arguments.action.concat,
type=arguments.type.shell_split,
default=[])
parser.add_argument(
"--build-args",
help="arguments to the build tool. This would be prepended to the "
"default argument that is '-j8' when CMake generator is "
"\"Ninja\".",
type=arguments.type.shell_split,
default=[])
parser.add_argument(
"--verbose-build",
help="print the commands executed during the build",
action=arguments.action.optional_bool)
parser.add_argument(
"--lto",
help="use lto optimization on llvm/swift tools. This does not "
"imply using lto on the swift standard library or runtime. "
"Options: thin, full. If no optional arg is provided, full is "
"chosen by default",
metavar="LTO_TYPE",
nargs='?',
choices=['thin', 'full'],
default=None,
const='full',
dest='lto_type')
parser.add_argument(
"--clang-profile-instr-use",
help="profile file to use for clang PGO",
metavar="PATH")
default_max_lto_link_job_counts = host.max_lto_link_job_counts()
parser.add_argument(
"--llvm-max-parallel-lto-link-jobs",
help="the maximum number of parallel link jobs to use when compiling "
"llvm",
metavar="COUNT",
default=default_max_lto_link_job_counts['llvm'])
parser.add_argument(
"--swift-tools-max-parallel-lto-link-jobs",
help="the maximum number of parallel link jobs to use when compiling "
"swift tools.",
metavar="COUNT",
default=default_max_lto_link_job_counts['swift'])
parser.add_argument("--enable-sil-ownership",
help="Enable the SIL ownership model",
action='store_true')
parser.add_argument("--force-optimized-typechecker",
help="Force the type checker to be built with "
"optimization",
action='store_true')
parser.add_argument(
# Explicitly unavailable options here.
"--build-jobs",
"--common-cmake-options",
"--only-execute",
"--skip-test-optimize-for-size",
"--skip-test-optimized",
action=arguments.action.unavailable)
parser.add_argument(
"--lit-args",
help="lit args to use when testing",
metavar="LITARGS",
default="-sv")
parser.add_argument(
"--coverage-db",
help="coverage database to use when prioritizing testing",
metavar="PATH")
return parser
# ----------------------------------------------------------------------------
# Short usage synopsis passed to argparse; "%(prog)s" is expanded by argparse
# to the program name when the help text is rendered.
USAGE = """
%(prog)s [-h | --help] [OPTION ...]
%(prog)s --preset=NAME [SUBSTITUTION ...]
"""
# One-paragraph description shown at the top of the --help output.
DESCRIPTION = """
Use this tool to build, test, and prepare binary distribution archives of Swift
and related tools.
Builds Swift (and, optionally, LLDB), incrementally, optionally
testing it thereafter. Different build configurations are maintained in
parallel.
"""
# Long-form help epilog (preset mode, environment variables, worked examples)
# appended after the option listing in the --help output.
EPILOG = """
Using option presets:
--preset-file=PATH load presets from the specified file
--preset=NAME use the specified option preset
The preset mode is mutually exclusive with other options. It is not
possible to add ad-hoc customizations to a preset. This is a deliberate
design decision. (Rationale: a preset is a certain important set of
options that we want to keep in a centralized location. If you need to
customize it, you should create another preset in a centralized location,
rather than scattering the knowledge about the build across the system.)
Presets support substitutions for controlled customizations. Substitutions
are defined in the preset file. Values for substitutions are supplied
using the name=value syntax on the command line.
Any arguments not listed are forwarded directly to Swift's
'build-script-impl'. See that script's help for details.
Environment variables
---------------------
This script respects a few environment variables, should you
choose to set them:
SWIFT_SOURCE_ROOT: a directory containing the source for LLVM, Clang, Swift.
If this script is located in a Swift
source directory, the location of SWIFT_SOURCE_ROOT will be
inferred if the variable is not set.
'build-script' expects the sources to be laid out in the following way:
$SWIFT_SOURCE_ROOT/llvm
/clang
/swift
/lldb (optional)
/llbuild (optional)
/swiftpm (optional, requires llbuild)
/compiler-rt (optional)
/swift-corelibs-xctest (optional)
/swift-corelibs-foundation (optional)
/swift-corelibs-libdispatch (optional)
/icu (optional)
SWIFT_BUILD_ROOT: a directory in which to create out-of-tree builds.
Defaults to "$SWIFT_SOURCE_ROOT/build/".
Preparing to run this script
----------------------------
See README.md for instructions on cloning Swift subprojects.
If you intend to use the -l, -L, --lldb, or --debug-lldb options.
That's it; you're ready to go!
Examples
--------
Given the above layout of sources, the simplest invocation of 'build-script' is
just:
[~/src/s]$ ./swift/utils/build-script
This builds LLVM, Clang, Swift and Swift standard library in debug mode.
All builds are incremental. To incrementally build changed files, repeat the
same 'build-script' command.
Typical uses of 'build-script'
------------------------------
To build everything with optimization without debug information:
[~/src/s]$ ./swift/utils/build-script -R
To run tests, add '-t':
[~/src/s]$ ./swift/utils/build-script -R -t
To run normal tests and validation tests, add '-T':
[~/src/s]$ ./swift/utils/build-script -R -T
To build LLVM+Clang with optimization without debug information, and a
debuggable Swift compiler:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift
To build a debuggable Swift standard library:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift-stdlib
iOS build targets are always configured and present, but are not built by
default. To build the standard library for OS X, iOS simulator and iOS device:
[~/src/s]$ ./swift/utils/build-script -R -i
To run OS X and iOS tests that don't require a device:
[~/src/s]$ ./swift/utils/build-script -R -i -t
To use 'make' instead of 'ninja', use '-m':
[~/src/s]$ ./swift/utils/build-script -m -R
To create Xcode projects that can build Swift, use '-x':
[~/src/s]$ ./swift/utils/build-script -x -R
Preset mode in build-script
---------------------------
All buildbots and automated environments use 'build-script' in *preset mode*.
In preset mode, the command line only specifies the preset name and allows
limited customization (extra output paths). The actual options come from
the selected preset in 'utils/build-presets.ini'. For example, to build like
the incremental buildbot, run:
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_incremental
To build with AddressSanitizer:
[~/src/s]$ ./swift/utils/build-script --preset=asan
To build a root for Xcode XYZ, '/tmp/xcode-xyz-root.tar.gz':
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_BNI_internal_XYZ \\
install_destdir="/tmp/install"
install_symroot="/tmp/symroot"
installable_package="/tmp/xcode-xyz-root.tar.gz"
If you have your own favorite set of options, you can create your own, local,
preset. For example, let's create a preset called 'ds' (which stands for
Debug Swift):
$ cat > ~/.swift-build-presets
[preset: ds]
release
debug-swift
debug-swift-stdlib
test
build-subdir=ds
To use it, specify the '--preset=' argument:
[~/src/s]$ ./swift/utils/build-script --preset=ds
./swift/utils/build-script: using preset 'ds', which expands to
./swift/utils/build-script --release --debug-swift --debug-swift-stdlib \
--test
--build-subdir=ds --
...
Existing presets can be found in `utils/build-presets.ini`
Philosophy
----------
While you can invoke CMake directly to build Swift, this tool will save you
time by taking away the mechanical parts of the process, providing you controls
for the important options.
For all automated build environments, this tool is regarded as *the* *only* way
to build Swift. This is not a technical limitation of the Swift build system.
It is a policy decision aimed at making the builds uniform across all
environments and easily reproducible by engineers who are not familiar with the
details of the setups of other systems or automated environments.
"""
| uasys/swift | utils/build_swift/driver_arguments.py | Python | apache-2.0 | 50,012 |
#!/usr/bin/env python3
""" A usage example for lecs. """
from lecs import ECS, Entity, Component, System
__author__ = 'Mads H. Jakobsen (ragerin)'
__email__ = 'ragerin@gmail.com'
__version__ = '2.0.0'
__status__ = 'beta'
__license__ = 'MIT'
# Create a simple component
class MooComponent(Component):
    """Data container holding the sound an ordinary cow makes."""

    def __init__(self):
        super().__init__()
        # Plain cows are quiet.
        self.message = 'moo.'
# Create another simple component, derived from MooComponent
class BullComponent(MooComponent):
    """Same data as MooComponent, but a bull shouts its message."""

    def __init__(self):
        super().__init__()
        # Override the quiet moo inherited from MooComponent.
        self.message = 'MOO!'
# Now create a system to handle the components
class CattleSystem(System):
    """System that prints the message of every registered cattle component."""

    def __init__(self, ecs, component_class):
        # All registration bookkeeping is handled by the base System.
        super().__init__(ecs, component_class)

    def execute(self):
        """Print each matching component's sound, one per line."""
        for cattle in self.get_component_list():
            print(cattle.message)
# Instantiate a base ECS container that owns all entities and systems.
ecs = ECS()
# Add a new empty entity representing a cow.
cow = ecs.add_entity()
# Attach a MooComponent to the entity so it can make a sound.
cow.add_component(MooComponent())
# Let's add one more entity, this time a bull.
bull = ecs.add_entity()
# This one is a bull, so we add the (louder) BullComponent.
bull.add_component(BullComponent())
# We add the CattleSystem, passing the class names of the components it
# should look for each time it runs.
s = CattleSystem(ecs, ['MooComponent','BullComponent'])
# Run the system once; it prints the message of every matching component.
s.execute()
| ragerin/lecs | examples/cattle.py | Python | mit | 1,425 |
# Tutorial 17: Neuron Models
# Nengo supports multiple different types of neurons. The default is the
# "Leaky Integrate-and-Fire" or LIF neuron. Other supported ones are shown
# here. The LIFRate neuron acts like the LIF neuron, but does not have spikes.
# The Sigmoid is a standard model used in machine learning. The
# RectifiedLinear model outputs max(0, x): zero for negative input and the
# input unchanged otherwise (the simplest possible neuron-like operation).
# The most complex neuron model here is the Izhikevich neuron, which has
# four parameters to adjust and has been shown to map very closely to a
# large number of real biological neurons.
import nengo

model = nengo.Network()
with model:
    # Constant input signal (value 0) shared by every ensemble below.
    stim = nengo.Node(0)
    # Spiking leaky integrate-and-fire neurons (Nengo's default model).
    a = nengo.Ensemble(n_neurons=50, dimensions=1,
                       neuron_type=nengo.LIF(tau_rc=0.02, tau_ref=0.002))
    # Rate-based LIF: same tuning curves as LIF, but no spikes.
    b = nengo.Ensemble(n_neurons=50, dimensions=1,
                       neuron_type=nengo.LIFRate(tau_rc=0.02, tau_ref=0.002))
    # Sigmoid neurons, kept here for reference but disabled.
    #c = nengo.Ensemble(n_neurons=50, dimensions=1,
    #                   neuron_type=nengo.Sigmoid(tau_ref=0.002))
    # Rectified linear units: output max(0, x).
    d = nengo.Ensemble(n_neurons=50, dimensions=1,
                       neuron_type=nengo.RectifiedLinear())
    # Izhikevich model with its four tunable parameters.
    e = nengo.Ensemble(n_neurons=50, dimensions=1,
                       neuron_type=nengo.Izhikevich(
                           tau_recovery=0.02,
                           coupling=0.2,
                           reset_voltage=-65,
                           reset_recovery=8))
    # Feed the same stimulus into every (enabled) ensemble.
    nengo.Connection(stim, a)
    nengo.Connection(stim, b)
    #nengo.Connection(stim, c)
    nengo.Connection(stim, d)
    nengo.Connection(stim, e)
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def add_tags_to_resource(ResourceArn=None, TagList=None):
    """Add or overwrite one or more tags on an AWS CloudHSM Classic resource.

    Each tag consists of a key and a value; tag keys must be unique per
    resource. See the AWS CloudHSM Classic API Reference for details.

    :type ResourceArn: string
    :param ResourceArn: [REQUIRED] The Amazon Resource Name (ARN) of the
        AWS CloudHSM resource to tag.
    :type TagList: list
    :param TagList: [REQUIRED] One or more tags, each a dict with required
        string entries ``'Key'`` and ``'Value'``.
    :rtype: dict
    :return: ``{'Status': 'string'}`` -- the status of the operation.

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def can_paginate(operation_name=None):
    """Check if an operation can be paginated.

    :type operation_name: string
    :param operation_name: The operation name, identical to the method name
        on the client: for ``client.create_foo(**kwargs)`` pass
        ``'create_foo'`` (the same name used with
        ``client.get_paginator('create_foo')``).
    """
    pass
def create_hapg(Label=None):
    """Create a high-availability partition group (AWS CloudHSM Classic).

    A high-availability partition group is a group of partitions that spans
    multiple physical HSMs.

    :type Label: string
    :param Label: [REQUIRED] The label of the new high-availability
        partition group.
    :rtype: dict
    :return: ``{'HapgArn': 'string'}`` -- the ARN of the new group
        (output of the CreateHAPartitionGroup action).

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def create_hsm(SubnetId=None, SshKey=None, EniIp=None, IamRoleArn=None, ExternalId=None, SubscriptionType=None, ClientToken=None, SyslogIp=None):
    """Create an uninitialized HSM instance (AWS CloudHSM Classic).

    There is an upfront fee charged for each HSM instance created with this
    operation. If you accidentally provision an HSM and want a refund,
    delete it with DeleteHsm and open an AWS Support case under
    "Account and Billing Support".

    :type SubnetId: string
    :param SubnetId: [REQUIRED] The identifier of the VPC subnet in which
        to place the HSM.
    :type SshKey: string
    :param SshKey: [REQUIRED] The SSH public key to install on the HSM.
    :type EniIp: string
    :param EniIp: The IP address to assign to the HSM's ENI. If omitted,
        one is chosen at random from the subnet's CIDR range.
    :type IamRoleArn: string
    :param IamRoleArn: [REQUIRED] ARN of an IAM role that lets the AWS
        CloudHSM service allocate an ENI on your behalf.
    :type ExternalId: string
    :param ExternalId: The external ID from IamRoleArn, if present.
    :type SubscriptionType: string
    :param SubscriptionType: [REQUIRED] The type of subscription:
        ``PRODUCTION`` (production environment) or ``TRIAL`` (product trial).
    :type ClientToken: string
    :param ClientToken: A user-defined idempotency token; subsequent calls
        with the same token are ignored.
    :type SyslogIp: string
    :param SyslogIp: IP address of the syslog monitoring server (only one
        is supported).
    :rtype: dict
    :return: ``{'HsmArn': 'string'}`` -- the ARN of the new HSM.

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def create_luna_client(Label=None, Certificate=None):
    """Create an HSM client (AWS CloudHSM Classic).

    :type Label: string
    :param Label: The label for the client.
    :type Certificate: string
    :param Certificate: [REQUIRED] The contents of a Base64-encoded X.509 v3
        certificate to install on the HSMs used by this client.
    :rtype: dict
    :return: ``{'ClientArn': 'string'}`` -- the ARN of the new client
        (output of the CreateLunaClient action).

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def delete_hapg(HapgArn=None):
    """Delete a high-availability partition group (AWS CloudHSM Classic).

    :type HapgArn: string
    :param HapgArn: [REQUIRED] The ARN of the high-availability partition
        group to delete.
    :rtype: dict
    :return: ``{'Status': 'string'}`` -- the status of the action
        (output of the DeleteHapg action).

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def delete_hsm(HsmArn=None):
    """Delete an HSM (AWS CloudHSM Classic).

    After completion this operation cannot be undone and your key material
    cannot be recovered.

    :type HsmArn: string
    :param HsmArn: [REQUIRED] The ARN of the HSM to delete.
    :rtype: dict
    :return: ``{'Status': 'string'}`` -- the status of the operation
        (output of the DeleteHsm operation).

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def delete_luna_client(ClientArn=None):
    """Delete an HSM client (AWS CloudHSM Classic).

    :type ClientArn: string
    :param ClientArn: [REQUIRED] The ARN of the client to delete.
    :rtype: dict
    :return: ``{'Status': 'string'}`` -- the status of the action.

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def describe_hapg(HapgArn=None):
    """Retrieve information about a high-availability partition group.

    Part of the AWS CloudHSM Classic API.

    :type HapgArn: string
    :param HapgArn: [REQUIRED] The ARN of the high-availability partition
        group to describe.
    :rtype: dict
    :return: Output of the DescribeHapg action, with the keys:

        - ``HapgArn`` (string): ARN of the group.
        - ``HapgSerial`` (string): serial number of the group.
        - ``HsmsLastActionFailed`` (list of HSM ARN strings).
        - ``HsmsPendingDeletion`` (list of HSM ARN strings).
        - ``HsmsPendingRegistration`` (list of HSM ARN strings).
        - ``Label`` (string): label for the group.
        - ``LastModifiedTimestamp`` (string): last modification time.
        - ``PartitionSerialList`` (list of strings): partition serial
          numbers that belong to the group.
        - ``State`` (string): one of ``'READY' | 'UPDATING' | 'DEGRADED'``.

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def describe_hsm(HsmArn=None, HsmSerialNumber=None):
    """Retrieve information about an HSM (AWS CloudHSM Classic).

    The HSM may be identified either by its ARN or by its serial number;
    one of the two parameters must be specified.

    :type HsmArn: string
    :param HsmArn: The ARN of the HSM.
    :type HsmSerialNumber: string
    :param HsmSerialNumber: The serial number of the HSM.
    :rtype: dict
    :return: Output of the DescribeHsm operation, with the keys:

        - ``HsmArn`` (string): ARN of the HSM.
        - ``Status`` (string): one of ``'PENDING' | 'RUNNING' | 'UPDATING'
          | 'SUSPENDED' | 'TERMINATING' | 'TERMINATED' | 'DEGRADED'``.
        - ``StatusDetails`` (string): additional status information.
        - ``AvailabilityZone`` (string): AZ the HSM is in.
        - ``EniId`` / ``EniIp`` (string): elastic network interface
          identifier and its assigned IP address.
        - ``SubscriptionType`` (string): ``PRODUCTION`` (production
          environment) or ``TRIAL`` (product trial).
        - ``SubscriptionStartDate`` / ``SubscriptionEndDate`` (string).
        - ``VpcId`` / ``SubnetId`` (string): network placement.
        - ``IamRoleArn`` (string): IAM role assigned to the HSM.
        - ``SerialNumber`` / ``VendorName`` / ``HsmType`` /
          ``SoftwareVersion`` (string): hardware and software details.
        - ``SshPublicKey`` / ``SshKeyLastUpdated`` (string).
        - ``ServerCertUri`` / ``ServerCertLastUpdated`` (string).
        - ``Partitions`` (list of strings): partitions on the HSM.

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def describe_luna_client(ClientArn=None, CertificateFingerprint=None):
    """Retrieve information about an HSM client (AWS CloudHSM Classic).

    :type ClientArn: string
    :param ClientArn: The ARN of the client.
    :type CertificateFingerprint: string
    :param CertificateFingerprint: The certificate fingerprint.
    :rtype: dict
    :return: A dict with the keys:

        - ``ClientArn`` (string): ARN of the client.
        - ``Certificate`` (string): certificate installed on the HSMs
          used by this client.
        - ``CertificateFingerprint`` (string).
        - ``LastModifiedTimestamp`` (string): last modification time.
        - ``Label`` (string): label of the client.

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """Generate a presigned URL given a client, its method, and arguments.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ``ClientMethod``.
    :type ExpiresIn: int
    :param ExpiresIn: Number of seconds the presigned URL is valid for;
        defaults to one hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: HTTP method to use on the generated URL; by default,
        whatever method the operation's model uses.
    """
    pass
def get_config(ClientArn=None, ClientVersion=None, HapgList=None):
    """Get the configuration files needed to connect a client to its HAPGs.

    Returns the configuration necessary to connect to all high-availability
    partition groups the client is associated with (AWS CloudHSM Classic).

    :type ClientArn: string
    :param ClientArn: [REQUIRED] The ARN of the client.
    :type ClientVersion: string
    :param ClientVersion: [REQUIRED] The client version, ``'5.1'`` or
        ``'5.3'``.
    :type HapgList: list
    :param HapgList: [REQUIRED] ARNs (strings) of the high-availability
        partition groups associated with the client.
    :rtype: dict
    :return: A dict with the keys:

        - ``ConfigType`` (string): the type of credentials.
        - ``ConfigFile`` (string): the chrystoki.conf configuration file.
        - ``ConfigCred`` (string): the certificate file containing the
          server.pem files of the HSMs.

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def get_paginator(operation_name=None):
    """Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name, identical to the method name
        on the client: for ``client.create_foo(**kwargs)`` pass
        ``'create_foo'`` (usable only if ``create_foo`` can be paginated).
    :rtype: L{botocore.paginate.Paginator}
    :return: A paginator object.
    """
    pass
def get_waiter(waiter_name=None):
    """Return an object that can wait for some condition.

    :type waiter_name: str
    :param waiter_name: The name of the waiter to get; see the waiters
        section of the service docs for the available waiters.
    :rtype: botocore.waiter.Waiter
    """
    pass
def list_available_zones():
    """List Availability Zones with available AWS CloudHSM capacity.

    Part of the AWS CloudHSM Classic API.

    :rtype: dict
    :return: ``{'AZList': ['string', ...]}`` -- the Availability Zones
        that have available AWS CloudHSM capacity.

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def list_hapgs(NextToken=None):
    """List the high-availability partition groups for the account.

    Supports pagination via the NextToken member: if more results are
    available, the response's ``NextToken`` contains a token to pass in the
    next call to ListHapgs to retrieve the next set of items
    (AWS CloudHSM Classic).

    :type NextToken: string
    :param NextToken: The ``NextToken`` value from a previous call to
        ListHapgs; pass null on the first call.
    :rtype: dict
    :return: A dict with the keys:

        - ``HapgList`` (list of strings): the high-availability partition
          groups.
        - ``NextToken`` (string): if not null, more results are available.

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def list_hsms(NextToken=None):
    """Retrieve the identifiers of all HSMs provisioned for the customer.

    Supports pagination via the NextToken member: if more results are
    available, the response's ``NextToken`` contains a token to pass in the
    next call to ListHsms to retrieve the next set of items
    (AWS CloudHSM Classic).

    :type NextToken: string
    :param NextToken: The ``NextToken`` value from a previous call to
        ListHsms; pass null on the first call.
    :rtype: dict
    :return: Output of the ListHsms operation:

        - ``HsmList`` (list of strings): ARNs identifying the HSMs.
        - ``NextToken`` (string): if not null, more results are available.

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def list_luna_clients(NextToken=None):
    """List all of the clients (AWS CloudHSM Classic).

    Supports pagination via the NextToken member: if more results are
    available, the response's ``NextToken`` contains a token to pass in the
    next call to ListLunaClients to retrieve the next set of items.

    :type NextToken: string
    :param NextToken: The ``NextToken`` value from a previous call to
        ListLunaClients; pass null on the first call.
    :rtype: dict
    :return: A dict with the keys:

        - ``ClientList`` (list of strings): the clients.
        - ``NextToken`` (string): if not null, more results are available.

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def list_tags_for_resource(ResourceArn=None):
    """Return all tags for the specified AWS CloudHSM Classic resource.

    :type ResourceArn: string
    :param ResourceArn: [REQUIRED] The Amazon Resource Name (ARN) of the
        AWS CloudHSM resource.
    :rtype: dict
    :return: ``{'TagList': [{'Key': 'string', 'Value': 'string'}, ...]}``
        -- one or more key-value tags on the resource.

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def modify_hapg(HapgArn=None, Label=None, PartitionSerialList=None):
    """Modify an existing high-availability partition group.

    Part of the AWS CloudHSM Classic API.

    :type HapgArn: string
    :param HapgArn: [REQUIRED] The ARN of the high-availability partition
        group to modify.
    :type Label: string
    :param Label: The new label for the group.
    :type PartitionSerialList: list
    :param PartitionSerialList: Partition serial numbers (strings) to make
        members of the group.
    :rtype: dict
    :return: ``{'HapgArn': 'string'}`` -- the ARN of the group.

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def modify_hsm(HsmArn=None, SubnetId=None, EniIp=None, IamRoleArn=None, ExternalId=None, SyslogIp=None):
    """Modify an HSM (AWS CloudHSM Classic).

    :type HsmArn: string
    :param HsmArn: [REQUIRED] The ARN of the HSM to modify.
    :type SubnetId: string
    :param SubnetId: The new subnet identifier; the new subnet must be in
        the same Availability Zone as the current subnet.
    :type EniIp: string
    :param EniIp: The new IP address for the ENI attached to the HSM. If
        the HSM moves subnets and no address is given, one is chosen at
        random from the new subnet's CIDR range.
    :type IamRoleArn: string
    :param IamRoleArn: The new IAM role ARN.
    :type ExternalId: string
    :param ExternalId: The new external ID.
    :type SyslogIp: string
    :param SyslogIp: The new syslog monitoring server IP (only one server
        is supported).
    :rtype: dict
    :return: ``{'HsmArn': 'string'}`` -- output of the ModifyHsm operation.

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
def modify_luna_client(ClientArn=None, Certificate=None):
    """Modify the certificate used by a client (AWS CloudHSM Classic).

    This action can potentially start a workflow to install the new
    certificate on the client's HSMs.

    :type ClientArn: string
    :param ClientArn: [REQUIRED] The ARN of the client.
    :type Certificate: string
    :param Certificate: [REQUIRED] The new certificate for the client.
    :rtype: dict
    :return: ``{'ClientArn': 'string'}`` -- the ARN of the client.

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException.
    """
    pass
def remove_tags_from_resource(ResourceArn=None, TagKeyList=None):
    """Remove one or more tags from an AWS CloudHSM Classic resource.

    To remove a tag, specify only the tag key (not the value). To
    overwrite the value of an existing tag, use AddTagsToResource.

    :type ResourceArn: string
    :param ResourceArn: [REQUIRED] The Amazon Resource Name (ARN) of the
        AWS CloudHSM resource.
    :type TagKeyList: list
    :param TagKeyList: [REQUIRED] The tag key or keys (strings) to remove.
    :rtype: dict
    :return: ``{'Status': 'string'}`` -- the status of the operation.

    May raise CloudHSM.Client.exceptions.CloudHsmServiceException,
    CloudHsmInternalException or InvalidRequestException.
    """
    pass
# source: wavycloud/pyboto3 | pyboto3/cloudhsm.py | Python | mit | 37,780
"""Minorization-maximization inference algorithms."""
import itertools
import numpy as np
from .convergence import NormOfDifferenceTest
from .utils import log_transform, exp_transform
def _mm(n_items, data, initial_params, alpha, max_iter, tol, mm_fun):
    """Iteratively refine MM estimates until convergence.

    Repeatedly applies the inner-loop function ``mm_fun``, regularizing each
    update with ``alpha``, until the L1 norm of the difference between
    successive parameter vectors drops below ``tol``.

    Raises
    ------
    RuntimeError
        If the algorithm does not converge after `max_iter` iterations.
    """
    estimate = np.zeros(n_items) if initial_params is None else initial_params
    test = NormOfDifferenceTest(tol=tol, order=1)
    for _ in range(max_iter):
        numerators, denominators = mm_fun(n_items, data, estimate)
        estimate = log_transform((numerators + alpha) / (denominators + alpha))
        if test(estimate):
            return estimate
    raise RuntimeError("Did not converge after {} iterations".format(max_iter))
def _mm_pairwise(n_items, data, params):
    """Inner loop of the MM algorithm for pairwise-comparison data."""
    strengths = exp_transform(params)
    numerators = np.zeros(n_items, dtype=float)
    denominators = np.zeros(n_items, dtype=float)
    for winner, loser in data:
        numerators[winner] += 1.0
        # Each comparison contributes the same term to both players'
        # denominators.
        contribution = 1.0 / (strengths[winner] + strengths[loser])
        denominators[winner] += contribution
        denominators[loser] += contribution
    return numerators, denominators
def mm_pairwise(
        n_items, data, initial_params=None, alpha=0.0,
        max_iter=10000, tol=1e-8):
    """Fit model parameters to pairwise-comparison data using MM.

    Computes the maximum-likelihood (ML) estimate of model parameters given
    pairwise-comparison data (see :ref:`data-pairwise`) with the
    minorization-maximization (MM) algorithm [Hun04]_, [CD12]_.

    When ``alpha > 0``, the result is instead the maximum a-posteriori (MAP)
    estimate under a (peaked) Dirichlet prior; see :ref:`regularization`.

    Parameters
    ----------
    n_items : int
        Number of distinct items.
    data : list of lists
        Pairwise-comparison data.
    initial_params : array_like, optional
        Parameters used to initialize the iterative procedure.
    alpha : float, optional
        Regularization parameter.
    max_iter : int, optional
        Maximum number of iterations allowed.
    tol : float, optional
        Maximum L1-norm of the difference between successive iterates to
        declare convergence.

    Returns
    -------
    params : numpy.ndarray
        The ML estimate of model parameters.
    """
    return _mm(
        n_items, data, initial_params, alpha, max_iter, tol, _mm_pairwise)
def _mm_rankings(n_items, data, params):
    """Inner loop of the MM algorithm for ranking data."""
    strengths = exp_transform(params)
    numerators = np.zeros(n_items, dtype=float)
    denominators = np.zeros(n_items, dtype=float)
    for ranking in data:
        # Total strength of the items still available at each position.
        remaining = strengths.take(ranking).sum()
        for position, winner in enumerate(ranking[:-1]):
            numerators[winner] += 1
            contribution = 1.0 / remaining
            for contender in ranking[position:]:
                denominators[contender] += contribution
            remaining -= strengths[winner]
    return numerators, denominators
def mm_rankings(n_items, data, initial_params=None, alpha=0.0,
        max_iter=10000, tol=1e-8):
    """Fit model parameters to ranking data using MM.

    Computes the maximum-likelihood (ML) estimate of model parameters given
    ranking data (see :ref:`data-rankings`) with the
    minorization-maximization (MM) algorithm [Hun04]_, [CD12]_.

    When ``alpha > 0``, the result is instead the maximum a-posteriori (MAP)
    estimate under a (peaked) Dirichlet prior; see :ref:`regularization`.

    Parameters
    ----------
    n_items : int
        Number of distinct items.
    data : list of lists
        Ranking data.
    initial_params : array_like, optional
        Parameters used to initialize the iterative procedure.
    alpha : float, optional
        Regularization parameter.
    max_iter : int, optional
        Maximum number of iterations allowed.
    tol : float, optional
        Maximum L1-norm of the difference between successive iterates to
        declare convergence.

    Returns
    -------
    params : numpy.ndarray
        The ML estimate of model parameters.
    """
    return _mm(n_items, data, initial_params, alpha, max_iter, tol,
            _mm_rankings)
def _mm_top1(n_items, data, params):
    """Inner loop of the MM algorithm for top-1 data."""
    strengths = exp_transform(params)
    numerators = np.zeros(n_items, dtype=float)
    denominators = np.zeros(n_items, dtype=float)
    for winner, losers in data:
        numerators[winner] += 1
        # One observation contributes the same term to the winner and to
        # every losing alternative.
        contribution = 1 / (strengths.take(losers).sum() + strengths[winner])
        denominators[winner] += contribution
        for contender in losers:
            denominators[contender] += contribution
    return numerators, denominators
def mm_top1(
        n_items, data, initial_params=None, alpha=0.0,
        max_iter=10000, tol=1e-8):
    """Fit model parameters to top-1 data using MM.

    Computes the maximum-likelihood (ML) estimate of model parameters given
    top-1 data (see :ref:`data-top1`) with the minorization-maximization
    (MM) algorithm [Hun04]_, [CD12]_.

    When ``alpha > 0``, the result is instead the maximum a-posteriori (MAP)
    estimate under a (peaked) Dirichlet prior; see :ref:`regularization`.

    Parameters
    ----------
    n_items : int
        Number of distinct items.
    data : list of lists
        Top-1 data.
    initial_params : array_like, optional
        Parameters used to initialize the iterative procedure.
    alpha : float, optional
        Regularization parameter.
    max_iter : int, optional
        Maximum number of iterations allowed.
    tol : float, optional
        Maximum L1-norm of the difference between successive iterates to
        declare convergence.

    Returns
    -------
    params : numpy.ndarray
        The ML estimate of model parameters.
    """
    return _mm(n_items, data, initial_params, alpha, max_iter, tol, _mm_top1)
def _choicerank(n_items, data, params):
    """Inner loop of the ChoiceRank algorithm."""
    strengths = exp_transform(params)
    adj, adj_t, traffic_in, traffic_out = data
    # First message-passing phase: aggregate neighbor strengths.
    totals = adj.dot(strengths)
    # Second message-passing phase: redistribute the outgoing traffic.
    # Invalid float warnings (e.g. 0/0 for isolated nodes) are silenced.
    with np.errstate(invalid="ignore"):
        denominators = adj_t.dot(traffic_out / totals)
    return traffic_in, denominators
def choicerank(
        digraph, traffic_in, traffic_out, weight=None,
        initial_params=None, alpha=1.0, max_iter=10000, tol=1e-8):
    """Compute the MAP estimate of a network choice model's parameters.

    This function computes the maximum-a-posteriori (MAP) estimate of model
    parameters given a network structure and node-level traffic data (see
    :ref:`data-network`), using the ChoiceRank algorithm [MG17]_, [KTVV15]_.

    The nodes are assumed to be labeled using consecutive integers starting
    from 0.

    Parameters
    ----------
    digraph : networkx.DiGraph
        Directed graph representing the network.
    traffic_in : array_like
        Number of arrivals at each node.
    traffic_out : array_like
        Number of departures at each node.
    weight : str, optional
        The edge attribute that holds the numerical value used for the edge
        weight. If None (default) then all edge weights are 1.
    initial_params : array_like, optional
        Parameters used to initialize the iterative procedure.
    alpha : float, optional
        Regularization parameter.
    max_iter : int, optional
        Maximum number of iterations allowed.
    tol : float, optional
        Maximum L1-norm of the difference between successive iterates to
        declare convergence.

    Returns
    -------
    params : numpy.ndarray
        The MAP estimate of model parameters.

    Raises
    ------
    ImportError
        If the NetworkX library cannot be imported.
    """
    import networkx as nx
    # Compute the (sparse) adjacency matrix.
    n_items = len(digraph)
    nodes = np.arange(n_items)
    # `to_scipy_sparse_matrix` was deprecated in NetworkX 2.6 and removed in
    # 3.0; fall back to its replacement `to_scipy_sparse_array` (converted to
    # CSR so `.dot` and `.T.tocsr()` below behave the same).
    if hasattr(nx, "to_scipy_sparse_matrix"):
        adj = nx.to_scipy_sparse_matrix(digraph, nodelist=nodes, weight=weight)
    else:
        adj = nx.to_scipy_sparse_array(
            digraph, nodelist=nodes, weight=weight).tocsr()
    adj_t = adj.T.tocsr()
    # Process the data into a standard form.
    traffic_in = np.asarray(traffic_in)
    traffic_out = np.asarray(traffic_out)
    data = (adj, adj_t, traffic_in, traffic_out)
    return _mm(
        n_items, data, initial_params, alpha, max_iter, tol, _choicerank)
| lucasmaystre/choix | choix/mm.py | Python | mit | 8,450 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class KeyDesktopMoveCasesPage(page_module.Page):
  """Base page for the key desktop move-gesture cases.

  Configures the shared desktop browser state, the credentials file, and the
  WPR archive shared by every page in this story set.
  """

  def __init__(self, url, page_set, name='', credentials=None):
    super(KeyDesktopMoveCasesPage, self).__init__(
        url=url, page_set=page_set, name=name,
        credentials_path='data/credentials.json',
        shared_page_state_class=shared_page_state.SharedDesktopPageState)
    # All pages in this set replay from the same archive.
    self.archive_data_file = 'data/key_desktop_move_cases.json'
    # Name of the credentials entry to use, or None for no login.
    self.credentials = credentials
class GmailMouseScrollPage(KeyDesktopMoveCasesPage):
  """ Why: productivity, top google properties

  Drags the Gmail message-list scrollbar thumb with the mouse to exercise
  scrollbar-driven scrolling.
  """

  def __init__(self, page_set):
    super(GmailMouseScrollPage, self).__init__(
        url='https://mail.google.com/mail/',
        page_set=page_set)
    # NOTE(review): RunPageInteractions resolves the scrollable element via
    # gmonkey itself; this attribute looks unused within this class --
    # confirm against the benchmark harness before removing.
    self.scrollable_element_function = '''
        function(callback) {
          gmonkey.load('2.0', function(api) {
            callback(api.getScrollableElement());
          });
        }'''
    self.credentials = 'google'

  def RunNavigateSteps(self, action_runner):
    super(GmailMouseScrollPage, self).RunNavigateSteps(action_runner)
    # Wait for the gmonkey API and the "gb" (Google bar) element to exist.
    action_runner.WaitForJavaScriptCondition(
        'window.gmonkey !== undefined &&'
        'document.getElementById("gb") !== null')
    # This check is needed for gmonkey to load completely.
    action_runner.WaitForJavaScriptCondition(
        'document.readyState == "complete"')

  def RunPageInteractions(self, action_runner):
    # Resolve the scrollable mail element and stash it on `window` so the
    # drag gesture below can target it via element_function.
    action_runner.ExecuteJavaScript('''
        gmonkey.load('2.0', function(api) {
          window.__scrollableElementForTelemetry = api.getScrollableElement();
        });''')
    action_runner.WaitForJavaScriptCondition(
        'window.__scrollableElementForTelemetry != null')
    # Drag the scrollbar thumb from its starting position to the bottom.
    scrollbar_x, start_y, end_y = self._CalculateScrollBarRatios(action_runner)
    with action_runner.CreateGestureInteraction('DragAction'):
      action_runner.DragPage(left_start_ratio=scrollbar_x,
          top_start_ratio=start_y, left_end_ratio=scrollbar_x,
          top_end_ratio=end_y, speed_in_pixels_per_second=100,
          element_function='window.__scrollableElementForTelemetry')

  def _CalculateScrollBarRatios(self, action_runner):
    # Returns (thumb_x, start_y, end_y) as ratios of the scrollable
    # element's width/height, locating the scrollbar thumb's drag path.
    viewport_height = float(action_runner.EvaluateJavaScript(
        'window.__scrollableElementForTelemetry.clientHeight'))
    content_height = float(action_runner.EvaluateJavaScript(
        'window.__scrollableElementForTelemetry.scrollHeight'))
    viewport_width = float(action_runner.EvaluateJavaScript(
        'window.__scrollableElementForTelemetry.offsetWidth'))
    scrollbar_width = float(action_runner.EvaluateJavaScript('''
        window.__scrollableElementForTelemetry.offsetWidth -
        window.__scrollableElementForTelemetry.scrollWidth'''))
    # This calculation is correct only when the element doesn't have border or
    # padding or scroll buttons (eg: gmail mail element).
    # Calculating the mid point of start of scrollbar.
    scrollbar_height_ratio = viewport_height / content_height
    scrollbar_start_mid_y = scrollbar_height_ratio / 2
    scrollbar_width_ratio = scrollbar_width / viewport_width
    scrollbar_mid_x_right_offset = scrollbar_width_ratio / 2
    scrollbar_mid_x = 1 - scrollbar_mid_x_right_offset
    # The End point of scrollbar (x remains same).
    scrollbar_end_mid_y = 1 - scrollbar_start_mid_y
    return scrollbar_mid_x, scrollbar_start_mid_y, scrollbar_end_mid_y
class GoogleMapsPage(KeyDesktopMoveCasesPage):
  """ Why: productivity, top google properties; Supports drag gestures """

  def __init__(self, page_set):
    super(GoogleMapsPage, self).__init__(
        url='https://www.google.co.uk/maps/@51.5043968,-0.1526806',
        page_set=page_set,
        name='Maps')

  def RunNavigateSteps(self, action_runner):
    super(GoogleMapsPage, self).RunNavigateSteps(action_runner)
    # Wait for the map canvas and both zoom controls, i.e. the map UI has
    # finished loading.
    action_runner.WaitForElement(selector='.widget-scene-canvas')
    action_runner.WaitForElement(selector='.widget-zoom-in')
    action_runner.WaitForElement(selector='.widget-zoom-out')

  def RunPageInteractions(self, action_runner):
    # Perform three pan drags across the map, pausing before each one.
    for _ in range(3):
      action_runner.Wait(2)
      with action_runner.CreateGestureInteraction(
          'DragAction', repeatable=True):
        action_runner.DragPage(left_start_ratio=0.5, top_start_ratio=0.75,
            left_end_ratio=0.75, top_end_ratio=0.5)
    # TODO(ssid): Add zoom gestures after fixing bug crbug.com/462214.
# TODO(ssid): Add zoom gestures after fixing bug crbug.com/462214.
class KeyDesktopMoveCasesPageSet(story.StorySet):
  """ Special cases for move gesture """

  def __init__(self):
    super(KeyDesktopMoveCasesPageSet, self).__init__(
        archive_data_file='data/key_desktop_move_cases.json',
        cloud_storage_bucket=story.PARTNER_BUCKET)
    # The two stories exercising mouse-driven move/drag gestures.
    self.AddUserStory(GmailMouseScrollPage(self))
    self.AddUserStory(GoogleMapsPage(self))
| SaschaMester/delicium | tools/perf/page_sets/key_desktop_move_cases.py | Python | bsd-3-clause | 5,009 |
# Python side of the support for xmethods.
# Copyright (C) 2013-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Utilities for defining xmethods"""
import gdb
import re
import sys
if sys.version_info[0] > 2:
# Python 3 removed basestring and long
basestring = str
long = int
class XMethod(object):
    """Descriptor for a single xmethod.

    A minimal description object managed by 'XMethodMatcher' instances: it
    records the xmethod's name and whether it is currently enabled.  This
    class is only a template for the interface of the
    XMethodMatcher.methods objects -- any object exposing the same two
    attributes may be used in its place, and a matcher may also choose not
    to track its methods at all, in which case no such object is used.

    Attributes:
        name: The name of the xmethod.
        enabled: A boolean indicating if the xmethod is enabled.
    """

    def __init__(self, name):
        self.name = name
        self.enabled = True
class XMethodMatcher(object):
    """Abstract base class for xmethod matchers.

    When looking up xmethods, GDB invokes the `match' method of every
    registered matcher with an object type and a method name.  Concrete
    subclasses must override `match' to return an 'XMethodWorker' object
    (or a sequence of them) when they recognize the type/method pair.

    Attributes:
        name: The name of the matcher.
        enabled: A boolean indicating if the matcher is enabled.
        methods: Either None, or a sequence of objects compatible with the
            'XMethod' interface.  When set, the 'enable'/'disable'/'info'
            commands use the sequence to toggle and list the xmethods
            managed by this matcher; None means the matcher chooses not to
            have any xmethods managed by it.
    """

    def __init__(self, name):
        """
        Args:
            name: An identifying name for the xmethod or the group of
                xmethods returned by the `match' method.
        """
        self.name = name
        self.enabled = True
        self.methods = None

    def match(self, class_type, method_name):
        """Match class type and method name.

        Subclasses should return an 'XMethodWorker' object or a sequence of
        them, including only those workers whose corresponding 'XMethod'
        descriptor object is enabled.

        Args:
            class_type: The class type (gdb.Type object) to match.
            method_name: The name (string) of the method to match.
        """
        raise NotImplementedError("XMethodMatcher match")
class XMethodWorker(object):
    """Base class for all xmethod workers defined in Python.

    A worker matches the method's arguments and executes the method when
    GDB asks it to.  GDB first calls 'get_arg_types' to perform overload
    resolution; if this Python xmethod is selected, GDB then invokes it
    through the overridden '__call__' method.  Subclasses must override
    both methods.
    """

    def get_arg_types(self):
        """Return argument types of an xmethod.

        Returns a sequence of gdb.Type objects corresponding to the
        xmethod's arguments: 'None' or an empty sequence when it takes no
        arguments, and either a single gdb.Type or a one-element sequence
        when it takes exactly one.
        """
        raise NotImplementedError("XMethodWorker get_arg_types")

    def __call__(self, *args):
        """Invoke the xmethod.

        Args:
            args: Arguments to the method, each a gdb.Value object.  The
                first element is the 'this' pointer value.

        Returns:
            A gdb.Value corresponding to the value returned by the
            xmethod, or 'None' if the method does not return anything.
        """
        raise NotImplementedError("XMethodWorker __call__")
class SimpleXMethodMatcher(XMethodMatcher):
    """A convenience matcher/worker pair for simple xmethods.

    An instance couples a class-name regular expression and a method-name
    regular expression with a plain Python callable implementing the
    method.  For complex xmethods -- e.g. replacing template methods on
    possibly template classes -- write a dedicated matcher and worker
    instead (see py-xmethods.py in the testsuite/gdb.python directory of
    the GDB source tree for examples).
    """

    class SimpleXMethodWorker(XMethodWorker):
        """Worker that forwards invocations to a Python callable."""

        def __init__(self, method_function, arg_types):
            self._arg_types = arg_types
            self._method_function = method_function

        def get_arg_types(self):
            return self._arg_types

        def __call__(self, *args):
            return self._method_function(*args)

    def __init__(self, name, class_matcher, method_matcher, method_function,
                 *arg_types):
        """
        Args:
            name: Name of the xmethod matcher.
            class_matcher: Regular expression matching the name of the
                class whose method this xmethod implements/replaces.
            method_matcher: Regular expression matching the name of the
                method this xmethod implements/replaces.
            method_function: Python callable invoked via the worker
                returned by 'match'.  It receives the object (*this) as
                its first argument followed by the remaining method
                arguments, all as gdb.Value objects.
            arg_types: The gdb.Type objects corresponding to the arguments
                the xmethod takes.  Can be None, an empty sequence, a
                single gdb.Type, or a sequence of gdb.Type objects.
        """
        XMethodMatcher.__init__(self, name)
        assert callable(method_function), (
            "The 'method_function' argument to 'SimpleXMethodMatcher' "
            "__init__ method should be a callable.")
        self._method_function = method_function
        self._class_matcher = class_matcher
        self._method_matcher = method_matcher
        self._arg_types = arg_types

    def match(self, class_type, method_name):
        """Return a worker when both the class and method names match."""
        if not re.match(self._class_matcher,
                        str(class_type.unqualified().tag)):
            return None
        if not re.match(self._method_matcher, method_name):
            return None
        return SimpleXMethodMatcher.SimpleXMethodWorker(
            self._method_function, self._arg_types)
# A helper function for register_xmethod_matcher which returns an error
# object if MATCHER is not having the requisite attributes in the proper
# format.
def _validate_xmethod_matcher(matcher):
if not hasattr(matcher, "match"):
return TypeError("Xmethod matcher is missing method: match")
if not hasattr(matcher, "name"):
return TypeError("Xmethod matcher is missing attribute: name")
if not hasattr(matcher, "enabled"):
return TypeError("Xmethod matcher is missing attribute: enabled")
if not isinstance(matcher.name, basestring):
return TypeError("Attribute 'name' of xmethod matcher is not a "
"string")
if matcher.name.find(";") >= 0:
return ValueError("Xmethod matcher name cannot contain ';' in it")
# A helper function for register_xmethod_matcher which looks up an
# xmethod matcher with NAME in LOCUS. Returns the index of the xmethod
# matcher in 'xmethods' sequence attribute of the LOCUS. If NAME is not
# found in LOCUS, then -1 is returned.
def _lookup_xmethod_matcher(locus, name):
for i in range(0, len(locus.xmethods)):
if locus.xmethods[i].name == name:
return i
return -1
def register_xmethod_matcher(locus, matcher, replace=False):
    """Registers a xmethod matcher MATCHER with a LOCUS.

    Arguments:
        locus: The locus in which the xmethods should be registered.
            It can be 'None' to indicate that the xmethods should be
            registered globally. Or, it could be a gdb.Objfile or a
            gdb.Progspace object in which the xmethods should be
            registered.
        matcher: The xmethod matcher to register with the LOCUS.  It
            should be an instance of 'XMethodMatcher' class.
        replace: If True, replace any existing xmethod matcher with the
            same name in the locus.  Otherwise, if a matcher with the same
            name exists in the locus, raise an exception.
    """
    err = _validate_xmethod_matcher(matcher)
    if err:
        raise err
    if not locus:
        locus = gdb
    if locus == gdb:
        locus_name = "global"
    else:
        locus_name = locus.filename
    index = _lookup_xmethod_matcher(locus, matcher.name)
    if index >= 0:
        if replace:
            del locus.xmethods[index]
        else:
            raise RuntimeError("Xmethod matcher already registered with "
                               "%s: %s" % (locus_name, matcher.name))
    if gdb.parameter("verbose"):
        # Bug fix: the original passed a template containing two '%s'
        # placeholders (plus a stray quote) to gdb.write without any
        # format arguments, so the raw template was printed verbatim.
        gdb.write("Registering xmethod matcher '%s' with %s ...\n" %
                  (matcher.name, locus_name))
    # Newest matchers take precedence: insert at the front of the list.
    locus.xmethods.insert(0, matcher)
| ExploreEmbedded/Tit-Windows | tools/share/gdb/python/gdb/xmethod.py | Python | bsd-3-clause | 10,448 |
from viper.lexer import (
NEWLINE as NL, INDENT as IND, DEDENT as DED, PERIOD, COMMA, COLON, OPEN_PAREN as OP, CLOSE_PAREN as CP, R_ARROW,
EQUALS as EQ, ENDMARKER as EM,
Int,
Name as N, ReservedName as RN,
Class as C,
Operator as Op,
)
# Expected lexer output for a source program declaring a Shape interface and
# the Circle/Quadrilateral/Rectangle/Square class hierarchy.  NL/IND/DED mark
# newlines and indentation changes; the token groups below are annotated with
# the source construct they encode.
lexemes = [
    # interface Shape: def get_area() -> Float
    RN('interface'), C('Shape'), COLON,
    NL, IND,
    RN('def'), N('get_area'), OP, CP, R_ARROW, C('Float'), NL,
    DED,
    # Shape Circle: init(radius: Int); get_area() returns pi * (radius ^ 2)
    C('Shape'), C('Circle'), COLON,
    NL, IND,
    RN('def'), N('init'), OP, N('radius'), COLON, C('Int'), CP, COLON,
    NL, IND,
    N('self'), PERIOD, N('radius'), COLON, C('Int'), EQ, N('radius'), NL,
    DED,
    RN('def'), N('get_area'), OP, CP, R_ARROW, C('Float'), COLON,
    NL, IND,
    RN('return'), N('pi'), Op('*'), OP, N('self'), PERIOD, N('radius'), Op('^'), Int('2'), CP, NL,
    DED,
    DED,
    # Shape Quadrilateral: init(length: Int, width: Int); get_area() is
    # length * width
    C('Shape'), C('Quadrilateral'), COLON,
    NL, IND,
    RN('def'), N('init'), OP, N('length'), COLON, C('Int'), COMMA, N('width'), COLON, C('Int'), CP, COLON,
    NL, IND,
    N('self'), PERIOD, N('length'), COLON, C('Int'), EQ, N('length'), NL,
    N('self'), PERIOD, N('width'), COLON, C('Int'), EQ, N('width'), NL,
    DED,
    RN('def'), N('get_area'), OP, CP, R_ARROW, C('Float'), COLON,
    NL, IND,
    RN('return'), N('self'), PERIOD, N('length'), Op('*'), N('self'), PERIOD, N('width'), NL,
    DED,
    DED,
    # Quadrilateral Rectangle: pass (inherits everything)
    C('Quadrilateral'), C('Rectangle'), COLON,
    NL, IND,
    RN('pass'), NL,
    DED,
    # Quadrilateral Square: init(side: Int) sets both length and width
    C('Quadrilateral'), C('Square'), COLON,
    NL, IND,
    RN('def'), N('init'), OP, N('side'), COLON, C('Int'), CP, COLON,
    NL, IND,
    N('self'), PERIOD, N('length'), COLON, C('Int'), EQ, N('side'), NL,
    N('self'), PERIOD, N('width'), COLON, C('Int'), EQ, N('side'), NL,
    DED,
    DED,
    EM,
]
| pdarragh/Viper | tests/lexeme_files/interfaces.py | Python | apache-2.0 | 1,857 |
# -*- coding: utf-8 -*-
"""
# Copyright
Copyright (C) 2012 by Victor
victor@caern.de
# License
This file is part of SoulCreator.
SoulCreator is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
SoulCreator is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
SoulCreator. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Allgemeine Berechnungen.
"""
from PyQt4.QtCore import QDate
import src.Config as Config
#from src import Error
#from src.Widgets.Components.CharaTrait import CharaTrait
import src.Debug as Debug
def years(date_1, date_2):
    """Return the number of whole years between two QDate instances.

    The span is computed as the day count from ``date_1`` to ``date_2``,
    integer-divided by 365 (leap years are not accounted for).
    """
    elapsed_days = date_1.daysTo(date_2)
    return elapsed_days // 365
def calc_size( age, is_giant=False, is_small=False ):
    """Return the abstract Size trait value.

    Characters younger than Config.AGE_ADULT start from the child default,
    everyone else from the adult default.  The Giant merit adds one point;
    otherwise the Small flaw subtracts one.
    """
    if age < Config.AGE_ADULT:
        size = Config.SIZE_DEFAULT["Kid"]
    else:
        size = Config.SIZE_DEFAULT["Adult"]
    if is_giant:
        size += 1
    elif is_small:
        size -= 1
    return size
def calc_initiative( *args ):
    """Return the Initiative value: the plain sum of the given traits.

    Monsters pass Finesse and Resistance; humanoids pass their normal
    attributes.  No base value is added here.
    """
    return sum(args)
def calc_speed( *args, monster=False ):
    """Return the abstract Speed value.

    Sums the given traits; non-monster (humanoid) characters additionally
    receive the human base speed from the configuration.
    """
    speed = sum(args)
    if monster:
        return speed
    return speed + Config.SPEED_BASE_VALUE_HUMAN
def calc_defense( *args, age=None, size=None, maximize=False):
    """Return the Defense value.

    Normally the smallest of the given traits; some creatures (animals,
    monsters, etc.) instead use the largest (``maximize=True``).  Children
    additionally gain a bonus equal to how far their Size falls below the
    adult default (never negative).
    """
    defense = max(args) if maximize else min(args)
    if age and size and age < Config.AGE_ADULT:
        bonus = max(Config.SIZE_DEFAULT["Adult"] - size, 0)
        defense += bonus
    return defense
def calc_health(stamina, size):
    """Return the Health value: Stamina plus Size."""
    return stamina + size
def calc_willpower(resolve, composure):
    """Return the Willpower value: Resolve plus Composure."""
    return resolve + composure
def calc_rank_spirit(power, finesse, resistance):
    """Return the rank of a spirit derived from its three attributes.

    The attribute total maps onto ranks 1-5 in bands of six points:
    <= 7 -> 1, 8-13 -> 2, 14-19 -> 3, 20-25 -> 4, > 25 -> 5.
    """
    total = power + finesse + resistance
    if total > 25:
        return 5
    if total > 19:
        return 4
    if total > 13:
        return 3
    if total > 7:
        return 2
    return 1
| GoliathLeviathan/SoulCreator | src/Calc/Calc.py | Python | gpl-3.0 | 3,452 |
import pytest
class TestWget:
    """Bash completion tests for the `wget` command."""

    @pytest.mark.complete("wget ")
    def test_1(self, completion):
        # A bare "wget " argument position yields no completions.
        assert not completion

    @pytest.mark.complete("wget --s", require_cmd=True)
    def test_2(self, completion):
        # Option prefix "--s" must produce at least one completion.
        assert completion
| scop/bash-completion | test/t/test_wget.py | Python | gpl-2.0 | 248 |
# -*- coding: utf-8 -*-
"""
ProgramEnrollments Application Configuration
"""
from django.apps import AppConfig
from openedx.core.djangoapps.plugins.constants import PluginURLs, ProjectType
class ProgramEnrollmentsConfig(AppConfig):
    """
    Application configuration for ProgramEnrollment
    """
    name = 'lms.djangoapps.program_enrollments'

    # Register this app's REST API with the LMS URL plugin system: URL
    # patterns from rest_api/urls.py are mounted under
    # api/program_enrollments/ within the 'programs_api' namespace.
    plugin_app = {
        PluginURLs.CONFIG: {
            ProjectType.LMS: {
                PluginURLs.NAMESPACE: 'programs_api',
                PluginURLs.REGEX: 'api/program_enrollments/',
                PluginURLs.RELATIVE_PATH: 'rest_api.urls',
            }
        },
    }

    def ready(self):
        """
        Connect handlers to signals.
        """
        # Importing these modules at startup has the side effect of
        # registering the app's signal handlers and async tasks.
        from . import signals  # pylint: disable=unused-import
        from . import tasks  # pylint: disable=unused-import
| appsembler/edx-platform | lms/djangoapps/program_enrollments/apps.py | Python | agpl-3.0 | 848 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2007 Carabos Coop. V. All rights reserved
# Copyright (C) 2008-2019 Vicent Mas. All rights reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Vicent Mas - vmas@vitables.org
"""
This module defines a widget that wraps the view widget of leaves.
When a leaf node is opened in the tree of databases view the data stored in
that leaf will be displayed in the workspace using this wrapper widget.
"""
from qtpy import QtCore
from qtpy import QtWidgets
from . import leaf_view, leaf_model, df_model
from .. import utils as vtutils
from ..nodeprops import nodeinfo
from ..vtwidgets import zoom_cell
__docformat__ = 'restructuredtext'
class DataSheet(QtWidgets.QMdiSubWindow):
    """
    The widget containing the displayed data of a given dataset.

    :Parameter index: the index (in the tree of databases model) of the leaf
      whose data will be displayed
    """

    def __init__(self, index):
        """Display a given dataset in the MDI area.
        """
        # The main application window
        self.vtgui = vtutils.getVTApp().gui
        # The data structure (LeafNode/LinkNode instance) whose dataset
        # is being displayed
        dbt_model = self.vtgui.dbs_tree_model
        self.dbt_leaf = dbt_model.nodeFromIndex(index)
        # The tables.Node instance tied to that data structure
        pt_node = self.dbt_leaf.node
        if hasattr(pt_node, 'target'):
            # The selected item is a link and must be dereferenced
            leaf = pt_node()
        else:
            leaf = pt_node
        # Prefer the DataFrame model when the leaf can be opened as one;
        # fall back to the generic leaf model otherwise.
        self.leaf_model = df_model.try_opening_as_dataframe(leaf)
        if not self.leaf_model:
            self.leaf_model = leaf_model.LeafModel(leaf)
        self.leaf_view = leaf_view.LeafView(self.leaf_model)
        super(DataSheet, self).__init__(self.vtgui.workspace,
                                        QtCore.Qt.SubWindow)
        self.setWidget(self.leaf_view)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        # Customize the title bar
        if not isinstance(leaf.title, str):
            title = str(leaf.title)
        else:
            title = leaf.title
        wtitle = "{0}\t{1}".format(self.dbt_leaf.name, title)
        self.setWindowTitle(wtitle)
        self.setWindowIcon(self.dbt_leaf.icon)
        # Eventually update the Node menu actions
        self.dbt_leaf.has_view = True
        self.vtgui.updateActions()
        # Persistent index gives direct access to the tree leaf later,
        # even after the tree model changes.
        self.pindex = QtCore.QPersistentModelIndex(index)
        # Connect signals to slots
        self.aboutToActivate.connect(self.syncTreeView)
        self.leaf_view.doubleClicked.connect(self.zoomCell)

    def closeEvent(self, event):
        """Close the window cleanly with the close button of the title bar.

        :Parameter event: the event being processed
        """
        # Ensure that Node menu actions are properly updated
        self.dbt_leaf.has_view = False
        self.vtgui.updateActions()
        # Propagate the event. In the process, self.widget().closeEvent
        # will be called
        QtWidgets.QMdiSubWindow.closeEvent(self, event)
        if not self.vtgui.workspace.subWindowList():
            self.vtgui.dbs_tree_view.setFocus(True)

    def focusInEvent(self, event):
        """Specialised handler for focus events.

        Synchronize with the tree view when the view gets keyboard focus.

        :Parameter event: the event being processed
        """
        # Sync the workspace with the tree view (if needed) but keep the
        # focus (giving focus to the tree view when a given view is
        # clicked is counter intuitive)
        QtWidgets.QMdiSubWindow.focusInEvent(self, event)
        self.syncTreeView()
        self.setFocus(True)

    def syncTreeView(self):
        """
        If the view is activated select its leaf in the tree of databases view.
        """
        # Skip the sync while a node-properties dialog is being edited.
        if self.vtgui.editing_dlg is not None:
            self.vtgui.editing_dlg = None
            return
        # Locate the tree view leaf tied to this data sheet. Persistent
        # indices are used to get direct access to the leaf so we don't
        # have to walk the tree
        self.vtgui.dbs_tree_view.setCurrentIndex(
            QtCore.QModelIndex(self.pindex))

    def zoomCell(self, index):
        """Display the inner dimensions of a cell.

        :Parameter index: the index (in the leaf model) of the cell being zoomed
        """
        row = index.row()
        column = index.column()
        tmodel = index.model()
        data = tmodel.cell(row, column)
        # Nothing to zoom for empty cells.
        if data is None:
            return
        # The title of the zoomed view
        node = self.dbt_leaf
        info = nodeinfo.NodeInfo(node)
        if node.node_kind == 'table':
            # Tables are labeled by column name and absolute row number.
            col = info.columns_names[column]
            title = '{0}: {1}[{2}]'.format(node.name, col,
                                           tmodel.start + row)
        else:
            # Arrays are labeled by (row, column) coordinates.
            title = '{0}: ({1},{2})'.format(node.name,
                                            tmodel.start + row, column)
        zoom_cell.ZoomCell(data, title, self.vtgui.workspace,
                           self.dbt_leaf)
| uvemas/ViTables | vitables/vttables/datasheet.py | Python | gpl-3.0 | 5,873 |
#!/usr/bin/env python3
"""Publish a test director scene showing the same streetview panorama in
every viewport of the display wall.
"""
import json

import rospy
from interactivespaces_msgs.msg import GenericMessage

# A few well-known panoramas to test with
P_SANDIEGO = '1ZPwYtCwgW6bu7gi7n3B4Q'
P_TEST = 'RJd2HuqmShMAAAQfCa3ulg'
P_PHOTOSPHERE = 'F:-gVtvWrACv2k/Vnh0Vg8Z8YI/AAAAAAABLWA/a-AT4Wb8MD8'

PANOID = P_SANDIEGO

# One streetview window is created for each of these viewports
_VIEWPORTS = [
    'center',
    'right_one',
    'right_two',
    'right_three',
    'left_one',
    'left_two',
    'left_three',
]


def _streetview_window(viewport, panoid):
    """Return one 1920x1080 streetview window definition for *viewport*."""
    return {
        'activity': 'streetview',
        'assets': [{'panoid': panoid}],
        'height': 1080,
        'presentation_viewport': viewport,
        'width': 1920,
        'x_coord': 0,
        'y_coord': 0,
    }


# Build the scene with json.dumps instead of %-interpolating a hand-written
# template: the payload stays valid JSON even if the panoid ever needs
# escaping, and the identical window block is no longer copy-pasted once
# per viewport.
DIRECTOR_MESSAGE = json.dumps({
    'description': 'bogus',
    'duration': 0,
    'name': 'test whatever',
    'resource_uri': 'bogus',
    'slug': 'test message',
    'windows': [_streetview_window(v, PANOID) for v in _VIEWPORTS],
}, indent=4)

scene_msg = GenericMessage()
scene_msg.type = 'json'
scene_msg.message = DIRECTOR_MESSAGE

rospy.init_node('sv_messager')
scene_pub = rospy.Publisher('/director/scene', GenericMessage, queue_size=100)
# Give subscribers a moment to connect before publishing, and a moment for
# the message to go out before the node exits
rospy.sleep(2)
scene_pub.publish(scene_msg)
rospy.sleep(2)
| EndPointCorp/lg_ros_nodes | lg_sv/test/test_scene.py | Python | apache-2.0 | 2,444 |
# coding: utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright 1997 - July 2008 CWI, August 2008 - 2016 MonetDB B.V.
import os
import unittest
import pymonetdb
# MonetDB test-server connection settings.  Every value can be overridden
# through the environment so the suite can target a non-default server.
_env = os.environ.get
MAPIPORT = int(_env('MAPIPORT', 50000))
TSTDB = _env('TSTDB', 'demo')
TSTHOSTNAME = _env('TSTHOSTNAME', 'localhost')
TSTUSERNAME = _env('TSTUSERNAME', 'monetdb')
TSTPASSWORD = _env('TSTPASSWORD', 'monetdb')
class TestUnicode(unittest.TestCase):
def setUp(self):
self.con = pymonetdb.connect(database=TSTDB, port=MAPIPORT,
hostname=TSTHOSTNAME,
username=TSTUSERNAME,
password=TSTPASSWORD)
cursor = self.con.cursor()
cursor.execute('create table bla (s VARCHAR(1000))')
def tearDown(self):
cursor = self.con.cursor()
cursor.execute('drop table bla')
def test_unicode(self):
cursor = self.con.cursor()
x = u"drôle de m’a réveillé. « S’il plaît… dessine-moi un»"
cursor.execute(u'insert into bla VALUES (%s)', (x,))
cursor.execute(u'select * from bla')
self.assertEqual(x, cursor.fetchone()[0])
# Allow running this test module directly, outside a test runner
if __name__ == '__main__':
    unittest.main()
| pholanda/pymonetdb | test/test_unicode.py | Python | mpl-2.0 | 1,442 |
# Hand-curated mapping of Android permissions to the API entry points that
# require them.  Layout: permission -> fully qualified class name -> list of
# [kind, signature, return type] triples, where kind is "F" for a
# function/method and "C" for a constant field.
PERMISSIONS_BY_HAND = {
    "SEND_SMS": {
        "android.telephony.SmsManager": [
            ["F", "getDefault()", "static SmsManager"],
            ["F", "sendDataMessage(java.lang.String, java.lang.String, short, byte[], PendingIntent, PendingIntent)", "void"],
            # sendMultipartTextMessage is not mapped yet
            ["F", "sendTextMessage(java.lang.String, java.lang.String, java.lang.String, PendingIntent, PendingIntent)", "void"],
        ],
        "android.telephony.gsm.SmsManager": [
            ["F", "getDefault()", "static android.telephony.gsm.SmsManager"],
            ["F", "sendDataMessage(java.lang.String, java.lang.String, short, byte[], PendingIntent, PendingIntent)", "void"],
            # sendMultipartTextMessage is not mapped yet
            ["F", "sendTextMessage(java.lang.String, java.lang.String, java.lang.String, PendingIntent, PendingIntent)", "void"],
        ],
    },
    "SET_WALLPAPER": {
        "android.app.WallpaperManager": [
            ["F", "setBitmap(Bitmap)", "void"],
        ],
    },
    "READ_CONTACTS": {
        "android.provider.ContactsContract$CommonDataKinds$Phone": [
            ["C", "CONTENT_URI", "Uri"],
        ],
    },
}
| Ramble01/androguard | tools/api/permissions_by_hand.py | Python | apache-2.0 | 1,718 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: update UserAction's Meta options only (ordering and
    verbose names); no database schema change is involved.
    """

    dependencies = [
        ('goals', '0142_auto_20160428_1858'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='useraction',
            options={'verbose_name': 'User Action', 'ordering': ['user', 'next_trigger_date', 'action'], 'verbose_name_plural': 'User Actions'},
        ),
    ]
| izzyalonso/tndata_backend | tndata_backend/goals/migrations/0143_auto_20160509_1721.py | Python | mit | 472 |
# Generated by Django 1.11.2 on 2017-08-08 02:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: allow Letter.eml to be blank/null."""

    dependencies = [("letters", "0005_letter_note")]

    operations = [
        migrations.AlterField(
            model_name="letter",
            name="eml",
            field=models.FileField(
                blank=True,
                null=True,
                # BUG FIX: upload_to was the bytes literal
                # b"messages/%Y/%m/%d" (a Python 2 leftover); under
                # Python 3 FileField's path handling joins it with str
                # values and raises TypeError, so use a plain str.
                upload_to="messages/%Y/%m/%d",
                verbose_name="File",
            ),
        )
    ]
| watchdogpolska/feder | feder/letters/migrations/0006_auto_20170808_0252.py | Python | mit | 501 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
"""
:Author: Ronan Delacroix
:Copyright: (c) 2018 Ronan Delacroix
"""
import mongoengine
import jobmanager.common
class DockerImage(jobmanager.common.NamedDocument):
    """MongoDB document describing a Docker image known to the job manager."""

    meta = {
        'queryset_class': jobmanager.common.SerializableQuerySet,
        'indexes': [
            'uuid',
            'created',
            'name'
        ]
    }
    # Human readable image name; the only required field.
    name = mongoengine.StringField(required=True)
    # Docker's own identifier for the image.
    image_id = mongoengine.StringField()
    # Registry/source URLs for the image.
    url = mongoengine.ListField(mongoengine.StringField())
    # Free-form labels attached to the image.
    tags = mongoengine.ListField(field=mongoengine.StringField())
    # NOTE(review): presumably identifiers of jobs/tasks that this image
    # can run -- confirm against the job manager scheduling code.
    jobs = mongoengine.ListField(field=mongoengine.StringField())
    tasks = mongoengine.ListField(field=mongoengine.StringField())
    # Python requirements and apt packages baked into the image.
    requirements = mongoengine.ListField(field=mongoengine.StringField())
    apt_packages = mongoengine.ListField(field=mongoengine.StringField())
    # Dockerfile source used to build the image.
    dockerfile = mongoengine.StringField()
| ronhanson/python-jobmanager-common | jobmanager/common/docker.py | Python | mit | 964 |
from . import queues
from . import stacks
from . import lists
from . import arrays
| OctavianLee/Bauhinia | bauhinia/__init__.py | Python | mit | 83 |
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## Modified by Maxence Tury <maxence.tury@ssi.gouv.fr>
## This program is published under a GPLv2 license
"""
ASN.1 (Abstract Syntax Notation One)
"""
import random
from datetime import datetime

from scapy.config import conf
from scapy.error import Scapy_Exception, warning
from scapy.utils import Enum_metaclass, EnumElement, binrepr
# RandIP is needed by RandASN1Object._fix(); it was referenced without
# being imported, which raised NameError at runtime.
from scapy.volatile import RandField, RandIP
class RandASN1Object(RandField):
    """Random ASN.1 object generator, used for fuzzing.

    Draws a random type from *objlist* (by default, every universal type
    with a registered ASN.1 object class) and builds a random instance.
    """
    def __init__(self, objlist=None):
        self.objlist = [
            x._asn1_obj
            for x in ASN1_Class_UNIVERSAL.__rdict__.itervalues()
            if hasattr(x, "_asn1_obj")
        ] if objlist is None else objlist
        # Alphabet used to build random string values
        self.chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    def _fix(self, n=0):
        # n tracks the nesting depth so random sequences stay bounded (<10)
        o = random.choice(self.objlist)
        if issubclass(o, ASN1_INTEGER):
            return o(int(random.gauss(0,1000)))
        elif issubclass(o, ASN1_IPADDRESS):
            z = RandIP()._fix()
            return o(z)
        elif issubclass(o, ASN1_STRING):
            # String length drawn from an exponential distribution
            z = int(random.expovariate(0.05)+1)
            return o("".join(random.choice(self.chars) for _ in xrange(z)))
        elif issubclass(o, ASN1_SEQUENCE) and (n < 10):
            z = int(random.expovariate(0.08)+1)
            return o([self.__class__(objlist=self.objlist)._fix(n + 1)
                      for _ in xrange(z)])
        # Fallback for any other drawn type: a random integer object
        return ASN1_INTEGER(int(random.gauss(0,1000)))
##############
#### ASN1 ####
##############

class ASN1_Error(Scapy_Exception):
    """Base class for every ASN.1 error raised by this module."""
    pass

class ASN1_Encoding_Error(ASN1_Error):
    pass

class ASN1_Decoding_Error(ASN1_Error):
    pass

class ASN1_BadTag_Decoding_Error(ASN1_Decoding_Error):
    pass
class ASN1Codec(EnumElement):
    """Enumeration element representing one ASN.1 codec (BER, DER, ...).

    The actual encoder/decoder implementation (the "stem") is attached
    later with register_stem() and used through dec()/safedec().
    """
    def register_stem(cls, stem):
        cls._stem = stem
    def dec(cls, s, context=None):
        return cls._stem.dec(s, context=context)
    def safedec(cls, s, context=None):
        return cls._stem.safedec(s, context=context)
    def get_stem(cls):
        # BUG FIX: this returned cls.stem, but register_stem() stores the
        # codec under cls._stem, so get_stem() always raised AttributeError.
        return cls._stem
class ASN1_Codecs_metaclass(Enum_metaclass):
    # Each integer attribute of ASN1_Codecs becomes an ASN1Codec element
    element_class = ASN1Codec

class ASN1_Codecs:
    """Enumeration of the supported (or at least named) sets of ASN.1
    encoding rules.  Only BER is actually implemented by scapy.
    """
    __metaclass__ = ASN1_Codecs_metaclass
    BER = 1
    DER = 2
    PER = 3
    CER = 4
    LWER = 5
    BACnet = 6
    OER = 7
    SER = 8
    XER = 9
class ASN1Tag(EnumElement):
    """Enumeration element for one ASN.1 tag, optionally tied to an ASN.1
    object class (register_asn1_object) and to per-codec implementations
    (register/get_codec).
    """
    def __init__(self, key, value, context=None, codec=None):
        EnumElement.__init__(self, key, value)
        self._context = context
        # Idiom fix: compare to None with 'is' instead of '=='.
        if codec is None:
            codec = {}
        self._codec = codec
    def clone(self): # /!\ not a real deep copy. self.codec is shared
        return self.__class__(self._key, self._value, self._context, self._codec)
    def register_asn1_object(self, asn1obj):
        self._asn1_obj = asn1obj
    def asn1_object(self, val):
        # Build an instance of the ASN.1 object class tied to this tag
        if hasattr(self,"_asn1_obj"):
            return self._asn1_obj(val)
        raise ASN1_Error("%r does not have any assigned ASN1 object" % self)
    def register(self, codecnum, codec):
        self._codec[codecnum] = codec
    def get_codec(self, codec):
        # Modernized from 'except KeyError,msg' (Python 2-only comma
        # syntax); the bound exception value was never used.
        try:
            c = self._codec[codec]
        except KeyError:
            raise ASN1_Error("Codec %r not found for tag %r" % (codec, self))
        return c
class ASN1_Class_metaclass(Enum_metaclass):
    """Metaclass that promotes plain integer class attributes to ASN1Tag
    instances and builds the reverse lookup table __rdict__.
    """
    element_class = ASN1Tag
    def __new__(cls, name, bases, dct): # XXX factorise a bit with Enum_metaclass.__new__()
        # Inherit tags from base classes as clones so each class owns its
        # own ASN1Tag instances
        for b in bases:
            for k,v in b.__dict__.iteritems():
                if k not in dct and isinstance(v,ASN1Tag):
                    dct[k] = v.clone()
        rdict = {}
        for k,v in dct.iteritems():
            if type(v) is int:
                # Promote a bare integer attribute to an ASN1Tag element
                v = ASN1Tag(k,v)
                dct[k] = v
                rdict[v] = v
            elif isinstance(v, ASN1Tag):
                rdict[v] = v
        dct["__rdict__"] = rdict
        cls = type.__new__(cls, name, bases, dct)
        for v in cls.__dict__.values():
            if isinstance(v, ASN1Tag):
                v.context = cls # overwrite ASN1Tag contexts, even cloned ones
        return cls

class ASN1_Class:
    """Base class for ASN.1 tag namespaces (Python 2 metaclass syntax)."""
    __metaclass__ = ASN1_Class_metaclass
class ASN1_Class_UNIVERSAL(ASN1_Class):
    """Universal ASN.1 tag numbers.  Negative values are scapy-internal
    helper tags (errors/raw data); values or'ed with 0x20 or 0x40 carry
    the constructed or application-specific encoding bit respectively.
    """
    name = "UNIVERSAL"
    ERROR = -3
    RAW = -2
    NONE = -1
    ANY = 0
    BOOLEAN = 1
    INTEGER = 2
    BIT_STRING = 3
    STRING = 4
    NULL = 5
    OID = 6
    OBJECT_DESCRIPTOR = 7
    EXTERNAL = 8
    REAL = 9
    ENUMERATED = 10
    EMBEDDED_PDF = 11
    UTF8_STRING = 12
    RELATIVE_OID = 13
    SEQUENCE = 16|0x20 # constructed encoding
    SET = 17|0x20 # constructed encoding
    NUMERIC_STRING = 18
    PRINTABLE_STRING = 19
    T61_STRING = 20 # aka TELETEX_STRING
    VIDEOTEX_STRING = 21
    IA5_STRING = 22
    UTC_TIME = 23
    GENERALIZED_TIME = 24
    GRAPHIC_STRING = 25
    ISO646_STRING = 26 # aka VISIBLE_STRING
    GENERAL_STRING = 27
    UNIVERSAL_STRING = 28
    CHAR_STRING = 29
    BMP_STRING = 30
    IPADDRESS = 0|0x40 # application-specific encoding
    COUNTER32 = 1|0x40 # application-specific encoding
    TIME_TICKS = 3|0x40 # application-specific encoding
class ASN1_Object_metaclass(type):
    """Metaclass that auto-registers each ASN1_Object subclass with its tag."""
    def __new__(cls, name, bases, dct):
        c = super(ASN1_Object_metaclass, cls).__new__(cls, name, bases, dct)
        try:
            c.tag.register_asn1_object(c)
        except Exception:
            # Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt; keep the best-effort registration but
            # only catch real errors.
            # NOTE(review): no 'codec' attribute is set anywhere in this
            # module, so if this branch ever triggers the warning call
            # itself may raise -- confirm against scapy's utils.warning.
            warning("Error registering %r for %r" % (c.tag, c.codec))
        return c
class ASN1_Object:
    """Base class of every ASN.1 value object; wraps a raw value and
    delegates encoding to the codec registered on its tag.
    """
    __metaclass__ = ASN1_Object_metaclass
    tag = ASN1_Class_UNIVERSAL.ANY
    def __init__(self, val):
        self.val = val
    def enc(self, codec):
        # Encode self.val with the requested codec (e.g. ASN1_Codecs.BER)
        return self.tag.get_codec(codec).enc(self.val)
    def __repr__(self):
        return "<%s[%r]>" % (self.__dict__.get("name", self.__class__.__name__), self.val)
    def __str__(self):
        # str() yields the encoded byte string using the configured codec
        return self.enc(conf.ASN1_default_codec)
    def strshow(self, lvl=0):
        # One indented line per object; overridden by sequence types
        return (" "*lvl)+repr(self)+"\n"
    def show(self, lvl=0):
        print self.strshow(lvl)
    def __eq__(self, other):
        # Compare on the wrapped value, so ASN1_INTEGER(1) == 1 holds
        return self.val == other
    def __cmp__(self, other):
        return cmp(self.val, other)
#######################
#### ASN1 objects ####
#######################

# on the whole, we order the classes by ASN1_Class_UNIVERSAL tag value

class ASN1_DECODING_ERROR(ASN1_Object):
    """Placeholder object produced when decoding fails; keeps both the
    offending raw value and the exception that was raised.
    """
    tag = ASN1_Class_UNIVERSAL.ERROR
    def __init__(self, val, exc=None):
        ASN1_Object.__init__(self, val)
        # Exception that caused the decoding failure, shown by repr()
        self.exc = exc
    def __repr__(self):
        return "<%s[%r]{{%s}}>" % (self.__dict__.get("name", self.__class__.__name__),
                                   self.val, self.exc.args[0])
    def enc(self, codec):
        # Re-emit the raw value as-is, or delegate if it is an ASN.1 object
        if isinstance(self.val, ASN1_Object):
            return self.val.enc(codec)
        return self.val

class ASN1_force(ASN1_Object):
    """Raw value passed through encoding untouched."""
    tag = ASN1_Class_UNIVERSAL.RAW
    def enc(self, codec):
        if isinstance(self.val, ASN1_Object):
            return self.val.enc(codec)
        return self.val

class ASN1_BADTAG(ASN1_force):
    pass
class ASN1_INTEGER(ASN1_Object):
    tag = ASN1_Class_UNIVERSAL.INTEGER
    def __repr__(self):
        # Show both hex and decimal forms, each abbreviated when too long
        h = hex(self.val)
        if h[-1] == "L":
            # Python 2 longs end with an "L" suffix; drop it
            h = h[:-1]
        # cut at 22 because with leading '0x', x509 serials should be < 23
        if len(h) > 22:
            h = h[:12] + "..." + h[-10:]
        r = repr(self.val)
        if len(r) > 20:
            r = r[:10] + "..." + r[-10:]
        return h + " <%s[%s]>" % (self.__dict__.get("name", self.__class__.__name__), r)

class ASN1_BOOLEAN(ASN1_INTEGER):
    tag = ASN1_Class_UNIVERSAL.BOOLEAN
    # BER: 0 means False, anything else means True
    def __repr__(self):
        return str((not (self.val==0))) + " " + ASN1_Object.__repr__(self)
class ASN1_BIT_STRING(ASN1_Object):
    """
    /!\ ASN1_BIT_STRING values are bit strings like "011101".
    /!\ A zero-bit padded readable string is provided nonetheless.
    """
    tag = ASN1_Class_UNIVERSAL.BIT_STRING
    def __init__(self, val, readable=False):
        if readable:
            # val is a byte string: expand every byte into 8 "0"/"1" chars
            self.val_readable = val
            val = "".join(binrepr(ord(x)).zfill(8) for x in val)
            self.unused_bits = 0
        else:
            # val is a "0101..." bit string: pad to a whole number of
            # bytes and derive the byte-string form
            if len(val) % 8 == 0:
                self.unused_bits = 0
            else:
                self.unused_bits = 8 - len(val)%8
            padded_val = val + "0"*self.unused_bits
            # Grouper idiom: consume the padded bit string 8 bits at a time
            bytes_arr = zip(*[iter(padded_val)]*8)
            self.val_readable = "".join(chr(int("".join(x),2)) for x in bytes_arr)
        ASN1_Object.__init__(self, val)
    def __repr__(self):
        # Short values are shown as bits, long ones as abbreviated bytes
        if len(self.val) <= 16:
            return "<%s[%r] (%d unused bit%s)>" % (self.__dict__.get("name", self.__class__.__name__), self.val, self.unused_bits, "s" if self.unused_bits>1 else "")
        else:
            s = self.val_readable
            if len(s) > 20:
                s = s[:10] + "..." + s[-10:]
            return "<%s[%r] (%d unused bit%s)>" % (self.__dict__.get("name", self.__class__.__name__), s, self.unused_bits, "s" if self.unused_bits>1 else "")
# Simple string-like and scalar types: each one only re-binds the tag.

class ASN1_STRING(ASN1_Object):
    tag = ASN1_Class_UNIVERSAL.STRING

class ASN1_NULL(ASN1_Object):
    tag = ASN1_Class_UNIVERSAL.NULL
    def __repr__(self):
        return ASN1_Object.__repr__(self)

class ASN1_OID(ASN1_Object):
    tag = ASN1_Class_UNIVERSAL.OID
    def __init__(self, val):
        # Normalise through the MIB database and keep the human readable
        # name for repr()
        val = conf.mib._oid(val)
        ASN1_Object.__init__(self, val)
        self.oidname = conf.mib._oidname(val)
    def __repr__(self):
        return "<%s[%r]>" % (self.__dict__.get("name", self.__class__.__name__), self.oidname)

class ASN1_ENUMERATED(ASN1_INTEGER):
    tag = ASN1_Class_UNIVERSAL.ENUMERATED

class ASN1_UTF8_STRING(ASN1_STRING):
    tag = ASN1_Class_UNIVERSAL.UTF8_STRING

class ASN1_NUMERIC_STRING(ASN1_STRING):
    tag = ASN1_Class_UNIVERSAL.NUMERIC_STRING

class ASN1_PRINTABLE_STRING(ASN1_STRING):
    tag = ASN1_Class_UNIVERSAL.PRINTABLE_STRING

class ASN1_T61_STRING(ASN1_STRING):
    tag = ASN1_Class_UNIVERSAL.T61_STRING

class ASN1_VIDEOTEX_STRING(ASN1_STRING):
    tag = ASN1_Class_UNIVERSAL.VIDEOTEX_STRING

class ASN1_IA5_STRING(ASN1_STRING):
    tag = ASN1_Class_UNIVERSAL.IA5_STRING
class ASN1_UTC_TIME(ASN1_STRING):
    tag = ASN1_Class_UNIVERSAL.UTC_TIME
    def __init__(self, val):
        # Pre-compute a human readable date for repr(); only the
        # "YYMMDDhhmmssZ" form (13 chars, Zulu time) is recognised,
        # anything else keeps an empty pretty_time
        pretty_time = ""
        if len(val) == 13 and val[-1] == "Z":
            dt = datetime.strptime(val[:-1], "%y%m%d%H%M%S")
            pretty_time = dt.strftime("%b %d %H:%M:%S %Y GMT")
        self.pretty_time = pretty_time
        ASN1_STRING.__init__(self, val)
    def __repr__(self):
        return self.pretty_time + " " + ASN1_STRING.__repr__(self)

class ASN1_GENERALIZED_TIME(ASN1_STRING):
    tag = ASN1_Class_UNIVERSAL.GENERALIZED_TIME
    def __init__(self, val):
        # Same as ASN1_UTC_TIME but with a 4-digit year ("YYYYMMDDhhmmssZ")
        pretty_time = ""
        if len(val) == 15 and val[-1] == "Z":
            dt = datetime.strptime(val[:-1], "%Y%m%d%H%M%S")
            pretty_time = dt.strftime("%b %d %H:%M:%S %Y GMT")
        self.pretty_time = pretty_time
        ASN1_STRING.__init__(self, val)
    def __repr__(self):
        return self.pretty_time + " " + ASN1_STRING.__repr__(self)

class ASN1_ISO646_STRING(ASN1_STRING):
    tag = ASN1_Class_UNIVERSAL.ISO646_STRING

class ASN1_UNIVERSAL_STRING(ASN1_STRING):
    tag = ASN1_Class_UNIVERSAL.UNIVERSAL_STRING

class ASN1_BMP_STRING(ASN1_STRING):
    tag = ASN1_Class_UNIVERSAL.BMP_STRING
class ASN1_SEQUENCE(ASN1_Object):
    tag = ASN1_Class_UNIVERSAL.SEQUENCE
    def strshow(self, lvl=0):
        # Recursive display: one header line, then each member indented
        s = (" "*lvl)+("# %s:" % self.__class__.__name__)+"\n"
        for o in self.val:
            s += o.strshow(lvl=lvl+1)
        return s

class ASN1_SET(ASN1_SEQUENCE):
    tag = ASN1_Class_UNIVERSAL.SET

# Application-specific types used by SNMP
class ASN1_IPADDRESS(ASN1_STRING):
    tag = ASN1_Class_UNIVERSAL.IPADDRESS

class ASN1_COUNTER32(ASN1_INTEGER):
    tag = ASN1_Class_UNIVERSAL.COUNTER32

class ASN1_TIME_TICKS(ASN1_INTEGER):
    tag = ASN1_Class_UNIVERSAL.TIME_TICKS

# Default codec used by ASN1_Object.__str__
conf.ASN1_default_codec = ASN1_Codecs.BER
| mytliulei/Scapy | scapy/asn1/asn1.py | Python | apache-2.0 | 12,094 |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import (abs, arctan2, asarray, cos, exp, floor, log, log10,
arange, pi, prod, roll, sign, sin, sqrt, sum, where,
zeros, tan, tanh, dot)
try:
    from scipy.special import factorial
except ImportError:
    # scipy is optional here; benchmarks that need factorial (e.g.
    # Mishra07) will raise NameError if it is unavailable
    pass
from .go_benchmark import Benchmark
class Matyas(Benchmark):
    r"""
    Matyas objective function.

    This class defines the Matyas [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Matyas}}(x) = 0.26(x_1^2 + x_2^2) - 0.48 x_1 x_2

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 0]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        # [-10, 10] on every coordinate
        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))

        self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        sum_of_squares = x[0] ** 2 + x[1] ** 2
        cross_term = x[0] * x[1]
        return 0.26 * sum_of_squares - 0.48 * cross_term
class McCormick(Benchmark):
    r"""
    McCormick objective function.

    This class defines the McCormick [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{McCormick}}(x) = - x_{1} + 2 x_{2} + \left(x_{1}
        - x_{2}\right)^{2} + \sin\left(x_{1} + x_{2}\right) + 1

    with :math:`x_1 \in [-1.5, 4]`, :math:`x_2 \in [-3, 4]`.

    *Global optimum*: :math:`f(x) = -1.913222954981037` for
    :math:`x = [-0.5471975602214493, -1.547197559268372]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        # NOTE(review): the docstring gives x2 in [-3, 4] but the bound
        # used here is (-3.0, 3.0) -- confirm against the reference.
        self._bounds = [(-1.5, 4.0), (-3.0, 3.0)]

        self.global_optimum = [[-0.5471975602214493, -1.547197559268372]]
        self.fglob = -1.913222954981037

    def fun(self, x, *args):
        self.nfev += 1

        return (sin(x[0] + x[1]) + (x[0] - x[1]) ** 2 - 1.5 * x[0]
                + 2.5 * x[1] + 1)

class Meyer(Benchmark):
    r"""
    Meyer [1]_ objective function.

    .. [1] https://www.itl.nist.gov/div898/strd/nls/data/mgh10.shtml

    TODO NIST regression standard
    """

    def __init__(self, dimensions=3):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([0., 100., 100.],
                                [1, 1000., 500.]))
        # Certified NIST solution and residual sum of squares
        self.global_optimum = [[5.6096364710e-3, 6.1813463463e3,
                                3.4522363462e2]]
        self.fglob = 8.7945855171e1

        # NIST MGH10 data set: observed values (a) and predictor (b)
        self.a = asarray([3.478E+04, 2.861E+04, 2.365E+04, 1.963E+04, 1.637E+04,
                          1.372E+04, 1.154E+04, 9.744E+03, 8.261E+03, 7.030E+03,
                          6.005E+03, 5.147E+03, 4.427E+03, 3.820E+03, 3.307E+03,
                          2.872E+03])
        self.b = asarray([5.000E+01, 5.500E+01, 6.000E+01, 6.500E+01, 7.000E+01,
                          7.500E+01, 8.000E+01, 8.500E+01, 9.000E+01, 9.500E+01,
                          1.000E+02, 1.050E+02, 1.100E+02, 1.150E+02, 1.200E+02,
                          1.250E+02])

    def fun(self, x, *args):
        self.nfev += 1

        # Model y = x1 * exp(x2 / (b + x3)); objective is the SSE
        vec = x[0] * exp(x[1] / (self.b + x[2]))
        return sum((self.a - vec) ** 2)
class Michalewicz(Benchmark):
    r"""
    Michalewicz objective function.

    This class defines the Michalewicz [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Michalewicz}}(x) = - \sum_{i=1}^{2} \sin\left(x_i\right)
        \sin^{2 m}\left(\frac{i x_i^{2}}{\pi}\right)

    Where, in this exercise, :math:`m = 10`.

    with :math:`x_i \in [0, \pi]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x_i) = -1.8013` for :math:`x = [0, 0]`

    .. [1] Adorio, E. MVF - "Multivariate Test Functions Library in C for
    Unconstrained Global Optimization", 2005

    TODO: could change dimensionality, but global minimum might change.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([0.0] * self.N, [pi] * self.N))

        self.global_optimum = [[2.20290555, 1.570796]]
        self.fglob = -1.8013

    def fun(self, x, *args):
        self.nfev += 1

        # m controls the steepness of the valleys
        m = 10.0
        i = arange(1, self.N + 1)
        return -sum(sin(x) * sin(i * x ** 2 / pi) ** (2 * m))

class MieleCantrell(Benchmark):
    r"""
    Miele-Cantrell [1]_ objective function.

    This class defines the Miele-Cantrell global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{MieleCantrell}}({x}) = (e^{-x_1} - x_2)^4 + 100(x_2 - x_3)^6
        + \tan^4(x_3 - x_4) + x_1^8

    with :math:`x_i \in [-1, 1]` for :math:`i = 1, ..., 4`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 1, 1, 1]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))

        self.global_optimum = [[0.0, 1.0, 1.0, 1.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        return ((exp(-x[0]) - x[1]) ** 4 + 100 * (x[1] - x[2]) ** 6
                + tan(x[2] - x[3]) ** 4 + x[0] ** 8)
class Mishra01(Benchmark):
    r"""
    Mishra 1 objective function.

    This class defines the Mishra 1 [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Mishra01}}(x) = (1 + x_n)^{x_n}

    where

    .. math::

        x_n = n - \sum_{i=1}^{n-1} x_i

    with :math:`x_i \in [0, 1]` for :math:`i =1, ..., n`.

    *Global optimum*: :math:`f(x) = 2` for :math:`x_i = 1` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        # Upper bound nudged by 1e-9 so the optimum at x_i = 1 lies
        # strictly inside the search box
        self._bounds = list(zip([0.0] * self.N,
                           [1.0 + 1e-9] * self.N))

        self.global_optimum = [[1.0 for _ in range(self.N)]]
        self.fglob = 2.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        xn = self.N - sum(x[0:-1])
        return (1 + xn) ** xn

class Mishra02(Benchmark):
    r"""
    Mishra 2 objective function.

    This class defines the Mishra 2 [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Mishra02}}({x}) = (1 + x_n)^{x_n}

    with

    .. math::

        x_n = n - \sum_{i=1}^{n-1} \frac{(x_i + x_{i+1})}{2}

    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [0, 1]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 2` for :math:`x_i = 1`
    for :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([0.0] * self.N,
                           [1.0 + 1e-9] * self.N))

        self.global_optimum = [[1.0 for _ in range(self.N)]]
        self.fglob = 2.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Average of consecutive pairs instead of the raw coordinates
        xn = self.N - sum((x[:-1] + x[1:]) / 2.0)
        return (1 + xn) ** xn
class Mishra03(Benchmark):
    r"""
    Mishra 3 objective function.

    This class defines the Mishra 3 [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Mishra03}}(x) = \sqrt{\lvert \cos{\sqrt{\lvert x_1^2
        + x_2^2 \rvert}} \rvert} + 0.01(x_1 + x_2)

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -0.1999` for
    :math:`x = [-9.99378322, -9.99918927]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO: I think that Jamil#76 has the wrong global minimum, a smaller one
    is possible
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))

        self.global_optimum = [[-9.99378322, -9.99918927]]
        self.fglob = -0.19990562

    def fun(self, x, *args):
        self.nfev += 1

        return ((0.01 * (x[0] + x[1])
                + sqrt(abs(cos(sqrt(abs(x[0] ** 2 + x[1] ** 2)))))))

class Mishra04(Benchmark):
    r"""
    Mishra 4 objective function.

    This class defines the Mishra 4 [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Mishra04}}({x}) = \sqrt{\lvert \sin{\sqrt{\lvert
        x_1^2 + x_2^2 \rvert}} \rvert} + 0.01(x_1 + x_2)

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -0.17767` for
    :math:`x = [-8.71499636, -9.0533148]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO: I think that Jamil#77 has the wrong minimum, not possible
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))

        # NOTE(review): optimum/fglob below differ from the docstring
        # values (see the TODO above); kept as-is.
        self.global_optimum = [[-8.88055269734, -8.89097599857]]
        self.fglob = -0.177715264826

    def fun(self, x, *args):
        self.nfev += 1

        return ((0.01 * (x[0] + x[1])
                + sqrt(abs(sin(sqrt(abs(x[0] ** 2 + x[1] ** 2)))))))
class Mishra05(Benchmark):
    r"""
    Mishra 5 objective function.

    This class defines the Mishra 5 [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Mishra05}}(x) = \left [ \sin^2 ((\cos(x_1) + \cos(x_2))^2)
        + \cos^2 ((\sin(x_1) + \sin(x_2))^2) + x_1 \right ]^2 + 0.01(x_1 + x_2)

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -0.119829` for :math:`x = [-1.98682, -10]`

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
    Particle Swarm Methods: Evaluation on Some Benchmark Functions.
    Munich Personal RePEc Archive, 2006, 1005

    TODO Line 381 in paper
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))

        self.global_optimum = [[-1.98682, -10.0]]
        # NOTE(review): differs from the -0.119829 quoted in the docstring
        self.fglob = -1.019829519930646

    def fun(self, x, *args):
        self.nfev += 1

        # NOTE(review): uses 0.01*x1 + 0.1*x2, not the symmetric
        # 0.01*(x1 + x2) of the docstring formula -- kept as-is.
        return (0.01 * x[0] + 0.1 * x[1]
                + (sin((cos(x[0]) + cos(x[1])) ** 2) ** 2
                + cos((sin(x[0]) + sin(x[1])) ** 2) ** 2 + x[0]) ** 2)

class Mishra06(Benchmark):
    r"""
    Mishra 6 objective function.

    This class defines the Mishra 6 [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Mishra06}}(x) = -\log{\left [ \sin^2 ((\cos(x_1)
        + \cos(x_2))^2) - \cos^2 ((\sin(x_1) + \sin(x_2))^2) + x_1 \right ]^2}
        + 0.01 \left[(x_1 -1)^2 + (x_2 - 1)^2 \right]

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x_i) = -2.28395` for :math:`x = [2.88631, 1.82326]`

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
    Particle Swarm Methods: Evaluation on Some Benchmark Functions.
    Munich Personal RePEc Archive, 2006, 1005

    TODO line 397
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))

        self.global_optimum = [[2.88631, 1.82326]]
        self.fglob = -2.28395

    def fun(self, x, *args):
        self.nfev += 1

        # NOTE(review): penalty coefficient here is 0.1, while the
        # docstring formula uses 0.01 -- kept as-is.
        a = 0.1 * ((x[0] - 1) ** 2 + (x[1] - 1) ** 2)
        u = (cos(x[0]) + cos(x[1])) ** 2
        v = (sin(x[0]) + sin(x[1])) ** 2
        return a - log((sin(u) ** 2 - cos(v) ** 2 + x[0]) ** 2)
class Mishra07(Benchmark):
    r"""
    Mishra 7 objective function.

    This class defines the Mishra 7 [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Mishra07}}(x) = \left [\prod_{i=1}^{n} x_i - n! \right]^2

    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = \sqrt{n}`
    for :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.custom_bounds = [(-2, 2), (-2, 2)]
        self.global_optimum = [[sqrt(self.N)
                               for i in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # NOTE: relies on scipy.special.factorial, imported at module
        # level inside a try/except -- raises NameError if scipy is absent
        return (prod(x) - factorial(self.N)) ** 2.0

class Mishra08(Benchmark):
    r"""
    Mishra 8 objective function.

    This class defines the Mishra 8 [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Mishra08}}(x) = 0.001 \left[\lvert x_1^{10} - 20x_1^9
        + 180x_1^8 - 960 x_1^7 + 3360x_1^6 - 8064x_1^5 + 13340x_1^4 - 15360x_1^3
        + 11520x_1^2 - 5120x_1 + 2624 \rvert \lvert x_2^4 + 12x_2^3 + 54x_2^2
        + 108x_2 + 81 \rvert \right]^2

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [2, -3]`

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
    Particle Swarm Methods: Evaluation on Some Benchmark Functions.
    Munich Personal RePEc Archive, 2006, 1005

    TODO Line 1065
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.custom_bounds = [(1.0, 2.0), (-4.0, 1.0)]
        self.global_optimum = [[2.0, -3.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        # Polynomial factors in x1 and x2; both vanish at the optimum
        # (x1 - 2)^10 and (x2 + 3)^4 expanded forms
        val = abs(x[0] ** 10 - 20 * x[0] ** 9 + 180 * x[0] ** 8
                  - 960 * x[0] ** 7 + 3360 * x[0] ** 6 - 8064 * x[0] ** 5
                  + 13340 * x[0] ** 4 - 15360 * x[0] ** 3 + 11520 * x[0] ** 2
                  - 5120 * x[0] + 2624)
        val += abs(x[1] ** 4 + 12 * x[1] ** 3 +
                   54 * x[1] ** 2 + 108 * x[1] + 81)
        return 0.001 * val ** 2
class Mishra09(Benchmark):
r"""
Mishra 9 objective function.
This class defines the Mishra 9 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Mishra09}}({x}) = \left[ ab^2c + abc^2 + b^2
+ (x_1 + x_2 - x_3)^2 \right]^2
Where, in this exercise:
.. math::
\begin{cases} a = 2x_1^3 + 5x_1x_2 + 4x_3 - 2x_1^2x_3 - 18 \\
b = x_1 + x_2^3 + x_1x_2^2 + x_1x_3^2 - 22 \\
c = 8x_1^2 + 2x_2x_3 + 2x_2^2 + 3x_2^3 - 52 \end{cases}
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2, 3`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 2, 3]`
.. [1] Mishra, S. Global Optimization by Differential Evolution and
Particle Swarm Methods: Evaluation on Some Benchmark Functions.
Munich Personal RePEc Archive, 2006, 1005
TODO Line 1103
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[1.0, 2.0, 3.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
a = (2 * x[0] ** 3 + 5 * x[0] * x[1]
+ 4 * x[2] - 2 * x[0] ** 2 * x[2] - 18)
b = x[0] + x[1] ** 3 + x[0] * x[1] ** 2 + x[0] * x[2] ** 2 - 22.0
c = (8 * x[0] ** 2 + 2 * x[1] * x[2]
+ 2 * x[1] ** 2 + 3 * x[1] ** 3 - 52)
return (a * c * b ** 2 + a * b * c ** 2 + b ** 2
+ (x[0] + x[1] - x[2]) ** 2) ** 2
class Mishra10(Benchmark):
    r"""
    Mishra 10 objective function.

    This class defines the Mishra 10 global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Mishra10}}({x}) = \left[ \lfloor x_1 \perp x_2 \rfloor -
        \lfloor x_1 \rfloor - \lfloor x_2 \rfloor \right]^2

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [2, 2]`

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
    Particle Swarm Methods: Evaluation on Some Benchmark Functions.
    Munich Personal RePEc Archive, 2006, 1005

    .. note:: This implementation truncates the coordinates toward zero
       with ``int`` rather than applying ``floor``.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0) for _ in range(self.N)]
        self.global_optimum = [[2.0, 2.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        # Truncate both coordinates, then square the gap between their
        # sum and their product.
        first, second = int(x[0]), int(x[1])
        return float(first + second - first * second) ** 2.0
class Mishra11(Benchmark):
    r"""
    Mishra 11 objective function.

    This class defines the Mishra 11 [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Mishra11}}(x) = \left [ \frac{1}{n} \sum_{i=1}^{n} \lvert x_i
        \rvert - \left(\prod_{i=1}^{n} \lvert x_i \rvert \right )^{\frac{1}{n}}
        \right]^2

    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.custom_bounds = [(-3, 3), (-3, 3)]
        self.global_optimum = [[0.0 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        """Squared gap between the arithmetic and geometric means of ``|x|``.

        Bug fix: the geometric-mean term is ``prod(|x|) ** (1.0 / N)``.
        The previous code wrote ``prod(|x|) ** 1.0 / N`` which, because
        ``**`` binds tighter than ``/``, divided the product by N instead
        of taking its N-th root, contradicting the documented formula.
        (The global optimum at x = 0 is unaffected.)
        """
        self.nfev += 1

        N = self.N
        return ((1.0 / N) * sum(abs(x)) - (prod(abs(x))) ** (1.0 / N)) ** 2.0
class MultiModal(Benchmark):
    r"""
    MultiModal objective function.

    This class defines the MultiModal global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

       f_{\text{MultiModal}}(x) = \left( \sum_{i=1}^n \lvert x_i \rvert
       \right) \left( \prod_{i=1}^n \lvert x_i \rvert \right)

    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.custom_bounds = [(-5, 5), (-5, 5)]
        self.global_optimum = [[0.0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Sum of coordinate magnitudes scaled by their product.
        magnitudes = abs(x)
        return sum(magnitudes) * prod(magnitudes)
| gfyoung/scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_M.py | Python | bsd-3-clause | 21,101 |
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as InternetProvider
class Provider(InternetProvider):
    """Italian (it_IT) internet provider data for Faker."""

    # NOTE: repeated entries act as crude sampling weights — a value listed
    # three times is drawn roughly three times as often by the uniform picker.
    safe_email_tlds = ('com', 'net', 'eu', 'it', 'it', 'it')
    # Common Italian free/consumer mail domains (plus the global ones).
    free_email_domains = (
        'libero.it', 'libero.it', 'libero.it',
        'tim.it',
        'tin.it',
        'alice.it',
        'virgilio.it',
        'tiscali.it',
        'fastwebnet.it',
        'vodafone.it',
        'poste.it',
        'gmail.com', 'gmail.com', 'gmail.com',
        'outlook.com',
        'live.com',
        'hotmail.com',
        'hotmail.it',
        'yahoo.com',
        'tele2.it',
    )
    tlds = ('com', 'com', 'com', 'net', 'org', 'eu', 'it', 'it', 'it', 'it')
    # ASCII transliterations applied when building identifiers from
    # accented Italian names.
    replacements = (
        ('à', 'a'), ('é', 'e'), ('è', 'e'),
        ('ì', 'i'), ('ò', 'o'), ('ù', 'u'),
    )
| deanishe/alfred-fakeum | src/libs/faker/providers/internet/it_IT/__init__.py | Python | mit | 819 |
#########################################################
# Name: Asmit De #
# ID: aud311 #
# Date: 09/07/2016 #
# Assignment: Homework 1, Problem 7 #
# Description: Program to convert miles to kilometers #
#########################################################
# Distance to convert, expressed in miles.
d_miles = 500
# Approximate number of kilometers per mile.
conversion_factor = 1.6
# Convert the distance from miles to kilometers.
d_km = d_miles * conversion_factor
# Report the converted distance.
print('Distance is equivalent to', d_km, 'km')
| asmitde/TA-PSU-CMPSC101 | Fall 2016/Homeworks/HW1/Solution/problem7.py | Python | apache-2.0 | 680 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleParserError
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.helpers import load_list_of_tasks
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
class Block(Base, Become, Conditional, Taggable):
    """A grouping of tasks with optional ``rescue``/``always`` sections,
    plus the become/conditional/tag behaviours mixed in from its bases.
    """

    # main block fields containing the task lists
    _block = FieldAttribute(isa='list', default=[], inherit=False)
    _rescue = FieldAttribute(isa='list', default=[], inherit=False)
    _always = FieldAttribute(isa='list', default=[], inherit=False)

    # other fields
    _any_errors_fatal = FieldAttribute(isa='bool')
    _delegate_to = FieldAttribute(isa='list')
    _delegate_facts = FieldAttribute(isa='bool', default=False)
    _name = FieldAttribute(isa='string', default='')

    # for future consideration? this would be functionally
    # similar to the 'else' clause for exceptions
    #_otherwise = FieldAttribute(isa='list')

    def __init__(self, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, implicit=False):
        # An implicit block is one synthesized around a bare task (see load()).
        self._play = play
        self._role = role
        self._parent = None
        self._dep_chain = None
        self._use_handlers = use_handlers
        self._implicit = implicit
        # end of role flag
        self._eor = False
        # A task include takes precedence over a parent block as our parent.
        if task_include:
            self._parent = task_include
        elif parent_block:
            self._parent = parent_block
        super(Block, self).__init__()

    def __repr__(self):
        return "BLOCK(uuid=%s)(id=%s)(parent=%s)" % (self._uuid, id(self), self._parent)

    def get_vars(self):
        '''
        Blocks do not store variables directly, however they may be a member
        of a role or task include which does, so return those if present.
        '''
        all_vars = self.vars.copy()
        if self._parent:
            all_vars.update(self._parent.get_vars())
        return all_vars

    @staticmethod
    def load(data, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
        # A bare task (or task list) becomes an implicit block.
        implicit = not Block.is_block(data)
        b = Block(play=play, parent_block=parent_block, role=role, task_include=task_include, use_handlers=use_handlers, implicit=implicit)
        return b.load_data(data, variable_manager=variable_manager, loader=loader)

    @staticmethod
    def is_block(ds):
        # A dict carrying any of the block keywords is an explicit block.
        is_block = False
        if isinstance(ds, dict):
            for attr in ('block', 'rescue', 'always'):
                if attr in ds:
                    is_block = True
                    break
        return is_block

    def preprocess_data(self, ds):
        '''
        If a simple task is given, an implicit block for that single task
        is created, which goes in the main portion of the block
        '''
        if not Block.is_block(ds):
            if isinstance(ds, list):
                return super(Block, self).preprocess_data(dict(block=ds))
            else:
                return super(Block, self).preprocess_data(dict(block=[ds]))
        return super(Block, self).preprocess_data(ds)

    def _load_block(self, attr, ds):
        # Field loader for 'block': delegates to the shared task-list helper,
        # reporting structural problems as parser errors.
        try:
            return load_list_of_tasks(
                ds,
                play=self._play,
                block=self,
                role=self._role,
                task_include=None,
                variable_manager=self._variable_manager,
                loader=self._loader,
                use_handlers=self._use_handlers,
            )
        except AssertionError:
            raise AnsibleParserError("A malformed block was encountered.", obj=self._ds)

    def _load_rescue(self, attr, ds):
        # Field loader for 'rescue' (same shape as _load_block).
        try:
            return load_list_of_tasks(
                ds,
                play=self._play,
                block=self,
                role=self._role,
                task_include=None,
                variable_manager=self._variable_manager,
                loader=self._loader,
                use_handlers=self._use_handlers,
            )
        except AssertionError:
            raise AnsibleParserError("A malformed block was encountered.", obj=self._ds)

    def _load_always(self, attr, ds):
        # Field loader for 'always' (same shape as _load_block).
        try:
            return load_list_of_tasks(
                ds,
                play=self._play,
                block=self,
                role=self._role,
                task_include=None,
                variable_manager=self._variable_manager,
                loader=self._loader,
                use_handlers=self._use_handlers,
            )
        except AssertionError:
            raise AnsibleParserError("A malformed block was encountered.", obj=self._ds)

    def get_dep_chain(self):
        # Walk up the parentage until an explicit dependency chain is found.
        # A copy is returned so callers cannot mutate our cached list.
        if self._dep_chain is None:
            if self._parent:
                return self._parent.get_dep_chain()
            else:
                return None
        else:
            return self._dep_chain[:]

    def copy(self, exclude_parent=False, exclude_tasks=False):
        """Return a copy of this block, optionally without its parent chain
        (``exclude_parent``) or its task lists (``exclude_tasks``)."""
        def _dupe_task_list(task_list, new_block):
            # Copy each task, re-rooting its parent chain on new_block.
            new_task_list = []
            for task in task_list:
                new_task = task.copy(exclude_parent=True)
                if task._parent:
                    new_task._parent = task._parent.copy(exclude_tasks=True)
                    # go up the parentage tree until we find an
                    # object without a parent and make this new
                    # block their parent
                    cur_obj = new_task
                    while cur_obj._parent:
                        cur_obj = cur_obj._parent
                    cur_obj._parent = new_block
                else:
                    new_task._parent = new_block
                new_task_list.append(new_task)
            return new_task_list

        new_me = super(Block, self).copy()
        new_me._play = self._play
        new_me._use_handlers = self._use_handlers
        new_me._eor = self._eor
        if self._dep_chain is not None:
            new_me._dep_chain = self._dep_chain[:]
        new_me._parent = None
        if self._parent and not exclude_parent:
            new_me._parent = self._parent.copy(exclude_tasks=exclude_tasks)
        if not exclude_tasks:
            new_me.block = _dupe_task_list(self.block or [], new_me)
            new_me.rescue = _dupe_task_list(self.rescue or [], new_me)
            new_me.always = _dupe_task_list(self.always or [], new_me)
        # NOTE(review): the role is shared (referenced), not copied — confirm
        # that is intentional.
        new_me._role = None
        if self._role:
            new_me._role = self._role
        new_me.validate()
        return new_me

    def serialize(self):
        '''
        Override of the default serialize method, since when we're serializing
        a task we don't want to include the attribute list of tasks.
        '''
        data = dict()
        for attr in self._valid_attrs:
            if attr not in ('block', 'rescue', 'always'):
                data[attr] = getattr(self, attr)
        data['dep_chain'] = self.get_dep_chain()
        data['eor'] = self._eor
        if self._role is not None:
            data['role'] = self._role.serialize()
        if self._parent is not None:
            # The parent's class name is recorded so deserialize() can
            # reconstruct the right type.
            data['parent'] = self._parent.copy(exclude_tasks=True).serialize()
            data['parent_type'] = self._parent.__class__.__name__
        return data

    def deserialize(self, data):
        '''
        Override of the default deserialize method, to match the above overridden
        serialize method
        '''
        # import is here to avoid import loops
        # NOTE(review): Task appears unused in this method — confirm.
        from ansible.playbook.task import Task
        from ansible.playbook.task_include import TaskInclude
        from ansible.playbook.handler_task_include import HandlerTaskInclude
        # we don't want the full set of attributes (the task lists), as that
        # would lead to a serialize/deserialize loop
        for attr in self._valid_attrs:
            if attr in data and attr not in ('block', 'rescue', 'always'):
                setattr(self, attr, data.get(attr))
        self._dep_chain = data.get('dep_chain', None)
        self._eor = data.get('eor', False)
        # if there was a serialized role, unpack it too
        role_data = data.get('role')
        if role_data:
            r = Role()
            r.deserialize(role_data)
            self._role = r
        # Rebuild the parent object, dispatching on the recorded class name.
        parent_data = data.get('parent')
        if parent_data:
            parent_type = data.get('parent_type')
            if parent_type == 'Block':
                p = Block()
            elif parent_type == 'TaskInclude':
                p = TaskInclude()
            elif parent_type == 'HandlerTaskInclude':
                p = HandlerTaskInclude()
            p.deserialize(parent_data)
            self._parent = p
            self._dep_chain = self._parent.get_dep_chain()

    def set_loader(self, loader):
        # Propagate the DataLoader to the parent (or role) and any deps.
        self._loader = loader
        if self._parent:
            self._parent.set_loader(loader)
        elif self._role:
            self._role.set_loader(loader)
        dep_chain = self.get_dep_chain()
        if dep_chain:
            for dep in dep_chain:
                dep.set_loader(loader)

    def _get_attr_environment(self):
        # 'environment' accumulates values from the whole parent chain.
        return self._get_parent_attribute('environment', extend=True)

    def _get_parent_attribute(self, attr, extend=False, prepend=False):
        '''
        Generic logic to get the attribute or parent attribute for a block value.
        '''
        value = None
        try:
            value = self._attributes[attr]
            if self._parent and (value is None or extend):
                try:
                    parent_value = getattr(self._parent, attr, None)
                    if extend:
                        value = self._extend_value(value, parent_value, prepend)
                    else:
                        value = parent_value
                except AttributeError:
                    pass
            if self._role and (value is None or extend):
                try:
                    parent_value = getattr(self._role, attr, None)
                    if extend:
                        value = self._extend_value(value, parent_value, prepend)
                    else:
                        value = parent_value
                    # NOTE(review): the dependency-chain walk below sits inside
                    # this try, so an AttributeError raised while walking the
                    # deps is swallowed as well — confirm this is intentional.
                    dep_chain = self.get_dep_chain()
                    if dep_chain and (value is None or extend):
                        dep_chain.reverse()
                        for dep in dep_chain:
                            dep_value = getattr(dep, attr, None)
                            if extend:
                                value = self._extend_value(value, dep_value, prepend)
                            else:
                                value = dep_value
                            if value is not None and not extend:
                                break
                except AttributeError:
                    pass
            if self._play and (value is None or extend):
                try:
                    parent_value = getattr(self._play, attr, None)
                    if extend:
                        value = self._extend_value(value, parent_value, prepend)
                    else:
                        value = parent_value
                except AttributeError:
                    pass
        except KeyError as e:
            # Unknown attribute name: fall through and return whatever was
            # accumulated so far (usually None).
            pass
        return value

    def filter_tagged_tasks(self, play_context, all_vars):
        '''
        Creates a new block, with task lists filtered based on the tags contained
        within the play_context object.
        '''
        def evaluate_and_append_task(target):
            # Keep nested blocks (recursively filtered), meta tasks, matching
            # includes, and any task whose tags pass the only/skip filters.
            tmp_list = []
            for task in target:
                if isinstance(task, Block):
                    tmp_list.append(evaluate_block(task))
                elif task.action == 'meta' \
                        or (task.action == 'include' and task.evaluate_tags([], play_context.skip_tags, all_vars=all_vars)) \
                        or task.evaluate_tags(play_context.only_tags, play_context.skip_tags, all_vars=all_vars):
                    tmp_list.append(task)
            return tmp_list

        def evaluate_block(block):
            # NOTE(review): this copies self rather than the passed block;
            # the nested block's own task lists are re-assigned below, but
            # its other attributes come from self — confirm intended.
            new_block = self.copy(exclude_tasks=True)
            new_block.block = evaluate_and_append_task(block.block)
            new_block.rescue = evaluate_and_append_task(block.rescue)
            new_block.always = evaluate_and_append_task(block.always)
            return new_block

        return evaluate_block(self)

    def has_tasks(self):
        # True if any of the three task lists is non-empty.
        return len(self.block) > 0 or len(self.rescue) > 0 or len(self.always) > 0

    def get_include_params(self):
        # Include parameters come from an enclosing task include, if any.
        if self._parent:
            return self._parent.get_include_params()
        else:
            return dict()

    def all_parents_static(self):
        '''
        Determine if all of the parents of this block were statically loaded
        or not. Since Task/TaskInclude objects may be in the chain, they simply
        call their parents all_parents_static() method. Only Block objects in
        the chain check the statically_loaded value of the parent.
        '''
        from ansible.playbook.task_include import TaskInclude
        if self._parent:
            if isinstance(self._parent, TaskInclude) and not self._parent.statically_loaded:
                return False
            return self._parent.all_parents_static()
        return True
| jcftang/ansible | lib/ansible/playbook/block.py | Python | gpl-3.0 | 14,413 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Directory containing this setup.py; used to locate package files.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
VERSION = "6.4.0"
DOWNLOAD_URL = \
    'https://github.com/ggravlingen/pytradfri/archive/{}.zip'.format(VERSION)
# Optional extras: `pip install pytradfri[async]` pulls in the async stack.
EXTRAS_REQUIRE = {
    'async': ['aiocoap==0.4b3', 'DTLSSocket==0.1.10']
}
# Everything except the test packages is shipped.
PACKAGES = find_packages(exclude=['tests', 'tests.*'])
setup(
    name='pytradfri',
    packages=PACKAGES,
    python_requires='>=3.5',
    version=VERSION,
    description='IKEA Trådfri/Tradfri API. Control and observe your '
                'lights from Python.',
    long_description=long_description,
    author='balloob, lwis, ggravlingen',
    author_email='no@email.com',
    long_description_content_type="text/markdown",
    url='https://github.com/ggravlingen/pytradfri',
    license='MIT',
    keywords='ikea tradfri api iot light homeautomation',
    download_url=DOWNLOAD_URL,
    extras_require=EXTRAS_REQUIRE,
)
| ggravlingen/pytradfri | setup.py | Python | mit | 1,147 |
#!/usr/bin/env python
"""
Functions for sending email from cipres cron jobs.
Note that caller will get exceptions if there's a problem talking to the smtp server or the
email is badly formed, but not if the mail can't be delivered. If mail can't be delivered
to one or more of the recipients the fromaddr account can expect to receive an email
about it though.
Sorry, lots of stuff is hardcoded here. See sendMail function below for a fairly generic but
limited method to send an email message.
"""
import sys
import os
import re
import string
import subprocess
import tempfile
import getopt
import smtplib
import pystache
if sys.version_info.major == 2:
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
else:
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# NOTE: ``${...}`` tokens throughout this file are deploy-time substitution
# placeholders; this file is a template, not directly runnable Python.
ccaddr = "${email.adminAddr}"
fromaddr = "${email.adminAddr}"
# Path to the database password file, resolved against $SDK_VERSIONS.
passwordFile = os.path.expandvars("${SDK_VERSIONS}/db_password.txt")
def overLimitWarning(toaddr, username, template, hours_used, application_name, portal, email):
    """Render the mustache ``template`` with the usage details and email the
    result to ``toaddr`` (cc: the admin address).

    The subject line is taken from the first non-empty line of the rendered
    body.  ``${...}`` tokens are filled in at deploy time.
    """
    msg = MIMEMultipart('related')
    msg['From'] = fromaddr
    msg['To'] = toaddr
    msg['Cc'] = ccaddr
    with open(template) as f:
        templateContents = f.read()
    contents = pystache.render(templateContents,
        {'hours_used' : hours_used,
        'application_name' : application_name,
        'username': username,
        'portal_url': portal,
        'email': email} )
    # Subject of email is taken from first (non-empty) line of body of email
    lines = contents.splitlines()
    msg['Subject'] = [ s for s in lines if s ][0]
    alternatives = MIMEMultipart('alternative')
    msg.attach(alternatives)
    alternatives.attach(MIMEText(contents, 'plain'));
    mail = smtplib.SMTP("${email.smtpServer}", ${email.smtpServer.port})
    # Gmail requires TLS plus an authenticated login before sending.
    if "${mailSender}" == "gmail.mailSender" :
        mail.ehlo()
        mail.starttls()
        mail.login("${mailSender.gmail.username}", "${mailSender.gmail.password}")
    # mail.set_debuglevel(1)
    toaddrs = [toaddr] + [ccaddr]
    # print "Sending mail from %s, to %s" % (fromaddr, toaddrs)
    mail.sendmail(fromaddr, toaddrs, msg.as_string())
    mail.quit()
def sendMail(toA, fromA, ccA, subject, file):
    """Send a plain-text email whose body is read from ``file``.

    Fairly generic but limited: one recipient plus one cc, no attachments.
    ``${...}`` tokens are filled in at deploy time.
    """
    # print "sendMail(%s, %s, %s)" % (toA, subject, file)
    msg = MIMEMultipart('related')
    msg['From'] = fromA
    msg['To'] = toA
    msg['Cc'] = ccA
    if subject:
        msg['Subject'] = subject
    alternatives = MIMEMultipart('alternative')
    msg.attach(alternatives)
    # NOTE(review): the file handle opened here is never closed explicitly.
    plain= open(file, 'r').read()
    alternatives.attach(MIMEText(plain, 'plain'));
    mail = smtplib.SMTP("${email.smtpServer}", ${email.smtpServer.port})
    # Gmail requires TLS plus an authenticated login before sending.
    if "${mailSender}" == "gmail.mailSender" :
        mail.ehlo()
        mail.starttls()
        mail.login("${mailSender.gmail.username}", "${mailSender.gmail.password}")
    # mail.set_debuglevel(1)
    toA = [toA] + [ccA]
    mail.sendmail(fromA, toA, msg.as_string())
    mail.quit()
| leschzinerlab/COSMIC-CryoEM-Gateway | gateway_config/sdk/scripts/mailer.py | Python | gpl-3.0 | 3,160 |
from resources.lib.gui.gui import cGui
from resources.lib.gui.guiElement import cGuiElement
from resources.lib.handler.inputParameterHandler import cInputParameterHandler
from resources.lib.handler.outputParameterHandler import cOutputParameterHandler
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.parser import cParser
from resources.lib.gui.hoster import cHosterGui
from resources.lib.handler.hosterHandler import cHosterHandler
# Identifier used by the plugin framework to route calls to this site module.
SITE_IDENTIFIER = 'burning_series_org'
SITE_NAME = 'Burning-Series.org'
# Base URL of the site and the entry page listing all series.
URL_MAIN = 'http://www.burning-series.org/'
URL_SERIES = 'http://www.burning-series.org/andere-serien'
def load():
    """Entry point for this site plugin.

    The site is currently disabled: an info popup ('HS') is shown and
    ``False`` is returned immediately.  The menu-building statements that
    previously followed the ``return`` were unreachable dead code and have
    been removed; re-enable the site by restoring a menu entry for
    ``showAllSeries`` with ``URL_SERIES``.
    """
    cGui().showInfo('HS', SITE_NAME, 5)
    return False
def __createMenuEntry(oGui, sFunction, sLabel, sUrl):
    """Append one folder entry for this site to the given GUI."""
    element = cGuiElement()
    element.setSiteName(SITE_IDENTIFIER)
    element.setFunction(sFunction)
    element.setTitle(sLabel)

    params = cOutputParameterHandler()
    params.addParameter('siteUrl', sUrl)
    oGui.addFolder(element, params)
def showAllSeries():
    """List every series found on the series overview page."""
    gui = cGui()
    inParams = cInputParameterHandler()
    sUrl = inParams.getValue('siteUrl')
    sHtmlContent = cRequestHandler(sUrl).request()

    # Narrow the page down to the series list first.
    aResult = cParser().parse(sHtmlContent, '<ul id="serSeries">(.*?)</ul>')
    if aResult[0] == True:
        sSection = aResult[1][0]
        aResult = cParser().parse(sSection, '<li><a href="([^"]+)">(.*?)</a></li>')
        if aResult[0] == True:
            for aEntry in aResult[1]:
                element = cGuiElement()
                element.setSiteName(SITE_IDENTIFIER)
                element.setFunction('showSeasons')
                element.setTitle(aEntry[1])
                outParams = cOutputParameterHandler()
                outParams.addParameter('siteUrl', URL_MAIN + str(aEntry[0]))
                gui.addFolder(element, outParams)
    gui.setEndOfDirectory()
def showSeasons():
    """List the seasons of the selected series."""
    gui = cGui()
    inParams = cInputParameterHandler()
    sUrl = inParams.getValue('siteUrl')
    sHtmlContent = cRequestHandler(sUrl).request()

    # The season links live inside the page navigation list.
    aResult = cParser().parse(sHtmlContent, '<ul class="pages">(.*?)</ul>')
    if aResult[0] == True:
        sSection = aResult[1][0]
        aResult = cParser().parse(sSection, '<a href="([^"]+)">(.*?)</a>')
        if aResult[0] == True:
            for aEntry in aResult[1]:
                element = cGuiElement()
                element.setSiteName(SITE_IDENTIFIER)
                element.setFunction('showSeries')
                element.setTitle('Staffel ' + str(aEntry[1]))
                outParams = cOutputParameterHandler()
                outParams.addParameter('siteUrl', URL_MAIN + str(aEntry[0]))
                gui.addFolder(element, outParams)
    gui.setEndOfDirectory()
def showSeries():
    """List the episodes of the selected season."""
    gui = cGui()
    inParams = cInputParameterHandler()
    sUrl = inParams.getValue('siteUrl')
    sHtmlContent = cRequestHandler(sUrl).request()

    # Episodes are rows of the season table: link, German and English title.
    aResult = cParser().parse(sHtmlContent, '<table>(.*?)</table>')
    if aResult[0] == True:
        sSection = aResult[1][0]
        aResult = cParser().parse(
            sSection,
            '<a href="([^"]+)"><strong>(.*?)</strong>.*?<span lang="en">(.*?)</span></a>')
        if aResult[0] == True:
            for aEntry in aResult[1]:
                element = cGuiElement()
                element.setSiteName(SITE_IDENTIFIER)
                element.setFunction('showHosters')
                element.setTitle(str(aEntry[1]) + ' - ' + str(aEntry[2]))
                outParams = cOutputParameterHandler()
                outParams.addParameter('siteUrl', URL_MAIN + str(aEntry[0]))
                gui.addFolder(element, outParams)
    gui.setEndOfDirectory()
def showHosters():
    """List the hosters that offer the selected episode."""
    gui = cGui()
    inParams = cInputParameterHandler()
    sUrl = inParams.getValue('siteUrl')
    sHtmlContent = cRequestHandler(sUrl).request()

    # Restrict parsing to the hoster section of the episode page.
    aResult = cParser().parse(sHtmlContent, '<h3>Hoster dieser Episode(.*?)</ul>')
    if aResult[0] == True:
        sSection = aResult[1][0]
        aResult = cParser().parse(sSection, '<a href="([^"]+)">.*?title="([^"]+)" />(.*?)</a>')
        if aResult[0] == True:
            for aEntry in aResult[1]:
                # Skip hosters we have no plugin for.
                oHoster = __checkHoster(str(aEntry[1]))
                if oHoster == False:
                    continue
                element = cGuiElement()
                element.setSiteName(SITE_IDENTIFIER)
                element.setFunction('getHosterUrlandPlay')
                element.setTitle(str(aEntry[2]))
                outParams = cOutputParameterHandler()
                outParams.addParameter('siteUrl', URL_MAIN + str(aEntry[0]))
                outParams.addParameter('hosterName', oHoster.getPluginIdentifier())
                gui.addFolder(element, outParams)
    gui.setEndOfDirectory()
def __getMovieTitle(sHtmlContent):
    """Extract the 'German - English' episode title, or False if absent."""
    aResult = cParser().parse(
        sHtmlContent,
        '</ul><h2>(.*?)<small id="titleEnglish" lang="en">(.*?)</small>')
    if aResult[0] == True:
        for aEntry in aResult[1]:
            return str(aEntry[0]).strip() + ' - ' + str(aEntry[1]).strip()
    return False
def getHosterUrlandPlay():
    """Resolve the selected hoster link on an episode page and play it."""
    gui = cGui()
    inParams = cInputParameterHandler()
    sUrl = inParams.getValue('siteUrl')
    sHosterName = inParams.getValue('hosterName')

    sHtmlContent = cRequestHandler(sUrl).request()
    sTitle = __getMovieTitle(sHtmlContent)

    # The redirect link to the hoster sits inside the video actions box.
    aResult = cParser().parse(sHtmlContent, '<div id="video_actions">.*?<a href="([^"]+)">')
    if aResult[0] == True:
        sStreamUrl = aResult[1][0]
        oHoster = cHosterHandler().getHoster(sHosterName)
        if sTitle != False:
            oHoster.setFileName(sTitle)
        cHosterGui().showHoster(gui, oHoster, sStreamUrl)
    gui.setEndOfDirectory()
def __checkHoster(sHosterName):
    """Map a site hoster label to the matching hoster plugin, or False."""
    # Known site labels and their plugin identifiers.
    knownHosters = {
        'Megavideo': 'megavideo',
        'zShare': 'zshare',
        'Duckload': 'duckload',
        'FileBase': 'filebase',
        'MyStream': 'mystream',
    }
    pluginId = knownHosters.get(sHosterName)
    if pluginId is None:
        return False
    return cHosterHandler().getHoster(pluginId)
| mino60/venom-xbmc-addons-beta | plugin.video.vstream/resources/sites/trash/burning_series_org.py | Python | gpl-2.0 | 7,633 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import pkg_resources
from .__version__ import __version__
__all__ = ['Guess', 'Language',
'guess_file_info', 'guess_video_info',
'guess_movie_info', 'guess_episode_info']
# Do python3 detection before importing any other module, to be sure that
# it will then always be available
# with code from http://lucumr.pocoo.org/2011/1/22/forwards-compatible-python/
import sys
# Python 2/3 compatibility shim: defines text-type aliases and the helpers
# u()/s()/to_hex() with version-appropriate implementations.
if sys.version_info[0] >= 3:  # pragma: no cover
    PY2, PY3 = False, True
    unicode_text_type = str
    native_text_type = str
    base_text_type = str

    def u(x):
        # Text coercion is a plain str() on Python 3.
        return str(x)

    def s(x):
        # Native-string conversion is a no-op on Python 3.
        return x

    class UnicodeMixin(object):
        __str__ = lambda x: x.__unicode__()
    import binascii

    def to_hex(x):
        return binascii.hexlify(x).decode('utf-8')
else:  # pragma: no cover
    PY2, PY3 = True, False
    __all__ = [str(s) for s in __all__]  # fix imports for python2
    unicode_text_type = unicode
    native_text_type = str
    base_text_type = basestring

    def u(x):
        # Decode byte strings to unicode, recursing into lists.
        if isinstance(x, str):
            return x.decode('utf-8')
        if isinstance(x, list):
            return [u(s) for s in x]
        return unicode(x)

    def s(x):
        # Encode unicode to UTF-8 bytes, recursing into containers.
        if isinstance(x, unicode):
            return x.encode('utf-8')
        if isinstance(x, list):
            return [s(y) for y in x]
        if isinstance(x, tuple):
            return tuple(s(y) for y in x)
        if isinstance(x, dict):
            return dict((s(key), s(value)) for key, value in x.items())
        return x

    class UnicodeMixin(object):
        __str__ = lambda x: unicode(x).encode('utf-8')

    def to_hex(x):
        return x.encode('hex')
    range = xrange
from guessit.guess import Guess, merge_all
from guessit.language import Language
from guessit.matcher import IterativeMatcher
from guessit.textutils import clean_string, is_camel, from_camel
import os.path
import logging
import json
log = logging.getLogger(__name__)


class NullHandler(logging.Handler):
    """No-op handler so importing applications see no 'no handler' warning."""
    def emit(self, record):
        pass

# let's be a nicely behaving library
h = NullHandler()
log.addHandler(h)
def _guess_filename(filename, options=None, **kwargs):
    """Parse ``filename`` into a match tree and return its guesses."""
    tree = _build_filename_mtree(filename, options=options, **kwargs)
    _add_camel_properties(tree, options=options)
    return tree.matched()
def _build_filename_mtree(filename, options=None, **kwargs):
    """Build an :class:`IterativeMatcher` for ``filename``.

    When the first pass suggests second-pass options, the matcher is rebuilt
    with those options merged over the caller's options.

    Bug fix: ``dict(options)`` raised ``TypeError`` when ``options`` was
    ``None`` (its default) and a second pass was required; start from an
    empty dict in that case.
    """
    mtree = IterativeMatcher(filename, options=options, **kwargs)
    second_pass_options = mtree.second_pass_options
    if second_pass_options:
        log.info("Running 2nd pass")
        # options may legitimately be None here — guard before copying.
        merged_options = dict(options) if options else {}
        merged_options.update(second_pass_options)
        mtree = IterativeMatcher(filename, options=merged_options, **kwargs)
    return mtree
def _add_camel_properties(mtree, options=None, **kwargs):
    """Re-run guessing on camelCased values to extract extra properties."""
    # Episodes carry their main name in 'series', everything else in 'title'.
    is_episode = mtree.matched().get('type') == 'episode'
    prop = 'series' if is_episode else 'title'
    _guess_camel_string(mtree, mtree.matched().get(prop),
                        options=options, skip_title=False, **kwargs)

    # Unidentified leaves may also hide camelCased information.
    for leaf in mtree.match_tree.unidentified_leaves():
        _guess_camel_string(mtree, leaf.value,
                            options=options, skip_title=True, **kwargs)
def _guess_camel_string(mtree, string, options=None, skip_title=False, **kwargs):
    """If ``string`` is camelCase, re-parse its split form and merge any new
    guesses into ``mtree``.  Returns True when something was merged."""
    if not (string and is_camel(string)):
        return False

    log.info('"%s" is camel cased. Try to detect more properties.' % (string,))
    uncameled_value = from_camel(string)
    camel_tree = _build_filename_mtree(uncameled_value, options=options,
                                       name_only=True, skip_title=skip_title,
                                       **kwargs)
    if len(camel_tree.matched()) > 0:
        # Title has changed.
        mtree.matched().update(camel_tree.matched())
        return True
    return False
def guess_file_info(filename, info=None, options=None, **kwargs):
    """info can contain the names of the various plugins, such as 'filename' to
    detect filename info, or 'hash_md5' to get the md5 hash of the file.

    >>> testfile = os.path.join(os.path.dirname(__file__), 'test/dummy.srt')
    >>> g = guess_file_info(testfile, info = ['hash_md5', 'hash_sha1'])
    >>> g['hash_md5'], g['hash_sha1']
    ('64de6b5893cac24456c46a935ef9c359', 'a703fc0fa4518080505809bf562c6fc6f7b3c98c')
    """
    info = info or 'filename'
    options = options or {}
    result = []
    hashers = []
    # Force unicode as soon as possible
    filename = u(filename)
    # A single info name may be given as a plain string.
    if isinstance(info, base_text_type):
        info = [info]
    for infotype in info:
        if infotype == 'filename':
            result.append(_guess_filename(filename, options, **kwargs))
        elif infotype == 'hash_mpc':
            from guessit.hash_mpc import hash_file
            try:
                result.append(Guess({infotype: hash_file(filename)},
                                    confidence=1.0))
            except Exception as e:
                log.warning('Could not compute MPC-style hash because: %s' % e)
        elif infotype == 'hash_ed2k':
            from guessit.hash_ed2k import hash_file
            try:
                result.append(Guess({infotype: hash_file(filename)},
                                    confidence=1.0))
            except Exception as e:
                log.warning('Could not compute ed2k hash because: %s' % e)
        elif infotype.startswith('hash_'):
            # Generic hashlib-based hashes are collected first and computed
            # together below, so the file is read only once.
            import hashlib
            hashname = infotype[5:]
            try:
                hasher = getattr(hashlib, hashname)()
                hashers.append((infotype, hasher))
            except AttributeError:
                log.warning('Could not compute %s hash because it is not available from python\'s hashlib module' % hashname)
        else:
            log.warning('Invalid infotype: %s' % infotype)
    # do all the hashes now, but on a single pass
    if hashers:
        try:
            blocksize = 8192
            hasherobjs = dict(hashers).values()
            with open(filename, 'rb') as f:
                chunk = f.read(blocksize)
                while chunk:
                    for hasher in hasherobjs:
                        hasher.update(chunk)
                    chunk = f.read(blocksize)
            for infotype, hasher in hashers:
                result.append(Guess({infotype: hasher.hexdigest()},
                                    confidence=1.0))
        except Exception as e:
            log.warning('Could not compute hash because: %s' % e)
    # Merge all partial guesses into a single Guess object.
    result = merge_all(result)
    return result
def guess_video_info(filename, info=None, options=None, **kwargs):
    """Shortcut for :func:`guess_file_info` with ``type='video'``."""
    return guess_file_info(filename, info, options, type='video', **kwargs)
def guess_movie_info(filename, info=None, options=None, **kwargs):
    """Shortcut for :func:`guess_file_info` with ``type='movie'``."""
    return guess_file_info(filename, info, options, type='movie', **kwargs)
def guess_episode_info(filename, info=None, options=None, **kwargs):
    """Shortcut for :func:`guess_file_info` with ``type='episode'``."""
    return guess_file_info(filename, info, options, type='episode', **kwargs)
| Hellowlol/PyTunes | libs/guessit/__init__.py | Python | gpl-3.0 | 7,831 |
# Copyright (c) 2017 pandas-gbq Authors All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Helper methods for BigQuery schemas"""
import copy
# API may return data types as legacy SQL, so maintain a mapping of aliases
# from standard SQL to legacy data types.
_TYPE_ALIASES = {
"BOOL": "BOOLEAN",
"FLOAT64": "FLOAT",
"INT64": "INTEGER",
"STRUCT": "RECORD",
}
def to_pandas_gbq(client_schema):
    """Convert a sequence of :class:`google.cloud.bigquery.schema.SchemaField`
    into a schema dict in pandas-gbq API format.
    """
    converted_fields = []
    for schema_field in client_schema:
        field = schema_field.to_api_repr()
        # pandas-gbq expects upper-cased type/mode strings.
        field["type"] = field["type"].upper()
        field["mode"] = field["mode"].upper()
        converted_fields.append(field)
    return {"fields": converted_fields}
def to_google_cloud_bigquery(pandas_gbq_schema):
    """Given a schema in pandas-gbq API format,
    return a sequence of :class:`google.cloud.bigquery.schema.SchemaField`.
    """
    # Imported lazily so merely importing this module does not require the
    # google-cloud-bigquery package.
    from google.cloud import bigquery
    # Need to convert from JSON representation to format used by client library.
    # add_default_nullable_mode works around SchemaField.from_api_repr
    # requiring a "mode" key (see that function's docstring).
    schema = add_default_nullable_mode(pandas_gbq_schema)
    return [
        bigquery.SchemaField.from_api_repr(field) for field in schema["fields"]
    ]
def _clean_schema_fields(fields):
    """Return a sanitized version of the schema for comparisons.

    The ``mode`` and ``description`` properties are ignored because they
    are not generated by func:`pandas_gbq.schema.generate_bq_schema`.
    """
    clean_schema = []
    # Sort by name so two schemas compare equal regardless of field order.
    for field in sorted(fields, key=lambda field: field["name"]):
        raw_type = field["type"].upper()
        clean_schema.append(
            {
                "name": field["name"],
                # Normalize legacy SQL aliases (e.g. INT64 -> INTEGER).
                "type": _TYPE_ALIASES.get(raw_type, raw_type),
            }
        )
    return clean_schema
def schema_is_subset(schema_remote, schema_local):
    """Indicate whether the schema to be uploaded is a subset

    Compare the BigQuery table identified in the parameters with
    the schema passed in and indicate whether a subset of the fields in
    the former are present in the latter. Order is not considered.

    Parameters
    ----------
    schema_remote : dict
        Schema for comparison. Each item of ``fields`` should have a 'name'
        and a 'type'
    schema_local : dict
        Schema for comparison. Each item of ``fields`` should have a 'name'
        and a 'type'

    Returns
    -------
    bool
        Whether the passed schema is a subset
    """
    remote = _clean_schema_fields(schema_remote.get("fields", []))
    local = _clean_schema_fields(schema_local.get("fields", []))
    for field in local:
        if field not in remote:
            return False
    return True
def generate_bq_schema(dataframe, default_type="STRING"):
    """Given a passed dataframe, generate the associated Google BigQuery schema.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        DataFrame for which the BigQuery schema is generated.
    default_type : str
        The default BigQuery type in case the type of the column
        does not exist in the mapping.

    Returns
    -------
    dict
        A ``{"fields": [{"name": ..., "type": ...}, ...]}`` schema dict.
    """
    # If you update this mapping, also update the table at
    # `docs/source/writing.rst`.
    type_mapping = {
        "i": "INTEGER",
        "b": "BOOLEAN",
        "f": "FLOAT",
        "O": "STRING",
        "S": "STRING",
        "U": "STRING",
        "M": "TIMESTAMP",
    }
    fields = []
    # BUG FIX: Series/DataFrame.iteritems() was removed in pandas 2.0;
    # .items() is the equivalent and exists in all supported versions.
    for column_name, dtype in dataframe.dtypes.items():
        fields.append(
            {
                "name": column_name,
                "type": type_mapping.get(dtype.kind, default_type),
            }
        )
    return {"fields": fields}
def update_schema(schema_old, schema_new):
    """
    Given an old BigQuery schema, update it with a new one.

    Where a field name is the same, the new will replace the old. Any
    new fields not present in the old schema will be added.

    Arguments:
        schema_old: the old schema to update
        schema_new: the new schema which will overwrite/extend the old

    Returns:
        dict: a schema dict with the merged ``fields`` list.
    """
    old_fields = schema_old["fields"]
    new_fields = schema_new["fields"]
    output_fields = list(old_fields)

    # Map field names to their positions so replacements keep the
    # original field order.
    field_indices = {field["name"]: i for i, field in enumerate(output_fields)}

    for field in new_fields:
        name = field["name"]
        if name in field_indices:
            # replace old field with new field of same name
            output_fields[field_indices[name]] = field
        else:
            # BUG FIX: fields present only in the new schema were silently
            # dropped, contradicting the documented contract; append them.
            output_fields.append(field)

    return {"fields": output_fields}
def add_default_nullable_mode(schema):
    """Return a copy of *schema* with NULLABLE mode filled in where missing.

    Workaround for error in SchemaField.from_api_repr, which required
    "mode" to be set:
    https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4456
    """
    # Returns a copy rather than modifying the mutable arg, per Issue #277.
    patched = copy.deepcopy(schema)
    for field in patched["fields"]:
        if "mode" not in field:
            field["mode"] = "NULLABLE"
    return patched
def remove_policy_tags(schema):
    """Return a copy of *schema* with any ``policyTags`` entries removed.

    Workaround for 403 error with policy tags, which are not required in a load
    job: https://github.com/googleapis/python-bigquery/pull/557
    """
    # Returns a copy rather than modifying the mutable arg, per Issue #277.
    stripped = copy.deepcopy(schema)
    for field in stripped["fields"]:
        field.pop("policyTags", None)
    return stripped
| pydata/pandas-gbq | pandas_gbq/schema.py | Python | bsd-3-clause | 5,518 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class ProviderAPIRegistry(object):
    """Shared-state ("Borg") registry of provider API manager instances.

    Every instance rebinds its ``__dict__`` to one class-level dict, so all
    instances see the same registry. Attribute reads fall through to the
    registered providers; attribute writes are forbidden via ``__setattr__``.
    """

    # Single dict shared as __dict__ by every instance (see __init__).
    __shared_object_state = {}
    # name -> provider manager instance; shared by all instances.
    __registry = {}
    # Delegate iteration and subscripting straight to the registry dict.
    __iter__ = __registry.__iter__
    __getitem__ = __registry.__getitem__
    # Once locked (post configuration), no further registrations are allowed.
    locked = False

    def __init__(self):
        # NOTE(morgan): This rebinds __dict__ and allows all instances of
        # the provider API to share a common state. Any changes except
        # rebinding __dict__ will maintain the same state stored on the class
        # not the instance. This design pattern is preferable to
        # full singletons where state sharing is the important "feature"
        # derived from the "singleton"
        #
        # Use "super" to bypass the __setattr__ preventing changes to the
        # object itself.
        super(ProviderAPIRegistry, self).__setattr__(
            '__dict__', self.__shared_object_state)

    def __getattr__(self, item):
        """Do attr lookup."""
        # Only called when normal attribute lookup fails; resolve the name
        # against the registered providers instead.
        try:
            return self.__registry[item]
        except KeyError:
            raise AttributeError(
                "'ProviderAPIs' has no attribute %s" % item)

    def __setattr__(self, key, value):
        """Do not allow setting values on the registry object."""
        raise RuntimeError('Programming Error: You may not set values on the '
                           'ProviderAPIRegistry objects.')

    def _register_provider_api(self, name, obj):
        """Register an instance of a class as a provider api."""
        # "driver" is reserved so provider lookup can never shadow a
        # manager's own driver attribute.
        if name == 'driver':
            raise ValueError('A provider may not be named "driver".')
        if self.locked:
            raise RuntimeError(
                'Programming Error: The provider api registry has been '
                'locked (post configuration). Ensure all provider api '
                'managers are instantiated before locking.')
        if name in self.__registry:
            raise DuplicateProviderError(
                '`%(name)s` has already been registered as an api '
                'provider by `%(prov)r`' % {'name': name,
                                            'prov': self.__registry[name]})
        self.__registry[name] = obj

    def _clear_registry_instances(self):
        """ONLY USED FOR TESTING."""
        self.__registry.clear()
        # Use super to allow setting around class implementation of __setattr__
        super(ProviderAPIRegistry, self).__setattr__('locked', False)

    def lock_provider_registry(self):
        # Use super to allow setting around class implementation of __setattr__
        super(ProviderAPIRegistry, self).__setattr__('locked', True)

    def deferred_provider_lookup(self, api, method):
        """Create descriptor that performs lookup of api and method on demand.

        For specialized cases, such as the enforcer "get_member_from_driver"
        which needs to be effectively a "classmethod", this method returns
        a smart descriptor object that does the lookup at runtime instead of
        at import time.

        :param api: The api to use, e.g. "identity_api"
        :type api: str
        :param method: the method on the api to return
        :type method: str
        """
        class DeferredProviderLookup(object):
            # Descriptor: resolves the api/method pair only when accessed,
            # so it can be defined before the provider is registered.
            def __init__(self, api, method):
                self.__api = api
                self.__method = method

            def __get__(self, instance, owner):
                api = getattr(ProviderAPIs, self.__api)
                return getattr(api, self.__method)
        return DeferredProviderLookup(api, method)
# Raised by ProviderAPIRegistry._register_provider_api when a name is reused.
class DuplicateProviderError(Exception):
    """Attempting to register a duplicate API provider."""
class ProviderAPIMixin(object):
    """Allow referencing provider apis on self via __getattr__.

    Be sure this class is first in the class definition for inheritance.
    """
    def __getattr__(self, item):
        """Magic getattr method."""
        # __getattr__ only fires when normal lookup fails, so first try the
        # shared provider registry.
        try:
            return getattr(ProviderAPIs, item)
        except AttributeError:
            # Fall back to default lookup so the resulting AttributeError
            # references this instance rather than the registry.
            return self.__getattribute__(item)
# Module-level shared registry instance used throughout the code base.
ProviderAPIs = ProviderAPIRegistry()
| openstack/keystone | keystone/common/provider_api.py | Python | apache-2.0 | 4,553 |
from kivy.uix.bubble import Bubble
from kivy.properties import BooleanProperty
class NewSwitchRename(Bubble):
    """Bubble widget with per-action disabled flags (new branch / rename /
    switch / delete); the matching UI is presumably defined in kv — TODO confirm.
    """
    # Kivy BooleanProperty flags; True disables the corresponding action.
    newbranch_disabled = BooleanProperty(False)
    rename_disabled = BooleanProperty(False)
    switch_disabled = BooleanProperty(False)
    delete_disabled = BooleanProperty(False)
    def __del__(self, *args, **kwargs):
        # Intentionally a no-op finalizer.
        pass
| RedXBeard/gitwatcher-ui | bubbles.py | Python | mit | 349 |
# -*- coding: utf-8 -*-
__author__ = 'Marco Milanesi'
__email__ = 'kpanic@gmail.com'
__version__ = '0.1.0'
| kpanic/pollicino | pollicino/__init__.py | Python | gpl-3.0 | 108 |
#!/usr/bin/python3
# -*- coding: utf-8 -*
"""
@author Simon Wu <swprojects@runbox.com>
Copyright (c) 2018 by Simon Wu <Advanced Action Scheduler>
Released subject to the GNU Public License
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import wx
import base
import json
import logging
import dialogs
import platform
import os
import os.path
import schedulemanager
import wx.dataview
import wx.adv
import sys
from about import AboutDialog
from settings import SettingsFrame
from splashscreen import SplashScreen
from taskbaricon import TaskBarIcon
from tooltip import ToolTip
from userguide import UserGuideFrame
from version import __version__
from shared import DELIMITER, FUNCTIONS
from ast import literal_eval as make_tuple
from time import gmtime, strftime
from copy import deepcopy
__title__ = "Advanced Action Scheduler"
# Platform name as reported by the OS, e.g. "Windows", "Linux", "Darwin".
PLATFORM = platform.system()
if PLATFORM == "Windows":
    import keyboard
else:
    # On non-Windows systems the keyboard package typically needs root
    # privileges; fall back to a dummy implementation if it cannot be used.
    try:
        import keyboard
        keyboard.unhook_all()
    except Exception:
        print("Failed to import keyboard. Using dummy keyboard")
        print("Not run with root privileges?")
        from dummykeyboard import keyboard
appPath = ""
if __name__ != "__main__":
    # this allows us to import relatively
    sys.path.append(os.path.dirname(os.path.realpath(__file__)))
    appPath = os.path.dirname(os.path.realpath(__file__)) + "/"
# Defaults for recognized command-line arguments.
SYS_ARGS = {
    "--verbose": 0,
}
# verbosity: maps the --verbose value to a stdlib logging level constant
LOG_LEVELS = {
    "1": 20,  # Info
    "2": 10,  # Debug
    "3": 30,  # Warning
    "4": 40,  # Error
    "5": 50,  # Critical
}
# Default application configuration, merged with the saved config on startup.
DEFAULTCONFIG = {
    "browserPresets": [],  # list of saved browsers
    "currentFile": False,  # the currently opened schedule file
    "loadLastFile": True,  # reopen the last opened schedule file on startup
    "fileList": [],  # recently opened schedule files
    "firstStart": True,
    "keepFileList": True,
    "maxUndoCount": 10,  # maximum number of undo operations a user can do
    "newProcessPresets": [],  # list of saved commands
    "openUrlPresets": [],  # list of saved urls
    "onClose": 0,  # on close window
    "onTrayIconLeft": 0,
    "onTrayIconLeftDouble": 1,
    "schedManagerLogCount": 20,  # number of logs before clearing table
    "groupSelectionSwitchTab": True,  # auto switch to Schedules tab when group item changed
    "schedManagerSwitchTab": True,  # auto switch to Manager tab when schedules enabled
    "showSplashScreen": True,
    "showTrayIcon": True,
    "toggleSchedManHotkey": "CTRL+F11",
    "toolbarSize": 48,  # maximum toolbar size
    "windowPos": False,  # the last window position
    "windowSize": False,  # the last window size
}
class Main(wx.Frame):
def __init__(self, parent=None):
self._title = "{0}".format(__title__)
wx.Frame.__init__(self, parent=parent, title=self._title)
self._ids = {}
self._appConfig = DEFAULTCONFIG
self._aboutDialog = None
self._powerAction = None
self._powerDialog = []
self._settingsDialog = None
self._userGuideDialog = None
self._clipboard = None
self._currentSelectionType = None
self._currentTreeFocus = None
self._fileList = []
self._fileListMenuItems = {}
self._imageList = wx.ImageList(32, 32)
self._schedImageList = wx.ImageList(32, 32)
self._imageListRef = []
self._overrideToolSize = None
self._data = {}
self._menus = {}
self._redoStack = []
self._undoStack = []
self._commandState = 0
self._schedManager = schedulemanager.Manager(self)
self._taskBarIcon = None
self._toolbarBitmaps = {}
self.toolbar = None
self.toolTip = ToolTip(self)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.Bind(wx.EVT_SIZE, self.OnSize)
# -----
self.CreateMenu()
self.CreateImageList()
self.CreateToolbarBitmaps()
self.CreateToolbar()
self.CreateStatusBar()
self.SetIcon(wx.Icon("icons/icon.png"))
self.CreateUI()
# load settings
self.LoadConfig()
# setup hotkeys
self.SetupHotkeys()
self.SetupAcceleratorTable()
self.powerTimer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.OnPowerTimer, self.powerTimer)
self.powerTimer.Start(1000)
self.SetMinSize((700, 600))
self.SetSize((700, 600))
def ids(self, value):
""" return existing ID or create a new ID """
if value not in self._ids:
self._ids[value] = wx.NewId()
return self._ids[value]
@property
def appPath(self):
return appPath
@property
def imagePath(self):
return appPath + "images/"
@property
def configPath(self):
sp = wx.StandardPaths.Get()
path = sp.GetUserConfigDir()
if PLATFORM == "Linux":
dirPath = os.path.join(path, ".advancedactionscheduler")
else:
dirPath = os.path.join(path, "Advanced Action Scheduler")
path = os.path.join(dirPath, "config.json")
if not os.path.exists(os.path.join(dirPath)):
os.makedirs(dirPath)
return path
@property
def taskBarIcon(self):
return self._taskBarIcon
@taskBarIcon.setter
def taskBarIcon(self, value):
self._taskBarIcon = value
@property
def groupSelection(self):
return self.groupList.GetSelection()
@property
def scheduleSelection(self):
return self.schedList.GetSelection()
@property
def commandState(self):
return self._commandState
@commandState.setter
def commandState(self, value):
self._commandState = value
self.UpdateTitlebar()
@property
def enableTool(self):
"""Enable or disable a toolbar tool"""
return self.toolbar.EnableTool
@property
def infoSchedFont(self):
return wx.Font(8, wx.FONTFAMILY_MODERN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False)
@property
def imageList(self):
return self._imageList
@property
def schedImageList(self):
return self._schedImageList
def imageListIndex(self, label):
return self._imageListRef.index(label.lower().replace(" ", ""))
def AddLogMessage(self, message):
""" insert log message as first item to schedule messenger list """
if self._appConfig["schedManagerLogCount"] <= 0:
self.schedLog.DeleteAllItems()
return
columnNames = {}
for x in range(self.schedLog.GetColumnCount()):
column = self.schedLog.GetColumn(x)
columnText = column.GetText()
columnNames[columnText] = x
while self.schedLog.GetItemCount() >= self._appConfig["schedManagerLogCount"]:
try:
self.schedLog.DeleteItem(self.schedLog.GetItemCount() - 1)
except Exception as e:
pass
if self.schedLog.GetItemCount() == 0:
n = 0
else:
first = self.schedLog.GetItemText(0, columnNames["#"])
n = int(first) + 1
item = self.schedLog.InsertItem(0, "")
self.schedLog.SetItem(item, columnNames["#"], str(n))
dt = gmtime()
self.schedLog.SetItem(item, columnNames["Time"], strftime("%H:%M:%S", dt))
self.schedLog.SetItem(item, columnNames["Date"], strftime("%d-%m-%Y", dt))
for k, v in message.items():
try:
self.schedLog.SetItem(item, columnNames[k], v)
except Exception as e:
pass
def AppendToSelectedScheduleItem(self, name, value):
if not self.scheduleSelection.IsOk():
return
self.SaveStateToUndoStack()
self.ClearRedoStack()
value = name + DELIMITER + value
newItem = self.schedList.AppendItem(self.scheduleSelection, value)
self.schedList.SetItemImage(newItem, self.imageListIndex(name))
# reflect changes in application data
idx = self.schedList.GetItemIndex(newItem)
groupSel = self.groupList.GetSelection()
n = 0
item = self.schedList.GetFirstItem()
while item != newItem and item.IsOk():
n += 1
item = self.schedList.GetNextItem(item)
self._data[groupSel]["schedules"].insert(n, (idx, {'columns': {"0": value},
'expanded': False,
'selected': True,
'checked': 1}))
self.schedList.Select(newItem)
self.schedList.CheckItem(newItem)
self.schedList.Expand(newItem)
self.schedList.SetFocus()
def AppendSchedules(self):
if self._clipboard["toplevel"] is False:
return
self.SaveStateToUndoStack()
self.ClearRedoStack()
clip = self._clipboard
schedules = clip["schedules"]
schedules = self.GetUniqueSchedules(schedules)
self.schedList.AppendSubTree(self.schedList.GetRootItem(), schedules)
index = self.GetGroupListIndex(self.groupSelection)
self._data[index]["schedules"] = self.GetScheduleTree()
self.UpdateScheduleImageList()
def CancelPowerAlerts(self):
for d in self._powerDialog:
d.Close()
self._powerDialog = []
def ClearRecentFiles(self):
for item in self._fileListMenuItems.values():
self.menuFile.Delete(item)
self._fileListMenuItems = {}
self._fileList = []
def ClearRedoStack(self):
self._redoStack = []
self.UpdateToolbar()
def ClearUI(self):
""" clears lists and set toolbar/button states appropriately """
self.groupList.DeleteAllItems()
self.schedList.DeleteAllItems()
self._data = {}
self.commandState = 0
self._undoStack = []
self._redoStack = []
self._appConfig["currentFile"] = False
self.UpdateScheduleToolbar()
self.UpdateGroupToolbar()
self.UpdateToolbar()
self.UpdateTitlebar()
def CloseFile(self):
self.Raise()
self.Restore()
self.Show()
if self._commandState != 0:
dlg = wx.MessageDialog(self,
message="Save file before closing?",
caption="Close File",
style=wx.YES_NO|wx.CANCEL|wx.CANCEL_DEFAULT)
ret = dlg.ShowModal()
if ret == wx.ID_CANCEL:
return wx.ID_CANCEL
if ret == wx.ID_YES:
self.SaveData()
if self._appConfig["loadLastFile"] is False:
self._appConfig["currentFile"] = False # clear
if self._appConfig["keepFileList"] is False:
self._appConfig["fileList"] = []
else:
self._appConfig["fileList"] = self._fileList
self.SaveDataToJSON(self.configPath, self._appConfig)
self.ClearUI()
def CopySelection(self):
if self._currentSelectionType == "group":
index = self.GetGroupListIndex(self.groupSelection)
self._clipboard = {"origin": "group",
"type": "copy",
"schedules": self._data[index]["schedules"],
"name": self.groupList.GetItemText(self.groupSelection)}
self._clipboard["toplevel"] = True
elif self._currentSelectionType == "schedule":
self._clipboard = {"origin": "schedule",
"type": "copy",
"schedules": self.schedList.GetSubTree(self.scheduleSelection),
"name": self.groupList.GetItemText(self.scheduleSelection)}
if self.schedList.IsTopLevel(self.scheduleSelection):
self._clipboard["toplevel"] = True
else:
self._clipboard["toplevel"] = False
self.UpdateToolbar()
def CreateImageList(self):
labels = ["group", "schedule", "groupchecked"]
labels.extend(FUNCTIONS)
for label in labels:
img = wx.Image(self.imagePath + label.lower() + ".png")
img.Rescale(32, 32, wx.IMAGE_QUALITY_HIGH)
bmp = wx.Bitmap(img)
self.imageList.Add(bmp)
self.schedImageList.Add(bmp)
self._imageListRef.append(label.lower())
def CreateMenu(self):
menubar = wx.MenuBar()
menuFile = wx.Menu()
fileMenus = [
("New", "Ctrl+N", "New Schedule File", wx.ID_NEW),
("Open...", "Ctrl+O", "Open Schedule File", wx.ID_OPEN),
("Save", "Ctrl+S", "Save Schedule File", wx.ID_SAVE),
("Save As...", "Ctrl+Shift+S", "Save Schedule File As...", wx.ID_SAVEAS),
("Close File", "Ctrl+W", "Close Schedule File", wx.ID_CLOSE),
# ("Import", "Ctrl+I", "Import Schedule File", wx.ID_CDROM), # because no import id
("Settings", "Alt+P", "Open Settings...", wx.ID_PREFERENCES),
("Exit", "Ctrl+Q", "Exit Program", wx.ID_EXIT)]
for item, accelHint, helpStr, wxId in fileMenus:
self._menus[item] = menuFile.Append(wxId, item + "\t" + accelHint, helpStr)
self.Bind(wx.EVT_MENU, self.OnMenu, self._menus[item])
if item == "Close File":
menuFile.AppendSeparator()
elif item == "Settings":
menuFile.AppendSeparator()
self.menuFile = menuFile
menuRun = wx.Menu()
runMenus = [("Enable Schedule Manager", "Enable Schedule Manager", wx.ID_EXECUTE),
("Disable Schedule Manager", "Disable Schedule Manager", wx.ID_STOP)]
for item, helpStr, wxId in runMenus:
self._menus[item] = menuRun.Append(wxId, item, helpStr)
self.Bind(wx.EVT_MENU, self.OnMenu, self._menus[item])
self._menus["Disable Schedule Manager"].Enable(False)
menuHelp = wx.Menu()
helpMenus = [("User Guide\tCtrl+H", "Open User Guide", wx.ID_HELP),
("Check for updates", "Check for updates", wx.ID_SETUP),
("About\tCtrl+F1", "Import Images From Folder", wx.ID_ABOUT)]
for item, helpStr, wxId in helpMenus:
self._menus[item] = menuHelp.Append(wxId, item, helpStr)
self.Bind(wx.EVT_MENU, self.OnMenu, self._menus[item])
menubar.Append(menuFile, "&File")
menubar.Append(menuRun, "&Run")
menubar.Append(menuHelp, "&Help")
self.menubar = menubar
self.SetMenuBar(menubar)
def CreateToolbar(self):
self._tools = {}
toolbar = wx.ToolBar(self, style=wx.TB_FLAT)
toolSize = int(self._appConfig["toolbarSize"]), int(self._appConfig["toolbarSize"])
toolbar.SetToolBitmapSize(toolSize)
for label, help, state, wxId in [
("New", "New", True, wx.ID_NEW),
("Open", "Open", True, wx.ID_OPEN),
("Save", "Save", True, wx.ID_SAVE),
("Save As...", "Save As...", True, wx.ID_SAVEAS),
("Close", "Close", True, wx.ID_CLOSE),
# ("Import", "Import", True, None),
("Add Group", "Add Group", True, None),
("Remove Group", "Remove Selected Group", False, None),
("Cut", "Cut", False, wx.ID_CUT),
("Copy", "Copy", False, wx.ID_COPY),
("Paste", "Paste", False, wx.ID_PASTE),
("Undo", "Undo", False, wx.ID_UNDO),
("Redo", "Redo", False, wx.ID_REDO),
("Enable Schedule Manager", "Enable Schedule Manager", True, wx.ID_EXECUTE),
("Settings", "Settings", True, wx.ID_PREFERENCES)]:
if wxId is None:
wxId = self.ids(label)
bmp = self._toolbarBitmaps[toolSize[0]][label]
tool = toolbar.AddTool(wxId, label=label, bitmap=bmp, shortHelp=help)
self.Bind(wx.EVT_TOOL, self.OnMenu, tool)
self._tools[label] = tool
tool.Enable(state)
if label == "Close":
toolbar.AddSeparator()
elif label == "Paste":
toolbar.AddSeparator()
elif label == "Remove Group":
toolbar.AddSeparator()
elif label == "Redo":
toolbar.AddSeparator()
elif label == "Enable Schedule Manager":
toolbar.AddStretchableSpace()
toolbar.Realize()
self.toolbar = toolbar
self.SetToolBar(toolbar)
def CreateToolbarBitmaps(self):
for label in ["New", "Open", "Save", "Save As...", "Close",
"Import", "Add Group", "Remove Group",
"Remove Group", "Cut", "Copy", "Paste",
"Undo", "Redo", "Enable Schedule Manager",
"Disable Schedule Manager", "Settings"]:
for size in [16, 32, 48, 64, 128, 256]:
# print(label)
img = wx.Image("icons/{0}.png".format(label.lower().replace(" ", "").replace(".", "")))
img.Rescale(size, size, wx.IMAGE_QUALITY_HIGH)
bmp = wx.Bitmap(img)
try:
self._toolbarBitmaps[size]
except Exception as e:
self._toolbarBitmaps[size] = {}
self._toolbarBitmaps[size][label] = bmp
def CreateTrayIcon(self):
if self.taskBarIcon:
self.taskBarIcon.RemoveTray()
self.taskBarIcon = TaskBarIcon(self)
def CreateUI(self):
self.splitter = wx.SplitterWindow(self)
leftPanel = wx.Panel(self.splitter)
# leftPanel.SetBackgroundColour("DARKGREY")
leftSizer = wx.BoxSizer(wx.VERTICAL)
hSizerGroup = wx.WrapSizer(wx.HORIZONTAL)
self.groupBtns = {}
for label in ["Add Group", "Up", "Down", "Edit", "Toggle", "Delete"]:
wxId = self.ids("group_" + label)
btn = wx.Button(leftPanel, wxId, label, name=label, style=wx.BU_EXACTFIT|wx.BU_NOTEXT)
if label != "Add Group":
btn.Disable()
self.groupBtns[label] = btn
img = wx.Image("icons/{0}.png".format(label.lower().replace(" ", "")))
img = img.Rescale(24, 24, wx.IMAGE_QUALITY_HIGH)
bmp = wx.Bitmap(img)
if label == "Edit":
btn.Bind(wx.EVT_BUTTON, self.OnGroupItemEdit)
else:
btn.Bind(wx.EVT_BUTTON, self.OnGroupToolBar)
btn.SetBitmap(bmp)
tooltip = wx.ToolTip(label)
btn.SetToolTip(tooltip)
hSizerGroup.Add(btn, 0, wx.ALL|wx.EXPAND, 2)
leftSizer.Add(hSizerGroup, 0, wx.ALL|wx.EXPAND, 5)
self.groupList = base.TreeListCtrl(leftPanel)
self.groupList.AssignImageList(self.imageList)
self.groupList.Bind(wx.EVT_CHAR, self.OnGroupChar)
self.groupList.Bind(wx.dataview.EVT_TREELIST_SELECTION_CHANGED, self.OnGroupItemSelectionChanged)
self.groupList.Bind(wx.dataview.EVT_TREELIST_ITEM_CONTEXT_MENU, self.OnGroupContextMenu)
self.groupList.Bind(wx.dataview.EVT_TREELIST_ITEM_CHECKED, self.OnGroupItemChecked)
self.groupList.Bind(wx.dataview.EVT_TREELIST_ITEM_ACTIVATED, self.OnGroupItemEdit)
self.groupList.AppendColumn("Group")
self.groupListRoot = self.groupList.GetRootItem()
leftSizer.Add(self.groupList, 1, wx.ALL|wx.EXPAND, 5)
leftPanel.SetSizer(leftSizer)
# ----- rhs layout -----
nbPanel = wx.Panel(self.splitter)
self.notebook = wx.Notebook(nbPanel)
nbSizer = wx.BoxSizer(wx.VERTICAL)
nbSizer.Add(self.notebook, 1, wx.ALL|wx.EXPAND, 2)
# the schedule panel/tab page
schedPanel = wx.Panel(self.notebook)
schedSizer = wx.BoxSizer(wx.VERTICAL)
# -----
hSizerFunctions = wx.WrapSizer(wx.HORIZONTAL)
self.schedBtns = {}
for label in ["Add Schedule", "Up", "Down", "Edit", "Toggle", "Delete"]:
wxId = self.ids("schedule_" + label)
btn = wx.Button(schedPanel, wxId, label, name=label, style=wx.BU_EXACTFIT|wx.BU_NOTEXT)
btn.Disable()
self.schedBtns[label] = btn
img = wx.Image("icons/{0}.png".format(label.lower().replace(" ", "")))
img = img.Rescale(32, 32, wx.IMAGE_QUALITY_HIGH)
bmp = wx.Bitmap(img)
if label == "Edit":
btn.Bind(wx.EVT_BUTTON, self.OnScheduleItemEdit)
else:
btn.Bind(wx.EVT_BUTTON, self.OnScheduleToolBar)
if label in ["Delete"]:
hSizerFunctions.AddStretchSpacer()
btn.SetBitmap(bmp)
tooltip = wx.ToolTip(label)
btn.SetToolTip(tooltip)
hSizerFunctions.Add(btn, 0, wx.ALL|wx.EXPAND, 2)
schedSizer.Add(hSizerFunctions, 0, wx.ALL|wx.EXPAND, 2)
schedSizer.Add(wx.StaticLine(schedPanel), 0, wx.ALL|wx.EXPAND, 2)
# schedPanel.SetBackgroundColour("lightgray")
# -----
hSizerFunctions2 = wx.BoxSizer(wx.HORIZONTAL)
self.cboxFunctions = wx.ComboBox(schedPanel, style=wx.CB_READONLY, choices=FUNCTIONS, size=(-1, -1))
self.cboxFunctions.SetSelection(0)
self.cboxFunctions.Disable()
self.btnAddFunction = wx.Button(schedPanel, label="Add Action", name="Add Action", size=(-1, -1))
img = wx.Image("icons/add.png")
img = img.Rescale(24, 24, wx.IMAGE_QUALITY_HIGH)
bmp = wx.Bitmap(img)
self.btnAddFunction.SetBitmap(bmp)
self.btnAddFunction.Bind(wx.EVT_BUTTON, self.OnScheduleToolBar)
self.btnAddFunction.Disable()
hSizerFunctions2.Add(self.cboxFunctions, 0, wx.ALL|wx.CENTRE, 5)
hSizerFunctions2.Add(self.btnAddFunction, 0, wx.ALL|wx.CENTRE, 5)
schedSizer.Add(hSizerFunctions2, 0, wx.ALL, 0)
# -----
self.splitter2 = wx.SplitterWindow(schedPanel)
schedListPanel = wx.Panel(self.splitter2)
schedListSizer = wx.BoxSizer(wx.HORIZONTAL)
self.schedList = base.TreeListCtrl(schedListPanel, style=wx.dataview.TL_CHECKBOX)
schedListSizer.Add(self.schedList, 1, wx.ALL|wx.EXPAND, 0)
schedListPanel.SetSizer(schedListSizer)
self.schedList.AssignImageList(self.schedImageList)
self.schedList.Bind(wx.EVT_CHAR, self.OnScheduleChar)
self.schedList.Bind(wx.dataview.EVT_TREELIST_ITEM_CONTEXT_MENU, self.OnScheduleContextMenu)
self.schedList.Bind(wx.dataview.EVT_TREELIST_ITEM_ACTIVATED, self.OnScheduleTreeActivated)
self.schedList.Bind(wx.dataview.EVT_TREELIST_SELECTION_CHANGED, self.OnScheduleTreeSelectionChanged)
self.schedList.Bind(wx.dataview.EVT_TREELIST_ITEM_CHECKED, self.OnScheduleTreeItemChecked)
infoPanel = wx.Panel(self.splitter2)
infoPanelSizer = wx.BoxSizer(wx.HORIZONTAL)
self.infoSchedButton = wx.Button(infoPanel, style=wx.BU_EXACTFIT|wx.BU_NOTEXT)
self.infoSchedButton.Bind(wx.EVT_BUTTON, self.OnScheduleItemEdit)
self.infoSchedButton.SetBitmap(wx.Bitmap())
self.infoSched = wx.TextCtrl(infoPanel, style=wx.TE_MULTILINE|wx.TE_READONLY|wx.TE_RICH|wx.BORDER_NONE)
self.infoSched.SetFont(self.infoSchedFont)
self.infoSched.SetBackgroundColour(wx.Colour(60, 60, 60))
self.infoSched.SetForegroundColour(wx.Colour(250, 250, 250))
infoPanel.SetBackgroundColour(wx.Colour(60, 60, 60))
self.infoSchedButton.Hide()
infoPanelSizer.Add(self.infoSchedButton, 1, wx.ALL|wx.EXPAND, 0)
infoPanelSizer.Add(self.infoSched, 4, wx.ALL|wx.EXPAND, 5)
infoPanel.SetSizer(infoPanelSizer)
self.infoPanelSizer = infoPanelSizer
self.splitter2.SplitHorizontally(schedListPanel, infoPanel)
self.splitter2.SetSashGravity(0.8)
schedSizer.Add(self.splitter2, 1, wx.ALL|wx.EXPAND, 0)
schedPanel.SetSizer(schedSizer)
self.schedList.SetForegroundColour(wx.Colour(30, 30, 30))
self.groupList.SetForegroundColour(wx.Colour(30, 30, 30))
# the schedule manager panel/tab page
schedManagerPanel = wx.Panel(self.notebook)
schedManagerSizer = wx.BoxSizer(wx.VERTICAL)
schedManagerPanel.SetSizer(schedManagerSizer)
schedManagerHsizer = wx.BoxSizer(wx.HORIZONTAL)
self.schedManagerBtns = {}
for label in ["Clear"]:
if label == "Clear":
schedManagerHsizer.AddStretchSpacer()
btn = wx.Button(schedManagerPanel, label=label, name=label, size=(-1, -1),
style=wx.BU_EXACTFIT|wx.BU_NOTEXT)
self.schedBtns[label] = btn
img = wx.Image("icons/{0}.png".format(label.lower().replace(" ", "")))
img = img.Rescale(32, 32, wx.IMAGE_QUALITY_HIGH)
bmp = wx.Bitmap(img)
btn.SetBitmap(bmp)
btn.Bind(wx.EVT_BUTTON, self.OnScheduleManagerToolbar)
schedManagerHsizer.Add(btn, 0, wx.ALL, 5)
self.schedManagerBtns[label] = btn
tooltip = wx.ToolTip(label)
btn.SetToolTip(tooltip)
schedManagerSizer.Add(schedManagerHsizer, 0, wx.ALL|wx.EXPAND, 0)
self.schedLog = base.BaseList(schedManagerPanel)
self.schedLog.Bind(wx.EVT_RIGHT_UP, self.OnScheduleManagerContextMenu)
self.schedLog.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnScheduleManagerContextMenu)
self.schedLog.InsertColumn(0, "Group")
self.schedLog.InsertColumn(1, "Schedule")
self.schedLog.InsertColumn(2, "Message")
self.schedLog.InsertColumn(4, "Time")
self.schedLog.InsertColumn(5, "Date")
self.schedLog.InsertColumn(6, "#")
self.schedLog.setResizeColumn(3)
schedManagerSizer.Add(self.schedLog, 1, wx.ALL|wx.EXPAND, 5)
self.notebook.AddPage(schedPanel, "Schedules")
self.notebook.AddPage(schedManagerPanel, "Manager")
nbPanel.SetSizer(nbSizer)
self.splitter.SplitVertically(leftPanel, nbPanel)
self.splitter.SetSashGravity(0.2)
self.schedList.AppendColumn("Schedule")
def CutSelection(self):
if self._currentSelectionType == "group":
if not self.groupSelection.IsOk():
return
self.SaveStateToUndoStack()
index = self.GetGroupListIndex(self.groupSelection)
self._clipboard = {"origin": "group",
"type": "cut",
"schedules": self._data[index]["schedules"],
"name": self.groupList.GetItemText(self.groupSelection),
"toplevel": True}
next = self.groupList.GetNextSibling(self.groupSelection)
self.groupList.DeleteItem(self.groupSelection)
self.schedList.DeleteAllItems()
del self._data[index]
if next.IsOk():
self.groupList.Select(next)
elif self._currentSelectionType == "schedule":
if not self.scheduleSelection.IsOk():
return
self.SaveStateToUndoStack()
index = self.GetGroupListIndex(self.groupSelection)
nextItem = self.groupList.GetNextSibling(self.groupSelection)
self._clipboard = {"origin": "schedule",
"type": "cut",
"schedules": self.schedList.GetSubTree(self.scheduleSelection),
"name": self.groupList.GetItemText(self.scheduleSelection)}
if self.schedList.IsTopLevel(self.scheduleSelection):
self._clipboard["toplevel"] = True
else:
self._clipboard["toplevel"] = False
self.schedList.DeleteItem(self.scheduleSelection)
self._data[index]["schedules"] = self.GetScheduleTree()
if nextItem.IsOk():
self.groupList.Select(nextItem)
self.UpdateScheduleInfo()
self.UpdateToolbar()
def DeleteGroupItem(self):
        """Remove the selected group, its schedule view and its data entry."""
        selection = self.groupSelection
        if not selection.IsOk():
            return
        dataIndex = self.GetGroupListIndex(selection)
        sibling = self.groupList.GetNextSibling(selection)
        self.SaveStateToUndoStack()
        self.ClearRedoStack()
        # clear the schedule view before dropping the group itself
        self.schedList.DeleteAllItems()
        self.groupList.DeleteItem(selection)
        del self._data[dataIndex]
        if sibling.IsOk():
            self.groupList.Select(sibling)
        self.UpdateGroupToolbar()
        self.UpdateScheduleToolbar()
        self.UpdateScheduleInfo()
def DeleteScheduleItem(self):
        """Remove the selected schedule item (and its subtree) from the tree."""
        if not self.scheduleSelection.IsOk():
            return
        self.SaveStateToUndoStack()
        nextItem = self.schedList.GetNextSibling(self.scheduleSelection)
        self.schedList.DeleteItem(self.scheduleSelection)
        self.SaveScheduleTreeToData()
        self.ClearRedoStack()
        if nextItem.IsOk():
            self.schedList.Select(nextItem)
        # one toolbar refresh at the end suffices; the original called
        # UpdateScheduleToolbar() twice (before and after re-selection)
        self.UpdateScheduleToolbar()
        self.UpdateScheduleInfo()
def DisableScheduleManager(self):
        """Stop the schedule manager and flip menu/toolbar state to 'Enable'."""
        # Enable/Disable menu item accordingly
        self._menus["Disable Schedule Manager"].Enable(False)
        self._menus["Enable Schedule Manager"].Enable(True)
        self._tools["Enable Schedule Manager"].SetLabel("Enable Schedule Manager")
        self._tools["Enable Schedule Manager"].SetShortHelp("Enable Schedule Manager")
        width, height = self.toolbar.GetToolBitmapSize()
        # use the pre-scaled cached bitmap; the original also loaded and
        # rescaled icons/enableschedulemanager.png but never used the result
        bmp = self._toolbarBitmaps[width]["Enable Schedule Manager"]
        self._tools["Enable Schedule Manager"].SetNormalBitmap(bmp)
        wx.CallAfter(self.toolbar.Realize)
        self._schedManager.Stop()
        if self.taskBarIcon:
            self.taskBarIcon.SetTrayIcon(running=False)
def DoRedo(self):
        """Re-apply the most recently undone state, if any."""
        if not self._redoStack:
            return
        # move the current state onto the undo stack before restoring
        self._undoStack.append(self.GetCommandState())
        state = self._redoStack.pop()
        self.RestoreState(state)
        self.commandState += 1
        self.UpdateScheduleInfo()
        self.UpdateGroupImageList()
def DoUndo(self):
        """Restore the most recently saved undo state, if any."""
        if not self._undoStack:
            return
        # move the current state onto the redo stack before restoring
        self._redoStack.append(self.GetCommandState())
        state = self._undoStack.pop()
        self.RestoreState(state)
        self.commandState -= 1
        self.UpdateScheduleInfo()
        self.UpdateGroupImageList()
def EditSelectedScheduleItem(self, name, value):
        """Rewrite the selected schedule item's text as name + DELIMITER + value."""
        item = self.schedList.GetSelection()
        if not item.IsOk():
            return
        self.SaveStateToUndoStack()
        self.ClearRedoStack()
        newText = DELIMITER.join([name, value])
        self.schedList.SetItemText(item, 0, newText)
        self.UpdateSelectedItemInData()
        self.UpdateScheduleInfo()
        self.schedList.SetFocus()
def EnableScheduleManager(self):
        """Start the schedule manager with the schedules of all checked groups.

        Flips the menu/toolbar state to 'Disable', collects the schedules of
        every checked group and hands them to the manager; if no group is
        checked the manager is immediately disabled again.
        """
        self.CancelPowerAlerts()
        # Enable/Disable menu item accordingly
        self._menus["Disable Schedule Manager"].Enable(True)
        self._menus["Enable Schedule Manager"].Enable(False)
        self._tools["Enable Schedule Manager"].SetLabel("Disable Schedule Manager")
        self._tools["Enable Schedule Manager"].SetShortHelp("Disable Schedule Manager")
        width, height = self.toolbar.GetToolBitmapSize()
        bmp = self._toolbarBitmaps[width]["Disable Schedule Manager"]
        self._tools["Enable Schedule Manager"].SetNormalBitmap(bmp)
        sendData = {}
        # only checked groups (check state != 0) are sent to the manager
        for item, data in self._data.items():
            if self.groupList.GetCheckedState(item) == 0:
                continue
            itemText = self.groupList.GetItemText(item)
            sendData[itemText] = data["schedules"]
        self.toolbar.Realize()
        if not sendData:
            # nothing to run; revert the UI state
            self.DisableScheduleManager()
            return
        self._schedManager.SetSchedules(sendData)
        self._schedManager.Start()
        # switch to the manager when schedules are started
        if self._appConfig["schedManagerSwitchTab"] is True:
            self.notebook.SetSelection(1)
        if self.taskBarIcon:
            self.taskBarIcon.SetTrayIcon(running=True)
def ExtractContentsFromSchedules(self, schedules):
        """Regroup a flat (index, data) schedule list by top-level item.

        Returns a dict mapping each top-level index (int) to the list of its
        descendant items, where every descendant's comma-separated index path
        has the leading (top-level) component stripped.
        """
        grouped = {}
        for index, itemData in schedules:
            parts = index.split(",")
            if len(parts) == 1:
                # a top-level item starts a new bucket
                current = int(index)
                grouped[current] = []
            else:
                grouped[current].append((",".join(parts[1:]), itemData))
        return grouped
def GetAppConfig(self):
        """Return the shared application configuration dict (mutable; callers edit it in place)."""
        return self._appConfig
def GetCommandState(self):
        """ get state for undo/redo operations """
        # snapshot of all data plus both current selections; deep-copied so
        # later mutations cannot leak into stored undo/redo entries
        state = {"data": self.GetDataForJSON(),
                 "groupSel": self.groupList.GetIndexByOrder(self.groupList.GetSelection()),
                 "schedSel": self.schedList.GetIndexByOrder(self.schedList.GetSelection()),
                 "currentSelectionType": self._currentSelectionType}
        return deepcopy(state)
def GetDialog(self, label, value=None, appendResult=False):
        """
        Returns a dialog which matches the given label i.e. action.
        value: if editing an action, we set the dialog to the current values
        appendResult: this is passed to dialogs, as a workaround, where we can't EndModal
                      correctly so we tell the dialog to append the result or edit the
                      selected action
        """
        if label == "CloseWindow":
            dlg = dialogs.window.WindowDialog(self, title="Close Window")
        elif label == "Control":
            dlg = dialogs.control.AddControl(self)
        elif label == "Delay":
            dlg = dialogs.delay.AddDelay(self)
        elif label == "IfWindowNotOpen":
            # BUG FIX: the titles for the two If* dialogs were swapped
            dlg = dialogs.window.WindowDialog(self, title="If Window Not Open")
        elif label == "IfWindowOpen":
            dlg = dialogs.window.WindowDialog(self, title="If Window Open")
        elif label == "KillProcess":
            dlg = dialogs.window.WindowDialog(self, title="Kill Process")
        elif label == "SwitchWindow":
            dlg = dialogs.window.WindowDialog(self, title="Switch Window")
        elif label == "MouseClickAbsolute":
            dlg = dialogs.mouseabsolute.MouseClickAbsolute(self, appendResult=appendResult)
        elif label == "MouseClickRelative":
            dlg = dialogs.mouserelative.MouseClickRelative(self, appendResult=appendResult)
        elif label == "NewProcess":
            dlg = dialogs.process.NewProcess(self)
            dlg.SetHistoryList(self._appConfig["newProcessPresets"])
        elif label == "OpenURL":
            dlg = dialogs.browser.OpenURL(self)
            dlg.SetBrowserPresets(self._appConfig["browserPresets"])
            dlg.SetUrlPresets(self._appConfig["openUrlPresets"])
        elif label == "Power":
            dlg = dialogs.power.AddPower(self)
        elif label == "StartSchedule":
            dlg = dialogs.schedule.StartSchedule(self)
            dlg.SetScheduleNames(self.GetScheduleNames())
        elif label == "StopSchedule":
            dlg = dialogs.schedule.StopSchedule(self)
            dlg.SetScheduleNames(self.GetScheduleNames())
        # NOTE(review): an unknown label leaves dlg unbound and raises
        # NameError at the return below -- callers only pass known labels
        if value:
            dlg.SetValue(value)
        return dlg
def GetDataForJSON(self):
        """ convert data for json dump """
        # n counts group items in display order; keys in the dump are "0", "1", ...
        n = 0
        jsonData = {idx: idxData for idx, idxData in self.GetGroupTree()}
        child = self.groupList.GetFirstItem()
        while child.IsOk():
            childText = self.groupList.GetItemText(child)
            # locate the _data entry whose tree item has the same label
            for item, itemData in self._data.items():
                if self.groupList.GetItemText(item) != childText:
                    continue
                break
            # NOTE(review): if no _data entry matches childText, itemData here
            # is stale from the previous iteration (or unbound on the first
            # pass) -- confirm every group item always has a _data entry
            jsonData[str(n)]["schedules"] = itemData["schedules"]
            jsonData[str(n)]["checked"] = itemData["checked"]
            n += 1
            child = self.groupList.GetNextSibling(child)
        jsonData["__version__"] = __version__
        return jsonData
def GetGroupListIndex(self, item):
        """Return the key in self._data that compares equal to *item*, or -1.

        Comparing TreeListItem keys for equality is the only way to locate an
        existing item in self._data.
        """
        matches = (key for key in self._data if key == item)
        return next(matches, -1)
def GetGroupNames(self):
        """Return the group names in display order (top to bottom)."""
        names = []
        item = self.groupList.GetFirstItem()
        while item.IsOk():
            names.append(self.groupList.GetItemText(item, col=0))
            item = self.groupList.GetNextSibling(item)
        return names
def GetBitmapFromImage(self, name, size=None):
        """Load <imagePath><name>.png (lowercased, spaces removed), optionally rescaled to *size*."""
        fileName = name.lower().replace(" ", "") + ".png"
        image = wx.Image(self.imagePath + fileName)
        if size:
            width, height = size
            image = image.Rescale(width, height, wx.IMAGE_QUALITY_HIGH)
        return wx.Bitmap(image)
def GetGroupTree(self):
        """Return the group list's tree structure; used when saving data."""
        return self.groupList.GetTree()
def GetScheduleNames(self):
        """Return the names of all top-level schedule items, in order."""
        names = []
        item = self.schedList.GetFirstItem()
        while item.IsOk():
            # item text is "<name><DELIMITER><params>"; keep the name only
            text = self.schedList.GetItemText(item, 0)
            names.append(text.split(DELIMITER)[0])
            item = self.schedList.GetNextSibling(item)
        return names
def GetScheduleTree(self):
        """Return the schedule tree structure; used when saving data."""
        return self.schedList.GetTree()
def GetUniqueSchedules(self, schedules):
        """Return a deep copy of *schedules* with clashing top-level names renamed.

        Top-level items whose name already exists in the current schedule tree
        get a "_<n>" suffix; child items are passed through untouched.
        """
        schedules = deepcopy(schedules)
        taken = self.GetScheduleNames()
        renamed = []
        for index, itemData in schedules:
            if "," in index:
                # child items keep their text untouched
                renamed.append((index, itemData))
                continue
            name, data = itemData["columns"]["0"].split(DELIMITER)
            candidate = name
            suffix = 1
            while candidate in taken:
                candidate = "%s_%d" % (name, suffix)
                suffix += 1
            taken.append(candidate)
            itemData["columns"]["0"] = candidate + DELIMITER + data
            renamed.append((index, itemData))
        return renamed
def LoadConfig(self):
        """ load application config and restore config settings """
        # read the config; (re)write the defaults when the file is missing
        # or does not contain valid JSON
        try:
            with open(self.configPath, 'r') as file:
                self._appConfig.update(json.load(file))
        except FileNotFoundError:
            with open(self.configPath, 'w') as file:
                json.dump(self._appConfig, file, sort_keys=True, indent=2)
        except json.JSONDecodeError:
            with open(self.configPath, 'w') as file:
                json.dump(self._appConfig, file, sort_keys=True, indent=2)
            # (removed a redundant file.close(); the with-block already closed it)
        self.SetRecentFiles()
        if self._appConfig["loadLastFile"] is True:
            if os.path.exists(self._appConfig["currentFile"]):
                self.LoadFile(self._appConfig["currentFile"])
            else:
                self._appConfig["currentFile"] = False
        if self._appConfig["showSplashScreen"] is True:
            SplashScreen(800)
        # set position of window providing that the users monitor geometry
        # (albeit with some arbitrary leeway) contains the last mouse position
        try:
            x, y = [int(v) for v in make_tuple(self._appConfig["windowPos"])]
            displays = (wx.Display(i) for i in range(wx.Display.GetCount()))
            for display in displays:
                x1, y1, w, h = display.GetGeometry()
                if x in range(x1 - 200, x1 + w) and y in range(y1 - 200, y1 + h):
                    self.SetPosition((x, y))
                    break
            # (removed a no-op trailing `continue` from the loop above)
            x, y = [int(v) for v in make_tuple(self._appConfig["windowSize"])]
            self.SetSize((x, y))
        except Exception as e:
            # geometry restore is best effort only
            print(e)
        self.UpdateTrayIcon()
        self.UpdateToolbarSize()
        self.UpdateTitlebar()
        # wx.CallLater(1000, self.Show)
        self.Show()
        self.Raise()
        if self._appConfig["firstStart"]:
            self.ShowUserGuide()
            self._appConfig["firstStart"] = False
def LoadFile(self, filePath):
        """Load a schedule file by file path and make it the current file.

        On success the group tree is replaced, the schedule view is cleared
        and the path is recorded in the config and recent-files list.
        """
        if filePath:
            try:
                with open(filePath, 'r') as file:
                    fileData = json.load(file)
                self.SetGroupTree(fileData)
                self.schedList.DeleteAllItems()
                self._appConfig["currentFile"] = filePath
                self.SaveDataToJSON(self.configPath, self._appConfig)
                self.UpdateTitlebar()
            except FileNotFoundError as e:
                # BUG FIX: log the caught exception instance; the original
                # formatted the exception *class* object instead
                logging.error("{0}".format(e))
                return
            except json.JSONDecodeError as e:
                # TODO: raise corrupt/invalid file error
                logging.error("{0}".format(e))
                return
            self.UpdateRecentFiles(filePath)
def MoveGroupItemDown(self):
        """Move the selected group one position down (undoable).

        Implemented as insert-copy-after-next-sibling + delete-original, with
        the check state and the self._data entry carried over to the new item.
        """
        # valid item selection?
        selection = self.groupList.GetSelection()
        if not selection.IsOk():
            return
        # can item be moved down?
        # NOTE(review): assert is stripped under python -O; the toolbar state
        # presumably guarantees a next sibling here -- confirm
        next = self.groupList.GetNextSibling(selection)
        assert next.IsOk(), "Next item is not valid"
        self.SaveStateToUndoStack()
        idxText = self.groupList.GetItemText(selection)
        checkState = self.groupList.GetCheckedState(selection)
        idxData = self._data[selection]
        # re-insert a copy after the next sibling, then drop the original
        newItem = self.groupList.InsertItem(self.groupListRoot, next, idxText)
        self.groupList.DeleteItem(selection)
        self.groupList.CheckItem(newItem, checkState)
        self._data[newItem] = idxData
        del self._data[selection]
        self.groupList.Select(newItem)
        self.UpdateGroupImageList()
        self.SaveScheduleTreeToData()
        self.ClearRedoStack()
        self.UpdateGroupToolbar()
def MoveGroupItemUp(self):
        """Move the selected group one position up, by moving the previous
        sibling down past it (mirror of MoveGroupItemDown)."""
        # valid item selection?
        selection = self.groupSelection
        if not selection.IsOk():
            return
        # can previous item be moved down?
        # BUG FIX: query the *group* list for the previous sibling; the
        # original asked self.schedList with a group-list item
        previous = self.groupList.GetPreviousSibling(selection)
        assert previous.IsOk() is True, "Previous item is not valid"
        self.SaveStateToUndoStack()
        idxText = self.groupList.GetItemText(previous)
        checkState = self.groupList.GetCheckedState(previous)
        idxData = self._data[previous]
        # re-insert a copy of the previous sibling after the selection,
        # then drop the original
        newItem = self.groupList.InsertItem(self.groupListRoot, selection, idxText)
        self.groupList.DeleteItem(previous)
        self.groupList.CheckItem(newItem, checkState)
        self._data[newItem] = idxData
        del self._data[previous]
        self.SaveScheduleTreeToData()
        self.ClearRedoStack()
        self.UpdateGroupImageList()
        self.UpdateGroupToolbar()
def MoveScheduleItemDown(self):
        """Move the selected schedule item (with subtree) below its next sibling.

        The visual tree is updated first, then the flat (index, data) list in
        self._data is renumbered to match.
        """
        # valid item selection?
        selection = self.scheduleSelection
        if not selection.IsOk():
            return
        # can item be moved down?
        # NOTE(review): assert is stripped under python -O -- presumably the
        # toolbar state guarantees a next sibling; confirm
        next = self.schedList.GetNextSibling(selection)
        assert next.IsOk(), "Next item is not valid"
        self.SaveStateToUndoStack()
        baseIdx = self.schedList.GetItemIndex(selection)
        subTree = self.schedList.GetSubTree(selection)
        self.schedList.InsertSubTree(next, subTree)
        self.schedList.DeleteItem(selection)
        # need to reflect these changes in self._data
        groupSel = self.GetGroupListIndex(self.groupSelection)
        groupScheds = self._data[groupSel]["schedules"]
        # depth (component position) of the index digit being renumbered
        baseIdxSplitLen = len(baseIdx.split(",")) - 1
        nextBaseIdx = baseIdx.split(",")
        nextBaseIdx[-1] = str(int(nextBaseIdx[-1]) + 1)
        nextBaseIdx = ",".join(nextBaseIdx)
        idxDecr = []
        idxIncr = []
        # renumber: the moved subtree goes +1, the next sibling's subtree -1
        for n, (idx, idxData) in enumerate(groupScheds):
            if idx.startswith(baseIdx):
                idxSplit = idx.split(",")
                idxSplit[baseIdxSplitLen] = str(int(idxSplit[baseIdxSplitLen]) + 1)
                idx = ",".join(idxSplit)
                groupScheds[n] = (idx, idxData)
                idxIncr.append(n)
            elif idx.startswith(nextBaseIdx):
                idxSplit = idx.split(",")
                idxSplit[baseIdxSplitLen] = str(int(idxSplit[baseIdxSplitLen]) - 1)
                idx = ",".join(idxSplit)
                groupScheds[n] = (idx, idxData)
                idxDecr.append(n)
        # stitch the list back together in the new visual order
        newScheds = groupScheds[:idxIncr[0]]
        for x in idxDecr:
            newScheds.append(groupScheds[x])
        for x in idxIncr:
            newScheds.append(groupScheds[x])
        newScheds += groupScheds[idxDecr[-1] + 1:]
        self._data[groupSel]["schedules"] = newScheds
        self.schedList.Select(self.schedList.GetNextSibling(next))
        self.UpdateScheduleToolbar()
        self.UpdateScheduleImageList()
        self.SaveScheduleTreeToData()
        self.ClearRedoStack()
def MoveScheduleItemUp(self):
        """ move item up by moving the previous item down """
        # valid item selection?
        selection = self.scheduleSelection
        if not selection.IsOk():
            return
        baseIdx = self.schedList.GetItemIndex(selection)
        # NOTE(review): assert is stripped under python -O -- presumably the
        # toolbar state guarantees a previous sibling; confirm
        previous = self.schedList.GetPreviousSibling(selection)
        assert previous.IsOk() is True, "Previous item is not valid"
        self.SaveStateToUndoStack()
        prevSubTree = self.schedList.GetSubTree(previous)
        self.schedList.InsertSubTree(selection, prevSubTree)
        self.schedList.DeleteItem(previous)
        # need to reflect these changes in self._data
        groupSel = self.GetGroupListIndex(self.groupSelection)
        groupScheds = self._data[groupSel]["schedules"]
        # depth (component position) of the index digit being renumbered
        baseIdxSplitLen = len(baseIdx.split(",")) - 1
        prevBaseIdx = baseIdx.split(",")
        prevBaseIdx[-1] = str(int(prevBaseIdx[-1]) - 1)
        prevBaseIdx = ",".join(prevBaseIdx)
        idxDecr = []
        idxIncr = []
        # renumber: the moved subtree goes -1, the previous sibling's subtree +1
        for n, (idx, idxData) in enumerate(groupScheds):
            if idx.startswith(baseIdx):
                idxSplit = idx.split(",")
                idxSplit[baseIdxSplitLen] = str(int(idxSplit[baseIdxSplitLen]) - 1)
                idx = ",".join(idxSplit)
                groupScheds[n] = (idx, idxData)
                idxDecr.append(n)
            elif idx.startswith(prevBaseIdx):
                idxSplit = idx.split(",")
                idxSplit[baseIdxSplitLen] = str(int(idxSplit[baseIdxSplitLen]) + 1)
                idx = ",".join(idxSplit)
                groupScheds[n] = (idx, idxData)
                idxIncr.append(n)
        # print(idxDecr, idxIncr)
        # stitch the list back together in the new visual order
        newScheds = groupScheds[:idxIncr[0]]
        for x in idxDecr:
            newScheds.append(groupScheds[x])
        for x in idxIncr:
            newScheds.append(groupScheds[x])
        newScheds += groupScheds[idxDecr[-1] + 1:]
        self._data[groupSel]["schedules"] = newScheds
        self.SaveScheduleTreeToData()
        self.ClearRedoStack()
        self.UpdateScheduleImageList()
        self.UpdateScheduleToolbar()
def OnClose(self, event=None):
        """
        on application exit we prompt user to close file and
        disable the schedule manager directly
        """
        # remember window geometry before any teardown
        self._appConfig["windowSize"] = str(self.GetSize())
        self._appConfig["windowPos"] = str(self.GetPosition())
        if self.CloseFile() == wx.ID_CANCEL:
            return
        self.CancelPowerAlerts()
        self._schedManager.Stop()
        # best effort: the tray icon may already be gone
        try:
            self.taskBarIcon.RemoveTray()
        except Exception as e:
            pass
        keyboard.unhook_all()
        self.Destroy()
def OnAboutDialogClose(self, event):
        """
        clear reference to AboutDialog so a new instance
        can be opened next time
        """
        # best effort: the frame may already be tearing down
        try:
            self._aboutDialog = None
            event.Skip()
        except Exception as e:
            pass
def OnAddAction(self, event=None):
        """Prompt for the selected action's parameters and append it to the schedule."""
        if not self.scheduleSelection.IsOk():
            return
        choice = self.cboxFunctions.GetSelection()
        if choice == -1:
            return
        name = self.cboxFunctions.GetStringSelection()
        logging.info("OnAddAction event: %s" % name)
        logging.debug(choice)
        dlg = self.GetDialog(name, appendResult=True)
        if dlg.ShowModal() == wx.ID_CANCEL:
            # user cancelled adding the action
            return
        self.AppendToSelectedScheduleItem(name, dlg.GetValue())
def OnGroupChar(self, event):
        """Keyboard handler for the group list (Delete, F2, Space)."""
        keyCode = event.GetKeyCode()
        if keyCode == wx.WXK_DELETE:
            self.DeleteGroupItem()
        elif keyCode == 341:  # F2 -> rename group
            self.OnGroupItemEdit()
        elif keyCode == wx.WXK_SPACE:
            # the control would toggle even an invalid item, which breaks
            # undo-state bookkeeping: only let Space through on a valid one
            if self.groupSelection.IsOk():
                event.Skip()
        else:
            event.Skip()
def OnGroupContextMenu(self, event):
        """Build and show the right-click menu for the group list."""
        menu = wx.Menu()
        subMenu = wx.Menu()
        pastes = []
        # only whole schedules/groups (toplevel clipboard data) may be pasted here
        if self._clipboard and self._clipboard["toplevel"] is True:
            pastes = ["Paste As New Group"]
            if self.groupSelection.IsOk():
                pastes.extend(["Paste Before", "Paste After", "Paste Into Group"])
        for label in pastes:
            item = subMenu.Append(wx.ID_ANY, label)
        for label in ["Edit", "Add Group", "", "Up", "Down", "Toggle", "", "Delete"]:
            if not label:
                menu.AppendSeparator()
                continue
            # Cut/Copy/Paste entries are wedged in just before "Add Group"
            if label == "Add Group":
                if self.groupSelection.IsOk():
                    item = menu.Append(wx.ID_ANY, "Cut")
                    item = menu.Append(wx.ID_ANY, "Copy")
                if self._clipboard and pastes != []:
                    item = menu.AppendSubMenu(subMenu, "Paste")
            item = menu.Append(wx.ID_ANY, label)
            # mirror the enabled state of the matching toolbar button
            if not self.groupBtns[label].IsEnabled():
                item.Enable(False)
            continue
        menu.Bind(wx.EVT_MENU, self.OnGroupToolBar)
        self.PopupMenu(menu)
def OnGroupItemChecked(self, event):
        """Persist the toggled check state of the selected group (undoable)."""
        self.SaveStateToUndoStack()
        self.ClearRedoStack()
        groupSel = self.GetGroupListIndex(self.groupSelection)
        self._data[groupSel]["checked"] = self.groupList.GetCheckedState(self.groupSelection)
        self.UpdateGroupImageList()
def OnGroupItemEdit(self, event=None):
        """Prompt for a new, unique, non-empty alphanumeric name for the selected group (undoable)."""
        selection = self.groupSelection
        if not selection.IsOk():
            return
        groupName = self.groupList.GetItemText(selection, 0)
        m = "Group Name:"
        # find unique group name
        # NOTE(review): uid is computed here but never used afterwards --
        # looks like a leftover from the Add Group flow; confirm before removing
        i = 1
        b = "group_"
        uid = b + str(i)
        groupNames = [s for s in self.GetGroupNames() if not s == groupName]
        while uid in groupNames:
            i += 1
            uid = b + str(i)
        # re-prompt until the user cancels or enters a valid, unused name
        while True:
            dlg = wx.TextEntryDialog(self, message=m, caption="Add Group", value=groupName)
            ret = dlg.ShowModal()
            if ret == wx.ID_CANCEL:
                return
            elif dlg.GetValue() in groupNames:
                m = "Group Name: ('{0}' already exists)".format(dlg.GetValue())
                continue
            elif dlg.GetValue() == "":
                m = "Group Name: (Name cannot be empty)"
                continue
            elif not dlg.GetValue().replace("_", "").isalnum():
                m = "Group Name: (Name can only contain 0-9, A-Z. Underscores allowed)"
                continue
            self.SaveStateToUndoStack()
            newName = dlg.GetValue()
            self.groupList.SetItemText(selection, newName)
            self.ClearRedoStack()
            return
def OnGroupItemKeyDown(self, event):
        """Toggle the selected group's check-box when Space is pressed."""
        if event.GetKeyCode() == wx.WXK_SPACE:
            self.groupList.CheckItem(self.groupSelection)
def OnGroupItemSelectionChanged(self, event=None):
        """ update group buttons and schedule list """
        self._currentTreeFocus = "group"
        if self._appConfig["groupSelectionSwitchTab"] is True:
            self.notebook.SetSelection(0)
        self.UpdateGroupToolbar()
        self.UpdateToolbar()
        self.schedList.DeleteAllItems()
        # find the data entry for the newly selected group and load it
        for item, data in self._data.items():
            if self.groupSelection != item:
                continue
            self.enableTool(self._ids["Remove Group"], True)
            self.SetScheduleTree(data["schedules"])
            self.schedBtns["Add Schedule"].Enable()
            break
        else:
            # BUG FIX: only disable when no matching entry was found; these
            # two lines previously ran unconditionally after the loop and
            # immediately undid the Enable() above
            self.enableTool(self._ids["Remove Group"], False)
            self.schedBtns["Add Schedule"].Disable()
        self.UpdateScheduleToolbar()
        self.UpdateScheduleInfo()
def OnGroupToolBar(self, event):
        """Dispatch group toolbar / context-menu commands by their label."""
        source = event.GetEventObject()
        try:
            name = source.GetName()
        except AttributeError:
            # menu events carry no named source; resolve via the item label
            name = source.GetLabel(event.GetId())
        actions = {
            "Add Group": self.ShowAddGroupDialog,
            "Copy": self.CopySelection,
            "Cut": self.CutSelection,
            "Delete": self.DeleteGroupItem,
            "Down": self.MoveGroupItemDown,
            "Edit": self.OnGroupItemEdit,
            "Paste After": lambda: self.OnGroupListPaste(append=2),
            "Paste As New Group": lambda: self.OnGroupListPaste(append=0),
            "Paste Before": lambda: self.OnGroupListPaste(append=1),
            "Paste Into Group": lambda: self.PasteIntoGroup(append=0),
            "Toggle": self.ToggleGroupSelection,
            "Up": self.MoveGroupItemUp,
        }
        handler = actions.get(name)
        if handler is not None:
            handler()
def OnMenu(self, event):
        """Route main-menu events to their handlers by wx ID."""
        eventId = event.GetId()
        handlers = {
            wx.ID_ABOUT: self.ShowAboutDialog,
            wx.ID_CLOSE: self.CloseFile,
            wx.ID_COPY: self.CopySelection,
            wx.ID_CUT: self.CutSelection,
            wx.ID_SETUP: self.ShowCheckForUpdatesDialog,
            wx.ID_EXECUTE: self.ToggleScheduleManager,
            wx.ID_EXIT: self.Close,
            wx.ID_CDROM: self.ShowImportDialog,
            wx.ID_NEW: self.CloseFile,
            wx.ID_PREFERENCES: self.ShowSettingsDialog,
            wx.ID_OPEN: self.OpenFile,
            wx.ID_PASTE: self.OnPaste,
            wx.ID_REDO: self.DoRedo,
            wx.ID_SAVE: self.SaveData,
            wx.ID_SAVEAS: self.SaveFileAs,
            wx.ID_STOP: self.DisableScheduleManager,
            wx.ID_UNDO: self.DoUndo,
            wx.ID_HELP: self.ShowUserGuide,
        }
        if eventId in handlers:
            handlers[eventId]()
        elif eventId == self._ids["Add Group"]:
            self.ShowAddGroupDialog()
        # elif eventId == self._ids["Import"]:
        #     self.ShowImportDialog()
        elif eventId == self._ids["Remove Group"]:
            self.DeleteGroupItem()
def OnGroupListPaste(self, append=0):
        """
        This handles the hotkey or toolbar press and not
        the context menu paste.
        By default, we paste clipboard contents into a new group
        appended to end of group list.
        """
        # we only allow schedules to be pasted onto the group list
        if self._clipboard["toplevel"] is False:
            self.toolTip.message = ("Actions can only be pasted inside a schedule")
            return
        name = self._clipboard["name"]
        # if we are pasting a schedule into a group, we take
        # the schedule's name as new group name
        try:
            name = name.split(DELIMITER)[0]
        except Exception as e:
            pass # not a schedule
        self.ShowAddGroupDialog(name, "Paste Contents Into New Group", self._clipboard["schedules"], append)
        self.UpdateGroupImageList()
def OnPaste(self):
        """Route a paste shortcut/toolbar press to the focused list."""
        if self._clipboard is None:
            return
        dispatch = {"group": self.OnGroupListPaste,
                    "schedule": self.OnScheduleTreePaste}
        handler = dispatch.get(self._currentSelectionType)
        if handler is not None:
            handler()
def OnPowerAction(self, kwargs):
        # remember the pending power action; OnPowerTimer will raise the
        # user-facing alert dialogs for it
        self._powerAction = kwargs
        self.DisableScheduleManager()
def OnPowerTimer(self, event):
        """
        alert the user with notice of impending power action
        with a always on top dialog on each display/monitor
        or user specified
        the user can cancel this action by pressing cancel button,
        which subsequently destroys any other alerts
        """
        if self._powerAction and not self._powerDialog:
            primary = self._powerAction["primary"]
            displays = (wx.Display(i) for i in range(wx.Display.GetCount()))
            for display in displays:
                # honour the "primary display only" setting
                if primary is True and not display.IsPrimary():
                    continue
                rectDisplay = display.GetGeometry()
                d = dialogs.power.PowerAlertDialog()
                d.SetContainingRect(rectDisplay)
                d.SetValue(self._powerAction)
                # presumably the dialog shows itself during init/SetValue,
                # since IsShown() is polled below -- TODO confirm
                self._powerDialog.append(d)
            self._powerAction = None
        cancelAction = None
        # any dialog no longer shown means the user pressed its cancel button
        for d in self._powerDialog:
            if not d.IsShown():
                cancelAction = True
        if cancelAction:
            self.CancelPowerAlerts()
            self.AddLogMessage({"Message": "User cancelled power action"})
def OnRecentFile(self, event):
        """Open a file picked from the recent-files menu."""
        menu = event.GetEventObject()
        filePath = menu.GetLabel(event.GetId())
        if filePath == self._appConfig["currentFile"]:
            logging.info("File already opened")
            return
        self.CloseFile()
        self.LoadFile(filePath)
def OnScheduleChar(self, event):
        """Keyboard handler for the schedule tree (Delete, F2, Space)."""
        keyCode = event.GetKeyCode()
        if keyCode == wx.WXK_DELETE:
            self.DeleteScheduleItem()
        elif keyCode == 341:  # F2 -> edit item
            self.OnScheduleItemEdit()
        elif keyCode == wx.WXK_SPACE:
            # the control would toggle even an invalid item, which breaks
            # undo-state bookkeeping: only let Space through on a valid one
            if self.scheduleSelection.IsOk():
                event.Skip()
        else:
            event.Skip()
def OnScheduleContextMenu(self, event):
        """Build and show the right-click menu for the schedule tree."""
        menu = wx.Menu()
        self._currentSelectionType = "schedule"
        subMenu = wx.Menu()
        pastes = []
        # which paste variants make sense depends on whether the clipboard
        # holds a whole schedule (toplevel) and where the selection sits
        if self._clipboard and self.scheduleSelection.IsOk():
            if self._clipboard["toplevel"] is False and not self.schedList.IsTopLevel(self.scheduleSelection):
                if self._clipboard["origin"] == "schedule":
                    pastes.extend(["Paste Before", "Paste After", "Paste Into"])
                pastes.append("Paste Append")
            elif self._clipboard["toplevel"] is False and self.schedList.IsTopLevel(self.scheduleSelection):
                pastes.append("Paste Into")
            elif self._clipboard["toplevel"] is True and self.schedList.IsTopLevel(self.scheduleSelection):
                pastes.extend(["Paste Before", "Paste After"])
                if self._clipboard["origin"] == "schedule":
                    pastes.append("Paste Into")
                    pastes.append("Paste Append")
            elif self._clipboard["toplevel"] is True and not self.schedList.IsTopLevel(self.scheduleSelection):
                pastes.append("Paste Append")
        elif self.scheduleSelection.IsOk():
            pastes.append("Paste Append")
        for label in pastes:
            item = subMenu.Append(wx.ID_ANY, label)
        # the "Add Action" submenu lists all available action dialogs
        subMenuFunctions = wx.Menu()
        if self.cboxFunctions.IsEnabled():
            for label in FUNCTIONS:
                item = subMenuFunctions.Append(wx.ID_ANY, label)
        for label in ["Edit", "Add Schedule", "", "Up", "Down",
                      "Toggle", "", "Delete"]:
            if not label:
                menu.AppendSeparator()
                continue
            if label == "Add Schedule":
                item = menu.AppendSubMenu(subMenuFunctions, "Add Action")
                if not self.cboxFunctions.IsEnabled():
                    item.Enable(False)
            item = menu.Append(wx.ID_ANY, label)
            # Cut/Copy/Paste entries are wedged in just after "Add Schedule"
            if label == "Add Schedule":
                if self.scheduleSelection.IsOk():
                    menu.AppendSeparator()
                    item = menu.Append(wx.ID_ANY, "Cut")
                    item = menu.Append(wx.ID_ANY, "Copy")
                if self._clipboard and pastes != []:
                    item = menu.AppendSubMenu(subMenu, "Paste")
            # mirror the enabled state of the matching toolbar button
            if not self.schedBtns[label].IsEnabled():
                item.Enable(False)
            continue
        menu.Bind(wx.EVT_MENU, self.OnScheduleToolBar)
        self.PopupMenu(menu)
def OnScheduleItemEdit(self, event=None):
        """Handler for editing schedule item"""
        schedSel = self.scheduleSelection
        if not schedSel.IsOk():
            return
        # item text is "<name><DELIMITER><params-tuple-repr>"
        itemText = self.schedList.GetItemText(schedSel, 0)
        name, params = itemText.split(DELIMITER)
        params = make_tuple(params)
        params = {x: y for x, y in params}
        params["name"] = name
        # is item top level? i.e. a schedule
        if self.schedList.GetItemParent(schedSel) == self.schedList.GetRootItem():
            # blacklist all other schedule names so renames stay unique
            schedNames = [s for s in self.GetScheduleNames() if not s == name]
            dlg = dialogs.schedule.AddSchedule(self, blacklist=schedNames)
            dlg.SetScheduleName(name)
            dlg.SetValue(params)
            if dlg.ShowModal() != wx.ID_OK:
                return
            self.SaveStateToUndoStack()
            newName, value = dlg.GetValue()
            value = newName + DELIMITER + value
            self.schedList.SetItemText(schedSel, 0, value)
            self.SaveScheduleTreeToData()
            self.ClearRedoStack()
        else:
            # a nested action item: reuse the matching action dialog
            dlg = self.GetDialog(name)
            dlg.SetValue(params)
            res = dlg.ShowModal()
            if res != wx.ID_OK:
                return
            self.SaveStateToUndoStack()
            value = dlg.GetValue()
            value = name + DELIMITER + value
            self.schedList.SetItemText(schedSel, 0, value)
            self.SaveScheduleTreeToData()
            self.ClearRedoStack()
        self.UpdateSelectedItemInData()
        self.UpdateScheduleInfo()
        self.schedList.SetFocus()
def OnScheduleManagerContextMenu(self, event):
        """Show the context menu for the schedule-manager log list."""
        menu = wx.Menu()
        subMenu = wx.Menu()
        if self.cboxFunctions.IsEnabled():
            for label in FUNCTIONS:
                subMenu.Append(wx.ID_ANY, label)
        # the only command offered on the log itself is clearing it
        menu.Append(wx.ID_ANY, "Clear")
        menu.Bind(wx.EVT_MENU, self.OnScheduleManagerToolbar)
        self.PopupMenu(menu)
def OnScheduleManagerToolbar(self, event):
        """Handle schedule-manager toolbar and context-menu commands."""
        source = event.GetEventObject()
        try:
            name = source.GetName()
        except AttributeError:
            # BUG FIX: the original caught `Exception as e`, rebinding the
            # event-source variable to the exception and then calling
            # GetLabel() on the exception object; resolve menu events via
            # the item label instead (same pattern as OnScheduleToolBar)
            name = source.GetLabel(event.GetId())
        if name == "Clear":
            self.schedLog.DeleteAllItems()
def OnScheduleToolBar(self, event):
        """Dispatch schedule toolbar / context-menu commands by their label."""
        source = event.GetEventObject()
        try:
            name = source.GetName()
        except AttributeError:
            # menu events carry no named source; resolve via the item label
            name = source.GetLabel(event.GetId())
        actions = {
            "Add Action": self.OnAddAction,
            "Add Schedule": self.ShowAddScheduleDialog,
            "Copy": self.CopySelection,
            "Cut": self.CutSelection,
            "Delete": self.DeleteScheduleItem,
            "Down": self.MoveScheduleItemDown,
            "Edit": self.OnScheduleItemEdit,
            "Paste Before": lambda: self.PasteIntoGroup(append=1),
            "Paste After": lambda: self.PasteIntoGroup(append=2),
            "Paste Append": lambda: self.PasteIntoGroup(append=0),
            "Paste Into": lambda: self.PasteIntoGroup(append=3),
            "Toggle": self.ToggleScheduleSelection,
            "Up": self.MoveScheduleItemUp,
        }
        handler = actions.get(name)
        if handler is not None:
            handler()
def OnScheduleTreeActivated(self, event):
        # double-click / Enter on a schedule item opens the edit dialog
        self.OnScheduleItemEdit(None)
def OnScheduleTreeSelectionChanged(self, event=None):
        """ update the schedule item information """
        # NOTE(review): the group counterpart sets _currentTreeFocus while
        # this sets _currentSelectionType -- confirm that is intended
        self._currentSelectionType = "schedule"
        self.UpdateScheduleInfo()
        self.UpdateScheduleToolbar()
        self.UpdateToolbar()
def OnScheduleTreeItemChecked(self, event):
        """Persist a schedule item's toggled check state into self._data (undoable)."""
        self.schedList.Select(self.scheduleSelection)
        self.SaveStateToUndoStack()
        self.ClearRedoStack()
        groupSel = self.GetGroupListIndex(self.groupSelection)
        idx = self.schedList.GetItemIndex(self.scheduleSelection)
        # find the matching (index, data) entry and update its checked flag
        for n, (j, k) in enumerate(self._data[groupSel]["schedules"]):
            if not j == idx:
                continue
            self._data[groupSel]["schedules"][n][1]["checked"] = self.schedList.GetCheckedState(self.scheduleSelection)
            break
def OnScheduleTreePaste(self, append=0):
        """ user pastes on the schedule list """
        # action items need an enclosing schedule to be pasted into
        if self._clipboard["toplevel"] is False:
            if not self.scheduleSelection.IsOk():
                self.toolTip.message = "Cannot paste action item outside of a schedule"
                return
        if not self.scheduleSelection.IsOk():
            self.AppendSchedules()
        # ensure that a non-schedule item is inserted in schedule
        # rather than a sibling of a schedule
        elif self.schedList.IsTopLevel(self.scheduleSelection):
            if self._clipboard["toplevel"] is False:
                self.PasteIntoGroup(3)
            else:
                self.PasteIntoGroup(0)
        else:
            self.PasteIntoGroup(2)
        # clear the clipboard
        # if self._clipboard["type"] == "cut":
        # self._clipboard = None
        # self.UpdateToolbar()
        self.UpdateScheduleImageList()
def OnSize(self, event):
        # defer until after the resize settles so the toolbar scales correctly
        wx.CallAfter(self.UpdateToolbarSize)
        event.Skip()
def OnToolBar(self, event):
        """Dispatch main toolbar presses by the tool's label."""
        toolbar = event.GetEventObject()
        label = toolbar.GetLabel(event.GetId())
        actions = {
            "Add Group": self.ShowAddGroupDialog,
            "Close": self.CloseFile,
            "Cut": self.CutSelection,
            "Disable Schedule Manager": self.DisableScheduleManager,
            "Enable Schedule Manager": self.EnableScheduleManager,
            "Import": self.ShowImportDialog,
            "New": self.CloseFile,
            "Open": self.OpenFile,
            "Remove Group": self.DeleteGroupItem,
            "Redo": self.DoRedo,
            "Save": self.SaveData,
            "Save As...": self.SaveFileAs,
            "Settings": self.ShowSettingsDialog,
            "Undo": self.DoUndo,
        }
        handler = actions.get(label)
        if handler is not None:
            handler()
def OpenFile(self):
        """Offer to save the current file, then prompt for and load a schedule file."""
        # ask if user wants to save first
        dlg = wx.MessageDialog(self,
                               message="Save file before closing?",
                               caption="Close File",
                               style=wx.YES_NO|wx.CANCEL|wx.CANCEL_DEFAULT)
        ret = dlg.ShowModal()
        if ret == wx.ID_CANCEL:
            return
        if ret == wx.ID_YES:
            self.SaveData()
        # proceed by opening file
        wildcard = "JSON files (*.json)|*.json;"
        dlg = wx.FileDialog(self,
                            defaultDir="",
                            message="Open Schedule File",
                            wildcard=wildcard,
                            style=wx.FD_DEFAULT_STYLE|wx.FD_FILE_MUST_EXIST)
        if dlg.ShowModal() == wx.ID_CANCEL:
            return
        self.ClearUI()
        path = dlg.GetPath()
        # (removed an unused `_, file = os.path.split(path)` local)
        self.LoadFile(path)
def PasteIntoGroup(self, append=0):
    """Paste the clipboard schedules into the currently selected group.

    append: 0 = append after the last sibling, 1 = insert before the
    selection, 2 = insert after the selection, 3 = paste as children
    of the selection.

    Bug fix: ``newItem`` was only assigned on some branches, so the
    ``logging.debug(newItem)`` call at the end raised NameError on the
    empty-group and ``append == 3`` paths; it is now pre-initialized.
    """
    if self._clipboard is None:
        return
    self.SaveStateToUndoStack()
    self.ClearRedoStack()
    newItem = None  # not every branch below creates a single new tree item
    clip = self._clipboard
    schedules = clip["schedules"]
    index = self.GetGroupListIndex(self.groupSelection)
    # no items in this group: install the pasted tree wholesale
    if not self.schedList.GetFirstItem().IsOk():
        self.SetScheduleTree(schedules)
        self._data[index]["schedules"] = schedules
    # append a top-level schedule
    elif append == 0 and clip["toplevel"] is True:
        if self.scheduleSelection.IsOk():
            toplevel = self.schedList.GetTopLevelParent(self.scheduleSelection)
            previous = self.schedList.GetLastSibling(toplevel)
        else:
            previous = self.schedList.GetLastChild(self.schedList.GetRootItem())
        schedules = self.GetUniqueSchedules(schedules)
        newItem = self.schedList.InsertSubTree(previous, schedules)
    # insert schedule before the selection's top-level parent
    elif append == 1 and clip["toplevel"] is True:
        toplevel = self.schedList.GetTopLevelParent(self.scheduleSelection)
        previous = self.schedList.GetPreviousSibling(toplevel)
        schedules = self.GetUniqueSchedules(schedules)
        newItem = self.schedList.InsertSubTree(previous, schedules)
    # insert schedule after the selection's top-level parent
    elif append == 2 and clip["toplevel"] is True:
        toplevel = self.schedList.GetTopLevelParent(self.scheduleSelection)
        schedules = self.GetUniqueSchedules(schedules)
        newItem = self.schedList.InsertSubTree(toplevel, schedules)
    # append schedule contents inside another item
    elif append == 3 and clip["toplevel"] is True:
        toplevel = self.schedList.GetTopLevelParent(self.scheduleSelection)
        last = self.schedList.GetLastChild(self.scheduleSelection)
        extract = self.ExtractContentsFromSchedules(schedules)
        for n in sorted(extract.keys()):
            e = extract[n]
            self.schedList.InsertSubTree(last, e)
    # append a non-schedule item after the last sibling
    elif append == 0 and clip["toplevel"] is False:
        previous = self.schedList.GetLastSibling(self.scheduleSelection)
        newItem = self.schedList.InsertSubTree(previous, schedules)
    # insert before the selection
    elif append == 1 and clip["toplevel"] is False:
        previous = self.schedList.GetPreviousSibling(self.scheduleSelection)
        newItem = self.schedList.InsertSubTree(previous, schedules)
    # insert after the selection
    elif append == 2 and clip["toplevel"] is False:
        newItem = self.schedList.InsertSubTree(self.scheduleSelection, schedules)
    # paste as children of the selection
    elif append == 3 and clip["toplevel"] is False:
        child = self.schedList.GetFirstChild(self.scheduleSelection)
        if not child.IsOk():
            self.schedList.InsertSubTree(child, schedules)
        else:
            # NOTE(review): GetLastChild of the *first* child looks odd here;
            # presumably intended to find the insertion point after existing
            # children -- confirm against InsertSubTree's semantics
            last = self.schedList.GetLastChild(child)
            self.schedList.InsertSubTree(last, schedules)
    logging.debug(newItem)
    self._data[index]["schedules"] = self.GetScheduleTree()
    self.UpdateScheduleImageList()
def PrependSubTree(self, previous, data):
    """Insert sub tree before item.

    *data* maps comma-separated index paths (e.g. "0,2,1") to node dicts
    holding "data" (column texts), "checked", "selected" and "expanded".
    Keys are processed in sorted order so a parent entry is always created
    before its children appear.
    NOTE(review): only the key "0" is treated as parentless; presumably the
    sub tree always has a single root -- confirm with callers.
    """
    items = {}
    expanded_items = []
    tree = self.schedList
    for key in sorted(data.keys()):
        if key == "0":
            parent = None
        else:
            # parent path = this key without its last path component
            parent = key.split(",")[:-1]
            parent = ",".join(parent)
            parent = items[parent]
        value = data[key]["data"]
        if not parent:
            # root of the sub tree goes in front of *previous*
            # parent = tree.GetItemParent(previous)
            # parenti
            item = tree.PrependItem(previous, value["0"])
        else:
            item = tree.AppendItem(parent, value["0"])
        # tree.SetItemText(item, 1, value["1"])
        # tree.SetItemText(item, 2, value["2"])
        checked = data[key]["checked"]
        if checked == 1:
            tree.CheckItem(item)
        selected = data[key]["selected"]
        if selected is True:
            tree.Select(item)
        expanded = data[key]["expanded"]
        if expanded is True:
            expanded_items.append(item)
        items[key] = item
    # expand only after all items exist, so children are attached first
    for item in expanded_items:
        tree.Expand(item)
def RestoreState(self, state):
    """Rebuild the whole UI from a saved undo/redo *state* snapshot."""
    # print("Restore State", state)
    self._data = {}
    self.groupList.DeleteAllItems()
    self.schedList.DeleteAllItems()
    self.SetGroupTree(state["data"])
    # reselect the group first so its schedule tree is repopulated,
    # then restore the schedule selection inside it
    self.groupList.SelectItemByOrder(state["groupSel"])
    self.OnGroupItemSelectionChanged()
    self.schedList.SelectItemByOrder(state["schedSel"])
    self.UpdateGroupToolbar()
    self.UpdateScheduleToolbar()
    self.UpdateToolbar()
    self._currentSelectionType = state["currentSelectionType"]
def SaveData(self):
    """Save to the current file, or fall back to a save-as style dialog."""
    jsonData = self.GetDataForJSON()
    current = self._appConfig["currentFile"]
    if current is not False:
        # a file is already associated: overwrite it directly
        self.SaveDataToJSON(current, jsonData)
        self.commandState = 0
        self.UpdateTitlebar()
        return
    dlg = wx.FileDialog(
        self,
        defaultDir="",
        message="Save File",
        wildcard="JSON files (*.json)|*.json",
        style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT | wx.FD_CHANGE_DIR)
    if dlg.ShowModal() == wx.ID_CANCEL:
        return
    chosen = dlg.GetPath()
    self._appConfig["currentFile"] = chosen
    # persist the new current-file setting, then the data itself
    self.SaveDataToJSON(self.configPath, self._appConfig)
    self.SaveDataToJSON(chosen, jsonData)
    self.commandState = 0
    self.UpdateRecentFiles(chosen)
    self.UpdateTitlebar()
def SaveDataToJSON(self, filePath, data):
    """Serialize *data* to *filePath* as indented JSON with sorted keys."""
    with open(filePath, "w") as handle:
        json.dump(data, handle, sort_keys=True, indent=2)
def SaveFileAs(self):
    """Ask for a new path, then persist the config and the data there."""
    current = self._appConfig["currentFile"]
    if current:
        default_dir, default_name = os.path.split(current)
    else:
        default_dir, default_name = "", ""
    dlg = wx.FileDialog(
        self,
        defaultDir=default_dir,
        defaultFile=default_name,
        message="Save Schedule File As...",
        wildcard="JSON files (*.json)|*.json;",
        style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
    if dlg.ShowModal() == wx.ID_CANCEL:
        return
    chosen = dlg.GetPath()
    jsonData = self.GetDataForJSON()
    self._appConfig["currentFile"] = chosen
    # persist the new current-file setting, then the data itself
    self.SaveDataToJSON(self.configPath, self._appConfig)
    self.SaveDataToJSON(chosen, jsonData)
    self.commandState = 0
    self.UpdateRecentFiles(chosen)
def SaveScheduleTreeToData(self):
    """Cache the current schedule tree on the selected group's data entry."""
    tree = self.GetScheduleTree()
    # only store when the selection actually has a data entry
    if self.groupSelection in self._data:
        self._data[self.groupSelection]["schedules"] = tree
def SaveStateToUndoStack(self):
    """Push the current state onto the undo stack (bounded by maxUndoCount)."""
    logging.info("Save State To Undo Stack")
    limit = self._appConfig["maxUndoCount"]
    if limit != 0:
        # limit == 0 disables undo entirely; otherwise keep at most *limit*
        self._undoStack.append(self.GetCommandState())
        if len(self._undoStack) > limit:
            del self._undoStack[0]
    self.commandState += 1
    self.UpdateToolbar()
    self.UpdateTitlebar()
def SetGroupTree(self, data):
    """Populate the group list from *data*, skipping the __version__ key."""
    numeric_keys = sorted(int(k) for k in data.keys() if k != "__version__")
    for idx in numeric_keys:
        entry = data[str(idx)]
        item = self.groupList.AppendItemToRoot(entry["columns"]["0"])
        self.groupList.CheckItem(item, entry["checked"])
        # mirror checked state and schedules into the app data cache
        self._data[item] = {"checked": entry["checked"],
                            "schedules": entry["schedules"]}
    self.UpdateGroupImageList()
    self.groupList.UnselectAll()
def SetRecentFiles(self):
    """ called once on start-up to insert recent file menu items """
    # honour the preference to not keep a recent-files history
    if self._appConfig["keepFileList"] is False:
        return
    for filePath in self._appConfig["fileList"]:
        item = wx.MenuItem(id=wx.ID_ANY, text=filePath)
        self._fileListMenuItems[filePath] = item
        self.Bind(wx.EVT_MENU, self.OnRecentFile, item)
        item.SetHelp("Open File: {0}".format(filePath))
        # insert above the trailing menu entries; the offset shrinks as
        # recent-file items accumulate in self._fileList
        # NOTE(review): assumes exactly one fixed entry below the recent
        # files in the File menu -- confirm against the menu construction
        self.menuFile.Insert(self.menuFile.GetMenuItemCount() - len(self._fileList) - 1, item)
        self._fileList.append(filePath)
def SetScheduleTree(self, data):
    """Replace the schedule tree contents with *data* and refresh its icons."""
    self.schedList.SetTree(data)
    self.UpdateScheduleImageList()
def SetStatusBar(self, event=None):
    """Show the selected schedule item's text in the frame status bar."""
    item_text = self.schedList.GetItemText(self.scheduleSelection)
    self.GetTopLevelParent().SetStatusText(item_text)
    if event:
        event.Skip()
def SetupAcceleratorTable(self):
    """Install the frame-wide keyboard shortcuts.

    Entries map (modifier, keycode) pairs to standard wx command ids that
    the menu/toolbar handlers respond to.
    """
    self.accelTable = wx.AcceleratorTable([
        (wx.ACCEL_CTRL, ord('N'), wx.ID_NEW),
        (wx.ACCEL_CTRL, ord('O'), wx.ID_OPEN),
        (wx.ACCEL_CTRL, ord('S'), wx.ID_SAVE),
        (wx.ACCEL_CTRL|wx.ACCEL_SHIFT, ord('S'), wx.ID_SAVEAS),
        (wx.ACCEL_CTRL, ord('W'), wx.ID_CLOSE),
        # (wx.ACCEL_CTRL, ord('I'), wx.ID_CDROM),
        (wx.ACCEL_ALT, ord('P'), wx.ID_PREFERENCES),
        (wx.ACCEL_CTRL, ord('Q'), wx.ID_EXIT),
        (wx.ACCEL_CTRL, ord('Y'), wx.ID_REDO),
        (wx.ACCEL_CTRL, ord('Z'), wx.ID_UNDO),
        (wx.ACCEL_CTRL, ord('C'), wx.ID_COPY),
        (wx.ACCEL_CTRL, ord('X'), wx.ID_CUT),
        (wx.ACCEL_CTRL, ord('V'), wx.ID_PASTE),
        (wx.ACCEL_CTRL, ord('H'), wx.ID_HELP),
        # (wx.ACCEL_CTRL, ord('I'), self._ids["Import"]),
        (wx.ACCEL_CTRL, ord('G'), self._ids["group_Add Group"]),
        # (wx.ACCEL_NORMAL, wx.WXK_DELETE, self._ids["schedule_Delete"]),
        # (wx.ACCEL_NORMAL, wx.WXK_DELETE, self._ids["group_Delete"]),
        # NOTE(review): 304 is a raw keycode literal -- presumably a
        # function/special key for the About dialog; confirm which key
        (wx.ACCEL_CTRL, 304, wx.ID_ABOUT)])
    self.SetAcceleratorTable(self.accelTable)
def SetupHotkeys(self):
    """ hook global hotkeys """
    # see https://github.com/boppreh/keyboard/issues/139
    # should be fixed in future, but now, we only unhook
    # if hooks are created in the first place
    try:
        keyboard.unhook_all()
    except AttributeError:
        # no hooks were registered yet -- nothing to remove
        pass
    # re-register the (possibly changed) toggle hotkey from the config
    keyboard.add_hotkey(self._appConfig["toggleSchedManHotkey"], self.ToggleScheduleManager)
def ShowAboutDialog(self):
    """Lazily create the About dialog, then show it."""
    # truthiness check (not "is None"): a destroyed wx window is falsy
    if not self._aboutDialog:
        self._aboutDialog = AboutDialog(self)
        self._aboutDialog.Bind(wx.EVT_CLOSE, self.OnAboutDialogClose)
    self._aboutDialog.Show()
def ShowAddGroupDialog(self, uid=None, caption="Add Group", schedules=None, append=0):
    """
    optional to pass schedules/custom caption. In cases, such as copy and paste
    where we use this dialog
    append option: 0 = append, 1 = before selected item, 2 = after selected item

    Bug fix: *schedules* previously defaulted to a shared mutable list
    (``schedules=[]``); because the list is stored into ``self._data`` and
    into the schedule tree, entries could leak between default-created
    groups. It now defaults to None and a fresh list is created per call
    (backward compatible for all callers).
    """
    if schedules is None:
        schedules = []
    m = "Group Name:"
    groupNames = self.GetGroupNames()
    if uid is None:
        # find a unique default name: group_1, group_2, ...
        i = 1
        b = "group_"
        uid = b + str(i)
        while uid in groupNames:
            i += 1
            uid = b + str(i)
    else:
        # caller-supplied base name: append _1, _2, ... until unique
        i = 1
        b = uid
        uid = b
        while uid in groupNames:
            uid = b + "_%d" % i
            i += 1
    while True:
        dlg = wx.TextEntryDialog(self, message=m, caption=caption, value=uid)
        ret = dlg.ShowModal()
        if ret == wx.ID_CANCEL:
            return
        elif dlg.GetValue() in groupNames:
            m = "Group Name: ('{0}' already exists)".format(dlg.GetValue())
            continue
        elif dlg.GetValue() == "":
            m = "Group Name: (Name cannot be empty)"
            continue
        elif not dlg.GetValue().replace("_", "").isalnum():
            m = "Group Name: (Name can only contain 0-9, A-Z. Underscores allowed)"
            continue
        self.SaveStateToUndoStack()
        newName = dlg.GetValue()
        if append == 0:  # append
            newItem = self.groupList.AppendItemToRoot(newName)
        elif append == 1:  # paste before
            previous = self.groupList.GetPreviousSibling(self.groupSelection)
            if previous == -1:
                newItem = self.groupList.PrependItem(self.groupListRoot, newName)
            else:
                newItem = self.groupList.InsertItem(self.groupListRoot, previous, newName)
        elif append == 2:  # paste after
            newItem = self.groupList.InsertItem(self.groupListRoot, self.groupSelection, newName)
        self.schedList.DeleteAllItems()
        self._data[newItem] = {}
        self._data[newItem]["checked"] = 0
        self._data[newItem]["schedules"] = schedules
        self.OnGroupItemSelectionChanged()
        self.SetScheduleTree(schedules)
        # bounce the selection so selection-changed events fire for the new item
        self.groupList.Select(self.groupList.GetFirstItem())
        self.groupList.Select(newItem)
        self.groupList.SetFocus()
        self.UpdateGroupToolbar()
        self.UpdateScheduleToolbar()
        self.UpdateGroupImageList()
        self.UpdateScheduleInfo()
        self.ClearRedoStack()
        return newItem
def ShowAddScheduleDialog(self):
    """Prompt for a new schedule and append it to the schedule tree."""
    existing = self.GetScheduleNames()
    # generate schedule_1, schedule_2, ... until the name is unused
    counter = 1
    candidate = "schedule_" + str(counter)
    while candidate in existing:
        counter += 1
        candidate = "schedule_" + str(counter)
    dlg = dialogs.schedule.AddSchedule(self, blacklist=existing)
    dlg.SetScheduleName(candidate)
    if dlg.ShowModal() == wx.ID_CANCEL:
        return
    self.SaveStateToUndoStack()
    newName, newValue = dlg.GetValue()
    newItem = self.schedList.AppendItemToRoot(newName + DELIMITER + newValue)
    self.schedList.Select(newItem)
    self.schedList.CheckItem(newItem)
    self.schedList.Expand(newItem)
    self.schedList.SetFocus()
    self.UpdateScheduleToolbar()
    self.UpdateScheduleImageList()
    self.SaveScheduleTreeToData()
def ShowCheckForUpdatesDialog(self):
    """Open the modal update-checker dialog for the current version."""
    logging.info("Checking For Updates...")
    # imported lazily so start-up does not pay for the updater module
    import updatechecker
    updatechecker.CheckForUpdates(self, __version__).ShowModal()
def ShowImportDialog(self):
    """Placeholder import dialog -- the feature is not implemented yet."""
    dlg = wx.MessageDialog(
        self,
        "Not yet implemented",
        caption="Import Schedule File")
    dlg.ShowModal()
def ShowSettingsDialog(self):
    """Show the settings frame, recreating it if it is missing or destroyed."""
    try:
        self._settingsDialog.Show()
    except Exception:
        # no dialog yet (or the wx window was destroyed): build a fresh one
        self._settingsDialog = SettingsFrame(self)
        self._settingsDialog.SetValue(self._appConfig)
        self._settingsDialog.Show()
    self._settingsDialog.Raise()
def ShowUserGuide(self):
    """Show the user guide frame, recreating it if missing or destroyed."""
    try:
        self._userGuideDialog.Show()
        self._userGuideDialog.Raise()
    except Exception:
        # no frame yet (or the wx window was destroyed): build a fresh one
        self._userGuideDialog = UserGuideFrame(self)
        self._userGuideDialog.CentreOnParent()
        self._userGuideDialog.Raise()
        self._userGuideDialog.Show()
def ToggleGroupSelection(self):
    """Flip the checked state of the selected group and record it in the data."""
    logging.debug("ToggleGroupSelection")
    if not self.groupSelection.IsOk():
        return
    self.SaveStateToUndoStack()
    self.ClearRedoStack()
    was_checked = self.groupList.GetCheckedState(self.groupSelection) == 1
    if was_checked:
        self.groupList.UncheckItem(self.groupSelection)
    else:
        self.groupList.CheckItem(self.groupSelection)
    self.UpdateGroupImageList()
    index = self.GetGroupListIndex(self.groupSelection)
    self._data[index]["checked"] = 0 if was_checked else 1
def ToggleScheduleManager(self):
    """Flip the schedule manager between enabled and disabled."""
    # the tool label doubles as the current state indicator
    label = self._tools["Enable Schedule Manager"].GetLabel()
    if label == "Enable Schedule Manager":
        self.EnableScheduleManager()
    else:
        self.DisableScheduleManager()
def ToggleScheduleSelection(self):
    """Flip the checked state of the selected schedule item and resync the data."""
    if not self.scheduleSelection.IsOk():
        return
    self.SaveStateToUndoStack()
    self.ClearRedoStack()
    was_checked = self.schedList.GetCheckedState(self.scheduleSelection) == 1
    if was_checked:
        self.schedList.UncheckItem(self.scheduleSelection)
    else:
        self.schedList.CheckItem(self.scheduleSelection)
    index = self.GetGroupListIndex(self.groupSelection)
    self._data[index]["schedules"] = self.GetScheduleTree()
def UpdateRecentFiles(self, filePath):
    """Move *filePath* to the top of the recent-files menu and persist the list."""
    # recent-file history may be disabled by the user
    if self._appConfig["keepFileList"] is False:
        return
    if filePath in self._fileList:
        # already listed: remove the old menu item before re-inserting at top
        self.menuFile.Delete(self._fileListMenuItems[filePath])
        del self._fileListMenuItems[filePath]
        del self._fileList[self._fileList.index(filePath)]
    self._fileListMenuItems[filePath] = item = wx.MenuItem(id=wx.ID_ANY, text=filePath)
    self.Bind(wx.EVT_MENU, self.OnRecentFile, item)
    item.SetHelp("Open File: {0}".format(filePath))
    # insert above the trailing menu entries (same offset as SetRecentFiles)
    self.menuFile.Insert(self.menuFile.GetMenuItemCount() - len(self._fileList) - 1, item)
    self._fileList.insert(0, filePath)
    # persist the reordered list in the config file
    self.UpdateSettingsDict({"fileList": self._fileList})
def UpdateGroupImageList(self):
    """Refresh every group item's icon to match its checked state."""
    item = self.groupList.GetFirstItem()
    while item.IsOk():
        is_checked = self.groupList.GetCheckedState(item) == 1
        icon = "groupchecked" if is_checked else "group"
        self.groupList.SetItemImage(item, self.imageListIndex(icon))
        item = self.groupList.GetNextItem(item)
def UpdateGroupToolbar(self):
    """Enable/disable group toolbar buttons based on the current selection."""
    selection = self.groupSelection
    have_selection = selection.IsOk()
    self._currentSelectionType = "group" if have_selection else None
    for label, btn in self.groupBtns.items():
        if label == "Add Group":
            btn.Enable()  # always available
        else:
            btn.Enable(have_selection)
    if have_selection:
        # movement buttons only make sense when there is somewhere to move
        if self.groupList.GetFirstItem() == selection:
            self.groupBtns["Up"].Disable()
        if not self.groupList.GetNextSibling(selection).IsOk():
            self.groupBtns["Down"].Disable()
    self.enableTool(wx.ID_REMOVE, have_selection)
def UpdateScheduleInfo(self):
    """Refresh the info pane with the selected schedule item's details."""
    if not self.scheduleSelection.IsOk():
        # nothing selected: clear the pane and hide the action button
        self.infoSched.SetValue("")
        self.infoSchedButton.Hide()
        self.infoPanelSizer.Layout()
        self.infoSched.Refresh()
        return
    try:
        text = self.schedList.GetItemText(self.scheduleSelection)
        # item text is "<name><DELIMITER><params>"; top-level items are schedules
        if self.schedList.IsTopLevel(self.scheduleSelection):
            name = "schedule"
            _, params = text.split(DELIMITER)
        else:
            name, params = text.split(DELIMITER)
        # params holds a python-literal sequence of (key, value) pairs
        params = make_tuple(params)
        self.infoSched.SetValue(name)
        for x, y in params:
            self.infoSched.AppendText("\n - {0} = {1}".format(x, y))
        self.infoSchedButton.Show()
        self.infoPanelSizer.Layout()
        # square bitmap sized to the smaller of the button's dimensions
        w, h = self.infoSchedButton.GetSize()
        if w > h:
            d = h
        else:
            d = w
        self.infoSchedButton.SetBitmap(self.GetBitmapFromImage(name, (d, d)))
        self.infoSchedButton.SetLabel(name)
    except Exception as e:
        # malformed item text or missing image: fall back to an empty pane
        print(e)
        self.infoSched.SetValue("")
        self.infoSchedButton.Hide()
        self.infoPanelSizer.Layout()
        self.infoSched.Refresh()
def UpdateScheduleToolbar(self):
    """Enable/disable schedule toolbar controls for the current selection."""
    selection = self.scheduleSelection
    if not selection.IsOk():
        for label, btn in self.schedBtns.items():
            if label != "Add Schedule":
                btn.Disable()
        # without a selection the user cannot attach a function
        self.cboxFunctions.Disable()
        self.btnAddFunction.Disable()
    else:
        # a selection exists: allow adding functions and editing
        self.cboxFunctions.Enable()
        self.btnAddFunction.Enable()
        for label in ("Edit", "Toggle", "Delete"):
            self.schedBtns[label].Enable()
        self.schedBtns["Down"].Enable(
            self.schedList.GetNextSibling(selection).IsOk())
        parent = self.schedList.GetItemParent(selection)
        self.schedBtns["Up"].Enable(
            self.schedList.GetFirstChild(parent) != selection)
    if self.groupSelection.IsOk():
        self.schedBtns["Add Schedule"].Enable()
def UpdateScheduleImageList(self):
    """Refresh schedule tree icons; top-level items get the schedule icon."""
    root = self.schedList.GetRootItem()
    item = self.schedList.GetFirstItem()
    while item.IsOk():
        if self.schedList.GetItemParent(item) == root:
            self.schedList.SetItemImage(item, self.imageListIndex("schedule"))
        else:
            action = self.schedList.GetItemText(item).split(DELIMITER)[0]
            try:
                self.schedList.SetItemImage(item, self.imageListIndex(action.lower()))
            except Exception:
                # best effort: actions with no matching image keep no icon
                pass
        item = self.schedList.GetNextItem(item)
def UpdateSelectedItemInData(self):
    """Sync the selected schedule item's text back into the app data."""
    selected = self.scheduleSelection
    if not selected.IsOk():
        return
    target_index = self.schedList.GetItemIndex(selected)
    group = self.groupSelection
    # locate the schedule entry whose stored index matches the tree item
    for position, (item_index, _entry) in enumerate(self._data[group]["schedules"]):
        if item_index == target_index:
            self._data[group]["schedules"][position][1]["columns"]["0"] = \
                self.schedList.GetItemText(selected)
            break
def UpdateSettingsDict(self, data):
    """Merge *data* into the app config, persist it, and refresh the UI."""
    self._appConfig.update(data)
    self.SaveDataToJSON(self.configPath, self._appConfig)
    if self._appConfig["keepFileList"] is False:
        self.ClearRecentFiles()
    limit = self._appConfig["maxUndoCount"]
    if limit == 0:
        # undo disabled: drop all history
        self._undoStack = []
        self._redoStack = []
    elif len(self._undoStack) > limit:
        # keep only the most recent *limit* entries
        self._undoStack = self._undoStack[-limit:]
    self.SetupHotkeys()
    self.UpdateTrayIcon()
    self.UpdateToolbarSize()
    self.UpdateToolbar()
    self.UpdateTitlebar()
def UpdateTitlebar(self):
    """Show the current file name in the title bar, with '*' for unsaved changes.

    Fix: the previous code wrapped ``os.path.split`` in a broad
    ``except Exception`` and relied on the TypeError raised when
    ``currentFile`` is the False sentinel; the sentinel is now checked
    explicitly (same convention as SaveData).
    """
    unsaved = "*" if self.commandState != 0 else ""
    current = self._appConfig["currentFile"]
    if current is not False:
        _, name = os.path.split(current)
        self.SetTitle("{0}{1} - {2}".format(unsaved, name, __title__))
    else:
        # no file associated yet: show the placeholder name
        self.SetTitle("{0}New File.json - {1}".format(unsaved, __title__))
def UpdateToolbar(self):
    """Set toolbar tools state to current selection, clipboard and history."""
    if self._currentSelectionType:
        self.enableTool(wx.ID_CUT, True)
        self.enableTool(wx.ID_COPY, True)
    elif not self.groupSelection.IsOk():
        self.enableTool(wx.ID_CUT, False)
        self.enableTool(wx.ID_COPY, False)
    # paste/undo/redo follow the emptiness of their respective stores
    self.enableTool(wx.ID_PASTE, bool(self._clipboard))
    self.enableTool(wx.ID_UNDO, bool(self._undoStack))
    self.enableTool(wx.ID_REDO, bool(self._redoStack))
def UpdateToolbarSize(self):
    """Dynamically resize toolbar icons based on frame size"""
    if not self.toolbar:
        return
    # supported square icon sizes, ascending
    sizeChoices = [16, 32, 48, 64, 128, 256]
    toolSize = int(self._appConfig["toolbarSize"]), int(self._appConfig["toolbarSize"])
    width = self.GetSize()[0]
    toolCount = self.toolbar.GetToolsCount()
    if toolSize[0] * toolCount > width:
        # configured size does not fit: step down through smaller sizes
        idx = sizeChoices.index(toolSize[0])
        for x in reversed(sizeChoices[:idx]):
            # try to set the largest toolbar icon size possible
            if x * toolCount <= width:
                toolSize = x, x
                self._overrideToolSize = x
                break
    else:
        # configured size fits: no override in effect
        self._overrideToolSize = None
    if self.toolbar.GetToolBitmapSize() == toolSize:
        return
    self.toolbar.SetToolBitmapSize(toolSize)
    # swap in pre-rendered bitmaps of the chosen size for every labelled tool
    for x in range(toolCount):
        tool = self.toolbar.GetToolByPos(x)
        label = tool.GetLabel()
        if not label:
            continue
        tool.SetNormalBitmap(self._toolbarBitmaps[toolSize[0]][label])
    self.toolbar.Realize()
def UpdateTrayIcon(self):
    """Create or remove the system tray icon to match the config setting."""
    if self._appConfig["showTrayIcon"] is True:
        if not self.taskBarIcon:
            self.CreateTrayIcon()
    else:
        # setting turned off: tear down any existing tray icon
        if self.taskBarIcon:
            self.taskBarIcon.RemoveTray()
            self.taskBarIcon = None
def process_sys_args():
    """Parse key=value pairs from the command line into a lowercase dict."""
    parsed = {}
    for token in sys.argv[1:]:
        if "=" not in token:
            continue  # ignore bare flags without a value
        key, value = token.split("=")[:2]
        parsed[key.lower()] = value.lower()
    return parsed
def set_logging_level():
    """Apply the --verbose command line switch to the logging configuration.

    NOTE(review): relies on the module globals LOG_LEVELS and SYS_ARGS,
    which are defined elsewhere in this file.
    """
    # Logging Configuration
    try:
        v = LOG_LEVELS[SYS_ARGS["--verbose"]]
        logging.basicConfig(level=v)
    except KeyError:
        # no (or unknown) verbosity requested: keep the logging defaults
        pass
def main():
    """Application entry point: parse args, configure logging, run the GUI."""
    SYS_ARGS.update(process_sys_args())
    set_logging_level()
    app = wx.App()
    # Main() constructs the top-level frame; the app keeps it alive
    Main()
    app.MainLoop()
# run the application when executed as a script
if __name__ == '__main__':
    main()
| swprojects/Advanced-Action-Scheduler | advancedactionscheduler/advancedactionscheduler.py | Python | gpl-2.0 | 97,156 |
'''
A pseudo MSO neuron, with two dendrites and one axon (fake geometry).

Brian2 cpp_standalone example: the device is built manually after all
run() calls so that multiple runs share a single code generation/build.
'''
import os
import matplotlib
matplotlib.use('Agg')  # headless backend: figures are saved, never shown
from brian2 import *
name = os.path.basename(__file__).replace('.py', '')
codefolder = os.path.join('code', name)
print('runing example {}'.format(name))
print('compiling model in {}'.format(codefolder))
# build_on_run=False defers compilation until device.build() below,
# which is required when the script calls run() more than once
set_device('cpp_standalone', build_on_run=False) # multiple runs require this change (see below)
# Morphology: soma with one axon and two dendrites (left L, right R)
morpho = Soma(30*um)
morpho.axon = Cylinder(diameter=1*um, length=300*um, n=100)
morpho.L = Cylinder(diameter=1*um, length=100*um, n=50)
morpho.R = Cylinder(diameter=1*um, length=150*um, n=50)
# Passive channels
gL = 1e-4*siemens/cm**2
EL = -70*mV
eqs='''
Im = gL * (EL - v) : amp/meter**2
I : amp (point current)
'''
neuron = SpatialNeuron(morphology=morpho, model=eqs,
                       Cm=1*uF/cm**2, Ri=100*ohm*cm, method='exponential_euler')
neuron.v = EL
neuron.I = 0*amp
# Monitors: soma compartment, the whole left dendrite, and one point on the right
mon_soma = StateMonitor(neuron, 'v', record=[0])
mon_L = StateMonitor(neuron.L, 'v', record=True)
mon_R = StateMonitor(neuron, 'v', record=morpho.R[75*um])
# protocol: 1 ms rest, 5 ms current injection, 50 ms relaxation
run(1*ms)
neuron.I[morpho.L[50*um]] = 0.2*nA  # injecting in the left dendrite
run(5*ms)
neuron.I = 0*amp
run(50*ms, report='text', profile=True)
# cf. https://brian2.readthedocs.io/en/stable/user/computation.html#multiple-run-calls
device.build( directory=codefolder, compile = True, run = True, debug=False)
print(profiling_summary())
# top panel: soma vs. two dendritic locations
subplot(211)
plot(mon_L.t/ms, mon_soma[0].v/mV, 'k')
plot(mon_L.t/ms, mon_L[morpho.L[50*um]].v/mV, 'r')
plot(mon_L.t/ms, mon_R[morpho.R[75*um]].v/mV, 'b')
ylabel('v (mV)')
# bottom panel: voltage along the left dendrite at several positions
subplot(212)
for x in linspace(0*um, 100*um, 10, endpoint=False):
    plot(mon_L.t/ms, mon_L[morpho.L[x]].v/mV)
xlabel('Time (ms)')
ylabel('v (mV)')
#show()
plotpath = os.path.join('plots', '{}.png'.format(name))
savefig(plotpath)
print('plot saved in {}'.format(plotpath))
print('the generated model in {} needs to removed manually if wanted'.format(codefolder))
| brian-team/brian2cuda | examples/compartmental/bipolar_cell_cpp.py | Python | gpl-2.0 | 1,976 |
#! /usr/bin/python
# convert fastq to fasta
# by gjr; Oct 4, 11
"""
Convert fastq to fasta (Python 2 script; uses print-statement syntax).

% python fq2fa.py <file.fastq> <file.fasta>
"""
import sys, os
import screed
def main():
    """Read the fastq named by argv[1] and write fasta records to argv[2]."""
    if len(sys.argv) != 3:
        print >> sys.stderr, 'Usage: python %s <file.fastq> <file.fasta>'\
                %(os.path.basename(sys.argv[0]))
        sys.exit(1)
    f = sys.argv[1]
    fout = sys.argv[2]
    fw = open(fout, 'wb')
    # screed parses the fastq; each record exposes name and sequence
    for n, record in enumerate(screed.open(f)):
        name = record['name']
        seq = record['sequence']
        print >> fw, '>%s\n%s' %(name, seq)
    #print (n+1), 'fasta seqs written'
    fw.close()
if __name__ == '__main__':
    main()
| jiarong/SSUsearch | scripts/fq2fa.py | Python | bsd-3-clause | 691 |
"""
Functions for parameterizing strings useful for code generation.
So far only used by `label`
"""
import numpy as np
def coord_name(dim):
    """Name of the coordinate array variable for axis *dim*."""
    return "coords_%s" % (dim,)
def coord_dim_names(ndim):
    """Comma-separated coordinate variable names for *ndim* axes."""
    return ', '.join(coord_name(axis) for axis in range(ndim))
def indent_block(string, n=1, indentation=" ", first_line=None):
    """Indent every line of *string*; the first line may use a different depth.

    Fix: the empty check used ``len(split) is 0`` -- an identity comparison
    with an int literal, which is implementation-dependent; replaced with a
    plain truthiness test.  NOTE(review): the empty-input branch returns a
    list while all other paths return a string; kept as-is for
    compatibility with existing callers.
    """
    if first_line is None:
        first_line = n
    split = string.splitlines()
    if not split:
        return []
    first = (indentation * first_line) + split[0] + '\n'
    others = ''.join((indentation * n) + s + '\n' for s in split[1:])
    return first + others
def prepend_break(string):
    """Return *string* with a leading newline."""
    return '\n' + string
def shape(dim):
    """Name of the shape variable for axis *dim*."""
    return 'shape_%s' % (dim,)
def assign(lhs, rhs):
    """Render an assignment statement string ``lhs = rhs``."""
    return '%s = %s' % (lhs, rhs)
def shape_init_strings(shp):
    """One shape-variable assignment line per axis, joined with newlines."""
    lines = [assign(shape(axis), size) for axis, size in enumerate(shp)]
    return '\n'.join(lines)
def shift(dim):
    """Name of the shift variable for axis *dim*."""
    return 'shift_%s' % (dim,)
def shift_init_strings(neigh_shape):
    """Get the initialization strings for the valid shift values (ndim - 1)."""
    # the last axis of the neighborhood shape carries no shift variable
    lines = [assign(shift(axis), size)
             for axis, size in enumerate(neigh_shape[:-1])]
    return '\n'.join(lines)
def cursors(dim):
    """Name of the cursors array variable for axis *dim*."""
    return 'cursors_%s' % (dim,)
def zeros(shp, dtype):
    """Render an ``np.zeros`` call for shape *shp* and dtype *dtype*.

    Fix: the scalar branch indexed ``shp[0]`` on a scalar, which always
    raised TypeError; a scalar shape is now formatted directly.
    """
    if np.isscalar(shp):
        return 'np.zeros({}, dtype={})'.format(shp, dtype_string(dtype))
    else:
        return 'np.zeros({}, dtype={})'.format(tuple(shp), dtype_string(dtype))


def dtype_string(typ):
    """Render a numpy dtype expression, e.g. ``np.uint8``."""
    return 'np.{}'.format(np.dtype(typ))
def cursors_init_string(shp, dtype):
    """Assignment initializing the cursors array for the last axis of *shp*."""
    last_axis = len(shp) - 1
    return assign(cursors(last_axis), zeros(shp, dtype))
def cursors_init_strings(shp, dtype):
    """Cursor initializations for every shape prefix of length 1..ndim-1."""
    lines = [cursors_init_string(shp[:prefix], dtype)
             for prefix in range(1, len(shp))]
    return '\n'.join(lines)
def ends(dim):
    """Name of the ends array variable for axis *dim*."""
    return 'ends_%s' % (dim,)
def ends_init_string(shp, dtype):
    """Assignment initializing the ends array for the last axis of *shp*."""
    last_axis = len(shp) - 1
    return assign(ends(last_axis), zeros(shp, dtype))
def ends_init_strings(shp, dtype):
    """Ends initializations for every shape prefix of length 1..ndim-1."""
    lines = [ends_init_string(shp[:prefix], dtype)
             for prefix in range(1, len(shp))]
    return '\n'.join(lines)
def _c(dim):
return 'c{}'.format(dim)
def index(var, idx):
    """Render an indexing expression ``var[idx]``."""
    return '%s[%s]' % (var, idx)
def single_coord_init_string(dim):
    """Assignment loading the first coordinate of axis *dim*."""
    return assign(_c(dim), index(coord_name(dim), 0))
def coord_init_strings(ndim):
    """First-coordinate assignments for all axes, joined with newlines."""
    lines = [assign(_c(axis), index(coord_name(axis), 0))
             for axis in range(ndim)]
    return '\n'.join(lines)
def r(dim):
    """Name of the range variable for axis *dim*.

    Fix: ``'r{dim}'.format(dim)`` passed a positional argument to a named
    field and always raised KeyError; a positional field is used instead.
    """
    return 'r{}'.format(dim)
def range_init_string(dim, size):
    """Assignment setting the half-size range variable for axis *dim*."""
    return assign('r%s' % (dim,), size // 2)
def range_init_strings(shp):
    """Range-variable assignments for every axis, joined with newlines."""
    lines = [range_init_string(axis, size) for axis, size in enumerate(shp)]
    return '\n'.join(lines)
def for_loop(element, sequence, body):
    """Build a string nesting a statement in a for loop."""
    header = 'for {} in {}:\n'.format(element, sequence)
    return header + indent_block(body, 1)
def i_(dim):
    """Name of the loop index variable for axis *dim*."""
    return 'i%s' % (dim,)
def loop_over_shape(shp, body):
    """Wrap *body* in nested for-loops, one per axis of *shp* (innermost last)."""
    nested = body
    # build from the innermost (last axis) outward
    for axis, size in zip(reversed(range(len(shp))), reversed(shp)):
        nested = for_loop(i_(axis), 'range({})'.format(size), nested)
    return nested
import unittest
import os
import sys
import shutil
import string
from optparse import OptionParser
import ConfigParser
import logging
sys.dont_write_bytecode = True
sys.path.insert(0, os.path.abspath(".."))
sys.path.insert(0, os.path.abspath(os.path.join("..", "coshsh")))
import coshsh
from coshsh.generator import Generator
from coshsh.datasource import Datasource
from coshsh.application import Application
from coshsh.util import setup_logging
class CoshshTest(unittest.TestCase):
    """Integration tests for coshsh logging setup and recipe rendering.

    NOTE: Python 2 code (print statements, the ConfigParser module name).
    """
    def print_header(self):
        # pretty console banner containing the current test id
        print "#" * 80 + "\n" + "#" + " " * 78 + "#"
        print "#" + string.center(self.id(), 78) + "#"
        print "#" + " " * 78 + "#\n" + "#" * 80 + "\n"
    def setUp(self):
        # start from clean object and log directories for every test
        shutil.rmtree("./var/objects/test1", True)
        os.makedirs("./var/objects/test1")
        shutil.rmtree("./var/log", True)
        os.makedirs("./var/log")
        self.config = ConfigParser.ConfigParser()
        self.config.read('etc/coshsh.cfg')
        self.generator = coshsh.generator.Generator()
        # first logger ("zishsh") writes var/log/zishsh.log at INFO
        setup_logging(logfile="zishsh.log", logdir="./var/log", scrnloglevel=logging.DEBUG, txtloglevel=logging.INFO)
        # default setup, as in the coshsh cookbook
        setup_logging(logdir="./var/log", scrnloglevel=logging.INFO)
    def tearDown(self):
        # object dir intentionally left for inspection after the test
        #shutil.rmtree("./var/objects/test1", True)
        print
    def test_log(self):
        # the "zishsh" logger must write INFO and WARNING (not DEBUG) to file
        logger = logging.getLogger('zishsh')
        print logger.__dict__
        print
        for h in logger.handlers:
            print h.__dict__
            print
        logger.warn("i warn you")
        logger.info("i inform you")
        logger.debug("i spam you")
        self.assert_(os.path.exists("./var/log/zishsh.log"))
        with open('./var/log/zishsh.log') as x: zishshlog = x.read()
        self.assert_("WARNING" in zishshlog)
        self.assert_("INFO" in zishshlog)
        self.assert_("DEBUG" not in zishshlog)
    def test_write(self):
        # the recipe pipeline logs through logging.getLogger('coshsh') internally
        self.print_header()
        self.generator.add_recipe(name='test4', **dict(self.config.items('recipe_TEST4')))
        self.config.set("datasource_SIMPLESAMPLE", "name", "simplesample")
        cfg = self.config.items("datasource_SIMPLESAMPLE")
        self.generator.recipes['test4'].add_datasource(**dict(cfg))
        # remove target dir / create empty
        self.generator.recipes['test4'].count_before_objects()
        self.generator.recipes['test4'].cleanup_target_dir()
        self.generator.recipes['test4'].prepare_target_dir()
        # check target
        # read the datasources
        self.generator.recipes['test4'].collect()
        self.generator.recipes['test4'].assemble()
        # for each host, application get the corresponding template files
        # get the template files and cache them in a struct owned by the recipe
        # resolve the templates and attach the result as config_files to host/app
        self.generator.recipes['test4'].render()
        self.assert_(hasattr(self.generator.recipes['test4'].objects['hosts']['test_host_0'], 'config_files'))
        self.assert_('host.cfg' in self.generator.recipes['test4'].objects['hosts']['test_host_0'].config_files['nagios'])
        # write hosts/apps to the filesystem
        self.generator.recipes['test4'].output()
        self.assert_(os.path.exists("var/objects/test1/dynamic/hosts"))
        self.assert_(os.path.exists("var/objects/test1/dynamic/hosts/test_host_0"))
        self.assert_(os.path.exists("var/objects/test1/dynamic/hosts/test_host_0/os_linux_default.cfg"))
        self.assert_(os.path.exists("var/objects/test1/dynamic/hosts/test_host_0/os_windows_default.cfg"))
        os_windows_default_cfg = open("var/objects/test1/dynamic/hosts/test_host_0/os_windows_default.cfg").read()
        self.assert_('os_windows_default_check_unittest' in os_windows_default_cfg)
        # rendering must also have produced the coshsh log mentioning the host
        self.assert_(os.path.exists("./var/log/coshsh.log"))
        with open('./var/log/coshsh.log') as x: coshshlog = x.read()
        self.assert_("test_host_0" in coshshlog)
# run the test suite when executed as a script
if __name__ == '__main__':
    unittest.main()
| lausser/coshsh | tests/test_logging.py | Python | agpl-3.0 | 4,032 |
import subprocess
class Process():
    """Methods for process management (Windows-oriented: uses CREATE_NEW_CONSOLE)."""
    @staticmethod
    def execShell( command=None ):
        """Run *command* through subprocess.call and wait for it to finish."""
        '@parameter string command Command shell.'
        subprocess.call( command )
    @staticmethod
    def launchSoftware( softwarePath, arg=None ):
        """Launch *softwarePath* detached in a new console, optionally with args.

        *arg* may be a single value or a list; a single value is wrapped.
        """
        if arg:
            if not isinstance( arg, list):
                arg = [arg]
            subprocess.Popen( [softwarePath] + arg, creationflags=subprocess.CREATE_NEW_CONSOLE )
        else:
            subprocess.Popen( softwarePath, creationflags=subprocess.CREATE_NEW_CONSOLE )
    @staticmethod
    def partial( func, *args, **keywords ):
        """Thin wrapper around functools.partial (imported lazily)."""
        import functools
        return functools.partial( func, *args, **keywords )
    def launchMayaPython( self, cmd, arg='' ):
        """Start Maya and execute ``cmd(arg)`` inside its Python interpreter.

        NOTE(review): the doubled backslash escapes below are consumed
        twice (once by Maya's MEL `python` command) -- confirm before editing.
        """
        import Forge.core
        env = Forge.core.Env()
        # the command string adds the Forge/Anvil/Hammer libs to sys.path
        # before importing Hammer and invoking cmd(arg)
        actions = '%s(%s)' %( cmd, arg )
        cmdExec = 'exec(\'import sys\\\\nsys.path.append(\\\\\'%s\\\\\')\\\\nsys.path.append(\\\\\'%s\\\\\')\\\\nsys.path.append(\\\\\'%s\\\\\')\\\\nimport Hammer\\\\n%s\')' %( env.forgeLib, env.anvilLib, env.hammerLib, actions )
        self.launchSoftware( env.maya, arg=['-command', "python \"%s\";" %( cmdExec )] )
    def launchNukePython( self, cmd, arg='' ):
        """Start Nuke with a temporary startup script that runs ``cmd(arg)``.

        The generated script deletes itself after executing.
        """
        import Forge.core
        env = Forge.core.Env()
        path = '%snukeStartupTmp.py' %( env.tmp )
        actions = '%s(%s)' %( cmd, arg )
        cmdExec = 'exec(\'import sys\\nsys.path.append(\\\'%s\\\')\\nsys.path.append(\\\'%s\\\')\\nsys.path.append(\\\'%s\\\')\\nimport Hammer\\n%s\\nimport Forge.core.System\\nForge.core.System.removeFile(\\\'%s\\\')\')' %( env.forgeLib, env.anvilLib, env.hammerLib, actions, path )
        Forge.core.System.setFile( path=path, content=cmdExec )
        self.launchSoftware( env.nuke, arg=path )
| Black-Cog/Forge | core/Process.py | Python | bsd-3-clause | 1,660 |
import datetime
import os
import tempfile
from io import StringIO
from wsgiref.util import FileWrapper
from django import forms
from django.contrib import admin
from django.contrib.admin import BooleanFieldListFilter
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import GroupAdmin, UserAdmin
from django.contrib.auth.models import Group, User
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.core.mail import EmailMessage
from django.db import models
from django.forms.models import BaseModelFormSet
from django.http import HttpResponse, StreamingHttpResponse
from django.urls import path
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from .forms import MediaActionForm
from .models import (
Actor, AdminOrderedAdminMethod, AdminOrderedCallable, AdminOrderedField,
AdminOrderedModelMethod, Album, Answer, Answer2, Article, BarAccount, Book,
Bookmark, Category, Chapter, ChapterXtra1, Child, ChildOfReferer, Choice,
City, Collector, Color, Color2, ComplexSortedPerson, CoverLetter,
CustomArticle, CyclicOne, CyclicTwo, DependentChild, DooHickey, EmptyModel,
EmptyModelHidden, EmptyModelMixin, EmptyModelVisible, ExplicitlyProvidedPK,
ExternalSubscriber, Fabric, FancyDoodad, FieldOverridePost,
FilteredManager, FooAccount, FoodDelivery, FunkyTag, Gadget, Gallery,
GenRelReference, Grommet, ImplicitlyGeneratedPK, Ingredient,
InlineReference, InlineReferer, Inquisition, Language, Link,
MainPrepopulated, ModelWithStringPrimaryKey, NotReferenced, OldSubscriber,
OtherStory, Paper, Parent, ParentWithDependentChildren, ParentWithUUIDPK,
Person, Persona, Picture, Pizza, Plot, PlotDetails, PlotProxy,
PluggableSearchPerson, Podcast, Post, PrePopulatedPost,
PrePopulatedPostLargeSlug, PrePopulatedSubPost, Promo, Question,
ReadablePizza, ReadOnlyPizza, Recipe, Recommendation, Recommender,
ReferencedByGenRel, ReferencedByInline, ReferencedByParent,
RelatedPrepopulated, RelatedWithUUIDPKModel, Report, Reservation,
Restaurant, RowLevelChangePermissionModel, Section, ShortMessage, Simple,
Sketch, State, Story, StumpJoke, Subscriber, SuperVillain, Telegram, Thing,
Topping, UnchangeableObject, UndeletableObject, UnorderedObject,
UserMessenger, UserProxy, Villain, Vodcast, Whatsit, Widget, Worker,
WorkHour,
)
def callable_year(dt_value):
    """Return ``dt_value.year``, or None when the value has no ``year`` attribute."""
    return getattr(dt_value, 'year', None)
# Sortable in the admin changelist via the underlying 'date' field.
callable_year.admin_order_field = 'date'
class ArticleInline(admin.TabularInline):
    """Tabular inline of Articles on a Section, exercising prepopulated
    fields and collapsible/wide fieldsets inside an inline."""
    model = Article
    # Article has more than one FK candidate; pin the one this inline edits.
    fk_name = 'section'
    prepopulated_fields = {
        'title': ('content',)
    }
    fieldsets = (
        ('Some fields', {
            'classes': ('collapse',),
            'fields': ('title', 'content')
        }),
        ('Some other fields', {
            'classes': ('wide',),
            'fields': ('date', 'section')
        })
    )
class ChapterInline(admin.TabularInline):
model = Chapter
class ChapterXtra1Admin(admin.ModelAdmin):
    """Exercises list_filter lookups that traverse FK relations to
    increasing depth (chap -> book -> promo)."""
    list_filter = (
        'chap',
        'chap__title',
        'chap__book',
        'chap__book__name',
        'chap__book__promo',
        'chap__book__promo__name',
        'guest_author__promo__book',
    )
class ArticleForm(forms.ModelForm):
    """Article ModelForm with one extra non-model field, referenced from
    ArticleAdmin.fieldsets."""
    extra_form_field = forms.BooleanField(required=False)

    class Meta:
        fields = '__all__'
        model = Article
class ArticleAdmin(admin.ModelAdmin):
    """Exercises list_display callables/lambdas, expression-based ordering,
    extra changelist context, and mail notifications from save/delete hooks."""
    list_display = (
        'content', 'date', callable_year, 'model_year', 'modeladmin_year',
        'model_year_reversed', 'section', lambda obj: obj.title,
        'order_by_expression',
    )
    list_editable = ('section',)
    list_filter = ('date', 'section')
    autocomplete_fields = ('section',)
    view_on_site = False
    form = ArticleForm
    fieldsets = (
        ('Some fields', {
            'classes': ('collapse',),
            'fields': ('title', 'content', 'extra_form_field'),
        }),
        ('Some other fields', {
            'classes': ('wide',),
            'fields': ('date', 'section', 'sub_section')
        })
    )

    def order_by_expression(self, obj):
        # Display accessor whose sort order is driven by an ORM expression.
        return obj.model_year

    # This ordering isn't particularly useful but shows that expressions can
    # be used for admin_order_field.
    order_by_expression.admin_order_field = models.F('date') + datetime.timedelta(days=3)

    def changelist_view(self, request):
        # Inject an extra template-context variable into the changelist.
        return super().changelist_view(request, extra_context={'extra_var': 'Hello!'})

    def modeladmin_year(self, obj):
        # Year column sortable via the underlying 'date' field.
        return obj.date.year
    modeladmin_year.admin_order_field = 'date'
    modeladmin_year.short_description = None

    def delete_model(self, request, obj):
        # Sends a notification mail so tests can assert delete_model ran.
        EmailMessage(
            'Greetings from a deleted object',
            'I hereby inform you that some user deleted me',
            'from@example.com',
            ['to@example.com']
        ).send()
        return super().delete_model(request, obj)

    def save_model(self, request, obj, form, change=True):
        # Sends a notification mail so tests can assert save_model ran.
        EmailMessage(
            'Greetings from a created object',
            'I hereby inform you that some user created me',
            'from@example.com',
            ['to@example.com']
        ).send()
        return super().save_model(request, obj, form, change)
class ArticleAdmin2(admin.ModelAdmin):
def has_module_permission(self, request):
return False
class RowLevelChangePermissionModelAdmin(admin.ModelAdmin):
    """Per-object permissions keyed off the object's id."""

    def has_change_permission(self, request, obj=None):
        """ Only allow changing objects with even id number """
        if obj is None or not request.user.is_staff:
            return False
        return obj.id % 2 == 0

    def has_view_permission(self, request, obj=None):
        """Only allow viewing objects if id is a multiple of 3."""
        if obj is None or not request.user.is_staff:
            return False
        return obj.id % 3 == 0
class CustomArticleAdmin(admin.ModelAdmin):
    """
    Tests various hooks for using custom templates and contexts.
    """
    # Every overridable admin template is pointed at a custom_admin/ variant.
    change_list_template = 'custom_admin/change_list.html'
    change_form_template = 'custom_admin/change_form.html'
    add_form_template = 'custom_admin/add_form.html'
    object_history_template = 'custom_admin/object_history.html'
    delete_confirmation_template = 'custom_admin/delete_confirmation.html'
    delete_selected_confirmation_template = 'custom_admin/delete_selected_confirmation.html'
    popup_response_template = 'custom_admin/popup_response.html'

    def changelist_view(self, request):
        # Extra context variable for the custom change_list template.
        return super().changelist_view(request, extra_context={'extra_var': 'Hello!'})
class ThingAdmin(admin.ModelAdmin):
list_filter = ('color', 'color__warm', 'color__value', 'pub_date')
class InquisitionAdmin(admin.ModelAdmin):
list_display = ('leader', 'country', 'expected', 'sketch')
def sketch(self, obj):
# A method with the same name as a reverse accessor.
return 'list-display-sketch'
class SketchAdmin(admin.ModelAdmin):
raw_id_fields = ('inquisition', 'defendant0', 'defendant1')
class FabricAdmin(admin.ModelAdmin):
list_display = ('surface',)
list_filter = ('surface',)
class BasePersonModelFormSet(BaseModelFormSet):
    """Changelist formset that refuses to mark Grace Hopper as alive."""

    def clean(self):
        for row in self.cleaned_data:
            person = row.get('id')
            if not (person and row.get('alive')):
                continue
            if person.name == "Grace Hopper":
                raise forms.ValidationError("Grace is not a Zombie")
class PersonAdmin(admin.ModelAdmin):
    """Editable changelist whose formset validates via BasePersonModelFormSet
    and whose queryset is ordered by a field absent from list_display."""
    list_display = ('name', 'gender', 'alive')
    list_editable = ('gender', 'alive')
    list_filter = ('gender',)
    search_fields = ('^name',)
    save_as = True

    def get_changelist_formset(self, request, **kwargs):
        # Swap in the formset that vetoes a living Grace Hopper.
        return super().get_changelist_formset(request, formset=BasePersonModelFormSet, **kwargs)

    def get_queryset(self, request):
        # Order by a field that isn't in list display, to be able to test
        # whether ordering is preserved.
        return super().get_queryset(request).order_by('age')
class FooAccountAdmin(admin.StackedInline):
model = FooAccount
extra = 1
class BarAccountAdmin(admin.StackedInline):
model = BarAccount
extra = 1
class PersonaAdmin(admin.ModelAdmin):
inlines = (
FooAccountAdmin,
BarAccountAdmin
)
class SubscriberAdmin(admin.ModelAdmin):
    """Custom action + delete_queryset override used by the bulk-action tests."""
    actions = ['mail_admin']
    action_form = MediaActionForm

    def delete_queryset(self, request, queryset):
        # Record on the class that the override ran so tests can assert it.
        SubscriberAdmin.overridden = True
        super().delete_queryset(request, queryset)

    def mail_admin(self, request, selected):
        # Action that mails a fixed notification; tests inspect the outbox.
        EmailMessage(
            'Greetings from a ModelAdmin action',
            'This is the test email from an admin action',
            'from@example.com',
            ['to@example.com']
        ).send()
def external_mail(modeladmin, request, selected):
    """Standalone admin action (not a ModelAdmin method) that sends a fixed
    notification mail; tests inspect the outbox."""
    EmailMessage(
        'Greetings from a function action',
        'This is the test email from a function action',
        'from@example.com',
        ['to@example.com']
    ).send()
external_mail.short_description = 'External mail (Another awesome action)'
def redirect_to(modeladmin, request, selected):
    """Admin action that redirects instead of returning to the changelist."""
    from django.http import HttpResponseRedirect
    destination = '/some-where-else/'
    return HttpResponseRedirect(destination)
redirect_to.short_description = 'Redirect to (Awesome action)'
def download(modeladmin, request, selected):
    """Admin action that streams an in-memory 'file' back to the client."""
    buf = StringIO('This is the content of the file')
    # FileWrapper turns the buffer into an iterator of chunks for streaming.
    return StreamingHttpResponse(FileWrapper(buf))
download.short_description = 'Download subscription'
def no_perm(modeladmin, request, selected):
    """Admin action that always answers with a 403 Forbidden response."""
    body = 'No permission to perform this action'
    return HttpResponse(content=body, status=403)
no_perm.short_description = 'No permission to run'
class ExternalSubscriberAdmin(admin.ModelAdmin):
actions = [redirect_to, external_mail, download, no_perm]
class PodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'release_date')
list_editable = ('release_date',)
date_hierarchy = 'release_date'
ordering = ('name',)
class VodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'released')
list_editable = ('released',)
ordering = ('name',)
class ChildInline(admin.StackedInline):
model = Child
class ParentAdmin(admin.ModelAdmin):
    """Parent admin with a Child inline; save_related() mutates child names."""
    model = Parent
    inlines = [ChildInline]
    save_as = True
    list_display = ('id', 'name',)
    list_display_links = ('id',)
    list_editable = ('name',)

    def save_related(self, request, form, formsets, change):
        # After the inlines are saved, give every single-word child name the
        # parent's last name.
        super().save_related(request, form, formsets, change)
        # assumes the parent name is exactly two words -- ValueError otherwise
        first_name, last_name = form.instance.name.split()
        for child in form.instance.child_set.all():
            if len(child.name.split()) < 2:
                child.name = child.name + ' ' + last_name
                child.save()
class EmptyModelAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return super().get_queryset(request).filter(pk__gt=1)
class OldSubscriberAdmin(admin.ModelAdmin):
actions = None
# Upload storage rooted in a throw-away temp directory so file-upload tests
# never touch the real MEDIA_ROOT.
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class PictureInline(admin.TabularInline):
model = Picture
extra = 1
class GalleryAdmin(admin.ModelAdmin):
inlines = [PictureInline]
class PictureAdmin(admin.ModelAdmin):
pass
class LanguageAdmin(admin.ModelAdmin):
list_display = ['iso', 'shortlist', 'english_name', 'name']
list_editable = ['shortlist']
class RecommendationAdmin(admin.ModelAdmin):
show_full_result_count = False
search_fields = ('=titletranslation__text', '=the_recommender__titletranslation__text',)
class WidgetInline(admin.StackedInline):
model = Widget
class DooHickeyInline(admin.StackedInline):
model = DooHickey
class GrommetInline(admin.StackedInline):
model = Grommet
class WhatsitInline(admin.StackedInline):
model = Whatsit
class FancyDoodadInline(admin.StackedInline):
model = FancyDoodad
class CategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'collector', 'order')
list_editable = ('order',)
class CategoryInline(admin.StackedInline):
model = Category
class CollectorAdmin(admin.ModelAdmin):
inlines = [
WidgetInline, DooHickeyInline, GrommetInline, WhatsitInline,
FancyDoodadInline, CategoryInline
]
class LinkInline(admin.TabularInline):
model = Link
extra = 1
readonly_fields = ("posted", "multiline", "readonly_link_content")
def multiline(self, instance):
return "InlineMultiline\ntest\nstring"
class SubPostInline(admin.TabularInline):
    """Inline whose slug becomes read-only (and loses JS prepopulation) once
    the post is published."""
    model = PrePopulatedSubPost
    prepopulated_fields = {
        'subslug': ('subtitle',)
    }

    def get_readonly_fields(self, request, obj=None):
        # Freeze the slug after publication.
        if not (obj and obj.published):
            return self.readonly_fields
        return ('subslug',)

    def get_prepopulated_fields(self, request, obj=None):
        # No prepopulation once the slug is frozen.
        if not (obj and obj.published):
            return self.prepopulated_fields
        return {}
class PrePopulatedPostAdmin(admin.ModelAdmin):
    """Prepopulated slug that becomes read-only once the post is published;
    mirrors the behaviour of SubPostInline for the parent object."""
    list_display = ['title', 'slug']
    prepopulated_fields = {
        'slug': ('title',)
    }
    inlines = [SubPostInline]

    def get_readonly_fields(self, request, obj=None):
        # Freeze the slug after publication.
        if obj and obj.published:
            return ('slug',)
        return self.readonly_fields

    def get_prepopulated_fields(self, request, obj=None):
        # No JS prepopulation once the slug is read-only.
        if obj and obj.published:
            return {}
        return self.prepopulated_fields
class PrePopulatedPostReadOnlyAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('title',)}
def has_change_permission(self, *args, **kwargs):
return False
class PostAdmin(admin.ModelAdmin):
    """Exercises readonly_fields of every flavour: model fields, model
    properties, ModelAdmin methods, and a bare lambda."""
    list_display = ['title', 'public']
    readonly_fields = (
        'posted', 'awesomeness_level', 'coolness', 'value',
        'multiline', 'multiline_html', lambda obj: "foo",
        'readonly_content',
    )
    inlines = [
        LinkInline
    ]

    def coolness(self, instance):
        # instance.pk is None on the add form, hence the fallback text.
        if instance.pk:
            return "%d amount of cool." % instance.pk
        else:
            return "Unknown coolness."

    def value(self, instance):
        # Constant read-only value with a custom column label.
        return 1000
    value.short_description = 'Value in $US'

    def multiline(self, instance):
        # Plain-text value with embedded newlines (must be escaped/kept).
        return "Multiline\ntest\nstring"

    def multiline_html(self, instance):
        # Pre-marked-safe HTML with newlines.
        return mark_safe("Multiline<br>\nhtml<br>\ncontent")
class FieldOverridePostForm(forms.ModelForm):
model = FieldOverridePost
class Meta:
help_texts = {
'posted': 'Overridden help text for the date',
}
labels = {
'public': 'Overridden public label',
}
class FieldOverridePostAdmin(PostAdmin):
form = FieldOverridePostForm
class CustomChangeList(ChangeList):
def get_queryset(self, request):
return self.root_queryset.order_by('pk').filter(pk=9999) # Doesn't exist
class GadgetAdmin(admin.ModelAdmin):
def get_changelist(self, request, **kwargs):
return CustomChangeList
class ToppingAdmin(admin.ModelAdmin):
readonly_fields = ('pizzas',)
class PizzaAdmin(admin.ModelAdmin):
readonly_fields = ('toppings',)
class StudentAdmin(admin.ModelAdmin):
search_fields = ('name',)
class ReadOnlyPizzaAdmin(admin.ModelAdmin):
readonly_fields = ('name', 'toppings')
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return True
def has_delete_permission(self, request, obj=None):
return True
class WorkHourAdmin(admin.ModelAdmin):
list_display = ('datum', 'employee')
list_filter = ('employee',)
class FoodDeliveryAdmin(admin.ModelAdmin):
list_display = ('reference', 'driver', 'restaurant')
list_editable = ('driver', 'restaurant')
class CoverLetterAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing CoverLetter
instances. Note that the CoverLetter model defines a __str__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super().get_queryset(request).defer('date_written')
class PaperAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Paper
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super().get_queryset(request).only('title')
class ShortMessageAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing ShortMessage
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super().get_queryset(request).defer('timestamp')
class TelegramAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Telegram
instances. Note that the Telegram model defines a __str__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super().get_queryset(request).only('title')
class StoryForm(forms.ModelForm):
class Meta:
widgets = {'title': forms.HiddenInput}
class StoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title',) # 'id' not in list_display_links
list_editable = ('content',)
form = StoryForm
ordering = ['-id']
class OtherStoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title', 'id') # 'id' in list_display_links
list_editable = ('content',)
ordering = ['-id']
class ComplexSortedPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age', 'is_employee', 'colored_name')
ordering = ('name',)
def colored_name(self, obj):
return format_html('<span style="color: #ff00ff;">{}</span>', obj.name)
colored_name.admin_order_field = 'name'
class PluggableSearchPersonAdmin(admin.ModelAdmin):
    """Extends the default search so an integer search term also matches
    the ``age`` field."""
    list_display = ('name', 'age')
    search_fields = ('name',)

    def get_search_results(self, request, queryset, search_term):
        queryset, use_distinct = super().get_search_results(request, queryset, search_term)
        try:
            age = int(search_term)
        except ValueError:
            # Non-numeric terms only go through the default name search.
            return queryset, use_distinct
        queryset |= self.model.objects.filter(age=age)
        return queryset, use_distinct
class AlbumAdmin(admin.ModelAdmin):
list_filter = ['title']
class QuestionAdmin(admin.ModelAdmin):
ordering = ['-posted']
search_fields = ['question']
autocomplete_fields = ['related_questions']
class AnswerAdmin(admin.ModelAdmin):
autocomplete_fields = ['question']
class PrePopulatedPostLargeSlugAdmin(admin.ModelAdmin):
prepopulated_fields = {
'slug': ('title',)
}
class AdminOrderedFieldAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'order')
class AdminOrderedModelMethodAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'some_order')
class AdminOrderedAdminMethodAdmin(admin.ModelAdmin):
def some_admin_order(self, obj):
return obj.order
some_admin_order.admin_order_field = 'order'
ordering = ('order',)
list_display = ('stuff', 'some_admin_order')
def admin_ordered_callable(obj):
return obj.order
admin_ordered_callable.admin_order_field = 'order'
class AdminOrderedCallableAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', admin_ordered_callable)
class ReportAdmin(admin.ModelAdmin):
def extra(self, request):
return HttpResponse()
def get_urls(self):
# Corner case: Don't call parent implementation
return [path('extra/', self.extra, name='cable_extra')]
class CustomTemplateBooleanFieldListFilter(BooleanFieldListFilter):
template = 'custom_filter_template.html'
class CustomTemplateFilterColorAdmin(admin.ModelAdmin):
list_filter = (('warm', CustomTemplateBooleanFieldListFilter),)
# For Selenium Prepopulated tests -------------------------------------
class RelatedPrepopulatedInline1(admin.StackedInline):
fieldsets = (
(None, {
'fields': (
('fk', 'm2m'),
('pubdate', 'status'),
('name', 'slug1', 'slug2',),
),
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
model = RelatedPrepopulated
extra = 1
autocomplete_fields = ['fk', 'm2m']
prepopulated_fields = {
'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name'],
}
class RelatedPrepopulatedInline2(admin.TabularInline):
model = RelatedPrepopulated
extra = 1
autocomplete_fields = ['fk', 'm2m']
prepopulated_fields = {
'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name'],
}
class RelatedPrepopulatedInline3(admin.TabularInline):
model = RelatedPrepopulated
extra = 0
autocomplete_fields = ['fk', 'm2m']
class MainPrepopulatedAdmin(admin.ModelAdmin):
inlines = [RelatedPrepopulatedInline1, RelatedPrepopulatedInline2, RelatedPrepopulatedInline3]
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2', 'slug3'))
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
prepopulated_fields = {
'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name'],
'slug3': ['name'],
}
class UnorderedObjectAdmin(admin.ModelAdmin):
list_display = ['id', 'name']
list_display_links = ['id']
list_editable = ['name']
list_per_page = 2
class UndeletableObjectAdmin(admin.ModelAdmin):
def change_view(self, *args, **kwargs):
kwargs['extra_context'] = {'show_delete': False}
return super().change_view(*args, **kwargs)
class UnchangeableObjectAdmin(admin.ModelAdmin):
def get_urls(self):
# Disable change_view, but leave other urls untouched
urlpatterns = super().get_urls()
return [p for p in urlpatterns if p.name and not p.name.endswith("_change")]
def callable_on_unknown(obj):
    """List-display callable that reads a (typically missing) attribute,
    so it raises AttributeError for objects without ``unknown``."""
    return getattr(obj, 'unknown')
class AttributeErrorRaisingAdmin(admin.ModelAdmin):
list_display = [callable_on_unknown]
class CustomManagerAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return FilteredManager.objects
class MessageTestingAdmin(admin.ModelAdmin):
actions = ["message_debug", "message_info", "message_success",
"message_warning", "message_error", "message_extra_tags"]
def message_debug(self, request, selected):
self.message_user(request, "Test debug", level="debug")
def message_info(self, request, selected):
self.message_user(request, "Test info", level="info")
def message_success(self, request, selected):
self.message_user(request, "Test success", level="success")
def message_warning(self, request, selected):
self.message_user(request, "Test warning", level="warning")
def message_error(self, request, selected):
self.message_user(request, "Test error", level="error")
def message_extra_tags(self, request, selected):
self.message_user(request, "Test tags", extra_tags="extra_tag")
class ChoiceList(admin.ModelAdmin):
list_display = ['choice']
readonly_fields = ['choice']
fields = ['choice']
class DependentChildAdminForm(forms.ModelForm):
"""
Issue #20522
Form to test child dependency on parent object's validation
"""
def clean(self):
parent = self.cleaned_data.get('parent')
if parent.family_name and parent.family_name != self.cleaned_data.get('family_name'):
raise ValidationError("Children must share a family name with their parents " +
"in this contrived test case")
return super().clean()
class DependentChildInline(admin.TabularInline):
model = DependentChild
form = DependentChildAdminForm
class ParentWithDependentChildrenAdmin(admin.ModelAdmin):
inlines = [DependentChildInline]
# Tests for ticket 11277 ----------------------------------
class FormWithoutHiddenField(forms.ModelForm):
first = forms.CharField()
second = forms.CharField()
class FormWithoutVisibleField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField(widget=forms.HiddenInput)
class FormWithVisibleAndHiddenField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField()
class EmptyModelVisibleAdmin(admin.ModelAdmin):
form = FormWithoutHiddenField
fieldsets = (
(None, {
'fields': (('first', 'second'),),
}),
)
class EmptyModelHiddenAdmin(admin.ModelAdmin):
form = FormWithoutVisibleField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class EmptyModelMixinAdmin(admin.ModelAdmin):
form = FormWithVisibleAndHiddenField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class CityInlineAdmin(admin.TabularInline):
model = City
view_on_site = False
class StateAdmin(admin.ModelAdmin):
inlines = [CityInlineAdmin]
class RestaurantInlineAdmin(admin.TabularInline):
model = Restaurant
view_on_site = True
class CityAdmin(admin.ModelAdmin):
inlines = [RestaurantInlineAdmin]
view_on_site = True
class WorkerAdmin(admin.ModelAdmin):
def view_on_site(self, obj):
return '/worker/%s/%s/' % (obj.surname, obj.name)
class WorkerInlineAdmin(admin.TabularInline):
model = Worker
def view_on_site(self, obj):
return '/worker_inline/%s/%s/' % (obj.surname, obj.name)
class RestaurantAdmin(admin.ModelAdmin):
inlines = [WorkerInlineAdmin]
view_on_site = False
def get_changeform_initial_data(self, request):
return {'name': 'overridden_value'}
class FunkyTagAdmin(admin.ModelAdmin):
list_display = ('name', 'content_object')
class InlineReferenceInline(admin.TabularInline):
model = InlineReference
class InlineRefererAdmin(admin.ModelAdmin):
inlines = [InlineReferenceInline]
class PlotReadonlyAdmin(admin.ModelAdmin):
readonly_fields = ('plotdetails',)
class GetFormsetsArgumentCheckingAdmin(admin.ModelAdmin):
    """Asserts that get_formsets_with_inlines() receives the correct ``obj``:
    None during add views, the instance during change views."""
    fields = ['name']

    def add_view(self, request, *args, **kwargs):
        # Tag the request so get_formsets_with_inlines knows which view ran.
        request.is_add_view = True
        return super().add_view(request, *args, **kwargs)

    def change_view(self, request, *args, **kwargs):
        request.is_add_view = False
        return super().change_view(request, *args, **kwargs)

    def get_formsets_with_inlines(self, request, obj=None):
        if request.is_add_view and obj is not None:
            raise Exception("'obj' passed to get_formsets_with_inlines wasn't None during add_view")
        if not request.is_add_view and obj is None:
            raise Exception("'obj' passed to get_formsets_with_inlines was None during change_view")
        return super().get_formsets_with_inlines(request, obj)
# Main admin site used by the bulk of the admin_views tests.
site = admin.AdminSite(name="admin")
site.site_url = '/my-site-url/'
site.register(Article, ArticleAdmin)
site.register(CustomArticle, CustomArticleAdmin)
# Registering with options directly (no ModelAdmin subclass).
site.register(
    Section, save_as=True, inlines=[ArticleInline],
    readonly_fields=['name_property'], search_fields=['name'],
)
site.register(ModelWithStringPrimaryKey)
site.register(Color)
site.register(Thing, ThingAdmin)
site.register(Actor)
site.register(Inquisition, InquisitionAdmin)
site.register(Sketch, SketchAdmin)
site.register(Person, PersonAdmin)
site.register(Persona, PersonaAdmin)
site.register(Subscriber, SubscriberAdmin)
site.register(ExternalSubscriber, ExternalSubscriberAdmin)
site.register(OldSubscriber, OldSubscriberAdmin)
site.register(Podcast, PodcastAdmin)
site.register(Vodcast, VodcastAdmin)
site.register(Parent, ParentAdmin)
site.register(EmptyModel, EmptyModelAdmin)
site.register(Fabric, FabricAdmin)
site.register(Gallery, GalleryAdmin)
site.register(Picture, PictureAdmin)
site.register(Language, LanguageAdmin)
site.register(Recommendation, RecommendationAdmin)
site.register(Recommender)
site.register(Collector, CollectorAdmin)
site.register(Category, CategoryAdmin)
site.register(Post, PostAdmin)
site.register(FieldOverridePost, FieldOverridePostAdmin)
site.register(Gadget, GadgetAdmin)
site.register(Villain)
site.register(SuperVillain)
site.register(Plot)
site.register(PlotDetails)
site.register(PlotProxy, PlotReadonlyAdmin)
site.register(Bookmark)
site.register(CyclicOne)
site.register(CyclicTwo)
site.register(WorkHour, WorkHourAdmin)
site.register(Reservation)
site.register(FoodDelivery, FoodDeliveryAdmin)
site.register(RowLevelChangePermissionModel, RowLevelChangePermissionModelAdmin)
site.register(Paper, PaperAdmin)
site.register(CoverLetter, CoverLetterAdmin)
site.register(ShortMessage, ShortMessageAdmin)
site.register(Telegram, TelegramAdmin)
site.register(Story, StoryAdmin)
site.register(OtherStory, OtherStoryAdmin)
site.register(Report, ReportAdmin)
site.register(MainPrepopulated, MainPrepopulatedAdmin)
site.register(UnorderedObject, UnorderedObjectAdmin)
site.register(UndeletableObject, UndeletableObjectAdmin)
site.register(UnchangeableObject, UnchangeableObjectAdmin)
site.register(State, StateAdmin)
site.register(City, CityAdmin)
site.register(Restaurant, RestaurantAdmin)
site.register(Worker, WorkerAdmin)
site.register(FunkyTag, FunkyTagAdmin)
site.register(ReferencedByParent)
site.register(ChildOfReferer)
site.register(ReferencedByInline)
site.register(InlineReferer, InlineRefererAdmin)
site.register(ReferencedByGenRel)
site.register(GenRelReference)
site.register(ParentWithUUIDPK)
site.register(RelatedPrepopulated, search_fields=['name'])
site.register(RelatedWithUUIDPKModel)
# We intentionally register Promo and ChapterXtra1 but not Chapter nor ChapterXtra2.
# That way we cover all four cases:
#     related ForeignKey object registered in admin
#     related ForeignKey object not registered in admin
#     related OneToOne object registered in admin
#     related OneToOne object not registered in admin
# when deleting Book so as exercise all four paths through
# contrib.admin.utils's get_deleted_objects function.
site.register(Book, inlines=[ChapterInline])
site.register(Promo)
site.register(ChapterXtra1, ChapterXtra1Admin)
site.register(Pizza, PizzaAdmin)
site.register(ReadOnlyPizza, ReadOnlyPizzaAdmin)
site.register(ReadablePizza)
site.register(Topping, ToppingAdmin)
site.register(Album, AlbumAdmin)
site.register(Question, QuestionAdmin)
# date_hierarchy spanning a relation, passed as a register() option.
site.register(Answer, AnswerAdmin, date_hierarchy='question__posted')
site.register(Answer2, date_hierarchy='question__expires')
site.register(PrePopulatedPost, PrePopulatedPostAdmin)
site.register(ComplexSortedPerson, ComplexSortedPersonAdmin)
site.register(FilteredManager, CustomManagerAdmin)
site.register(PluggableSearchPerson, PluggableSearchPersonAdmin)
site.register(PrePopulatedPostLargeSlug, PrePopulatedPostLargeSlugAdmin)
site.register(AdminOrderedField, AdminOrderedFieldAdmin)
site.register(AdminOrderedModelMethod, AdminOrderedModelMethodAdmin)
site.register(AdminOrderedAdminMethod, AdminOrderedAdminMethodAdmin)
site.register(AdminOrderedCallable, AdminOrderedCallableAdmin)
site.register(Color2, CustomTemplateFilterColorAdmin)
site.register(Simple, AttributeErrorRaisingAdmin)
site.register(UserMessenger, MessageTestingAdmin)
site.register(Choice, ChoiceList)
site.register(ParentWithDependentChildren, ParentWithDependentChildrenAdmin)
site.register(EmptyModelHidden, EmptyModelHiddenAdmin)
site.register(EmptyModelVisible, EmptyModelVisibleAdmin)
site.register(EmptyModelMixin, EmptyModelMixinAdmin)
site.register(StumpJoke)
site.register(Recipe)
site.register(Ingredient)
site.register(NotReferenced)
site.register(ExplicitlyProvidedPK, GetFormsetsArgumentCheckingAdmin)
site.register(ImplicitlyGeneratedPK, GetFormsetsArgumentCheckingAdmin)
site.register(UserProxy)

# Register core models we need in our tests
site.register(User, UserAdmin)
site.register(Group, GroupAdmin)

# Used to test URL namespaces
site2 = admin.AdminSite(name="namespaced_admin")
site2.register(User, UserAdmin)
site2.register(Group, GroupAdmin)
site2.register(ParentWithUUIDPK)
site2.register(
    RelatedWithUUIDPKModel,
    list_display=['pk', 'parent'],
    list_editable=['parent'],
    raw_id_fields=['parent'],
)
site2.register(Person, save_as_continue=False)

# Site with module-level and read-only permission variants.
site7 = admin.AdminSite(name="admin7")
site7.register(Article, ArticleAdmin2)
site7.register(Section)
site7.register(PrePopulatedPost, PrePopulatedPostReadOnlyAdmin)
# Used to test ModelAdmin.sortable_by and get_sortable_by().
class ArticleAdmin6(admin.ModelAdmin):
list_display = (
'content', 'date', callable_year, 'model_year', 'modeladmin_year',
'model_year_reversed', 'section',
)
sortable_by = ('date', callable_year)
def modeladmin_year(self, obj):
return obj.date.year
modeladmin_year.admin_order_field = 'date'
class ActorAdmin6(admin.ModelAdmin):
list_display = ('name', 'age')
sortable_by = ('name',)
def get_sortable_by(self, request):
return ('age',)
class ChapterAdmin6(admin.ModelAdmin):
list_display = ('title', 'book')
sortable_by = ()
class ColorAdmin6(admin.ModelAdmin):
list_display = ('value',)
def get_sortable_by(self, request):
return ()
site6 = admin.AdminSite(name='admin6')
site6.register(Article, ArticleAdmin6)
site6.register(Actor, ActorAdmin6)
site6.register(Chapter, ChapterAdmin6)
site6.register(Color, ColorAdmin6)
class ArticleAdmin9(admin.ModelAdmin):
def has_change_permission(self, request, obj=None):
# Simulate that the user can't change a specific object.
return obj is None
site9 = admin.AdminSite(name='admin9')
site9.register(Article, ArticleAdmin9)
class ArticleAdmin10(admin.ModelAdmin):
def has_change_permission(self, request, obj=None):
return False
site10 = admin.AdminSite(name='admin10')
site10.register(Article, ArticleAdmin10)
| nesdis/djongo | tests/django_tests/tests/v22/tests/admin_views/admin.py | Python | agpl-3.0 | 34,509 |
#!/usr/bin/env python3
###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative works, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
'''
This is a Unit Test for Rule DisableCamera
@author: ekkehard j. koch
@change: 03/18/2013 Original Implementation
@change: 2016/02/10 roy Added sys.path.append for being able to unit test this
file as well as with the test harness.
'''
from re import search, escape
import unittest
import sys
sys.path.append("../../../..")
from src.tests.lib.RuleTestTemplate import RuleTest
from src.stonix_resources.CommandHelper import CommandHelper
from src.tests.lib.logdispatcher_mock import LogPriority
from src.stonix_resources.rules.DisableCamera import DisableCamera
class zzzTestRuleDisableCamera(RuleTest):
def setUp(self):
    '''@change: Breen Malmberg - 06102015 - updated self.cmd and paths to work
    with updated unit test functionality
    '''
    RuleTest.setUp(self)
    # Rule under test, wired to the mock logging/config plumbing from RuleTest.
    self.rule = DisableCamera(self.config,
                              self.environ,
                              self.logdispatch,
                              self.statechglogger)
    self.rulename = self.rule.rulename
    self.rulenumber = self.rule.rulenumber
    self.ch = CommandHelper(self.logdispatch)
    # UUID of the camera-disablement configuration profile this test manages.
    self.identifier = "041AD784-F0E2-40F5-9433-08ED6B105DDA"
    # NOTE(review): hard-coded /Users/vagrant path ties this test to one VM image.
    self.rule.camprofile = "/Users/vagrant/stonix/src/stonix_resources/" + \
        "files/stonix4macCameraDisablement.mobileconfig"
    # Enable the rule's configuration item so fix() will actually run.
    self.rule.ci.updatecurrvalue(True)
def tearDown(self):
pass
def runTest(self):
self.simpleRuleTest()
def setConditionsForRule(self):
'''Configure system for the unit test
:param self: essential if you override this definition
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
@change: Breen Malmberg - 06102015 - changed this method to reflect
the new functionality of DisableCamera.py
'''
success = True
self.detailedresults = ""
cmd = ["/usr/bin/profiles", "-P"]
if not self.ch.executeCommand(cmd):
success = False
self.detailedresults += "Unable to run profiles command\n"
else:
output = self.ch.getOutput()
if output:
for line in output:
if search(escape(self.identifier), line.strip()):
cmd = ["/usr/bin/profiles", "-R", "-p", self.identifier]
if not self.ch.executeCommand(cmd):
success = False
return success
def checkReportForRule(self, pCompliance, pRuleSuccess):
'''check on whether report was correct
:param self: essential if you override this definition
:param pCompliance: the self.iscompliant value of rule
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pCompliance = " + \
str(pCompliance) + ".")
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = True
return success
def checkFixForRule(self, pRuleSuccess):
'''check on whether fix was correct
:param self: essential if you override this definition
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = True
return success
def checkUndoForRule(self, pRuleSuccess):
'''check on whether undo was correct
:param self: essential if you override this definition
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = True
return success
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    # Run this unit test directly via the standard unittest runner.
    unittest.main()
| CSD-Public/stonix | src/tests/rules/unit_tests/zzzTestRuleDisableCamera.py | Python | gpl-2.0 | 5,618 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import datetime
import argparse
from pyclibrary import CParser
__author__ = 'andreas.dahlberg90@gmail.com (Andreas Dahlberg)'
__version__ = '0.1.0'
class Argument(object):
    """A single C function argument (type, name, pointer/const flags)."""

    _FMT = '{}{}{}{}'

    def __init__(self, c_type, name, pointer, const):
        self.c_type = c_type
        self.name = name
        self.pointer = pointer
        self.const = const

    def to_string(self):
        """Render the argument as C source, e.g. 'const char* buf'."""
        const_part = 'const ' if self.const else ''
        pointer_part = '*' if self.pointer else ''
        name_part = ' ' + self.name if self.name else ''
        return Argument._FMT.format(const_part, self.c_type,
                                    pointer_part, name_part)
class Function(object):
    """A C function rendered as a cmocka '__wrap_' mock definition."""

    _FMT = '{} __wrap_{}({})'

    def __init__(self, c_type, name, arguments):
        self.c_type = c_type    # return type, e.g. 'int' or 'void'
        self.name = name        # unmangled function name
        self.arguments = arguments  # list of Argument objects

    def to_string(self):
        """Render the mock function body (CRLF-joined lines).

        Bug fix: non-void wrappers must *return* the queued mock value.
        The previous code emitted 'mock_type(T);' as a bare statement, so
        the generated non-void C function discarded the value and fell off
        the end without returning anything.
        """
        argument_strings = [x.to_string() for x in self.arguments]
        lines = []
        lines.append(Function._FMT.format(self.c_type, self.name,
                                          ', '.join(argument_strings)))
        lines.append('{')
        if self.c_type != 'void':
            lines.append('    return mock_type({});'.format(self.c_type))
        lines.append('}')
        return '\r\n'.join(lines)
class HeaderParser(object):
    """Thin wrapper around pyclibrary's CParser for one header file."""

    def __init__(self, file_name):
        self.parser = CParser([file_name])

    def get_functions(self):
        """Get a list with all function objects."""
        function_objects = []
        for func_name, signature in self.parser.defs['functions'].items():
            raw_args = signature[1]
            args = [
                Argument(
                    arg_type.type_spec,
                    arg_name,
                    is_pointer(arg_type),
                    is_const(arg_type),
                )
                for arg_name, arg_type, _ in raw_args
            ]
            return_type = signature[0].type_spec
            function_objects.append(Function(return_type, func_name, args))
        return function_objects
def is_pointer(arg_type):
    """Check if the argument type is a single-level pointer."""
    declarators = arg_type.declarators
    return len(declarators) == 1 and declarators[0] == '*'
def is_const(arg_type):
    """Check if the argument type is const-qualified.

    Generalized: the previous code only recognized 'const' when it was the
    *sole* qualifier in the first qualifier tuple, so e.g. a
    'const volatile' argument was reported as non-const. Membership testing
    handles any number of qualifiers while preserving the old behavior for
    zero or one qualifier.
    """
    return 'const' in arg_type.type_quals[0]
def parse(args):
    """Parse the header file and print the rendered mock functions.

    Fills the user-supplied template with the generated C code plus
    file/author/date metadata and writes the result to stdout.
    """
    mock_functions = HeaderParser(args.header).get_functions()
    c_code = '\r\n\r\n'.join(func.to_string() for func in mock_functions)
    with open(args.template) as template_file:
        template = template_file.read()
    now = datetime.datetime.now()
    base_name = os.path.basename(args.header).split('.')[0]
    params = dict(
        filename=base_name,
        author='Andreas Dahlberg',
        date=now.strftime('%Y-%m-%d'),
        brief='Mock functions.',
        mock_functions=c_code,
    )
    print(template.format(**params))
def main():
    """Entry point: read 'header' and 'template' paths from argv and run."""
    arg_parser = argparse.ArgumentParser()
    for positional in ('header', 'template'):
        arg_parser.add_argument(positional)
    parse(arg_parser.parse_args())
    return 0
if __name__ == '__main__':
    # Propagate main()'s exit status to the shell.
    exit(main())
| Andreasdahlberg/sillycat | scripts/mockgen/mockgen.py | Python | gpl-3.0 | 4,321 |
# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest.mock import Mock, patch
from trove.flavor.views import FlavorView
from trove.tests.unittests import trove_testtools
class FlavorViewsTest(trove_testtools.TestCase):
    """Unit tests for FlavorView's serialization of a flavor object."""
    def setUp(self):
        super(FlavorViewsTest, self).setUp()
        # Fake flavor carrying both the integer id and its string form,
        # plus the attributes FlavorView.data() is expected to expose.
        self.flavor = Mock()
        self.flavor.id = 10
        self.flavor.str_id = '10'
        self.flavor.name = 'test_flavor'
        self.flavor.ram = 512
        self.links = 'my_links'
        self.flavor.vcpus = '10'
        self.flavor.disk = '0'
        self.flavor.ephemeral = '0'
    def tearDown(self):
        super(FlavorViewsTest, self).tearDown()
    def test_data(self):
        # Each datum: the flavor id fed to the view, and the id/str_id pair
        # the view must emit. Non-numeric and zero-padded ids are expected
        # to yield id=None with the original value kept only in str_id.
        data = [
            {'flavor_id': 10,
             'expected_id': 10,
             'expected_str_id': '10'},
            {'flavor_id': 'uuid-10',
             'expected_id': None,
             'expected_str_id': 'uuid-10'},
            {'flavor_id': '02',
             'expected_id': None,
             'expected_str_id': '02'},
        ]
        for datum in data:
            flavor_id = datum['flavor_id']
            expected_id = datum['expected_id']
            expected_str_id = datum['expected_str_id']
            msg = "Testing flavor_id: %s - " % flavor_id
            self.flavor.id = flavor_id
            # Stub out link building so only data() serialization is tested.
            with patch.object(FlavorView, '_build_links',
                              Mock(return_value=(self.links))):
                view = FlavorView(self.flavor)
                result = view.data()
                self.assertEqual(expected_id, result['flavor']['id'],
                                 msg + 'invalid id')
                self.assertEqual(expected_str_id, result['flavor']['str_id'],
                                 msg + 'invalid str_id')
                self.assertEqual(self.flavor.name, result['flavor']['name'],
                                 msg + 'invalid name')
                self.assertEqual(self.flavor.ram, result['flavor']['ram'],
                                 msg + 'invalid ram')
                self.assertEqual(self.flavor.vcpus, result['flavor']['vcpus'],
                                 msg + 'invalid vcpus')
                self.assertEqual(self.flavor.disk, result['flavor']['disk'],
                                 msg + 'invalid disk')
                self.assertEqual(self.flavor.ephemeral,
                                 result['flavor']['ephemeral'],
                                 msg + 'invalid ephemeral')
                self.assertEqual(self.links, result['flavor']['links'],
                                 msg + 'invalid links')
| openstack/trove | trove/tests/unittests/flavor/test_flavor_views.py | Python | apache-2.0 | 3,188 |
"""This pipeline is intended to make the classification of ADC modality
features."""
from __future__ import division
import os
import numpy as np
from sklearn.externals import joblib
from sklearn.preprocessing import label_binarize
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import f_classif
from sklearn.ensemble import RandomForestClassifier
from protoclass.data_management import GTModality
# NOTE: this is a Python 2 script (print statements throughout).
# Define the path where the patients are stored
path_patients = '/data/prostate/experiments'
# Define the path where the features have been extracted
path_features = '/data/prostate/extraction/mp-mri-prostate'
# Define a list of the path where the feature are kept
# NOTE(review): 'lbp-adc' appears twice on purpose — the directory holds
# both LBP variants, matched by the two lbp entries in ext_features below.
adc_features = ['dct-adc', 'edge-adc/kirsch', 'edge-adc/laplacian',
                'edge-adc/prewitt', 'edge-adc/scharr', 'edge-adc/sobel',
                'gabor-adc', 'harlick-adc', 'ise-adc', 'lbp-adc', 'lbp-adc',
                'phase-congruency-adc']
# Define the extension of each features (index-aligned with adc_features)
ext_features = ['_dct_adc.npy', '_edge_adc.npy', '_edge_adc.npy',
                '_edge_adc.npy', '_edge_adc.npy', '_edge_adc.npy',
                '_gabor_adc.npy', '_haralick_adc.npy', '_ise_adc.npy',
                '_lbp_8_1_adc.npy', '_lbp_16_2_adc.npy',
                '_phase_congruency_adc.npy']
# Define the path of the balanced data
path_balanced = '/data/prostate/balanced/mp-mri-prostate/exp-3/iht'
ext_balanced = '_adc.npz'
# Define the path of the ground for the prostate
path_gt = ['GT_inv/prostate', 'GT_inv/pz', 'GT_inv/cg', 'GT_inv/cap']
# Define the label of the ground-truth which will be provided
label_gt = ['prostate', 'pz', 'cg', 'cap']
# Generate the different path to be later treated
path_patients_list_gt = []
# Create the generator
id_patient_list = [name for name in os.listdir(path_patients)
                   if os.path.isdir(os.path.join(path_patients, name))]
id_patient_list = sorted(id_patient_list)
for id_patient in id_patient_list:
    # Append for the GT data - Note that we need a list of gt path
    path_patients_list_gt.append([os.path.join(path_patients, id_patient, gt)
                                  for gt in path_gt])
# Load all the data once. Splitting into training and testing will be done at
# the cross-validation time
data = []
data_bal = []
label = []
label_bal = []
for idx_pat in range(len(id_patient_list)):
    print 'Read patient {}'.format(id_patient_list[idx_pat])
    # For each patient we nee to load the different feature
    patient_data = []
    for idx_feat in range(len(adc_features)):
        # Create the path to the patient file
        filename_feature = (id_patient_list[idx_pat].lower().replace(' ', '_') +
                            ext_features[idx_feat])
        path_data = os.path.join(path_features, adc_features[idx_feat],
                                 filename_feature)
        single_feature_data = np.load(path_data)
        # Check if this is only one dimension data; promote to a column
        # vector so all features can be concatenated along axis 1.
        if len(single_feature_data.shape) == 1:
            single_feature_data = np.atleast_2d(single_feature_data).T
        patient_data.append(single_feature_data)
    # Concatenate the data in a single array (voxels x features)
    patient_data = np.concatenate(patient_data, axis=1)
    print 'Imbalanced feature loaded ...'
    # Load the dataset from each balancing method
    pat_chg = (id_patient_list[idx_pat].lower().replace(' ', '_') +
               ext_balanced)
    filename = os.path.join(path_balanced, pat_chg)
    npz_file = np.load(filename)
    data_bal.append(npz_file['data_resampled'])
    label_bal.append(npz_file['label_resampled'])
    print 'Balanced data loaded ...'
    # Create the corresponding ground-truth
    gt_mod = GTModality()
    gt_mod.read_data_from_path(label_gt,
                               path_patients_list_gt[idx_pat])
    print 'Read the GT data for the current patient ...'
    # Concatenate the training data
    data.append(patient_data)
    # Extract the corresponding ground-truth for the testing data
    # Get the index corresponding to the ground-truth
    roi_prostate = gt_mod.extract_gt_data('prostate', output_type='index')
    # Get the label of the gt only for the prostate ROI
    gt_cap = gt_mod.extract_gt_data('cap', output_type='data')
    label.append(gt_cap[roi_prostate])
    print 'Data and label extracted for the current patient ...'
# Percentiles of features kept by the ANOVA (f_classif) selector.
percentiles = np.array([10, 12.5, 15, 17.5, 20, 22.5, 25])
results_p = []
feat_imp_p = []
for p in percentiles:
    print 'Computing for percentile: {}'.format(p)
    results_cv = []
    feat_imp_cv = []
    # Go for LOPO cross-validation (leave-one-patient-out)
    for idx_lopo_cv in range(len(id_patient_list)):
        # Display some information about the LOPO-CV
        print 'Round #{} of the LOPO-CV'.format(idx_lopo_cv + 1)
        # Get the testing data (imbalanced originals for the held-out patient)
        testing_data = data[idx_lopo_cv]
        testing_label = np.ravel(label_binarize(label[idx_lopo_cv], [0, 255]))
        print 'Create the testing set ...'
        # Create the training data and label
        # We need to take the balanced data
        training_data = [arr for idx_arr, arr in enumerate(data_bal)
                         if idx_arr != idx_lopo_cv]
        training_label = [arr for idx_arr, arr in enumerate(label_bal)
                          if idx_arr != idx_lopo_cv]
        # Concatenate the data
        training_data = np.vstack(training_data)
        training_label = np.ravel(label_binarize(
            np.hstack(training_label).astype(int), [0, 255]))
        print 'Create the training set ...'
        # Perform the classification for the current cv and the
        # given configuration
        # Feature selector (fit on training only, then applied to testing)
        sel = SelectPercentile(f_classif, p)
        training_data = sel.fit_transform(training_data, training_label)
        testing_data = sel.transform(testing_data)
        crf = RandomForestClassifier(n_estimators=100, n_jobs=-1)
        pred_prob = crf.fit(training_data, training_label).predict_proba(
            testing_data)
        results_cv.append([pred_prob, crf.classes_])
        feat_imp_cv.append(sel.get_support(indices=True))
    results_p.append(results_cv)
    feat_imp_p.append(feat_imp_cv)
# Save the information
path_store = '/data/prostate/results/mp-mri-prostate/exp-3/selection-extraction/anova/adc'
if not os.path.exists(path_store):
    os.makedirs(path_store)
joblib.dump(results_p, os.path.join(path_store,
                                    'results.pkl'))
joblib.dump(feat_imp_p, os.path.join(path_store,
                                     'feat_imp.pkl'))
| I2Cvb/mp-mri-prostate | pipeline/feature-classification/exp-3/selection-extraction/anova/pipeline_classifier_adc.py | Python | mit | 6,570 |
try:
    # Prefer the real C-implemented unicodedata module when available.
    from unicodedata import *
except ImportError:
    # Fallback for builds without unicodedata (Python 2 era: note the
    # 'unicode' type check below). Only covers the Latin-1 range.
    def category(ch):
        """
        ASCII only implementation: return the Unicode general-category
        code (e.g. 'Lu', 'Nd') for a single unicode character, or 'Co'
        for anything outside the table.
        """
        if type(ch) is not unicode:
            raise TypeError
        if len(ch) != 1:
            raise TypeError
        return _categories.get(ord(ch), 'Co') # "Other, private use"

    # General-category codes for code points 0-254 (Latin-1 range).
    _categories = {
        0: 'Cc', 1: 'Cc', 2: 'Cc', 3: 'Cc', 4: 'Cc', 5: 'Cc',
        6: 'Cc', 7: 'Cc', 8: 'Cc', 9: 'Cc', 10: 'Cc', 11: 'Cc',
        12: 'Cc', 13: 'Cc', 14: 'Cc', 15: 'Cc', 16: 'Cc', 17: 'Cc',
        18: 'Cc', 19: 'Cc', 20: 'Cc', 21: 'Cc', 22: 'Cc', 23: 'Cc',
        24: 'Cc', 25: 'Cc', 26: 'Cc', 27: 'Cc', 28: 'Cc', 29: 'Cc',
        30: 'Cc', 31: 'Cc', 32: 'Zs', 33: 'Po', 34: 'Po', 35: 'Po',
        36: 'Sc', 37: 'Po', 38: 'Po', 39: 'Po', 40: 'Ps', 41: 'Pe',
        42: 'Po', 43: 'Sm', 44: 'Po', 45: 'Pd', 46: 'Po', 47: 'Po',
        48: 'Nd', 49: 'Nd', 50: 'Nd', 51: 'Nd', 52: 'Nd', 53: 'Nd',
        54: 'Nd', 55: 'Nd', 56: 'Nd', 57: 'Nd', 58: 'Po', 59: 'Po',
        60: 'Sm', 61: 'Sm', 62: 'Sm', 63: 'Po', 64: 'Po', 65: 'Lu',
        66: 'Lu', 67: 'Lu', 68: 'Lu', 69: 'Lu', 70: 'Lu', 71: 'Lu',
        72: 'Lu', 73: 'Lu', 74: 'Lu', 75: 'Lu', 76: 'Lu', 77: 'Lu',
        78: 'Lu', 79: 'Lu', 80: 'Lu', 81: 'Lu', 82: 'Lu', 83: 'Lu',
        84: 'Lu', 85: 'Lu', 86: 'Lu', 87: 'Lu', 88: 'Lu', 89: 'Lu',
        90: 'Lu', 91: 'Ps', 92: 'Po', 93: 'Pe', 94: 'Sk', 95: 'Pc',
        96: 'Sk', 97: 'Ll', 98: 'Ll', 99: 'Ll', 100: 'Ll', 101: 'Ll',
        102: 'Ll', 103: 'Ll', 104: 'Ll', 105: 'Ll', 106: 'Ll', 107: 'Ll',
        108: 'Ll', 109: 'Ll', 110: 'Ll', 111: 'Ll', 112: 'Ll', 113: 'Ll',
        114: 'Ll', 115: 'Ll', 116: 'Ll', 117: 'Ll', 118: 'Ll', 119: 'Ll',
        120: 'Ll', 121: 'Ll', 122: 'Ll', 123: 'Ps', 124: 'Sm', 125: 'Pe',
        126: 'Sm', 127: 'Cc', 128: 'Cc', 129: 'Cc', 130: 'Cc', 131: 'Cc',
        132: 'Cc', 133: 'Cc', 134: 'Cc', 135: 'Cc', 136: 'Cc', 137: 'Cc',
        138: 'Cc', 139: 'Cc', 140: 'Cc', 141: 'Cc', 142: 'Cc', 143: 'Cc',
        144: 'Cc', 145: 'Cc', 146: 'Cc', 147: 'Cc', 148: 'Cc', 149: 'Cc',
        150: 'Cc', 151: 'Cc', 152: 'Cc', 153: 'Cc', 154: 'Cc', 155: 'Cc',
        156: 'Cc', 157: 'Cc', 158: 'Cc', 159: 'Cc', 160: 'Zs', 161: 'Po',
        162: 'Sc', 163: 'Sc', 164: 'Sc', 165: 'Sc', 166: 'So', 167: 'So',
        168: 'Sk', 169: 'So', 170: 'Ll', 171: 'Pi', 172: 'Sm', 173: 'Cf',
        174: 'So', 175: 'Sk', 176: 'So', 177: 'Sm', 178: 'No', 179: 'No',
        180: 'Sk', 181: 'Ll', 182: 'So', 183: 'Po', 184: 'Sk', 185: 'No',
        186: 'Ll', 187: 'Pf', 188: 'No', 189: 'No', 190: 'No', 191: 'Po',
        192: 'Lu', 193: 'Lu', 194: 'Lu', 195: 'Lu', 196: 'Lu', 197: 'Lu',
        198: 'Lu', 199: 'Lu', 200: 'Lu', 201: 'Lu', 202: 'Lu', 203: 'Lu',
        204: 'Lu', 205: 'Lu', 206: 'Lu', 207: 'Lu', 208: 'Lu', 209: 'Lu',
        210: 'Lu', 211: 'Lu', 212: 'Lu', 213: 'Lu', 214: 'Lu', 215: 'Sm',
        216: 'Lu', 217: 'Lu', 218: 'Lu', 219: 'Lu', 220: 'Lu', 221: 'Lu',
        222: 'Lu', 223: 'Ll', 224: 'Ll', 225: 'Ll', 226: 'Ll', 227: 'Ll',
        228: 'Ll', 229: 'Ll', 230: 'Ll', 231: 'Ll', 232: 'Ll', 233: 'Ll',
        234: 'Ll', 235: 'Ll', 236: 'Ll', 237: 'Ll', 238: 'Ll', 239: 'Ll',
        240: 'Ll', 241: 'Ll', 242: 'Ll', 243: 'Ll', 244: 'Ll', 245: 'Ll',
        246: 'Ll', 247: 'Sm', 248: 'Ll', 249: 'Ll', 250: 'Ll', 251: 'Ll',
        252: 'Ll', 253: 'Ll', 254: 'Ll'
    }
| jedie/pypyjs-standalone | website/js/pypy.js-0.3.0/lib/modules/pyrepl/unicodedata_.py | Python | mit | 3,391 |
""""ML-ENSEMBLE
Testing suite for Layer and Transformer
"""
from mlens.testing import Data, EstimatorContainer, get_layer, run_layer
# Each case builds a temporal-stacking Layer via the mlens testing helpers
# (get_layer arguments: job, backend, cv-strategy, proba flag, preprocessing
# flag, plus temporal window/step_size) and executes it with run_layer.
def test_fit():
    """[Parallel | Layer | Multiprocessing | Temporal | No Proba | No Prep] test fit"""
    args = get_layer('fit', 'multiprocessing', 'temporal', False, False, window=2, step_size=3)
    run_layer(*args)
def test_predict():
    """[Parallel | Layer | Multiprocessing | Temporal | No Proba | No Prep] test predict"""
    args = get_layer('predict', 'multiprocessing', 'temporal', False, False, window=2, step_size=3)
    run_layer(*args)
def test_transform():
    """[Parallel | Layer | Multiprocessing | Temporal | No Proba | No Prep] test transform"""
    args = get_layer('transform', 'multiprocessing', 'temporal', False, False, window=2, step_size=3)
    run_layer(*args)
def test_fit_prep():
    """[Parallel | Layer | Multiprocessing | Temporal | No Proba | Prep] test fit"""
    args = get_layer('fit', 'multiprocessing', 'temporal', False, True, window=2, step_size=3)
    run_layer(*args)
def test_predict_prep():
    """[Parallel | Layer | Multiprocessing | Temporal | No Proba | Prep] test predict"""
    args = get_layer('predict', 'multiprocessing', 'temporal', False, True, window=2, step_size=3)
    run_layer(*args)
def test_transform_prep():
    """[Parallel | Layer | Multiprocessing | Temporal | No Proba | Prep] test transform"""
    args = get_layer('transform', 'multiprocessing', 'temporal', False, True, window=2, step_size=3)
    run_layer(*args)
def test_fit_proba():
    """[Parallel | Layer | Multiprocessing | Temporal | Proba | No Prep] test fit"""
    args = get_layer('fit', 'multiprocessing', 'temporal', True, False, window=2, step_size=3)
    run_layer(*args)
def test_predict_proba():
    """[Parallel | Layer | Multiprocessing | Temporal | Proba | No Prep] test predict"""
    args = get_layer('predict', 'multiprocessing', 'temporal', True, False, window=2, step_size=3)
    run_layer(*args)
def test_transform_proba():
    """[Parallel | Layer | Multiprocessing | Temporal | Proba | No Prep] test transform"""
    args = get_layer('transform', 'multiprocessing', 'temporal', True, False, window=2, step_size=3)
    run_layer(*args)
def test_fit_prep_proba():
    """[Parallel | Layer | Multiprocessing | Temporal | Proba | Prep] test fit"""
    args = get_layer('fit', 'multiprocessing', 'temporal', True, True, window=2, step_size=3)
    run_layer(*args)
def test_predict_prep_proba():
    """[Parallel | Layer | Multiprocessing | Temporal | Proba | Prep] test predict"""
    args = get_layer('predict', 'multiprocessing', 'temporal', True, True, window=2, step_size=3)
    run_layer(*args)
def test_transform_prep_proba():
    """[Parallel | Layer | Multiprocessing | Temporal | Proba | Prep] test transform"""
    args = get_layer('transform', 'multiprocessing', 'temporal', True, True, window=2, step_size=3)
    run_layer(*args)
def test_fit_fp():
    """[Parallel | Layer | Multiprocessing | Temporal | No Proba | No Prep] test feature prop fit"""
    args = get_layer('fit', 'multiprocessing', 'temporal', False, False, feature_prop=2, window=2, step_size=3)
    run_layer(*args)
def test_predict_fp():
    """[Parallel | Layer | Multiprocessing | Temporal | No Proba | No Prep] test feature prop predict"""
    args = get_layer('predict', 'multiprocessing', 'temporal', False, False, feature_prop=2, window=2, step_size=3)
    run_layer(*args)
def test_transform_fp():
    """[Parallel | Layer | Multiprocessing | Temporal | No Proba | No Prep] test feature prop transform"""
    args = get_layer('transform', 'multiprocessing', 'temporal', False, False, feature_prop=2, window=2, step_size=3)
    run_layer(*args)
| flennerhag/mlens | mlens/parallel/tests/test_b3_layer_temporal.py | Python | mit | 3,745 |
#!/usr/bin/env python3
"""Functions for reading and processing estimator files.
Examples are temperatures, populations, and heating/cooling rates.
"""
# import math
import argparse
import math
import multiprocessing
import os
# import re
import sys
from collections import namedtuple
from functools import lru_cache, partial, reduce
# from itertools import chain
from pathlib import Path
import numpy as np
import pandas as pd
import scipy.signal
from astropy import constants as const
import artistools as at
import artistools.nltepops
# Axis units keyed by estimator variable name; get_units_string() also
# matches on the prefix before the first underscore (e.g. 'heating_coll').
variableunits = {
    'time': 'days',
    'TR': 'K',
    'Te': 'K',
    'TJ': 'K',
    'nne': 'e-/cm3',
    'heating': 'erg/s/cm3',
    'heating_dep/total_dep': 'Ratio',
    'cooling': 'erg/s/cm3',
    'velocity': 'km/s',
    'velocity_outer': 'km/s',
}
# Full axis labels for variables whose bare unit string is not descriptive.
variablelongunits = {
    'TR': 'Temperature [K]',
    'Te': 'Temperature [K]',
    'TJ': 'Temperature [K]',
}
# Pretty display names (matplotlib math text) for plot labels/legends.
dictlabelreplacements = {
    'lognne': 'Log nne',
    'Te': 'T$_e$',
    'TR': 'T$_R$'
}
def apply_filters(xlist, ylist, args):
    """Apply the smoothing filter selected on the command line (if any) to
    ylist, returning the (xlist, ylist) pair unchanged otherwise."""
    smoothing = at.get_filterfunc(args)
    if smoothing is None:
        return xlist, ylist
    return xlist, smoothing(ylist)
def get_ionrecombrates_fromfile(filename):
    """WARNING: copy pasted from artis-atomic! replace with a package import soon ionstage is the lower ion stage.

    Parse a recombination-rate table file into a DataFrame with columns
    logT, RRC_low_n, RRC_total.
    """
    # NOTE(review): placeholder-less f-string — presumably meant to print
    # the filename being read; confirm against upstream artis-atomic.
    print(f'Reading (unknown)')
    header_row = []
    with open(filename, 'r') as filein:
        while True:
            # Skip forward to the 'TOTAL RECOMBINATION RATE' marker; the
            # column header sits three lines below it.
            line = filein.readline()
            if line.strip().startswith('TOTAL RECOMBINATION RATE'):
                line = filein.readline()
                line = filein.readline()
                header_row = filein.readline().strip().replace(' n)', '-n)').split()
                break
    if not header_row:
        print("ERROR: no header found")
        sys.exit()
    index_logt = header_row.index('log(T)')
    index_low_n = header_row.index('RRC(low-n)')
    index_tot = header_row.index('RRC(total)')
    recomb_tuple = namedtuple("recomb_tuple", ['logT', 'RRC_low_n', 'RRC_total'])
    records = []
    # NOTE(review): as indented here, filein is iterated after the 'with'
    # block has closed the file, which would raise ValueError at runtime;
    # in upstream artistools this loop sits inside the 'with'. Verify.
    for line in filein:
        row = line.split()
        if row:
            if len(row) != len(header_row):
                print('Row contains wrong number of items for header:')
                print(header_row)
                print(row)
                sys.exit()
            records.append(recomb_tuple(
                *[float(row[index]) for index in [index_logt, index_low_n, index_tot]]))
    dfrecombrates = pd.DataFrame.from_records(records, columns=recomb_tuple._fields)
    return dfrecombrates
def get_units_string(variable):
    """Return ' [unit]' for a known variable name (or its underscore
    prefix, e.g. 'heating_coll' -> 'heating'), else an empty string."""
    for key in (variable, variable.split('_')[0]):
        if key in variableunits:
            return f' [{variableunits[key]}]'
    return ''
def get_ylabel(variable):
    """Return a y-axis label: the long-form label if one is defined,
    otherwise '[unit]' matched on the variable name or its underscore
    prefix, otherwise an empty string."""
    if variable in variablelongunits:
        return variablelongunits[variable]
    for key in (variable, variable.split('_')[0]):
        if key in variableunits:
            return f'[{variableunits[key]}]'
    return ''
def parse_estimfile(estfilepath, modelpath, get_ion_values=True, get_heatingcooling=True):
    """Generate timestep, modelgridindex, dict from estimator file.

    Yields one (timestep, modelgridindex, estimblock) tuple per cell block
    found in the (possibly gzipped) estimator file.
    """
    # itstep = at.get_inputparams(modelpath)['itstep']
    with at.zopen(estfilepath, 'rt') as estimfile:
        timestep = -1
        modelgridindex = -1
        estimblock = {}
        for line in estimfile:
            row = line.split()
            if not row:
                continue
            if row[0] == 'timestep':
                # yield the previous block before starting a new one
                if timestep >= 0 and modelgridindex >= 0:
                    yield timestep, modelgridindex, estimblock
                timestep = int(row[1])
                # if timestep > itstep:
                #     print(f"Dropping estimator data from timestep {timestep} and later (> itstep {itstep})")
                #     # itstep in input.txt is updated by ARTIS at every timestep, so the data beyond here
                #     # could be half-written to disk and cause parsing errors
                #     return
                modelgridindex = int(row[3])
                # print(f'Timestep {timestep} cell {modelgridindex}')
                estimblock = {}
                emptycell = (row[4] == 'EMPTYCELL')
                estimblock['emptycell'] = emptycell
                if not emptycell:
                    # will be TR, Te, W, TJ, nne (name/value pairs on the line)
                    for variablename, value in zip(row[4::2], row[5::2]):
                        estimblock[variablename] = float(value)
                    estimblock['lognne'] = math.log10(estimblock['nne']) if estimblock['nne'] > 0 else float('-inf')
            elif row[1].startswith('Z=') and get_ion_values:
                # Per-ion rows, e.g. "populations Z=26 1: ... 2: ..."
                variablename = row[0]
                if row[1].endswith('='):
                    # atomic number is in the next token ("Z= 26")
                    atomic_number = int(row[2])
                    startindex = 3
                else:
                    atomic_number = int(row[1].split('=')[1])
                    startindex = 2
                estimblock.setdefault(variablename, {})
                for ion_stage_str, value in zip(row[startindex::2], row[startindex + 1::2]):
                    if ion_stage_str.strip() in ['SUM:', '(or']:
                        continue
                    try:
                        ion_stage = int(ion_stage_str.rstrip(':'))
                    except ValueError:
                        print(f'Cannot parse row: {row}')
                        continue
                    value_thision = float(value.rstrip(','))
                    estimblock[variablename][(atomic_number, ion_stage)] = value_thision
                    if variablename in ['Alpha_R*nne', 'AlphaR*nne']:
                        # store the rate coefficient with the nne factor removed
                        estimblock.setdefault('Alpha_R', {})
                        estimblock['Alpha_R'][(atomic_number, ion_stage)] = value_thision / estimblock['nne']
                    else:  # variablename == 'populations':
                        # contribute the ion population to the element population
                        estimblock[variablename].setdefault(atomic_number, 0.)
                        estimblock[variablename][atomic_number] += value_thision
                if variablename == 'populations':
                    # contribute the element population to the total population
                    estimblock['populations'].setdefault('total', 0.)
                    estimblock['populations']['total'] += estimblock['populations'][atomic_number]
                    estimblock['nntot'] = estimblock['populations']['total']
            elif row[0] == 'heating:' and get_heatingcooling:
                for heatingtype, value in zip(row[1::2], row[2::2]):
                    key = 'heating_' + heatingtype if not heatingtype.startswith('heating_') else heatingtype
                    estimblock[key] = float(value)
                # derive total deposition from whichever ratio column exists
                if 'heating_gamma/gamma_dep' in estimblock and estimblock['heating_gamma/gamma_dep'] > 0:
                    estimblock['gamma_dep'] = (
                        estimblock['heating_gamma'] /
                        estimblock['heating_gamma/gamma_dep'])
                elif 'heating_dep/total_dep' in estimblock and estimblock['heating_dep/total_dep'] > 0:
                    estimblock['total_dep'] = (
                        estimblock['heating_dep'] /
                        estimblock['heating_dep/total_dep'])
            elif row[0] == 'cooling:' and get_heatingcooling:
                for coolingtype, value in zip(row[1::2], row[2::2]):
                    estimblock['cooling_' + coolingtype] = float(value)
    # reached the end of file
    if timestep >= 0 and modelgridindex >= 0:
        yield timestep, modelgridindex, estimblock
@at.diskcache(ignorekwargs=['printfilename'], quiet=False, funcdepends=parse_estimfile, savegzipped=True)
def read_estimators_from_file(modelpath, folderpath, arr_velocity_outer, mpirank, printfilename=False,
                              get_ion_values=True, get_heatingcooling=True):
    """Read one MPI rank's estimators_XXXX.out (or .out.gz) into a dict
    keyed by (timestep, modelgridindex). Returns {} if the file is absent."""
    estimators_thisfile = {}
    estimfilename = f'estimators_{mpirank:04d}.out'
    estfilepath = Path(folderpath, estimfilename)
    if not estfilepath.is_file():
        # fall back to the gzip-compressed variant
        estfilepath = Path(folderpath, estimfilename + '.gz')
        if not estfilepath.is_file():
            print(f'Warning: Could not find {estfilepath.relative_to(modelpath.parent)}')
            return {}
    if printfilename:
        filesize = Path(estfilepath).stat().st_size / 1024 / 1024
        print(f'Reading {estfilepath.relative_to(modelpath.parent)} ({filesize:.2f} MiB)')
    for fileblock_timestep, fileblock_modelgridindex, file_estimblock in parse_estimfile(
            estfilepath, modelpath, get_ion_values=get_ion_values, get_heatingcooling=get_heatingcooling):
        # attach the cell's outer velocity for convenience when plotting
        file_estimblock['velocity_outer'] = arr_velocity_outer[fileblock_modelgridindex]
        file_estimblock['velocity'] = file_estimblock['velocity_outer']
        estimators_thisfile[(fileblock_timestep, fileblock_modelgridindex)] = file_estimblock
    return estimators_thisfile
@lru_cache(maxsize=16)
@at.diskcache(savegzipped=True, funcdepends=[read_estimators_from_file, parse_estimfile])
def read_estimators(modelpath, modelgridindex=None, timestep=None, get_ion_values=True, get_heatingcooling=True):
    """Read estimator files into a nested dictionary structure.

    Speed it up by only retrieving estimators for a particular timestep(s) or modelgrid cells.
    """
    # normalize modelgridindex to a tuple of cells to match ([] = no filter)
    if modelgridindex is None:
        match_modelgridindex = []
    elif hasattr(modelgridindex, '__iter__'):
        match_modelgridindex = tuple(modelgridindex)
    else:
        match_modelgridindex = (modelgridindex,)
    # -1 means "all cells"
    if -1 in match_modelgridindex:
        match_modelgridindex = []
    # normalize timestep the same way
    if timestep is None:
        match_timestep = []
    else:
        match_timestep = tuple(timestep) if hasattr(timestep, '__iter__') else (timestep,)
    # print(f" matching cells {match_modelgridindex} and timesteps {match_timestep}")
    modeldata, _, _ = at.inputmodel.get_modeldata(modelpath)
    arr_velocity_outer = tuple(list([float(v) for v in modeldata['velocity_outer'].values]))
    mpiranklist = at.get_mpiranklist(modelpath, modelgridindex=match_modelgridindex)
    # only announce individual files when there are few of them
    printfilename = len(mpiranklist) < 10
    estimators = {}
    for folderpath in at.get_runfolders(modelpath, timesteps=match_timestep):
        print(f'Reading {len(list(mpiranklist))} estimator files in {folderpath.relative_to(Path(modelpath).parent)}')
        processfile = partial(read_estimators_from_file, modelpath, folderpath, arr_velocity_outer,
                              get_ion_values=get_ion_values, get_heatingcooling=get_heatingcooling,
                              printfilename=printfilename)
        # parse each rank's file, in parallel if configured
        if at.num_processes > 1:
            with multiprocessing.Pool(processes=at.num_processes) as pool:
                arr_rankestimators = pool.map(processfile, mpiranklist)
                pool.close()
                pool.join()
                pool.terminate()
        else:
            arr_rankestimators = [processfile(rank) for rank in mpiranklist]
        for mpirank, estimators_thisfile in zip(mpiranklist, arr_rankestimators):
            dupekeys = list(sorted([k for k in estimators_thisfile if k in estimators]))
            for k in dupekeys:
                # dropping the lowest timestep is normal for restarts. Only warn about other cases
                if k[0] != dupekeys[0][0]:
                    filepath = Path(folderpath, f'estimators_{mpirank:04d}.out')
                    print(f'WARNING: Duplicate estimator block for (timestep, mgi) key {k}. '
                          f'Dropping block from {filepath}')
                del estimators_thisfile[k]
            estimators.update(estimators_thisfile)
    return estimators
def get_averaged_estimators(modelpath, estimators, timesteps, modelgridindex, keys, avgadjcells=0):
    """Get the average of estimators[(timestep, modelgridindex)][keys[0]]...[keys[-1]] across timesteps.

    The average is time-weighted by timestep width; avgadjcells widens the
    average over neighboring model grid cells. Dict values are averaged
    recursively, key by key.
    """
    if isinstance(keys, str):
        keys = [keys]
    # reduce(lambda d, k: d[k], keys, dictionary) returns dictionary[keys[0]][keys[1]]...[keys[-1]]
    # applying all keys in the keys list
    # if single timestep, no averaging needed
    if not hasattr(timesteps, '__iter__'):
        return reduce(lambda d, k: d[k], [(timesteps, modelgridindex)] + keys, estimators)
    firsttimestepvalue = reduce(lambda d, k: d[k], [(timesteps[0], modelgridindex)] + keys, estimators)
    if isinstance(firsttimestepvalue, dict):
        # recurse into nested dicts (e.g. per-ion populations)
        dictout = {k: get_averaged_estimators(modelpath, estimators, timesteps, modelgridindex, keys + [k])
                   for k in firsttimestepvalue.keys()}
        return dictout
    else:
        tdeltas = at.get_timestep_times_float(modelpath, loc='delta')
        # NOTE(review): zip pairs the *selected* timesteps with the first
        # len(timesteps) global timestep widths — correct only if timesteps
        # start at 0 and are contiguous; verify against callers.
        valuesum = 0
        tdeltasum = 0
        for timestep, tdelta in zip(timesteps, tdeltas):
            for mgi in range(modelgridindex - avgadjcells, modelgridindex + avgadjcells + 1):
                try:
                    valuesum += reduce(lambda d, k: d[k], [(timestep, mgi)] + keys, estimators) * tdelta
                    tdeltasum += tdelta
                except KeyError:
                    # missing (timestep, cell) entries are simply skipped
                    pass
        return valuesum / tdeltasum
    # except KeyError:
    #     if (timestep, modelgridindex) in estimators:
    #         print(f'Unknown x variable: {xvariable} for timestep {timestep} in cell {modelgridindex}')
    #     else:
    #         print(f'No data for cell {modelgridindex} at timestep {timestep}')
    #     print(estimators[(timestep, modelgridindex)])
    #     sys.exit()
def get_averageionisation(populations, atomic_number):
    """Return the mean ionisation (free electrons contributed per atom) of an element.

    populations -- dict with (atomic_number, ion_stage) tuple keys for ion
        populations and a bare atomic_number key for the elemental total.
    atomic_number -- element (Z) to average over.

    Returns NaN if no ion of the element is present in the dict.
    """
    free_electron_weighted_pop_sum = 0.
    found = False
    # iterate items() to avoid a second lookup per key; the original also kept
    # an unused running total (popsum), removed here
    for key, pop in populations.items():
        if isinstance(key, tuple) and key[0] == atomic_number:
            found = True
            ion_stage = key[1]
            # ion_stage 1 is neutral, so each ion contributes (ion_stage - 1) free electrons
            free_electron_weighted_pop_sum += pop * (ion_stage - 1)
    if not found:
        return float('NaN')
    # normalise by the total elemental population stored under the bare atomic number
    return free_electron_weighted_pop_sum / populations[atomic_number]
def get_averageexcitation(modelpath, modelgridindex, timestep, atomic_number, ion_stage, T_exc):
    """Return the population-weighted mean level excitation energy [eV] of an ion.

    NLTE level populations are read for the given cell and timestep.  Any
    superlevel population (rows with level < 0) is spread over the remaining
    levels with Boltzmann weights at temperature T_exc [K].
    Returns NaN when no NLTE populations are available.
    """
    import artistools.nltepops
    dfnltepops = at.nltepops.read_files(modelpath, modelgridindex=modelgridindex, timestep=timestep)
    adata = at.atomic.get_levels(modelpath)
    # energy level list for the requested ion
    ionlevels = adata.query('Z == @atomic_number and ion_stage == @ion_stage').iloc[0].levels
    energypopsum = 0
    ionpopsum = 0
    if dfnltepops.empty:
        return float('NaN')
    else:
        dfnltepops_ion = dfnltepops.query(
            'modelgridindex==@modelgridindex and timestep==@timestep and Z==@atomic_number & ion_stage==@ion_stage')
        k_b = const.k_B.to('eV / K').value
        ionpopsum = dfnltepops_ion.n_NLTE.sum()
        # sum of (level energy * population) over resolved levels (level >= 0)
        energypopsum = dfnltepops_ion[dfnltepops_ion.level >= 0].eval(
            '@ionlevels.iloc[level].energy_ev.values * n_NLTE').sum()
        try:
            # rows with level < 0 appear to mark the superlevel, if the ion has one
            superlevelrow = dfnltepops_ion[dfnltepops_ion.level < 0].iloc[0]
            # first level index folded into the superlevel
            levelnumber_sl = dfnltepops_ion.level.max() + 1
            # Boltzmann-weighted mean energy of the superlevel's internal levels at T_exc
            energy_boltzfac_sum = ionlevels.iloc[levelnumber_sl:].eval(
                'energy_ev * g * exp(- energy_ev / @k_b / @T_exc)').sum()
            boltzfac_sum = ionlevels.iloc[levelnumber_sl:].eval('g * exp(- energy_ev / @k_b / @T_exc)').sum()
            # adjust to the actual superlevel population from ARTIS
            energypopsum += energy_boltzfac_sum * superlevelrow.n_NLTE / boltzfac_sum
        except IndexError:
            # no superlevel
            pass
        return energypopsum / ionpopsum
| lukeshingles/artistools | artistools/estimators/estimators.py | Python | mit | 15,906 |
#!/usr/bin/env python
"""
Node managment
WORK IN PROGRESS
Some command may not work yet
"""
__author__ = "Peter Shipley"
import ISY
# from ISY.IsyExceptionClass import IsyError
class cmdException(Exception):
    """Raised for user-facing command syntax/usage errors."""

    def __init__(self, msg):
        super(cmdException, self).__init__(msg)
# module-wide verbosity level; changed at runtime by the VERBOSE command
verbose=0

# top-level command table: "COMMAND[, ALIAS...]" -> short help string.
# A trailing '*' marks commands that may not be implemented yet (see print_cmds).
commands_help = {
    "LINK, DISCOVER" : "place PLM into discover mode",
    "MV, RENAME" : "rename node",
    "RM, DEL, DELETE" : "delete node",
    "RESTORE" : "Restore node settings",
    "MD, NEWFOLDER, MKDIR" : "create new node folder",
    "NODE" : "send command to node",
    "RMDIR*" : "delete node folder",
    "ENABLE" : "enable node",
    "DISABLE" : "disable node",
    "LIST, LS" : "list nodes",
    "SCENE" : "add or delete node from scene",
    "FOLDER*" : "add or delete node from folder",
    "DEBUGLEVEL*" : "set/get ISY debug level",
    "REBOOT*" : "Reboot ISY",
    "HELP" : "print command list",
    "VERBOSE" : "set verbose",
    "ERROR" : "print last ISY error",
    "EXIT" : "exit program",
}

# help strings for the SCENE subcommands
scene_commands_help = {
    "DEL, DELETE" : "Delete Scene",
    "RM, REMOVE" : "Remove Node from scene",
    "ADD" : "Add node to scene",
    "NEW" : "Create new scene",
    "LS, LIST" : "List Scenes"
}

# help for FOLDER subcommands (not populated yet)
folder_commands_help = {
}

# help for PROG subcommands (not populated yet)
prog_commands_help = {
}
def doit(isy):
    """Command-loop entry point.

    Runs the single command given on the command line (isy.unknown_args),
    or, when no arguments were given, drops into an interactive
    read-eval loop reading commands from stdin until EOF (Ctrl-D).
    """
    interactive = False
    argv = isy.unknown_args[:]
    if len(argv) == 0:
        print "Entering interactive mode"
        import shlex
        interactive = True
    while 1:
        try:
            if interactive is True:
                # prompt (trailing comma suppresses the newline)
                print "isylib> ",
                argv = shlex.split(raw_input())
            if argv is None or len(argv) == 0:
                continue
            run_comm(isy, argv)
        except EOFError:
            # Ctrl-D ends the interactive session
            interactive = False
            break
        except cmdException as e:
            # user error (bad syntax); report and keep looping
            print e
        except ISY.IsyError as e:
            print "IsyError :"
            print "\t", e
        finally:
            # in one-shot (non-interactive) mode run the command exactly once
            if interactive is not True:
                break
def run_comm(isy, argv):
    """Parse and execute one command.

    argv[0] is the (case-insensitive) command name; the remainder are its
    arguments.  Handlers raise cmdException on bad syntax (an argument of
    '?' requests usage help); unknown commands just print a warning.
    """
    global verbose
    cmd = argv.pop(0).upper()
    if cmd in ["LINK", "DISCOVER"]:
        link_nodes(isy, cmd, argv)
    elif cmd in [ "NODE" ]:
        do_node(isy, cmd, argv)
    elif cmd in ["SCENE"]:
        do_scene(isy, cmd, argv)
    elif cmd in [ "PROG" ]:
        do_prog(isy, cmd, argv)
    elif cmd in ["FOLDER", "DIR"]:
        do_folder(isy, cmd, argv)
    # the following are shortcuts
    elif cmd in [ "LS", "LIST" ]:
        do_list_node(isy, cmd, argv)
    elif cmd in [ "RM", "DEL", "DELETE"]:
        do_del_node(isy, cmd, argv)
    elif cmd in ["MV", "RENAME"]:
        do_rename_nodes(isy ,cmd, argv)
    elif cmd in ["RESTORE"]:
        do_restore(isy, cmd, argv)
    elif cmd in ["MD", "NEWFOLDER", "MKDIR"]:
        # NOTE(review): folder creation is stubbed -- only prints the name
        if ( len(argv) > 0 and argv[0] != "?" ):
            foldername = argv.pop(0)
            print "newfolder {!s}".format(foldername)
        else:
            raise cmdException("Syntax :\n\t{!s} <foldername>".format(cmd))
    elif cmd in ["RMDIR"]:
        # not implemented yet
        pass
    elif cmd in ["ENABLE", "EN" ]:
        do_node_enable(isy, cmd, argv)
    elif cmd in ["DISABLE","DIS"]:
        if ( len(argv) > 0 and argv[0] != "?" ):
            nodeid = argv.pop(0)
            isy.node_enable(nodeid, enable=0)
        else:
            raise cmdException("Syntax :\n\t{!s} <node_id>".format(cmd))
    # The following are debug and maintance
    elif cmd in ["ERROR", "ERR"]:
        # print last ISY error
        pass
    elif cmd in ["VERBOSE"]:
        if ( len(argv) > 0 ):
            if (argv[0] == "?"):
                raise cmdException("Syntax :\n\t{!s} [level]".format(cmd))
            else:
                verbose = int(argv[0])
        # with no argument, just report the current level
        print "verbose = ", verbose
    elif cmd in ["REBOOT"]:
        do_reboot(isy)
    elif cmd in ["DEBUGLEVEL", "DBG"]:
        do_debuglevel(isy)
    elif cmd in ["HELP", "?"]:
        print_cmds()
    elif cmd in ["EXIT"]:
        if ( len(argv) > 0 and argv[0] != "?"):
            raise cmdException("Syntax :\n\t{!s}".format(cmd))
        else:
            exit(0)
    # DEBUG
    elif cmd in ["TEST"]:
        if ( len(argv) > 0 and argv[0] != "?"):
            do_test(isy, cmd, argv)
        else:
            raise cmdException("+\t{!s} <node_id>".format(cmd))
    else:
        print "Unknown command : ", cmd # str(" ").join(argv)
#
# TODO deal with MV node Folder
#
def do_rename_nodes(isy, cmd, argv):
    """
    rename node glue

    Usage: (MV|RENAME) <node_id> <new_name>
    """
    if ( len(argv) > 1 and argv[0] != "?" ):
        old = argv.pop(0)
        new = argv.pop(0)
        print cmd, old, new
        isy.rename(old, new)
    else:
        raise cmdException("Syntax :\n\t{!s} <node_id> <new_name>".format(cmd))
# DEBUG
def do_test(isy, cmd, argv):
    """Debug helper: echo the arguments this command was dispatched with."""
    # NOTE(review): only an argv of length exactly 1 is rejected;
    # an empty argv falls through to the print branch -- confirm intended
    if len(argv) == 1:
        raise cmdException("Missing Arg:\n\t{!s} <node_id> <new_name>".format(cmd))
    else:
        print "TEST ", str(", ").join(argv)
def link_nodes(isy, cmd, argv):
    """
    Link mode glue

    LINK with no arguments runs an interactive linking session;
    LINK START / LINK STOP toggles PLM discovery mode.
    """
    if len(argv) == 0:
        # NOTE(review): do_interactive_link() ends with exit(0), so the
        # pop below is only reached when argv is non-empty
        do_interactive_link(isy)
    cmd = argv.pop(0).upper()
    if cmd in [ "START" ]:
        isy.node_discover_start()
    elif cmd in [ "STOP" ]:
        isy.node_discover_stop()
    elif cmd in [ "?" ]:
        raise cmdException("Syntax :\n\tLINK [START|STOP]\n"
                + "\tPlace PLM into discovery mode\n" )
    # NOTE(review): unconditional exit -- the whole program terminates after
    # any LINK command; confirm this is intended
    exit(0)
def do_interactive_link(isy):
    """Run one interactive PLM linking session, report any nodes that
    appeared during it, then exit the program."""
    isy.load_nodes()
    # snapshot of node addresses before linking
    old_node_set = set(isy.node_addrs())
    print "Entering Linking Mode"
    isy.node_discover()
    raw_input("Press Enter to continue...")
    isy.node_discover_cancel()
    print "Exited Linking Mode"
    isy.load_nodes(reload=1)
    updated_node_set = set(isy.node_addrs() )
    # nodes that appeared during the linking session
    new_node_set = updated_node_set - old_node_set
    print "New Nodes : ", str(", ").join(new_node_set)
    exit(0)
def do_del_node(isy, cmd, argv):
    """
    Delete node glue

    WORK IN PROGRESS: the actual delete call is stubbed out -- this only
    prints the literal string "isy.node_del(nodeid)".
    """
    if ( len(argv) == 0 or argv[0] == '?' or len(argv) > 1):
        raise cmdException("Syntax :\n\t{!s} <node_id>".format(cmd))
    nodeid = argv.pop(0)
    print "isy.node_del(nodeid)"
def do_restore(isy, cmd, argv):
    """
    restore node glue

    WORK IN PROGRESS: the restore calls are stubbed out (print only).
    Use 'ALL' as the node id to restore every node.
    """
    if ( len(argv) > 0 and argv[0] != "?" ):
        nodeid = argv.pop(0)
        if nodeid.upper() == "ALL":
            print "isy.node_restore_all(nodeid)"
        else :
            print "isy.node_restore(nodeid)"
    else:
        raise cmdException("Syntax :\n\t{!s} <node_id>\n\tto restore all nodes, use 'ALL' as node_id\n".format(cmd))
def do_node_enable(isy, cmd, argv):
    """Enable a node: consume a node id from argv and enable it on the ISY."""
    # guard clause: require one non-help argument
    if not argv or argv[0] == "?":
        raise cmdException("Syntax :\n\t{!s} <node_id>".format(cmd))
    node_address = argv.pop(0)
    isy.node_enable(node_address, enable=1)
def do_node(isy, cmd, argv):
    """Dispatch a NODE subcommand to its handler function."""
    subcommand_table = {
        "ENABLE": do_node_enable,
        "LS": do_list_node,
        "LIST": do_list_node,
        "MV": do_rename_nodes,
        "RENAME": do_rename_nodes,
        "DEL": do_del_node,
        "RESTORE": do_restore,
        "ON": do_node_on,
        "OFF": do_node_off,
    }
    # no args (or a help request): show the available subcommands
    if not argv or argv[0] == "?":
        cmdlist = ", ".join(subcommand_table.keys())
        raise cmdException("Syntax :\n\t{!s} <command> [node_id]\n\tAvalible commands :\n\t\t{!s}\n".format(cmd, cmdlist))
    subcmd = argv.pop(0).upper()
    if subcmd in subcommand_table:
        subcommand_table[subcmd](isy, subcmd, argv)
def do_nodes(isy, cmd, argv):
    """Parse a node subcommand from argv and dispatch it.

    argv: [subcmd, ...args].  Raises cmdException when no subcommand (or
    '?') is given, or when the subcommand is unknown.
    """
    if ( len(argv) == 0 or ( len(argv) > 0 and argv[0] == "?") ):
        raise cmdException("Syntax :\n\t{!s} <node_id>\n".format(cmd))
    subcmd = argv.pop(0).upper()
    if subcmd in [ "ENABLE" ]:
        do_node_enable(isy, subcmd, argv)
    elif subcmd in [ "LS", "LIST" ]:
        do_list_node(isy, subcmd, argv, nodetype=("node"))
    elif subcmd in ["MV", "RENAME"]:
        do_rename_nodes(isy, subcmd, argv)
    elif subcmd in [ "RM", "DEL", "DELETE"]:
        # fixed: originally tested 'cmd' (the parent command) here, which
        # made this branch (and the ones below) unreachable
        do_del_node(isy, subcmd, argv)
    elif subcmd in ["RESTORE"]:
        do_restore(isy, subcmd, argv)
    elif subcmd in ["ON"]:
        nodeid = argv.pop(0)
        isy.node_comm( nodeid, "ON")
    elif subcmd in ["OFF"]:
        nodeid = argv.pop(0)
        # fixed: original sent "ON" here (copy/paste bug)
        isy.node_comm( nodeid, "OFF")
    else:
        raise cmdException("Syntax :\n\t{!s} cmd <node_id>\n".format(cmd))
def do_node_on(isy, cmd, argv):
    """Turn the node named by argv[0] ON.

    NOTE(review): no empty-argv / '?' guard -- pop raises IndexError when
    called without an argument.
    """
    nodeid = argv.pop(0)
    print "TURN", nodeid, "ON"
    isy.node_comm( nodeid, "ON")
def do_node_off(isy, cmd, argv):
    """Turn the node named by argv[0] OFF.

    NOTE(review): no empty-argv / '?' guard -- pop raises IndexError when
    called without an argument.
    """
    nodeid = argv.pop(0)
    print "TURN", nodeid, "OFF"
    isy.node_comm( nodeid, "OFF")
def do_prog(isy, cmd, argv):
    """Program management commands -- not implemented yet."""
    pass
def do_folder(isy, cmd, argv):
    """Folder management commands -- not implemented yet."""
    pass
def do_scene(isy, cmd, argv):
    """Dispatch a SCENE subcommand (ADD/DEL/NEW/LS) to its handler."""
    # no args (or a help request): show the scene subcommand help
    if not argv or argv[0] == "?":
        print_cmds(scene_commands_help)
        raise cmdException("Syntax :\n\t{!s} cmd <scene_id>\n".format(cmd))
    subcmd = argv.pop(0).upper()
    if subcmd in ("ADD", "DELETE", "DEL", "RM"):
        do_scene_add(isy, subcmd, argv)
    elif subcmd == "NEW":
        do_scene_new(isy, subcmd, argv)
    elif subcmd in ("LS", "LIST"):
        do_list_node(isy, subcmd, argv, nodetype=("scene"))
    else:
        raise cmdException("Syntax :\n\t{!s} cmd <scene_id>\n".format(cmd))
def do_scene_new(isy, cmd, argv):
    """Create a new scene named by the single argument in argv.

    Raises cmdException unless exactly one non-'?' argument is supplied.
    """
    if len(argv) == 0 or argv[0] == "?" or len(argv) > 1:
        # the message is a literal: the original applied .format(cmd) to a
        # string with no placeholder (a no-op), removed here
        raise cmdException("Syntax :\n\tSCENE NEW <scene_id>\n")
    scene_name = argv.pop(0)
    # the return value was previously bound to an unused local
    isy.scene_new(sname=scene_name)
def do_scene_add(isy, cmd, argv):
    """
    add/del node to/from scene glue
    create new scene/group glue

    WORK IN PROGRESS -- NOTE(review), several apparent bugs:
      - the caller (do_scene) already consumed the ADD/DEL token into cmd,
        yet this function pops another op token from argv, so the first
        remaining argument is consumed as the operation, not the node id
      - the second 'if op in [DELETE...]' below is not an elif, so for
        op == "ADD" control falls into the final else and raises after
        printing
      - the actual scene_add_node/scene_del_node calls are commented out
    """
    if ( len(argv) == 0 or argv[0] == "?" or len(argv) < 3 ):
        print "1"
        op = "ERR"
    else:
        op = "ADD"
        nflag=0x10
        op = argv.pop(0).upper()
        if op in ["ADD", "DELETE", "DEL", "RM" ]:
            nodeid = argv.pop(0)
            sceneid = argv.pop(0)
        else:
            op = "ERR"
        # print "a do_scene", op, nodeid, "sceneid", nflag, "argv=", str(",").join(argv)
        if len(argv) > 0:
            # optional controller/responder flag (0x10 / 0x20)
            optflag = argv.pop(0).upper()
            if optflag in [ "CONTROLLER", "0X10", "16" ] or optflag.startswith("CON"):
                nflag=0x10
            elif optflag in [ "RESPONDER", "0X20", "32" ] or optflag.startswith("RES"):
                nflag=0x20
            else:
                op = "ERR"
    if op in [ "ADD" ]:
        # isy.scene_add_node( sceneid, nodeid, nflag)
        print "isy.scene_add_node", sceneid, nodeid, nflag
    if op in [ "DELETE", "DEL", "RM" ]:
        # isy.scene_del_node( sceneid, nodeid)
        print "isy.scene_del_node", sceneid, nodeid
    else:
        raise cmdException("Syntax :\n\t{!s} [ADD|DEL] <scene_id> <node_id> [controller|responder]\n".format(cmd))
def do_list_node(isy, cmd, argv, nodetype=None):
    """
    list node glue

    With an argument: look up and print that single node.
    Without arguments: print a table of all nodes/scenes of the given
    nodetype(s) (defaults to both).
    """
    # "nodetype", ("node", "scene")
    if ( len(argv) > 0 and argv[0] == "?" ):
        raise cmdException("Syntax :\n\t{!s} [-l]".format(cmd))
    if nodetype is None:
        nodetype = ("node", "scene")
    if len(argv) > 0 and argv[0] == "-l":
        # long format adds the node path column
        pfmt = "{:<22} {:>12}\t{:<12}{!s:<12} {!s:}"
    else:
        pfmt = "{:<22} {:>12}\t{:<12}{!s:<12}"
    # see isy_nodes.py
    # NOTE(review): a leading "-l" is not consumed above, so it is popped
    # here as a node id and triggers the single-node branch -- confirm
    if len(argv) > 0:
        nodeid = argv.pop(0)
        node = isy.get_node(nodeid)
        print node.name, node.address, node.formatted, node.enabled, node.ramprate
    else:
        print(pfmt.format("Node Name", "Address", "Status", "Enabled", "Path"))
        print(pfmt.format("---------", "-------", "------", "------", "----"))
        for nod in isy.node_iter(nodetype=nodetype):
            if nod.objtype == "scene":
                # scenes have no status/enabled/path of their own
                print(pfmt.format(nod.name, nod.address, "-", "-", "-"))
            else:
                print(pfmt.format(nod.name, nod.address,
                        nod.formatted, nod.enabled, nod.path))
def do_debuglevel(isy):
    """Set/get the ISY debug level -- not implemented yet."""
    pass
def do_reboot(isy):
    """Reboot the ISY -- intentionally disabled while work-in-progress."""
    pass
    # ask "are you sure ?"
    # isy.reboot(isy)
def print_cmds(cmd_list=commands_help):
    """Print a formatted help table for cmd_list (defaults to the
    top-level command table)."""
    for k, v in cmd_list.items():
        print " {!s:<22} :\t{!s}".format(k, v)
    print "\nFor more detail on command run command with arg '?'"
    print "\n* == may not be implemented\n"
if __name__ == '__main__':
    # connect using command-line arguments (parsearg) without pre-loading data
    myisy = ISY.Isy(parsearg=1, faststart=1) # debug=0x80
    doit(myisy)
    exit(0)
| evilpete/ISYlib-python | bin/isy_manage_node.py | Python | bsd-2-clause | 12,400 |
from prospector.suppression import get_suppressions
def filter_messages(relative_filepaths, root, messages):
    """
    Post-process the combined output of all tools, dropping messages that
    duplicate problems already suppressed elsewhere.

    Pylint honours inline configuration such as::

        import banana  # pylint:disable=unused-import

    but other tools (pyflakes, frosted) do not understand it and still
    report the same problem.  The suppression information collected from
    pylint is used here to squash those redundant messages.
    """
    paths_to_ignore, lines_to_ignore, messages_to_ignore = get_suppressions(relative_filepaths, root, messages)

    def _is_suppressed(message):
        # pylint's own bookkeeping messages are never interesting
        if message.source == 'pylint' and message.code in ('suppressed-message',):
            return True
        path = message.location.path
        line = message.location.line
        # whole file suppressed
        if path in paths_to_ignore:
            return True
        # whole line suppressed
        if line in lines_to_ignore.get(path, ()):
            return True
        # specific message code suppressed on this line
        if message.code in messages_to_ignore.get(path, {}).get(line, ()):
            return True
        return False

    return [message for message in messages if not _is_suppressed(message)]
| pahaz/prospector | prospector/postfilter.py | Python | gpl-2.0 | 2,021 |
# hunspell.py -- a wrapper class for hunspell
#
# (c) 2013 Juergen Weigert jw@suse.de
# Distribute under GPL-2.0 or ask
#
# 2013-01-31, V0.1 jw - initial draught: word by word I/O
# 2013-02-01, V0.1 jw - added own _readline() to use buffering. Pythons readline()
# does single byte read()s, which is slow.
# 2013-02-02, V0.2 jw - check_words() now remembers a wordlist, pushes all out
# with an extra thread, reads back async, and reassembles.
# This is much more efficient
#
import os,subprocess,re
__VERSION__ = '0.2'
class Hunspell():
    """A pure python wrapper class for the hunspell binary.

    It was written as a replacement for the hunspell module from
    http://code.google.com/p/pyhunspell/, which appears to be unmaintained
    and more difficult to use, due to lack of examples and documentation.
    """
    def __init__(self, dicts=['en_US']):
        # NOTE(review): mutable default argument (list) shared between calls
        self.cmd = ['hunspell', '-i', 'utf-8', '-a']  # "ispell -a" pipe mode
        # NOTE(review): this attribute shadows the dicts() method defined below
        self.dicts = dicts
        self.proc = None    # child hunspell process, started lazily by _start()
        self.attr = None    # cached output of 'hunspell -D' (see _load_attr)
        self.buffer = ''    # read-ahead buffer used by _readline()

    def _start(self):
        # Spawn the hunspell child in pipe mode and consume its banner line.
        cmd = self.cmd
        # NOTE(review): 'cmd += ...' mutates self.cmd in place (aliasing), so
        # '-d' options accumulate across repeated _start() calls -- confirm
        if self.dicts is not None and len(self.dicts):
            cmd += ['-d', ','.join(self.dicts)]
        try:
            self.proc = subprocess.Popen(cmd, shell=False,
                stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        except OSError as e:
            self.proc = "%s failed: errno=%d %s" % (cmd, e.errno, e.strerror)
            raise OSError(self.proc)
        header = ''
        # read until the version banner appears
        while True:
            more = self.proc.stdout.readline().rstrip()
            if len(more) > 5 and more[0:5] == '@(#) ': # version line with -a
                self.version = more[5:]
                break
            elif len(more) > 9 and more[0:9] == 'Hunspell ': # version line w/o -a
                self.version = more
                break
            else:
                header += more # stderr should be collected here. It does not work
        if len(header): self.header = header
        self.buffer = ''

    def _readline(self):
        # python readline() is horribly stupid on this pipe. It reads single
        # byte, just like java did in the 1980ies. Sorry, this is not
        # acceptable in 2013.
        if self.proc is None:
            # NOTE(review): 'Error' is not defined anywhere -- this line would
            # itself raise NameError; probably RuntimeError was intended
            raise Error("Hunspell_readline before _start")
        while True:
            idx = self.buffer.find('\n')
            if idx < 0:
                more = self.proc.stdout.read()
                if not len(more):
                    # EOF: return whatever remains (possibly '')
                    r = self.buffer
                    self.buffer = ''
                    return r
                self.buffer += more
            else:
                break
        # hand out one line (newline included), keep the rest buffered
        r = self.buffer[0:idx+1]
        self.buffer = self.buffer[idx+1:]
        return r

    def _load_attr(self):
        # Run 'hunspell -D' once and cache its listing in self.attr as a
        # dict of section header -> list of lines.
        try:
            p = subprocess.Popen(self.cmd + ['-D'], shell=False,
                stdin=open('/dev/null'), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
        except OSError as e:
            raise OSError("%s failed: errno=%d %s" % (self.cmd + ['-D'], e.errno, e.strerror))
        self.attr = {}
        header=''
        while True:
            line = p.stdout.readline().rstrip()
            if not len(line):
                break
            # AVAILABLE DICTIONARIES (path is not mandatory for -d option):
            m = re.match('([A-Z]+\s[A-Z]+).*:$', line)
            if m:
                # an "UPPER CASE ...:" line starts a new section
                header = m.group(1)
                self.attr[header] = []
            elif len(header):
                self.attr[header].append(line)
        return self.attr

    def dicts(self,dicts=None):
        """returns or sets the dictionaries that hunspell shall try to use"""
        # NOTE(review): unreachable in practice -- __init__ binds the instance
        # attribute self.dicts, which shadows this method
        if dicts is not None:
            self.dicts = dicts
        return self.dicts

    def list_dicts(self):
        """query hunspell about the available dictionaries.
        Returns a key value dict where keys are short names, and values
        are path names. You can pick some or all of the returned keys,
        and use the list (or one) as an argument to
        the next Hunspell() instance, or as an argument
        to the dicts() method.
        """
        if self.attr is None: self._load_attr()
        r = {}
        for d in self.attr['AVAILABLE DICTIONARIES']:
            # last path component is the dictionary's short name
            words = d.split('/')
            r[words[-1]] = d
        return r

    def dict_search_path(self):
        """returns a list of pathnames, actually used by hunspell to load
        spelling dictionaries from.
        """
        if self.attr is None: self._load_attr()
        r = []
        for d in self.attr['SEARCH PATH']:
            r += d.split(':')
        return r

    def dicts_loaded(self):
        """query the spelling dictionaries that will actually be used for
        the next check_words() call.
        """
        if self.attr is None: self._load_attr()
        return self.attr['LOADED DICTIONARY']

    def check_words(self, words):
        """takes a list of words as parameter, and checks them against the
        loaded spelling dictionaries. A key value dict is returned, where
        every key represents a word that was not found in the
        spelling dictionaries. Values are lists of correction suggestions.
        check_words() is implemented by calling the hunspell binary in pipe mode.
        This is fairly robust, but not optimized for efficiency.
        """
        if self.proc is None:
            self._start()
        # fork a writer child so all words can be streamed into hunspell while
        # the parent reads replies, avoiding a pipe deadlock
        childpid = os.fork()
        if childpid == 0:
            # '^' prefix tells hunspell to spell-check the line verbatim
            for w in words:
                self.proc.stdin.write(("^"+w+"\n").encode('utf8'))
            os._exit(0)
        self.proc.stdin.close()
        bad_words = {}
        while True:
            line = self._readline()
            if len(line) == 0:
                break
            line = line.rstrip()
            # ispell -a protocol: '*', '+', '-' all mean "word is fine"
            if not len(line) or line[0] in '*+-': continue
            if line[0] == '#':
                # '# word offset' -- misspelled, no suggestions
                car = line.split(' ')
                bad_words[car[1]] = [] # no suggestions
                # NOTE(review): no 'continue' here -- '#' lines fall through
                # to the '& ...' parsing below and hit the "bad hunspell
                # reply" branch; confirm intended
            elif line[0] != '&':
                print "hunspell protocoll error: '%s'" % line
                continue # unknown stuff
            # '& Radae 7 0: Radar, Ramada, Estrada, Prada, Rad, Roadie, Readable\n'
            a = line.split(': ')
            if len(a) >= 2:
                car = a[0].split(' ')
                cdr = a[1].split(', ')
                bad_words[car[1]] = cdr
            else:
                print("bad hunspell reply: %s, split as %s" % (line, a))
        # force a fresh hunspell process for the next call
        self.proc = None
        return bad_words
if __name__ == "__main__":
    # simple smoke test / usage demo
    from pprint import pprint
    h = Hunspell()
    pprint(h.list_dicts())
    pprint(h.dict_search_path())
    pprint(h.check_words(["ppppp", '123', '', 'gorkicht', 'gemank', 'haus', '']))
    pprint(h.check_words(["Radae", 'blood', 'mensch', 'green', 'blea', 'fork']))
    pprint(h.version)
| jnweiger/pdfcompare | hunspell.py | Python | gpl-2.0 | 6,973 |
# -*- coding: utf-8 -*-
from pyfr.backends.cuda.base import CUDABackend
| BrianVermeire/PyFR | pyfr/backends/cuda/__init__.py | Python | bsd-3-clause | 73 |
# third party
# local Django
from pom.pages.basePage import BasePage
from pom.locators.administratorReportPageLocators import \
AdministratorReportPageLocators
from pom.pages.homePage import HomePage
from pom.pageUrls import PageUrls
class AdministratorReportPage(BasePage):
    """Page object wrapping the administrator report page."""

    # URL of the administrator report page
    administrator_report_page = PageUrls.administrator_report_page

    def __init__(self, driver):
        self.driver = driver
        self.home_page = HomePage(self.driver)
        self.elements = AdministratorReportPageLocators()
        super(AdministratorReportPage, self).__init__(driver)

    def go_to_admin_report(self):
        """Navigate to the report page via the home-page link."""
        admin_report_link = self.home_page.get_admin_report_link()
        admin_report_link.click()

    def go_to_view_report_page(self):
        """Click the view-report control."""
        view_report_element = self.element_by_xpath(self.elements.VIEW_REPORT)
        return view_report_element.click()

    def get_volunteer_name(self):
        """Return the volunteer name shown in the report."""
        name_element = self.element_by_xpath(self.elements.VOLUNTEER_NAME)
        return name_element.text

    def get_hours(self):
        """Return the reported hours text."""
        hours_element = self.element_by_xpath(self.elements.HOURS)
        return hours_element.text

    def get_shift_summary(self):
        """Return the shift summary text."""
        summary_element = self.element_by_xpath(
            self.elements.REPORT_SHIFT_SUMMARY_PATH)
        return summary_element.text

    def get_rejection_context(self):
        """Return the text of the reject-report cell."""
        reject_element = self.element_by_xpath(self.elements.REJECT_REPORT)
        return reject_element.text

    def reject_report(self):
        """Click the reject link inside the reject-report cell."""
        reject_link = self.element_by_xpath(self.elements.REJECT_REPORT + '//a')
        reject_link.click()

    def get_report(self):
        """Return the report element."""
        return self.element_by_xpath(self.elements.REPORT)

    def get_approval_context(self):
        """Return the text of the approve-report cell."""
        approve_element = self.element_by_xpath(self.elements.APPROVE_REPORT)
        return approve_element.text

    def approve_report(self):
        """Click the approve link inside the approve-report cell."""
        approve_link = self.element_by_xpath(self.elements.APPROVE_REPORT + '//a')
        approve_link.click()
| systers/vms | vms/pom/pages/administratorReportPage.py | Python | gpl-2.0 | 1,622 |
from pyexcel_ods3 import get_data
def aEntero(s):
    """Return int(s) when s parses as an integer; otherwise return s unchanged."""
    try:
        converted = int(s)
    except ValueError:
        return s
    return converted
def limpiar(data):
    """Filter the AmesHousing rows into a dict of per-column value lists.

    Drops the first two columns (Order, PID) from every row, then keeps only
    rows whose second-to-last column equals 'Normal', whose column 45 value
    is <= 1500, and which contain no empty cells.  Values that parse as
    integers are converted with aEntero().

    Returns: dict mapping column index -> list of values from the kept rows.
    """
    datos = {}
    for fila in data['AmesHousing']:
        # drop Order and PID columns
        fila = fila[2:]
        n = len(fila) - 1
        # the filter does not depend on the column index, so test it once per
        # row instead of re-evaluating (and breaking) inside the column loop
        # NOTE(review): 'fila[45] > 1500' assumes column 45 is numeric; a
        # header row or string cell raises TypeError on Python 3 -- confirm
        if fila[n - 1] != 'Normal' or fila[45] > 1500 or '' in fila:
            continue
        for i in range(len(fila)):
            # setdefault replaces the original bare try/except that caught
            # every exception just to create a missing list
            datos.setdefault(i, []).append(aEntero(fila[i]))
    return datos
def main():
    """Load the AmesHousing spreadsheet and run the cleaning step."""
    data = get_data("AmesHousing.ods")
    datos = limpiar(data)
    #for elem in datos:
    #    print(elem)
if __name__ == '__main__':
main() | dougtc1/IA2-GradientDescent | prueba.py | Python | gpl-3.0 | 682 |
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import math
import numpy
from . import _ni_support
from . import _nd_image
from scipy.misc import doccer
from scipy._lib._version import NumpyVersion
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
'prewitt', 'sobel', 'generic_laplace', 'laplace',
'gaussian_laplace', 'generic_gradient_magnitude',
'gaussian_gradient_magnitude', 'correlate', 'convolve',
'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
'maximum_filter1d', 'minimum_filter', 'maximum_filter',
'rank_filter', 'median_filter', 'percentile_filter',
'generic_filter1d', 'generic_filter']
_input_doc = \
"""input : array_like
Input array to filter."""
_axis_doc = \
"""axis : int, optional
The axis of `input` along which to calculate. Default is -1."""
_output_doc = \
"""output : array, optional
The `output` parameter passes an array in which to store the
filter output."""
_size_foot_doc = \
"""size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
"""
_mode_doc = \
"""mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'"""
_cval_doc = \
"""cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0"""
_origin_doc = \
"""origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0.0."""
_extra_arguments_doc = \
"""extra_arguments : sequence, optional
Sequence of extra positional arguments to pass to passed function"""
_extra_keywords_doc = \
"""extra_keywords : dict, optional
dict of extra keyword arguments to pass to passed function"""
docdict = {
'input': _input_doc,
'axis': _axis_doc,
'output': _output_doc,
'size_foot': _size_foot_doc,
'mode': _mode_doc,
'cval': _cval_doc,
'origin': _origin_doc,
'extra_arguments': _extra_arguments_doc,
'extra_keywords': _extra_keywords_doc,
}
docfiller = doccer.filldoc(docdict)
@docfiller
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
                cval=0.0, origin=0):
    """Calculate a one-dimensional correlation along the given axis.

    The lines of the array along the given axis are correlated with the
    given weights.

    Parameters
    ----------
    %(input)s
    weights : array
        One-dimensional sequence of numbers.
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s

    Returns
    -------
    correlate1d : ndarray
        Correlated array with the same shape as input.
    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    output, return_value = _ni_support._get_output(output, input)
    weights = numpy.asarray(weights, dtype=numpy.float64)
    if weights.ndim != 1 or weights.shape[0] < 1:
        raise RuntimeError('no filter weights given')
    if not weights.flags.contiguous:
        # the C implementation requires a contiguous weights array
        weights = weights.copy()
    axis = _ni_support._check_axis(axis, input.ndim)
    # the (shifted) filter origin must fall inside the kernel
    if (len(weights) // 2 + origin < 0) or (len(weights) // 2 +
                                            origin > len(weights)):
        raise ValueError('invalid origin')
    # translate the boundary-mode name to the integer code used by the C code
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.correlate1d(input, weights, axis, output, mode, cval,
                          origin)
    return return_value
@docfiller
def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
               cval=0.0, origin=0):
    """Calculate a one-dimensional convolution along the given axis.

    The lines of the array along the given axis are convolved with the
    given weights.

    Parameters
    ----------
    %(input)s
    weights : ndarray
        One-dimensional sequence of numbers.
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s

    Returns
    -------
    convolve1d : ndarray
        Convolved array with same shape as input
    """
    # Convolution is correlation with the kernel reversed and the origin
    # mirrored (with an extra shift for even-length kernels).
    reversed_weights = weights[::-1]
    shifted_origin = -origin
    if len(reversed_weights) % 2 == 0:
        shifted_origin -= 1
    return correlate1d(input, reversed_weights, axis, output, mode, cval,
                       shifted_origin)
@docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
                      mode="reflect", cval=0.0, truncate=4.0):
    """One-dimensional Gaussian filter.

    Parameters
    ----------
    %(input)s
    sigma : scalar
        standard deviation for Gaussian kernel
    %(axis)s
    order : {0, 1, 2, 3}, optional
        An order of 0 corresponds to convolution with a Gaussian
        kernel. An order of 1, 2, or 3 corresponds to convolution with
        the first, second or third derivatives of a Gaussian. Higher
        order derivatives are not implemented
    %(output)s
    %(mode)s
    %(cval)s
    truncate : float, optional
        Truncate the filter at this many standard deviations.
        Default is 4.0.

    Returns
    -------
    gaussian_filter1d : ndarray
    """
    if order not in range(4):
        raise ValueError('Order outside 0..3 not implemented')
    sd = float(sigma)
    # make the radius of the filter equal to truncate standard deviations
    lw = int(truncate * sd + 0.5)
    weights = [0.0] * (2 * lw + 1)
    weights[lw] = 1.0
    # NOTE(review): 'sum' shadows the builtin within this function
    sum = 1.0
    # from here on, sd holds the variance sigma**2
    sd = sd * sd
    # calculate the kernel: symmetric Gaussian samples around the centre
    for ii in range(1, lw + 1):
        tmp = math.exp(-0.5 * float(ii * ii) / sd)
        weights[lw + ii] = tmp
        weights[lw - ii] = tmp
        sum += 2.0 * tmp
    # normalize so the kernel sums to 1
    for ii in range(2 * lw + 1):
        weights[ii] /= sum
    # implement first, second and third order derivatives:
    if order == 1:  # first derivative: d/dx G(x) = -x/sigma**2 * G(x)
        weights[lw] = 0.0
        for ii in range(1, lw + 1):
            x = float(ii)
            tmp = -x / sd * weights[lw + ii]
            # odd symmetry: opposite signs on the two sides
            weights[lw + ii] = -tmp
            weights[lw - ii] = tmp
    elif order == 2:  # second derivative: (x**2/sigma**2 - 1)/sigma**2 * G(x)
        weights[lw] *= -1.0 / sd
        for ii in range(1, lw + 1):
            x = float(ii)
            tmp = (x * x / sd - 1.0) * weights[lw + ii] / sd
            # even symmetry
            weights[lw + ii] = tmp
            weights[lw - ii] = tmp
    elif order == 3:  # third derivative: (3 - x**2/sigma**2) * x/sigma**4 * G(x)
        weights[lw] = 0.0
        sd2 = sd * sd
        for ii in range(1, lw + 1):
            x = float(ii)
            tmp = (3.0 - x * x / sd) * x * weights[lw + ii] / sd2
            # odd symmetry
            weights[lw + ii] = -tmp
            weights[lw - ii] = tmp
    # the actual filtering is a correlation with the prepared kernel
    return correlate1d(input, weights, axis, output, mode, cval, 0)
@docfiller
def gaussian_filter(input, sigma, order=0, output=None,
                    mode="reflect", cval=0.0, truncate=4.0):
    """Multidimensional Gaussian filter.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        Standard deviation for Gaussian kernel. The standard
        deviations of the Gaussian filter are given for each axis as a
        sequence, or as a single number, in which case it is equal for
        all axes.
    order : {0, 1, 2, 3} or sequence from same set, optional
        The order of the filter along each axis is given as a sequence
        of integers, or as a single number.  An order of 0 corresponds
        to convolution with a Gaussian kernel. An order of 1, 2, or 3
        corresponds to convolution with the first, second or third
        derivatives of a Gaussian. Higher order derivatives are not
        implemented
    %(output)s
    %(mode)s
    %(cval)s
    truncate : float
        Truncate the filter at this many standard deviations.
        Default is 4.0.

    Returns
    -------
    gaussian_filter : ndarray
        Returned array of same shape as `input`.

    Notes
    -----
    The multidimensional filter is implemented as a sequence of
    one-dimensional convolution filters. The intermediate arrays are
    stored in the same data type as the output. Therefore, for output
    types with a limited precision, the results may be imprecise
    because intermediate results may be stored with insufficient
    precision.

    Examples
    --------
    >>> from scipy.ndimage import gaussian_filter
    >>> a = np.arange(50, step=2).reshape((5,5))
    >>> a
    array([[ 0,  2,  4,  6,  8],
           [10, 12, 14, 16, 18],
           [20, 22, 24, 26, 28],
           [30, 32, 34, 36, 38],
           [40, 42, 44, 46, 48]])
    >>> gaussian_filter(a, sigma=1)
    array([[ 4,  6,  8,  9, 11],
           [10, 12, 14, 15, 17],
           [20, 22, 24, 25, 27],
           [29, 31, 33, 34, 36],
           [35, 37, 39, 40, 42]])
    """
    input = numpy.asarray(input)
    output, return_value = _ni_support._get_output(output, input)
    # expand scalar order/sigma to one value per axis
    orders = _ni_support._normalize_sequence(order, input.ndim)
    if not set(orders).issubset(set(range(4))):
        # NOTE(review): message says '0..4' but valid orders are 0..3
        raise ValueError('Order outside 0..4 not implemented')
    sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
    axes = list(range(input.ndim))
    # keep only axes with a non-negligible sigma; each entry is (axis, sigma, order)
    axes = [(axes[ii], sigmas[ii], orders[ii])
            for ii in range(len(axes)) if sigmas[ii] > 1e-15]
    if len(axes) > 0:
        # separable filter: apply a 1-D Gaussian pass per axis, feeding each
        # pass with the previous pass's output
        for axis, sigma, order in axes:
            gaussian_filter1d(input, sigma, axis, order, output,
                              mode, cval, truncate)
            input = output
    else:
        # nothing to filter: plain copy
        output[...] = input[...]
    return return_value
@docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a Prewitt filter.

    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> ascent = misc.ascent()
    >>> result = ndimage.prewitt(ascent)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> plt.imshow(result)
    """
    input = numpy.asarray(input)
    axis = _ni_support._check_axis(axis, input.ndim)
    output, return_value = _ni_support._get_output(output, input)
    # central difference along the requested axis ...
    correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
    # ... followed by uniform smoothing along every other axis
    for other_axis in range(input.ndim):
        if other_axis == axis:
            continue
        correlate1d(output, [1, 1, 1], other_axis, output, mode, cval, 0)
    return return_value
@docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a Sobel filter.

    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> ascent = misc.ascent()
    >>> result = ndimage.sobel(ascent)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> plt.imshow(result)
    """
    arr = numpy.asarray(input)
    deriv_axis = _ni_support._check_axis(axis, arr.ndim)
    output, return_value = _ni_support._get_output(output, arr)
    # Central difference along the derivative axis...
    correlate1d(arr, [-1, 0, 1], deriv_axis, output, mode, cval, 0)
    for smooth_axis in range(arr.ndim):
        if smooth_axis != deriv_axis:
            # ...followed by binomial [1, 2, 1] smoothing on the others.
            correlate1d(output, [1, 2, 1], smooth_axis, output, mode, cval, 0)
    return return_value
@docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
                    cval=0.0,
                    extra_arguments=(),
                    extra_keywords = None):
    """N-dimensional Laplace filter using a provided second derivative function

    The Laplacian is obtained by summing, over every axis of the input,
    the second derivative computed by the supplied callable.

    Parameters
    ----------
    %(input)s
    derivative2 : callable
        Callable with the following signature::
            derivative2(input, axis, output, mode, cval,
                        *extra_arguments, **extra_keywords)
        See `extra_arguments`, `extra_keywords` below.
    %(output)s
    %(mode)s
    %(cval)s
    %(extra_keywords)s
    %(extra_arguments)s
    """
    if extra_keywords is None:
        extra_keywords = {}
    input = numpy.asarray(input)
    output, return_value = _ni_support._get_output(output, input)
    axes = list(range(input.ndim))
    if len(axes) > 0:
        # First axis writes its second derivative directly into `output`.
        derivative2(input, axes[0], output, mode, cval,
                    *extra_arguments, **extra_keywords)
        for ii in range(1, len(axes)):
            # Subsequent axes pass output.dtype (a dtype, not an array);
            # presumably this makes derivative2 allocate a fresh result to
            # accumulate here -- verify against _ni_support._get_output.
            tmp = derivative2(input, axes[ii], output.dtype, mode, cval,
                              *extra_arguments, **extra_keywords)
            output += tmp
    else:
        # Zero-dimensional input: nothing to differentiate, copy through.
        output[...] = input[...]
    return return_value
@docfiller
def laplace(input, output=None, mode="reflect", cval=0.0):
    """N-dimensional Laplace filter based on approximate second derivatives.

    Parameters
    ----------
    %(input)s
    %(output)s
    %(mode)s
    %(cval)s

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> ascent = misc.ascent()
    >>> result = ndimage.laplace(ascent)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> plt.imshow(result)
    """
    # Second derivative along one axis, approximated by the [1, -2, 1]
    # finite-difference stencil.
    def second_derivative(input, axis, output, mode, cval):
        return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
    return generic_laplace(input, second_derivative, output, mode, cval)
@docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
                     cval=0.0, **kwargs):
    """Multidimensional Laplace filter using gaussian second derivatives.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        The standard deviations of the Gaussian filter are given for
        each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
    %(output)s
    %(mode)s
    %(cval)s
    Extra keyword arguments will be passed to gaussian_filter().

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> ascent = misc.ascent()
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> result = ndimage.gaussian_laplace(ascent, sigma=1)
    >>> ax1.imshow(result)
    >>> result = ndimage.gaussian_laplace(ascent, sigma=3)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    input = numpy.asarray(input)

    # Gaussian second derivative along one axis: derivative order 2 on
    # that axis, plain smoothing (order 0) everywhere else.
    def second_derivative(input, axis, output, mode, cval, sigma, **kwargs):
        orders = [0] * input.ndim
        orders[axis] = 2
        return gaussian_filter(input, sigma, orders, output, mode, cval,
                               **kwargs)
    return generic_laplace(input, second_derivative, output, mode, cval,
                           extra_arguments=(sigma,),
                           extra_keywords=kwargs)
@docfiller
def generic_gradient_magnitude(input, derivative, output=None,
                               mode="reflect", cval=0.0,
                               extra_arguments=(), extra_keywords = None):
    """Gradient magnitude using a provided gradient function.

    The magnitude is the square root of the sum over all axes of the
    squared derivative computed by the supplied callable.

    Parameters
    ----------
    %(input)s
    derivative : callable
        Callable with the following signature::
            derivative(input, axis, output, mode, cval,
                       *extra_arguments, **extra_keywords)
        See `extra_arguments`, `extra_keywords` below.
        `derivative` can assume that `input` and `output` are ndarrays.
        Note that the output from `derivative` is modified inplace;
        be careful to copy important inputs before returning them.
    %(output)s
    %(mode)s
    %(cval)s
    %(extra_keywords)s
    %(extra_arguments)s
    """
    if extra_keywords is None:
        extra_keywords = {}
    input = numpy.asarray(input)
    output, return_value = _ni_support._get_output(output, input)
    axes = list(range(input.ndim))
    if len(axes) > 0:
        # First axis writes its derivative directly into `output`, which
        # is squared in place to start the running sum of squares.
        derivative(input, axes[0], output, mode, cval,
                   *extra_arguments, **extra_keywords)
        numpy.multiply(output, output, output)
        for ii in range(1, len(axes)):
            # Remaining axes pass output.dtype (a dtype, not an array);
            # presumably this makes `derivative` allocate a fresh result
            # array -- verify against _ni_support._get_output.
            tmp = derivative(input, axes[ii], output.dtype, mode, cval,
                             *extra_arguments, **extra_keywords)
            numpy.multiply(tmp, tmp, tmp)
            output += tmp
        # This allows the sqrt to work with a different default casting
        numpy.sqrt(output, output, casting='unsafe')
    else:
        # Zero-dimensional input: nothing to differentiate, copy through.
        output[...] = input[...]
    return return_value
@docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
                                mode="reflect", cval=0.0, **kwargs):
    """Multidimensional gradient magnitude using Gaussian derivatives.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        The standard deviations of the Gaussian filter are given for
        each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
    %(output)s
    %(mode)s
    %(cval)s
    Extra keyword arguments will be passed to gaussian_filter().
    """
    input = numpy.asarray(input)

    # Gaussian first derivative along one axis: derivative order 1 on
    # that axis, plain smoothing (order 0) everywhere else.
    def first_derivative(input, axis, output, mode, cval, sigma, **kwargs):
        orders = [0] * input.ndim
        orders[axis] = 1
        return gaussian_filter(input, sigma, orders, output, mode,
                               cval, **kwargs)
    return generic_gradient_magnitude(input, first_derivative, output,
                                      mode, cval, extra_arguments=(sigma,),
                                      extra_keywords=kwargs)
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
                           convolution):
    """Shared implementation of `correlate` and `convolve`.

    A convolution is reduced to a correlation by reversing the weights
    along every axis and mirroring the origins accordingly.
    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    weights = numpy.asarray(weights, dtype=numpy.float64)
    # Squeeze out zero-length axes before checking dimensionality.
    wshape = [ii for ii in weights.shape if ii > 0]
    if len(wshape) != input.ndim:
        raise RuntimeError('filter weights array has incorrect shape.')
    if convolution:
        # Reverse the kernel along every axis...
        weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
        for ii in range(len(origins)):
            # ...and mirror each origin; even-sized axes get an extra
            # shift of one so the kernel centre stays consistent.
            origins[ii] = -origins[ii]
            if not weights.shape[ii] & 1:
                origins[ii] -= 1
    for origin, lenw in zip(origins, wshape):
        if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw):
            raise ValueError('invalid origin')
    # The C implementation requires a contiguous weights array.
    if not weights.flags.contiguous:
        weights = weights.copy()
    output, return_value = _ni_support._get_output(output, input)
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.correlate(input, weights, output, mode, cval, origins)
    return return_value
@docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
              origin=0):
    """
    Multi-dimensional correlation.

    The array is correlated with the given kernel.

    Parameters
    ----------
    input : array-like
        Array to be filtered.
    weights : ndarray
        Array of weights, with the same number of dimensions as `input`.
    output : array, optional
        The ``output`` parameter passes an array in which to store the
        filter output.
    mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
        The ``mode`` parameter determines how the array borders are
        handled, where ``cval`` is the value used when mode is equal to
        'constant'. Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of input if ``mode`` is 'constant'.
        Default is 0.0.
    origin : scalar, optional
        The ``origin`` parameter controls the placement of the filter.
        Default is 0.

    See Also
    --------
    convolve : Convolve an image with a kernel.
    """
    # Correlation is the non-reversed variant of the shared helper.
    return _correlate_or_convolve(input, weights, output, mode, cval,
                                  origin, False)
@docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
             origin=0):
    """
    Multidimensional convolution.
    The array is convolved with the given kernel.
    Parameters
    ----------
    input : array_like
        Input array to filter.
    weights : array_like
        Array of weights, same number of dimensions as input
    output : ndarray, optional
        The `output` parameter passes an array in which to store the
        filter output.
    mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
        the `mode` parameter determines how the array borders are
        handled. For 'constant' mode, values beyond borders are set to be
        `cval`. Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0
    origin : array_like, optional
        The `origin` parameter controls the placement of the filter,
        relative to the centre of the current element of the input.
        Default of 0 is equivalent to ``(0,)*input.ndim``.
    Returns
    -------
    result : ndarray
        The result of convolution of `input` with `weights`.
    See Also
    --------
    correlate : Correlate an image with a kernel.
    Notes
    -----
    Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
    W is the `weights` kernel,
    j is the n-D spatial index over :math:`W`,
    I is the `input` and k is the coordinate of the center of
    W, specified by `origin` in the input parameters.
    Examples
    --------
    Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
    because in this case borders (i.e., where the `weights` kernel, centered
    on any one value, extends beyond an edge of `input`) are treated as zeros.
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
    >>> from scipy import ndimage
    >>> ndimage.convolve(a, k, mode='constant', cval=0.0)
    array([[11, 10,  7,  4],
           [10,  3, 11, 11],
           [15, 12, 14,  7],
           [12,  3,  7,  0]])
    Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
    with 1.0's (and then extracting only the original region of the result).
    >>> ndimage.convolve(a, k, mode='constant', cval=1.0)
    array([[13, 11,  8,  7],
           [11,  3, 11, 14],
           [16, 12, 14, 10],
           [15,  6, 10,  5]])
    With ``mode='reflect'`` (the default), outer values are reflected at the
    edge of `input` to fill in missing values.
    >>> b = np.array([[2, 0, 0],
    ...               [1, 0, 0],
    ...               [0, 0, 0]])
    >>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
    >>> ndimage.convolve(b, k, mode='reflect')
    array([[5, 0, 0],
           [3, 0, 0],
           [1, 0, 0]])
    This includes diagonally at the corners.
    >>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
    >>> ndimage.convolve(b, k)
    array([[4, 2, 0],
           [3, 2, 0],
           [1, 1, 0]])
    With ``mode='nearest'``, the single nearest value in to an edge in
    `input` is repeated as many times as needed to match the overlapping
    `weights`.
    >>> c = np.array([[2, 0, 1],
    ...               [1, 0, 0],
    ...               [0, 0, 0]])
    >>> k = np.array([[0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0]])
    >>> ndimage.convolve(c, k, mode='nearest')
    array([[7, 0, 3],
           [5, 0, 2],
           [3, 0, 1]])
    """
    # Delegates to the shared helper with convolution=True (kernel
    # reversed, origins mirrored).
    return _correlate_or_convolve(input, weights, output, mode, cval,
                                  origin, True)
@docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a one-dimensional uniform filter along the given axis.

    The lines of the array along the given axis are filtered with a
    uniform filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        length of uniform filter
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    """
    arr = numpy.asarray(input)
    if numpy.iscomplexobj(arr):
        raise TypeError('Complex type not supported')
    axis = _ni_support._check_axis(axis, arr.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output, return_value = _ni_support._get_output(output, arr)
    # The shifted filter centre must stay inside the window.
    shift = size // 2 + origin
    if shift < 0 or shift >= size:
        raise ValueError('invalid origin')
    mode_code = _ni_support._extend_mode_to_code(mode)
    _nd_image.uniform_filter1d(arr, size, axis, output, mode_code, cval,
                               origin)
    return return_value
@docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
                   cval=0.0, origin=0):
    """Multi-dimensional uniform filter.

    Parameters
    ----------
    %(input)s
    size : int or sequence of ints, optional
        The sizes of the uniform filter are given for each axis as a
        sequence, or as a single number, in which case the size is
        equal for all axes.
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s

    Notes
    -----
    The multi-dimensional filter is implemented as a sequence of
    one-dimensional uniform filters. The intermediate arrays are stored
    in the same data type as the output. Therefore, for output types
    with a limited precision, the results may be imprecise because
    intermediate results may be stored with insufficient precision.
    """
    arr = numpy.asarray(input)
    output, return_value = _ni_support._get_output(output, arr)
    sizes = _ni_support._normalize_sequence(size, arr.ndim)
    origins = _ni_support._normalize_sequence(origin, arr.ndim)
    # Only axes with a window larger than one need filtering.
    jobs = [(axis, sizes[axis], origins[axis])
            for axis in range(arr.ndim) if sizes[axis] > 1]
    if not jobs:
        output[...] = arr[...]
    else:
        # After the first pass the output doubles as the input for the
        # remaining axes.
        for axis, length, shift in jobs:
            uniform_filter1d(arr, int(length), axis, output, mode,
                             cval, shift)
            arr = output
    return return_value
@docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a one-dimensional minimum filter along the given axis.

    The lines of the array along the given axis are filtered with a
    minimum filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        length along which to calculate 1D minimum
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s

    Notes
    -----
    This function implements the MINLIST algorithm [1]_, as described by
    Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
    the `input` length, regardless of filter size.

    References
    ----------
    .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
    .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
    """
    arr = numpy.asarray(input)
    if numpy.iscomplexobj(arr):
        raise TypeError('Complex type not supported')
    axis = _ni_support._check_axis(axis, arr.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output, return_value = _ni_support._get_output(output, arr)
    shift = size // 2 + origin
    if shift < 0 or shift >= size:
        raise ValueError('invalid origin')
    mode_code = _ni_support._extend_mode_to_code(mode)
    # The trailing 1 selects the minimum variant in the C kernel.
    _nd_image.min_or_max_filter1d(arr, size, axis, output, mode_code,
                                  cval, origin, 1)
    return return_value
@docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a one-dimensional maximum filter along the given axis.

    The lines of the array along the given axis are filtered with a
    maximum filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        Length along which to calculate the 1-D maximum.
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s

    Returns
    -------
    maximum1d : ndarray, None
        Maximum-filtered array with same shape as input.
        None if `output` is not None

    Notes
    -----
    This function implements the MAXLIST algorithm [1]_, as described by
    Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
    the `input` length, regardless of filter size.

    References
    ----------
    .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
    .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
    """
    arr = numpy.asarray(input)
    if numpy.iscomplexobj(arr):
        raise TypeError('Complex type not supported')
    axis = _ni_support._check_axis(axis, arr.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output, return_value = _ni_support._get_output(output, arr)
    shift = size // 2 + origin
    if shift < 0 or shift >= size:
        raise ValueError('invalid origin')
    mode_code = _ni_support._extend_mode_to_code(mode)
    # The trailing 0 selects the maximum variant in the C kernel.
    _nd_image.min_or_max_filter1d(arr, size, axis, output, mode_code,
                                  cval, origin, 0)
    return return_value
def _min_or_max_filter(input, size, footprint, structure, output, mode,
                       cval, origin, minimum):
    """Shared implementation of `minimum_filter` and `maximum_filter`.

    Uses the fast separable 1-D implementation when the filter reduces to
    a plain rectangular window, and the generic n-D implementation when an
    arbitrary footprint or structuring element is supplied.

    Parameters mirror the public filters; `minimum` selects the minimum
    variant when true, the maximum variant otherwise.
    """
    if structure is None:
        if footprint is None:
            if size is None:
                raise RuntimeError("no footprint provided")
            separable = True
        else:
            footprint = numpy.asarray(footprint)
            footprint = footprint.astype(bool)
            if numpy.all(footprint):
                # A completely-True footprint is equivalent to a
                # rectangular window, so take the faster separable path.
                # numpy.all replaces the deprecated numpy.alltrue
                # (removed in NumPy 2.0) with identical semantics.
                size = footprint.shape
                footprint = None
                separable = True
            else:
                separable = False
    else:
        structure = numpy.asarray(structure, dtype=numpy.float64)
        separable = False
        if footprint is None:
            footprint = numpy.ones(structure.shape, bool)
        else:
            footprint = numpy.asarray(footprint)
            footprint = footprint.astype(bool)
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    output, return_value = _ni_support._get_output(output, input)
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    if separable:
        sizes = _ni_support._normalize_sequence(size, input.ndim)
        axes = list(range(input.ndim))
        axes = [(axes[ii], sizes[ii], origins[ii])
                for ii in range(len(axes)) if sizes[ii] > 1]
        if minimum:
            filter_ = minimum_filter1d
        else:
            filter_ = maximum_filter1d
        if len(axes) > 0:
            # Apply the 1-D filter along each axis in turn; after the
            # first pass, the output doubles as input for later axes.
            for axis, size, origin in axes:
                filter_(input, int(size), axis, output, mode, cval, origin)
                input = output
        else:
            output[...] = input[...]
    else:
        fshape = [ii for ii in footprint.shape if ii > 0]
        if len(fshape) != input.ndim:
            raise RuntimeError('footprint array has incorrect shape.')
        for origin, lenf in zip(origins, fshape):
            if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
                raise ValueError('invalid origin')
        # The C implementation requires contiguous arrays.
        if not footprint.flags.contiguous:
            footprint = footprint.copy()
        if structure is not None:
            if len(structure.shape) != input.ndim:
                raise RuntimeError('structure array has incorrect shape')
            if not structure.flags.contiguous:
                structure = structure.copy()
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.min_or_max_filter(input, footprint, structure, output,
                                    mode, cval, origins, minimum)
    return return_value
@docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
                   mode="reflect", cval=0.0, origin=0):
    """Calculates a multi-dimensional minimum filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    """
    # Delegate to the shared min/max helper; the trailing 1 selects
    # the minimum variant.
    return _min_or_max_filter(input, size, footprint, None, output,
                              mode, cval, origin, 1)
@docfiller
def maximum_filter(input, size=None, footprint=None, output=None,
                   mode="reflect", cval=0.0, origin=0):
    """Calculates a multi-dimensional maximum filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    """
    # Delegate to the shared min/max helper; the trailing 0 selects
    # the maximum variant.
    return _min_or_max_filter(input, size, footprint, None, output,
                              mode, cval, origin, 0)
@docfiller
# NOTE(review): @docfiller on this docstring-less private helper looks
# unintentional -- confirm the decorator tolerates __doc__ being None.
def _rank_filter(input, rank, size=None, footprint=None, output=None,
     mode="reflect", cval=0.0, origin=0, operation='rank'):
    # Shared implementation of rank_filter, median_filter and
    # percentile_filter. `operation` selects how `rank` is interpreted.
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    if footprint is None:
        if size is None:
            raise RuntimeError("no footprint or filter size provided")
        sizes = _ni_support._normalize_sequence(size, input.ndim)
        footprint = numpy.ones(sizes, dtype=bool)
    else:
        footprint = numpy.asarray(footprint, dtype=bool)
    fshape = [ii for ii in footprint.shape if ii > 0]
    if len(fshape) != input.ndim:
        raise RuntimeError('filter footprint array has incorrect shape.')
    for origin, lenf in zip(origins, fshape):
        if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
            raise ValueError('invalid origin')
    # The C implementation requires a contiguous footprint.
    if not footprint.flags.contiguous:
        footprint = footprint.copy()
    filter_size = numpy.where(footprint, 1, 0).sum()
    if operation == 'median':
        # The median is the middle rank of the footprint elements.
        rank = filter_size // 2
    elif operation == 'percentile':
        percentile = rank
        if percentile < 0.0:
            # Negative percentiles count down from 100.
            percentile += 100.0
        if percentile < 0 or percentile > 100:
            raise RuntimeError('invalid percentile')
        if percentile == 100.0:
            rank = filter_size - 1
        else:
            rank = int(float(filter_size) * percentile / 100.0)
    if rank < 0:
        # Negative ranks count back from the largest element.
        rank += filter_size
    if rank < 0 or rank >= filter_size:
        raise RuntimeError('rank not within filter footprint size')
    # Lowest/highest rank reduce to the dedicated min/max filters, which
    # are faster than the generic rank implementation.
    if rank == 0:
        return minimum_filter(input, None, footprint, output, mode, cval,
                              origins)
    elif rank == filter_size - 1:
        return maximum_filter(input, None, footprint, output, mode, cval,
                              origins)
    else:
        output, return_value = _ni_support._get_output(output, input)
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.rank_filter(input, rank, footprint, output, mode, cval,
                              origins)
        return return_value
@docfiller
def rank_filter(input, rank, size=None, footprint=None, output=None,
                mode="reflect", cval=0.0, origin=0):
    """Calculates a multi-dimensional rank filter.

    Parameters
    ----------
    %(input)s
    rank : int
        The rank parameter may be less than zero, i.e., rank = -1
        indicates the largest element.
    %(size_foot)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    """
    return _rank_filter(input, rank, size, footprint, output, mode, cval,
                        origin, 'rank')
@docfiller
def median_filter(input, size=None, footprint=None, output=None,
                  mode="reflect", cval=0.0, origin=0):
    """
    Calculates a multidimensional median filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s

    Returns
    -------
    median_filter : ndarray
        Return of same shape as `input`.
    """
    # A median is a rank filter that picks the middle element; the
    # 'median' operation lets _rank_filter compute that rank itself,
    # so the rank argument (0) is ignored.
    return _rank_filter(input, 0, size, footprint, output, mode, cval,
                        origin, 'median')
@docfiller
def percentile_filter(input, percentile, size=None, footprint=None,
                      output=None, mode="reflect", cval=0.0, origin=0):
    """Calculates a multi-dimensional percentile filter.

    Parameters
    ----------
    %(input)s
    percentile : scalar
        The percentile parameter may be less than zero, i.e.,
        percentile = -20 equals percentile = 80
    %(size_foot)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    """
    return _rank_filter(input, percentile, size, footprint, output, mode,
                        cval, origin, 'percentile')
@docfiller
def generic_filter1d(input, function, filter_size, axis=-1,
                     output=None, mode="reflect", cval=0.0, origin=0,
                     extra_arguments=(), extra_keywords = None):
    """Calculate a one-dimensional filter along the given axis.

    `generic_filter1d` iterates over the lines of the array, calling the
    given function at each line. The arguments of the line are the
    input line, and the output line. The input and output lines are 1D
    double arrays. The input line is extended appropriately according
    to the filter size and origin. The output line must be modified
    in-place with the result.

    Parameters
    ----------
    %(input)s
    function : callable
        Function to apply along given axis.
    filter_size : scalar
        Length of the filter.
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    %(extra_arguments)s
    %(extra_keywords)s
    """
    extra_keywords = {} if extra_keywords is None else extra_keywords
    arr = numpy.asarray(input)
    if numpy.iscomplexobj(arr):
        raise TypeError('Complex type not supported')
    output, return_value = _ni_support._get_output(output, arr)
    if filter_size < 1:
        raise RuntimeError('invalid filter size')
    axis = _ni_support._check_axis(axis, arr.ndim)
    # The shifted filter centre must stay inside the window.
    shift = filter_size // 2 + origin
    if shift < 0 or shift >= filter_size:
        raise ValueError('invalid origin')
    mode_code = _ni_support._extend_mode_to_code(mode)
    _nd_image.generic_filter1d(arr, function, filter_size, axis, output,
                               mode_code, cval, origin, extra_arguments,
                               extra_keywords)
    return return_value
@docfiller
def generic_filter(input, function, size=None, footprint=None,
                   output=None, mode="reflect", cval=0.0, origin=0,
                   extra_arguments=(), extra_keywords = None):
    """Calculates a multi-dimensional filter using the given function.

    At each element the provided function is called. The input values
    within the filter footprint at that element are passed to the function
    as a 1D array of double values.

    Parameters
    ----------
    %(input)s
    function : callable
        Function to apply at each element.
    %(size_foot)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    %(extra_arguments)s
    %(extra_keywords)s
    """
    extra_keywords = {} if extra_keywords is None else extra_keywords
    arr = numpy.asarray(input)
    if numpy.iscomplexobj(arr):
        raise TypeError('Complex type not supported')
    origins = _ni_support._normalize_sequence(origin, arr.ndim)
    if footprint is None:
        if size is None:
            raise RuntimeError("no footprint or filter size provided")
        sizes = _ni_support._normalize_sequence(size, arr.ndim)
        footprint = numpy.ones(sizes, dtype=bool)
    else:
        footprint = numpy.asarray(footprint).astype(bool)
    # Squeeze out zero-length axes before checking dimensionality.
    fshape = [dim for dim in footprint.shape if dim > 0]
    if len(fshape) != arr.ndim:
        raise RuntimeError('filter footprint array has incorrect shape.')
    for origin, lenf in zip(origins, fshape):
        if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
            raise ValueError('invalid origin')
    # The C implementation requires a contiguous footprint.
    if not footprint.flags.contiguous:
        footprint = footprint.copy()
    output, return_value = _ni_support._get_output(output, arr)
    mode_code = _ni_support._extend_mode_to_code(mode)
    _nd_image.generic_filter(arr, function, footprint, output, mode_code,
                             cval, origins, extra_arguments,
                             extra_keywords)
    return return_value
| yuanagain/seniorthesis | venv/lib/python2.7/site-packages/scipy/ndimage/filters.py | Python | mit | 42,327 |
"""
Django settings for learn_django project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p#!k=rol_-(8@h@efw)3&1ex7zj_avn15p0=14gngdekp*@q!#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'learn_django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'learn_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# email config
# Outgoing SMTP server used by Django's email backend.
EMAIL_HOST = 'mail.dzji.com'
EMAIL_HOST_USER = 'zhengyuze@dzji.com'
# NOTE(review): never commit real credentials, even commented out --
# load EMAIL_HOST_PASSWORD from the environment instead.
#EMAIL_HOST_PASSWORD = '123456'
EMAIL_PORT = 25
# Fixed typo: the setting Django reads is EMAIL_USE_TLS; the original
# EMAIL_USE_TSL was silently ignored.
EMAIL_USE_TLS = False
| z-yz/DjangoDyExample_learn | chap_2_blog/learn_django/settings.py | Python | mit | 2,869 |
#!/usr/bin/env python
from ..common import *
from .universal import *
__all__ = ['kakao_download']
def kakao_download(url, output_dir='.', info_only=False, **kwargs):
    """Download a video from tv.kakao.com.

    Scrapes the page for the video id and title, queries the Daum
    videofarm metadata API for the stream size, then downloads the MP4.
    Falls back to the generic universal extractor when scraping fails.

    Parameters
    ----------
    url : str
        Page URL of the video.
    output_dir : str
        Directory in which to store the downloaded file.
    info_only : bool
        If True, only print stream information; do not download.
    **kwargs
        Extra options forwarded to the download helpers (e.g. ``merge``).
    """
    json_request_url = 'https://videofarm.daum.net/controller/api/closed/v1_2/IntegratedMovieData.json?vid={}'
    # Playlists are not supported in this implementation: strip the query
    # string (e.g. playlistId) and handle only the single addressed video.
    if re.search('playlistId', url):
        url = re.search(r"(.+)\?.+?", url).group(1)
    page = get_content(url)
    try:
        vid = re.search(r"<meta name=\"vid\" content=\"(.+)\">", page).group(1)
        title = re.search(r"<meta name=\"title\" content=\"(.+)\">", page).group(1)
        meta_str = get_content(json_request_url.format(vid))
        meta_json = json.loads(meta_str)
        standard_preset = meta_json['output_list']['standard_preset']
        output_videos = meta_json['output_list']['output_list']
        size = ''
        # SMR videos expose a fixed 480p preset; others use the site's
        # standard preset to look up the file size.
        if meta_json['svcname'] == 'smr_pip':
            for v in output_videos:
                if v['preset'] == 'mp4_PIP_SMR_480P':
                    size = int(v['filesize'])
                    break
        else:
            for v in output_videos:
                if v['preset'] == standard_preset:
                    size = int(v['filesize'])
                    break
        video_url = meta_json['location']['url']
        print_info(site_info, title, 'mp4', size)
        if not info_only:
            download_urls([video_url], title, 'mp4', size, output_dir, **kwargs)
    except Exception:
        # Catch only real errors -- a bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit. `merge` is popped before the
        # fallback call: the original `merge=kwargs['merge'], **kwargs`
        # passed the keyword twice (TypeError) whenever kwargs held it.
        merge = kwargs.pop('merge', True)
        universal_download(url, output_dir, merge=merge,
                           info_only=info_only, **kwargs)
# Extractor entry points expected by the you-get dispatcher.
site_info = "tv.kakao.com"
download = kakao_download
download_playlist = playlist_not_supported('kakao')
| xyuanmu/you-get | src/you_get/extractors/kakao.py | Python | mit | 1,771 |
# Copyright (c) 2010 ActiveState Software Inc. All rights reserved.
# This module is deprecated
from appdirs import *
#---- self test code
if __name__ == "__main__":
    # Smoke-test the re-exported appdirs helpers.
    for scope, locate in (("user data", user_data_dir),
                          ("site data", site_data_dir),
                          ("user cache", user_cache_dir)):
        print("applib: %s dir: %s" % (scope, locate("Komodo", "ActiveState")))
| igemsoftware/SYSU-Software2013 | project/Python27_32/Lib/site-packages/pypm/external/applib/location.py | Python | mit | 414 |
import Gears as gears
from .. import *
import traceback
import inspect
from .Base import *
class Generic(Base):
    """Generic stimulus pass: rasterizes a pattern component (``pif``)
    modulated by an alpha mask, generating the fragment shader source
    used to render the pass."""

    def __init__(self, **args):
        super().__init__(**args)

    def boot(
            self,
            *,
            name            : 'Pass name to display in sequence overview plot.'
                            = 'Generic',
            duration        : 'Pass time in frames. Defaults to stimulus duration. Superseded by duration_s is given.'
                            = 0,
            duration_s      : 'Pass time in seconds (takes precendece over duration given in frames).'
                            = 0,
            pif             : 'Root pif component. (Pif.* or Composition.*)'
                            = Pif.Solid(),
            alphaMask       : 'Pif component that provides the opacity when multiple passes or quads are combined. (Pif.* or Composition.*)'
                            = Pif.Solid(color=0.5),
            rasterizingMode : 'The type of geometry to be rasterized for the spass. (fullscreen/bezier/triangles/quads)'
                            = 'fullscreen',
            polygonMask     : 'Data defining the geometry to be rasterized.'
                            = [{'x':0, 'y':0}, {'x':0, 'y':1}, {'x':1, 'y':0}],
            polygonMotion   : 'Motion component for polygon animation. (Motion.*)'
                            = Motion.Linear()
            ):
        """Configure the pass: resolve its duration, set up the geometry
        to rasterize, and build the stimulus-generator shader source."""
        self.name = name
        self.duration = duration
        sequence = self.getSequence()
        stimulus = self.getStimulus()
        # (Removed a duplicated `self.duration = duration` assignment that
        # re-set the same value a second time.)
        if(duration == 0):
            self.duration = stimulus.getDuration()
        if(duration_s != 0):
            # duration_s takes precedence over the frame count.
            self.duration = int(duration_s // sequence.getFrameInterval_s() + 1)
        self.setPolygonMask(rasterizingMode, polygonMask)
        if rasterizingMode == 'triangles':
            polygonMotion.applyForward(self, 'polygonMotionTransform')
        pif.apply(self, "fig")
        alphaMask.apply(self, "alphaMask")
        # Fragment shader: sample the figure and alpha mask, apply temporal
        # processing and tone mapping, then optionally swizzle channel pairs
        # for FFT processing.
        self.stimulusGeneratorShaderSource = """
            in vec2 pos;
            in vec2 fTexCoord;
            void main() { vec4 outcolor = vec4(
                fig(pos, time), alphaMask(pos, time).x );
                outcolor.rgb = temporalProcess(outcolor.rgb, fTexCoord);
                outcolor.rgb = toneMap(outcolor.rgb);
                if(swizzleForFft == 0x00020000)
                    outcolor = vec4(outcolor.r, 0, outcolor.g, 0);
                    //outcolor = vec4(0, 0, 0, 0);
                else if(swizzleForFft == 0x00000406)
                    outcolor = vec4(outcolor.b, 0, outcolor.a, 0);
                gl_FragData[0] = outcolor;
                }
        """
| szecsi/Gears | GearsPy/Project/Components/Pass/Generic.py | Python | gpl-2.0 | 2,781 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.