| repo_name | ref | path | copies | content |
|---|---|---|---|---|
MaxTakahashi/hammr | refs/heads/master | tests/unit/commands/scan/__init__.py | 12133432 | |
hellhovnd/django | refs/heads/master | tests/field_defaults/__init__.py | 12133432 | |
imtapps/django-dynamic-validation | refs/heads/master | dynamic_validation/migrations/__init__.py | 12133432 | |
mattgiguere/scikit-learn | refs/heads/master | sklearn/tests/__init__.py | 12133432 | |
JavML/django | refs/heads/master | tests/contenttypes_tests/__init__.py | 12133432 | |
yfli/django-tastypie | refs/heads/master | tastypie/management/commands/__init__.py | 12133432 | |
manqala/erpnext | refs/heads/develop | erpnext/docs/user/manual/en/human-resources/setup/__init__.py | 12133432 | |
hyperized/ansible | refs/heads/devel | lib/ansible/module_utils/network/ios/argspec/l2_interfaces/__init__.py | 12133432 | |
ibinti/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/conf/locale/en_GB/__init__.py | 12133432 | |
ABaldwinHunter/django-clone-classic | refs/heads/master | django/contrib/redirects/migrations/__init__.py | 12133432 | |
mila/django-urldecorators | refs/heads/master | urldecorators/tests/__init__.py | 1 |
from .tests import * |
dgarros/ansible | refs/heads/devel | lib/ansible/modules/network/nxos/nxos_acl.py | 29 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: nxos_acl
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages access list entries for ACLs.
description:
- Manages access list entries for ACLs.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- C(state=absent) removes the ACE if it exists.
- C(state=delete_acl) deletes the ACL if it exists.
    - For idempotency, use port numbers for the src/dest port
      params like I(src_port1), and protocol names for well-known
      protocols in the I(proto) param.
    - The module is idempotent in the sense that if the ACE presented
      in the task is identical to the one on the switch, no changes are
      made. If there is any difference, the existing ACE is removed and
      re-added with exactly the parameters given in the task, so any
      options configured on the switch but omitted from the task are
      overridden.
options:
seq:
description:
- Sequence number of the entry (ACE).
required: false
default: null
name:
description:
            - Case-sensitive name of the access list (ACL).
required: true
action:
description:
- Action of the ACE.
required: false
default: null
choices: ['permit', 'deny', 'remark']
remark:
description:
- If action is set to remark, this is the description.
required: false
default: null
proto:
description:
- Port number or protocol (as supported by the switch).
required: false
default: null
src:
description:
            - Source IP address and mask in IP/MASK notation; the
              keyword 'any' is also supported.
required: false
default: null
src_port_op:
description:
- Source port operands such as eq, neq, gt, lt, range.
required: false
default: null
choices: ['any', 'eq', 'gt', 'lt', 'neq', 'range']
src_port1:
description:
- Port/protocol and also first (lower) port when using range
operand.
required: false
default: null
src_port2:
description:
- Second (end) port when using range operand.
required: false
default: null
dest:
description:
            - Destination IP address and mask in IP/MASK notation;
              the keyword 'any' is also supported.
required: false
default: null
dest_port_op:
description:
- Destination port operands such as eq, neq, gt, lt, range.
required: false
default: null
choices: ['any', 'eq', 'gt', 'lt', 'neq', 'range']
dest_port1:
description:
- Port/protocol and also first (lower) port when using range
operand.
required: false
default: null
dest_port2:
description:
- Second (end) port when using range operand.
required: false
default: null
log:
description:
- Log matches against this entry.
required: false
default: null
choices: ['enable']
urg:
description:
- Match on the URG bit.
required: false
default: null
choices: ['enable']
ack:
description:
- Match on the ACK bit.
required: false
default: null
choices: ['enable']
psh:
description:
- Match on the PSH bit.
required: false
default: null
choices: ['enable']
rst:
description:
- Match on the RST bit.
required: false
default: null
choices: ['enable']
syn:
description:
- Match on the SYN bit.
required: false
default: null
choices: ['enable']
fin:
description:
- Match on the FIN bit.
required: false
default: null
choices: ['enable']
established:
description:
- Match established connections.
required: false
default: null
choices: ['enable']
fragments:
description:
- Check non-initial fragments.
required: false
default: null
choices: ['enable']
time-range:
description:
- Name of time-range to apply.
required: false
default: null
precedence:
description:
- Match packets with given precedence.
required: false
default: null
choices: ['critical', 'flash', 'flash-override', 'immediate',
'internet', 'network', 'priority', 'routine']
dscp:
description:
- Match packets with given dscp value.
required: false
default: null
choices: ['af11', 'af12', 'af13', 'af21', 'af22', 'af23','af31','af32',
'af33', 'af41', 'af42', 'af43', 'cs1', 'cs2', 'cs3', 'cs4',
'cs5', 'cs6', 'cs7', 'default', 'ef']
state:
description:
- Specify desired state of the resource.
required: false
default: present
choices: ['present','absent','delete_acl']
'''
EXAMPLES = '''
# configure ACL ANSIBLE
- nxos_acl:
name: ANSIBLE
seq: 10
action: permit
proto: tcp
src: 1.1.1.1/24
dest: any
state: present
provider: "{{ nxos_provider }}"
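
# Illustrative follow-up tasks (same hypothetical ACL name as above):
# remove a single ACE, then remove the whole ACL.
- nxos_acl:
    name: ANSIBLE
    seq: 10
    state: absent
    provider: "{{ nxos_provider }}"

- nxos_acl:
    name: ANSIBLE
    state: delete_acl
    provider: "{{ nxos_provider }}"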
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["ip access-list ANSIBLE", "10 permit tcp 1.1.1.1/24 any"]
'''
from ansible.module_utils.nxos import load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
if module.params['transport'] == 'cli':
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
return body
def get_acl(module, acl_name, seq_number):
command = 'show ip access-list'
new_acl = []
saveme = {}
acl_body = {}
body = execute_show_command(command, module)[0]
all_acl_body = body['TABLE_ip_ipv6_mac']['ROW_ip_ipv6_mac']
for acl in all_acl_body:
if acl.get('acl_name') == acl_name:
acl_body = acl
try:
acl_entries = acl_body['TABLE_seqno']['ROW_seqno']
acl_name = acl_body.get('acl_name')
except KeyError: # could be raised if no ACEs are configured for an ACL
return {}, [{'acl': 'no_entries'}]
if isinstance(acl_entries, dict):
acl_entries = [acl_entries]
for each in acl_entries:
temp = {}
options = {}
remark = each.get('remark')
temp['name'] = acl_name
temp['seq'] = str(each.get('seqno'))
if remark:
temp['remark'] = remark
temp['action'] = 'remark'
else:
temp['action'] = each.get('permitdeny')
temp['proto'] = each.get('proto', each.get('proto_str', each.get('ip')))
temp['src'] = each.get('src_any', each.get('src_ip_prefix'))
temp['src_port_op'] = each.get('src_port_op')
temp['src_port1'] = each.get('src_port1_num')
temp['src_port2'] = each.get('src_port2_num')
temp['dest'] = each.get('dest_any', each.get('dest_ip_prefix'))
temp['dest_port_op'] = each.get('dest_port_op')
temp['dest_port1'] = each.get('dest_port1_num')
temp['dest_port2'] = each.get('dest_port2_num')
options['log'] = each.get('log')
options['urg'] = each.get('urg')
options['ack'] = each.get('ack')
options['psh'] = each.get('psh')
options['rst'] = each.get('rst')
options['syn'] = each.get('syn')
options['fin'] = each.get('fin')
options['established'] = each.get('established')
options['dscp'] = each.get('dscp_str')
options['precedence'] = each.get('precedence_str')
options['fragments'] = each.get('fragments')
options['time_range'] = each.get('timerange')
keep = {}
for key, value in temp.items():
if value:
keep[key] = value
options_no_null = {}
for key, value in options.items():
if value is not None:
options_no_null[key] = value
keep['options'] = options_no_null
if keep.get('seq') == seq_number:
saveme = dict(keep)
new_acl.append(keep)
return saveme, new_acl
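
# The helper below builds the port-match portion of an ACE. Illustrative
# values: _acl_operand('eq', '80', None) returns ' eq 80';
# _acl_operand('range', '1024', '2048') returns ' range 1024 2048'.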
def _acl_operand(operand, srcp1, srcp2):
    sub_entry = ' ' + operand
    if operand == 'range':
        sub_entry += ' ' + srcp1 + ' ' + srcp2
    else:
        sub_entry += ' ' + srcp1
    return sub_entry
def config_core_acl(proposed):
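    # Assemble the core ACE string from the proposed params; the documented
    # example task yields '10 permit tcp 1.1.1.1/24 any' (see the RETURN
    # sample above).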
seq = proposed.get('seq')
action = proposed.get('action')
remark = proposed.get('remark')
proto = proposed.get('proto')
src = proposed.get('src')
src_port_op = proposed.get('src_port_op')
src_port1 = proposed.get('src_port1')
src_port2 = proposed.get('src_port2')
dest = proposed.get('dest')
dest_port_op = proposed.get('dest_port_op')
dest_port1 = proposed.get('dest_port1')
dest_port2 = proposed.get('dest_port2')
ace_start_entries = [action, proto, src]
if not remark:
ace = seq + ' ' + ' '.join(ace_start_entries)
if src_port_op:
ace += _acl_operand(src_port_op, src_port1, src_port2)
ace += ' ' + dest
if dest_port_op:
ace += _acl_operand(dest_port_op, dest_port1, dest_port2)
else:
ace = seq + ' remark ' + remark
return ace
def config_acl_options(options):
    ENABLE_ONLY = ['psh', 'urg', 'log', 'ack', 'syn',
                   'established', 'rst', 'fin', 'fragments']
OTHER = ['dscp', 'precedence', 'time-range']
# packet-length is the only option not currently supported
if options.get('time_range'):
options['time-range'] = options.get('time_range')
options.pop('time_range')
command = ''
for option, value in options.items():
if option in ENABLE_ONLY:
if value == 'enable':
command += ' ' + option
elif option in OTHER:
command += ' ' + option + ' ' + value
if command:
command = command.strip()
return command
def flatten_list(command_lists):
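    # Flattens one level of nesting, e.g. (values from the RETURN sample)
    #   [['ip access-list ANSIBLE'], ['10 permit tcp 1.1.1.1/24 any']]
    # becomes
    #   ['ip access-list ANSIBLE', '10 permit tcp 1.1.1.1/24 any']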
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def main():
argument_spec = dict(
seq=dict(required=False, type='str'),
name=dict(required=True, type='str'),
action=dict(required=False, choices=['remark', 'permit', 'deny']),
remark=dict(required=False, type='str'),
proto=dict(required=False, type='str'),
src=dict(required=False, type='str'),
src_port_op=dict(required=False),
src_port1=dict(required=False, type='str'),
src_port2=dict(required=False, type='str'),
dest=dict(required=False, type='str'),
dest_port_op=dict(required=False),
dest_port1=dict(required=False, type='str'),
dest_port2=dict(required=False, type='str'),
log=dict(required=False, choices=['enable']),
urg=dict(required=False, choices=['enable']),
ack=dict(required=False, choices=['enable']),
psh=dict(required=False, choices=['enable']),
rst=dict(required=False, choices=['enable']),
syn=dict(required=False, choices=['enable']),
fragments=dict(required=False, choices=['enable']),
fin=dict(required=False, choices=['enable']),
established=dict(required=False, choices=['enable']),
time_range=dict(required=False),
precedence=dict(required=False, choices=['critical', 'flash',
'flash-override',
'immediate', 'internet',
'network', 'priority',
'routine']),
dscp=dict(required=False, choices=['af11', 'af12', 'af13', 'af21',
'af22', 'af23', 'af31', 'af32',
'af33', 'af41', 'af42', 'af43',
'cs1', 'cs2', 'cs3', 'cs4',
'cs5', 'cs6', 'cs7', 'default',
'ef']),
state=dict(choices=['absent', 'present', 'delete_acl'], default='present'),
protocol=dict(choices=['http', 'https'], default='http'),
host=dict(required=True),
username=dict(type='str'),
password=dict(no_log=True, type='str'),
include_defaults=dict(default=False),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
results = dict(changed=False, warnings=warnings)
state = module.params['state']
action = module.params['action']
remark = module.params['remark']
dscp = module.params['dscp']
precedence = module.params['precedence']
    seq = module.params['seq']
    name = module.params['name']
if action == 'remark' and not remark:
        module.fail_json(msg='when action is remark, the remark param is also required')
REQUIRED = ['seq', 'name', 'action', 'proto', 'src', 'dest']
ABSENT = ['name', 'seq']
if state == 'present':
        # a remark entry only requires action, remark and seq
        if not (action and remark and seq):
            for each in REQUIRED:
                if module.params[each] is None:
                    module.fail_json(msg='required params when state is present:',
                                     params=REQUIRED)
elif state == 'absent':
for each in ABSENT:
if module.params[each] is None:
                module.fail_json(msg='required params when state is absent',
params=ABSENT)
elif state == 'delete_acl':
if module.params['name'] is None:
module.fail_json(msg="param name req'd when state is delete_acl")
if dscp and precedence:
module.fail_json(msg='only one of the params dscp/precedence '
'are allowed')
OPTIONS_NAMES = ['log', 'urg', 'ack', 'psh', 'rst', 'syn', 'fin',
'established', 'dscp', 'precedence', 'fragments',
'time_range']
CORE = ['seq', 'name', 'action', 'proto', 'src', 'src_port_op',
'src_port1', 'src_port2', 'dest', 'dest_port_op',
'dest_port1', 'dest_port2', 'remark']
proposed_core = dict((param, value) for (param, value) in
module.params.items()
if param in CORE and value is not None)
proposed_options = dict((param, value) for (param, value) in
module.params.items()
if param in OPTIONS_NAMES and value is not None)
proposed = {}
proposed.update(proposed_core)
proposed.update(proposed_options)
existing_options = {}
    # get the existing ACE (existing_core, dict) and the full ACL (acl, list)
existing_core, acl = get_acl(module, name, seq)
if existing_core:
existing_options = existing_core.get('options')
existing_core.pop('options')
commands = []
delta_core = {}
delta_options = {}
if not existing_core.get('remark'):
delta_core = dict(
set(proposed_core.items()).difference(
existing_core.items())
)
delta_options = dict(
set(proposed_options.items()).difference(
existing_options.items())
)
if state == 'present':
if delta_core or delta_options:
if existing_core: # if the ace exists already
commands.append(['no {0}'.format(seq)])
if delta_options:
myacl_str = config_core_acl(proposed_core)
myacl_str += ' ' + config_acl_options(proposed_options)
else:
myacl_str = config_core_acl(proposed_core)
command = [myacl_str]
commands.append(command)
elif state == 'absent':
if existing_core:
commands.append(['no {0}'.format(seq)])
elif state == 'delete_acl':
if acl[0].get('acl') != 'no_entries':
commands.append(['no ip access-list {0}'.format(name)])
cmds = []
if commands:
preface = []
if state in ['present', 'absent']:
preface = ['ip access-list {0}'.format(name)]
commands.insert(0, preface)
cmds = flatten_list(commands)
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
results['changed'] = True
if 'configure' in cmds:
cmds.pop(0)
results['commands'] = cmds
module.exit_json(**results)
if __name__ == '__main__':
main()
|
handroissuazo/tensorflow | refs/heads/master | tensorflow/python/summary/impl/directory_watcher_test.py | 37 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for directory_watcher."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.summary.impl import directory_watcher
from tensorflow.python.summary.impl import io_wrapper
class _ByteLoader(object):
"""A loader that loads individual bytes from a file."""
def __init__(self, path):
self._f = open(path)
self.bytes_read = 0
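
  # Illustrative behaviour: for a file containing 'abc', successive Load()
  # calls yield 'a', 'b', 'c', resuming from bytes_read each time (this is
  # exercised by the tests below).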
def Load(self):
while True:
self._f.seek(self.bytes_read)
byte = self._f.read(1)
if byte:
self.bytes_read += 1
yield byte
else:
return
class DirectoryWatcherTest(test_util.TensorFlowTestCase):
def setUp(self):
# Put everything in a directory so it's easier to delete.
self._directory = os.path.join(self.get_temp_dir(), 'monitor_dir')
os.mkdir(self._directory)
self._watcher = directory_watcher.DirectoryWatcher(self._directory,
_ByteLoader)
self.stubs = googletest.StubOutForTesting()
def tearDown(self):
self.stubs.CleanUp()
try:
shutil.rmtree(self._directory)
except OSError:
# Some tests delete the directory.
pass
def _WriteToFile(self, filename, data):
path = os.path.join(self._directory, filename)
with open(path, 'a') as f:
f.write(data)
def _LoadAllEvents(self):
"""Loads all events in the watcher."""
for _ in self._watcher.Load():
pass
def assertWatcherYields(self, values):
self.assertEqual(list(self._watcher.Load()), values)
def testRaisesWithBadArguments(self):
with self.assertRaises(ValueError):
directory_watcher.DirectoryWatcher(None, lambda x: None)
with self.assertRaises(ValueError):
directory_watcher.DirectoryWatcher('dir', None)
def testEmptyDirectory(self):
self.assertWatcherYields([])
def testSingleWrite(self):
self._WriteToFile('a', 'abc')
self.assertWatcherYields(['a', 'b', 'c'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testMultipleWrites(self):
self._WriteToFile('a', 'abc')
self.assertWatcherYields(['a', 'b', 'c'])
self._WriteToFile('a', 'xyz')
self.assertWatcherYields(['x', 'y', 'z'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testMultipleLoads(self):
self._WriteToFile('a', 'a')
self._watcher.Load()
self._watcher.Load()
self.assertWatcherYields(['a'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testMultipleFilesAtOnce(self):
self._WriteToFile('b', 'b')
self._WriteToFile('a', 'a')
self.assertWatcherYields(['a', 'b'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testFinishesLoadingFileWhenSwitchingToNewFile(self):
self._WriteToFile('a', 'a')
# Empty the iterator.
    self.assertEqual(['a'], list(self._watcher.Load()))
self._WriteToFile('a', 'b')
self._WriteToFile('b', 'c')
# The watcher should finish its current file before starting a new one.
self.assertWatcherYields(['b', 'c'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testIntermediateEmptyFiles(self):
self._WriteToFile('a', 'a')
self._WriteToFile('b', '')
self._WriteToFile('c', 'c')
self.assertWatcherYields(['a', 'c'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testPathFilter(self):
self._watcher = directory_watcher.DirectoryWatcher(
self._directory, _ByteLoader,
lambda path: 'do_not_watch_me' not in path)
self._WriteToFile('a', 'a')
self._WriteToFile('do_not_watch_me', 'b')
self._WriteToFile('c', 'c')
self.assertWatcherYields(['a', 'c'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testDetectsNewOldFiles(self):
self._WriteToFile('b', 'a')
self._LoadAllEvents()
self._WriteToFile('a', 'a')
self._LoadAllEvents()
self.assertTrue(self._watcher.OutOfOrderWritesDetected())
def testIgnoresNewerFiles(self):
self._WriteToFile('a', 'a')
self._LoadAllEvents()
self._WriteToFile('q', 'a')
self._LoadAllEvents()
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testDetectsChangingOldFiles(self):
self._WriteToFile('a', 'a')
self._WriteToFile('b', 'a')
self._LoadAllEvents()
self._WriteToFile('a', 'c')
self._LoadAllEvents()
self.assertTrue(self._watcher.OutOfOrderWritesDetected())
def testDoesntCrashWhenFileIsDeleted(self):
self._WriteToFile('a', 'a')
self._LoadAllEvents()
os.remove(os.path.join(self._directory, 'a'))
self._WriteToFile('b', 'b')
self.assertWatcherYields(['b'])
def testRaisesRightErrorWhenDirectoryIsDeleted(self):
self._WriteToFile('a', 'a')
self._LoadAllEvents()
shutil.rmtree(self._directory)
with self.assertRaises(directory_watcher.DirectoryDeletedError):
self._LoadAllEvents()
def testDoesntRaiseDirectoryDeletedErrorIfOutageIsTransient(self):
self._WriteToFile('a', 'a')
self._LoadAllEvents()
shutil.rmtree(self._directory)
# Fake a single transient I/O error.
def FakeFactory(original):
      def Fake(*args, **kwargs):
        if FakeFactory.has_been_called:
          return original(*args, **kwargs)
        else:
          # Mark the factory as called so only the first call fails.
          FakeFactory.has_been_called = True
          raise OSError('lp0 temporarily on fire')
return Fake
FakeFactory.has_been_called = False
for stub_name in ['ListDirectoryAbsolute', 'ListRecursively']:
self.stubs.Set(io_wrapper, stub_name,
FakeFactory(getattr(io_wrapper, stub_name)))
for stub_name in ['IsDirectory', 'Exists', 'Stat']:
self.stubs.Set(gfile, stub_name,
FakeFactory(getattr(gfile, stub_name)))
with self.assertRaises((IOError, OSError)):
self._LoadAllEvents()
if __name__ == '__main__':
googletest.main()
|
metno/modelstatus | refs/heads/master | productstatus/core/tests/test_kafka_resource.py | 2 | from . import ProductstatusResourceTest
from django.conf import settings
import tastypie.exceptions
class KafkaResourceTest(ProductstatusResourceTest):
RESPONSE_OBJECT = {
u'id': 'default',
u'topic': settings.KAFKA_TOPIC,
u'brokers': settings.KAFKA_BROKERS,
u'ssl': settings.KAFKA_SSL,
u'ssl_verify': settings.KAFKA_SSL_VERIFY,
u'heartbeat_interval': settings.KAFKA_HEARTBEAT_INTERVAL,
u'resource_uri': '/api/v1/kafka/default/',
}
def test_get_collection(self):
"""!
@brief Test that the Kafka configuration details are returned as a list.
"""
url = "%s/kafka/" % self.url_prefix
response = self.api_client.get(url, format='json')
self.assertValidJSONResponse(response)
decoded_content = self.unserialize(response)
self.assertListEqual(decoded_content['objects'], [self.RESPONSE_OBJECT])
def test_get_resource_default(self):
"""!
@brief Test that the Kafka configuration details are returned.
"""
url = "%s/kafka/default/" % self.url_prefix
response = self.api_client.get(url, format='json')
self.assertValidJSONResponse(response)
decoded_content = self.unserialize(response)
self.assertDictEqual(decoded_content, self.RESPONSE_OBJECT)
def test_get_resource_not_found(self):
"""!
@brief Test that the Kafka configuration details are not returned for a
resource id other than 'default'.
"""
url = "%s/kafka/foo/" % self.url_prefix
with self.assertRaises(tastypie.exceptions.NotFound):
self.api_client.get(url, format='json')
|
faassen/imagestore | refs/heads/master | src/imagestore/imagecontainer.py | 2 | import grok
from lxml import etree
from z3c.blobfile.image import Image
from zope.exceptions.interfaces import DuplicationError
from imagestore.interfaces import IRest
from imagestore.xml import XmlContainerBase, NS, XmlContainerFactoryBase
from imagestore.rest import StoreLayer, success_message, error_message, embed_http
from imagestore.util import is_legal_name
from imagestore.rest import Read, Write
class ImageContainer(grok.Container):
grok.implements(IRest)
class ImageContainerXml(XmlContainerBase):
tag = 'images'
is_deletable = False
is_replacable = False
class ImageContainerFactory(XmlContainerFactoryBase):
grok.name('{%s}images' % NS)
def factory(self):
result = ImageContainer()
result.__name__ = 'images'
return result
class Factory(grok.View):
"""This is a form-driven POST factory.
It exists to help clients that cannot do proper REST submits with
binary data.
"""
grok.layer(StoreLayer)
grok.require(Write)
tree = None
def update(self, **kw):
form = self.request.form
# get the Filedata
data = form.get('Filedata', None)
if data is None:
self.response.setStatus(400, 'Bad Request')
self.tree = error_message("Filedata is missing from request.")
return
# let's try to get the filename wherever we can
slug = form.get('slug', None)
if slug is None:
slug = form.get('Filename', None)
if slug is None:
slug = data.filename
self.tree = create_image(self.context, self.request, self.response,
slug, data.read())
def render(self):
embed_http(self.request, self.response, self.tree)
return etree.tostring(self.tree, encoding='UTF-8')
class Rest(grok.REST):
grok.layer(StoreLayer)
@grok.require(Write)
def POST(self):
self.response.setHeader('Content-Type',
'application/xml; charset=UTF-8')
slug = self.request.getHeader('Slug', None)
tree = create_image(self.context, self.request, self.response,
slug, self.body)
embed_http(self.request, self.response, tree)
return etree.tostring(tree, encoding='UTF-8')
def create_image(context, request, response, slug, data):
if slug is None:
response.setStatus(400, 'Bad Request')
return error_message('Slug header is missing from request.')
if not is_legal_name(slug):
response.setStatus(400, 'Bad Request')
return error_message(
"Slug name '%s' contains illegal characters." % slug)
try:
context[slug] = image = Image()
# XXX apparently location info isn't set up properly in Image
image.__parent__ = context
image.__name__ = slug
except DuplicationError:
response.setHeader('Location',
grok.url(request, context[slug]))
response.setStatus(
409, 'Conflict')
return error_message(
"There is already a resource with this name in this location.")
image.data = data
response.setStatus(201, 'Created')
response.setHeader('Location', grok.url(request, image))
return success_message()
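
# A minimal client-side sketch of the POST handled above (hypothetical URL
# and filename; assumes the ``requests`` library): the Slug header names
# the image and the request body carries the raw bytes.
#
#   import requests
#   with open('photo.jpg', 'rb') as f:
#       requests.post('http://localhost:8080/app/images',
#                     headers={'Slug': 'photo.jpg'},
#                     data=f.read())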
|
otherness-space/myProject002 | refs/heads/master | my_project_002/lib/python2.7/site-packages/bson/tz_util.py | 20 | # Copyright 2010-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Timezone related utilities for BSON."""
from datetime import (timedelta,
tzinfo)
ZERO = timedelta(0)
class FixedOffset(tzinfo):
"""Fixed offset timezone, in minutes east from UTC.
Implementation based from the Python `standard library documentation
<http://docs.python.org/library/datetime.html#tzinfo-objects>`_.
Defining __getinitargs__ enables pickling / copying.
"""
def __init__(self, offset, name):
if isinstance(offset, timedelta):
self.__offset = offset
else:
self.__offset = timedelta(minutes=offset)
self.__name = name
def __getinitargs__(self):
return self.__offset, self.__name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
utc = FixedOffset(0, "UTC")
"""Fixed offset timezone representing UTC."""
|
tictakk/servo | refs/heads/ticbranch | tests/wpt/harness/wptrunner/browsers/chrome.py | 99 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from .base import Browser, ExecutorBrowser, require_arg
from ..webdriver_server import ChromeDriverServer
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorselenium import (SeleniumTestharnessExecutor,
SeleniumRefTestExecutor)
__wptrunner__ = {"product": "chrome",
"check_args": "check_args",
"browser": "ChromeBrowser",
"executor": {"testharness": "SeleniumTestharnessExecutor",
"reftest": "SeleniumRefTestExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_options": "env_options"}
def check_args(**kwargs):
require_arg(kwargs, "webdriver_binary")
def browser_kwargs(**kwargs):
return {"binary": kwargs["binary"],
"webdriver_binary": kwargs["webdriver_binary"]}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
from selenium.webdriver import DesiredCapabilities
executor_kwargs = base_executor_kwargs(test_type, server_config,
cache_manager, **kwargs)
executor_kwargs["close_after_done"] = True
executor_kwargs["capabilities"] = dict(DesiredCapabilities.CHROME.items())
if kwargs["binary"] is not None:
executor_kwargs["capabilities"]["chromeOptions"] = {"binary": kwargs["binary"]}
return executor_kwargs
def env_options():
return {"host": "web-platform.test",
"bind_hostname": "true"}
class ChromeBrowser(Browser):
"""Chrome is backed by chromedriver, which is supplied through
    ``wptrunner.webdriver_server.ChromeDriverServer``.
"""
def __init__(self, logger, binary, webdriver_binary="chromedriver"):
"""Creates a new representation of Chrome. The `binary` argument gives
the browser binary to use for testing."""
Browser.__init__(self, logger)
self.binary = binary
self.server = ChromeDriverServer(self.logger, binary=webdriver_binary)
def start(self):
self.server.start(block=False)
def stop(self):
self.server.stop()
def pid(self):
return self.server.pid
def is_alive(self):
# TODO(ato): This only indicates the driver is alive,
# and doesn't say anything about whether a browser session
# is active.
return self.server.is_alive()
def cleanup(self):
self.stop()
def executor_browser(self):
return ExecutorBrowser, {"webdriver_url": self.server.url}
|
trezorg/django | refs/heads/master | django/conf/locale/bg/formats.py | 316 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i:s'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = u'\xa0'  # Non-breaking space (U+00A0)
# NUMBER_GROUPING =
|
clairetang6/bokeh | refs/heads/master | bokeh/mixins.py | 4 | from bokeh.util.deprecate import deprecated_module
deprecated_module('bokeh.mixins', '0.11', 'use bokeh.core.property_mixins instead')
del deprecated_module
from .core.property_mixins import * # NOQA
|
sanyaade-mobiledev/clusto | refs/heads/master | src/clusto/drivers/devices/appliance/basicappliance.py | 7 |
from clusto.drivers import Device
from clusto.drivers.devices import PortMixin, IPMixin
class BasicAppliance(IPMixin, PortMixin, Device):
"""
Basic appliance Driver
"""
_clusto_type = 'appliance'
_driver_name = 'basicappliance'
_portmeta = { 'pwr-nema-5' : { 'numports':2, },
'nic-eth' : { 'numports':1, },
'console-serial' : { 'numports':1, },
}
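
    # With this metadata the driver models two NEMA 5 power ports, one
    # ethernet NIC port and one serial console port (assuming PortMixin
    # enumerates ports from _portmeta, as other clusto device drivers do).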
|
ianunruh/hvac | refs/heads/master | hvac/constants/azure.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Constants related to the Azure auth method and/or secrets engine."""
VALID_ENVIRONMENTS = [
'AzurePublicCloud',
'AzureUSGovernmentCloud',
'AzureChinaCloud',
'AzureGermanCloud',
]
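
# A small validation sketch (hypothetical helper, not part of hvac's
# public API):
#
#   def validate_environment(environment):
#       if environment not in VALID_ENVIRONMENTS:
#           raise ValueError('invalid Azure environment: %s' % environment)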
|
XiaodunServerGroup/xiaodun-platform | refs/heads/master | lms/djangoapps/instructor_task/__init__.py | 12133432 | |
unicef/un-partner-portal | refs/heads/develop | backend/unpp_api/apps/public/__init__.py | 12133432 | |
mith1979/ansible_automation | refs/heads/master | applied_python/applied_python/lib/python2.7/site-packages/pylint/test/data/__init__.py | 12133432 | |
michael-borisov/django-social-auth | refs/heads/master | example/example/__init__.py | 12133432 | |
praekelt/jmbo-foundry | refs/heads/develop | foundry/context_processors.py | 1 | from django.contrib.sites.models import get_current_site
from django.conf import settings
from foundry.utils import get_preference
def foundry(request):
# get_preference, get_current_site do caching
return {
'FOUNDRY': settings.FOUNDRY,
'LAYER_PATH': settings.LAYERS['layers'][-1] + '/',
'CURRENT_SITE': get_current_site(request),
'ANALYTICS_TAGS': get_preference('GeneralPreferences', 'analytics_tags'),
'SITE_DESCRIPTION': get_preference('GeneralPreferences', 'site_description'),
'FOUNDRY_HAS_FACEBOOK_CONNECT': getattr(settings, 'FACEBOOK_APP_ID', '') != '',
'FOUNDRY_HAS_TWITTER_OAUTH': getattr(settings, 'TWITTER_CONSUMER_KEY', '') != '',
'FOUNDRY_HAS_GOOGLE_OAUTH2': getattr(settings, 'GOOGLE_OAUTH2_CLIENT_ID', '') != '',
'FOUNDRY_HAS_GALLERY': 'gallery' in settings.INSTALLED_APPS,
'FOUNDRY_HAS_BANNER': 'banner' in settings.INSTALLED_APPS,
}
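
# To expose these variables to templates, register the processor in the
# project settings (a sketch; Django versions of this era use
# TEMPLATE_CONTEXT_PROCESSORS):
#
#   TEMPLATE_CONTEXT_PROCESSORS += ('foundry.context_processors.foundry',)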
|
kalahbrown/HueBigSQL | refs/heads/master | desktop/core/ext-py/Pygments-1.3.1/pygments/lexers/text.py | 56 | # -*- coding: utf-8 -*-
"""
pygments.lexers.text
~~~~~~~~~~~~~~~~~~~~
Lexers for non-source code file types.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from bisect import bisect
from pygments.lexer import Lexer, LexerContext, RegexLexer, ExtendedRegexLexer, \
bygroups, include, using, this, do_insertions
from pygments.token import Punctuation, Text, Comment, Keyword, Name, String, \
Generic, Operator, Number, Whitespace, Literal
from pygments.util import get_bool_opt
from pygments.lexers.other import BashLexer
__all__ = ['IniLexer', 'SourcesListLexer', 'BaseMakefileLexer',
'MakefileLexer', 'DiffLexer', 'IrcLogsLexer', 'TexLexer',
'GroffLexer', 'ApacheConfLexer', 'BBCodeLexer', 'MoinWikiLexer',
'RstLexer', 'VimLexer', 'GettextLexer', 'SquidConfLexer',
'DebianControlLexer', 'DarcsPatchLexer', 'YamlLexer',
'LighttpdConfLexer', 'NginxConfLexer', 'CMakeLexer']
class IniLexer(RegexLexer):
"""
Lexer for configuration files in INI style.
"""
name = 'INI'
aliases = ['ini', 'cfg']
filenames = ['*.ini', '*.cfg', '*.properties']
mimetypes = ['text/x-ini']
tokens = {
'root': [
(r'\s+', Text),
(r'[;#].*?$', Comment),
(r'\[.*?\]$', Keyword),
(r'(.*?)([ \t]*)(=)([ \t]*)(.*?)$',
bygroups(Name.Attribute, Text, Operator, Text, String))
]
}
def analyse_text(text):
npos = text.find('\n')
if npos < 3:
return False
return text[0] == '[' and text[npos-1] == ']'
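
# A minimal usage sketch for these lexers (the input string and formatter
# choice are illustrative):
#
#   >>> from pygments import highlight
#   >>> from pygments.formatters import TerminalFormatter
#   >>> print highlight('[core]\nkey = value\n', IniLexer(), TerminalFormatter())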
class SourcesListLexer(RegexLexer):
"""
Lexer that highlights debian sources.list files.
*New in Pygments 0.7.*
"""
name = 'Debian Sourcelist'
aliases = ['sourceslist', 'sources.list']
filenames = ['sources.list']
    mimetypes = ['application/x-debian-sourceslist']
tokens = {
'root': [
(r'\s+', Text),
(r'#.*?$', Comment),
(r'^(deb(?:-src)?)(\s+)',
bygroups(Keyword, Text), 'distribution')
],
'distribution': [
(r'#.*?$', Comment, '#pop'),
(r'\$\(ARCH\)', Name.Variable),
(r'[^\s$[]+', String),
(r'\[', String.Other, 'escaped-distribution'),
(r'\$', String),
(r'\s+', Text, 'components')
],
'escaped-distribution': [
(r'\]', String.Other, '#pop'),
(r'\$\(ARCH\)', Name.Variable),
(r'[^\]$]+', String.Other),
(r'\$', String.Other)
],
'components': [
(r'#.*?$', Comment, '#pop:2'),
(r'$', Text, '#pop:2'),
(r'\s+', Text),
(r'\S+', Keyword.Pseudo),
]
}
def analyse_text(text):
for line in text.split('\n'):
line = line.strip()
if not (line.startswith('#') or line.startswith('deb ') or
line.startswith('deb-src ') or not line):
return False
return True
class MakefileLexer(Lexer):
"""
Lexer for BSD and GNU make extensions (lenient enough to handle both in
the same file even).
*Rewritten in Pygments 0.10.*
"""
name = 'Makefile'
aliases = ['make', 'makefile', 'mf', 'bsdmake']
filenames = ['*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile']
mimetypes = ['text/x-makefile']
r_special = re.compile(r'^(?:'
# BSD Make
r'\.\s*(include|undef|error|warning|if|else|elif|endif|for|endfor)|'
# GNU Make
r'\s*(ifeq|ifneq|ifdef|ifndef|else|endif|-?include|define|endef|:))(?=\s)')
r_comment = re.compile(r'^\s*@?#')
def get_tokens_unprocessed(self, text):
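        # Feed special BSD/GNU make lines (and comment lines) through as
        # Comment.Preproc/Comment insertions; lex everything else with
        # BaseMakefileLexer.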
ins = []
lines = text.splitlines(True)
done = ''
lex = BaseMakefileLexer(**self.options)
backslashflag = False
for line in lines:
if self.r_special.match(line) or backslashflag:
ins.append((len(done), [(0, Comment.Preproc, line)]))
backslashflag = line.strip().endswith('\\')
elif self.r_comment.match(line):
ins.append((len(done), [(0, Comment, line)]))
else:
done += line
for item in do_insertions(ins, lex.get_tokens_unprocessed(done)):
yield item
class BaseMakefileLexer(RegexLexer):
"""
Lexer for simple Makefiles (no preprocessing).
*New in Pygments 0.10.*
"""
name = 'Makefile'
aliases = ['basemake']
filenames = []
mimetypes = []
tokens = {
'root': [
(r'^(?:[\t ]+.*\n|\n)+', using(BashLexer)),
(r'\$\((?:.*\\\n|.*\n)+', using(BashLexer)),
(r'\s+', Text),
(r'#.*?\n', Comment),
(r'(export)(\s+)(?=[a-zA-Z0-9_${}\t -]+\n)',
bygroups(Keyword, Text), 'export'),
(r'export\s+', Keyword),
# assignment
(r'([a-zA-Z0-9_${}.-]+)(\s*)([!?:+]?=)([ \t]*)((?:.*\\\n|.*\n)+)',
bygroups(Name.Variable, Text, Operator, Text, using(BashLexer))),
# strings
(r'(?s)"(\\\\|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\.|[^'\\])*'", String.Single),
# targets
(r'([^\n:]+)(:+)([ \t]*)', bygroups(Name.Function, Operator, Text),
'block-header'),
# TODO: add paren handling (grr)
],
'export': [
(r'[a-zA-Z0-9_${}-]+', Name.Variable),
(r'\n', Text, '#pop'),
(r'\s+', Text),
],
'block-header': [
(r'[^,\\\n#]+', Number),
(r',', Punctuation),
(r'#.*?\n', Comment),
(r'\\\n', Text), # line continuation
(r'\\.', Text),
(r'(?:[\t ]+.*\n|\n)+', using(BashLexer), '#pop'),
],
}
class DiffLexer(RegexLexer):
"""
Lexer for unified or context-style diffs or patches.
"""
name = 'Diff'
aliases = ['diff', 'udiff']
filenames = ['*.diff', '*.patch']
mimetypes = ['text/x-diff', 'text/x-patch']
tokens = {
'root': [
(r' .*\n', Text),
(r'\+.*\n', Generic.Inserted),
(r'-.*\n', Generic.Deleted),
(r'!.*\n', Generic.Strong),
(r'@.*\n', Generic.Subheading),
(r'([Ii]ndex|diff).*\n', Generic.Heading),
(r'=.*\n', Generic.Heading),
(r'.*\n', Text),
]
}
def analyse_text(text):
if text[:7] == 'Index: ':
return True
if text[:5] == 'diff ':
return True
if text[:4] == '--- ':
return 0.9
DPATCH_KEYWORDS = ['hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move',
'replace']
class DarcsPatchLexer(RegexLexer):
"""
DarcsPatchLexer is a lexer for the various versions of the darcs patch
    format. Examples of this format are produced by commands such as
``darcs annotate --patch`` and ``darcs send``.
*New in Pygments 0.10.*
"""
name = 'Darcs Patch'
aliases = ['dpatch']
filenames = ['*.dpatch', '*.darcspatch']
tokens = {
'root': [
(r'<', Operator),
(r'>', Operator),
(r'{', Operator),
(r'}', Operator),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])',
bygroups(Operator, Keyword, Name, Text, Name, Operator,
Literal.Date, Text, Operator)),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)',
bygroups(Operator, Keyword, Name, Text, Name, Operator,
Literal.Date, Text), 'comment'),
(r'New patches:', Generic.Heading),
(r'Context:', Generic.Heading),
(r'Patch bundle hash:', Generic.Heading),
(r'(\s*)(%s)(.*\n)' % '|'.join(DPATCH_KEYWORDS),
bygroups(Text, Keyword, Text)),
(r'\+', Generic.Inserted, "insert"),
(r'-', Generic.Deleted, "delete"),
(r'.*\n', Text),
],
'comment': [
(r'[^\]].*\n', Comment),
(r'\]', Operator, "#pop"),
],
'specialText': [ # darcs add [_CODE_] special operators for clarity
(r'\n', Text, "#pop"), # line-based
(r'\[_[^_]*_]', Operator),
],
'insert': [
include('specialText'),
(r'\[', Generic.Inserted),
(r'[^\n\[]*', Generic.Inserted),
],
'delete': [
include('specialText'),
(r'\[', Generic.Deleted),
(r'[^\n\[]*', Generic.Deleted),
],
}
class IrcLogsLexer(RegexLexer):
"""
Lexer for IRC logs in *irssi*, *xchat* or *weechat* style.
"""
name = 'IRC logs'
aliases = ['irc']
filenames = ['*.weechatlog']
mimetypes = ['text/x-irclog']
flags = re.VERBOSE | re.MULTILINE
timestamp = r"""
(
# irssi / xchat and others
(?: \[|\()? # Opening bracket or paren for the timestamp
(?: # Timestamp
(?: (?:\d{1,4} [-/]?)+ # Date as - or /-separated groups of digits
[T ])? # Date/time separator: T or space
(?: \d?\d [:.]?)+ # Time as :/.-separated groups of 1 or 2 digits
)
(?: \]|\))?\s+ # Closing bracket or paren for the timestamp
|
# weechat
\d{4}\s\w{3}\s\d{2}\s # Date
\d{2}:\d{2}:\d{2}\s+ # Time + Whitespace
|
# xchat
\w{3}\s\d{2}\s # Date
\d{2}:\d{2}:\d{2}\s+ # Time + Whitespace
)?
"""
tokens = {
'root': [
# log start/end
(r'^\*\*\*\*(.*)\*\*\*\*$', Comment),
# hack
("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)),
# normal msgs
("^" + timestamp + r"""
(\s*<.*?>\s*) # Nick """,
bygroups(Comment.Preproc, Name.Tag), 'msg'),
# /me msgs
("^" + timestamp + r"""
(\s*[*]\s+) # Star
([^\s]+\s+.*?\n) # Nick + rest of message """,
bygroups(Comment.Preproc, Keyword, Generic.Inserted)),
# join/part msgs
("^" + timestamp + r"""
(\s*(?:\*{3}|<?-[!@=P]?->?)\s*) # Star(s) or symbols
([^\s]+\s+) # Nick + Space
(.*?\n) # Rest of message """,
bygroups(Comment.Preproc, Keyword, String, Comment)),
(r"^.*?\n", Text),
],
'msg': [
(r"[^\s]+:(?!//)", Name.Attribute), # Prefix
(r".*\n", Text, '#pop'),
],
}
class BBCodeLexer(RegexLexer):
"""
A lexer that highlights BBCode(-like) syntax.
*New in Pygments 0.6.*
"""
name = 'BBCode'
aliases = ['bbcode']
mimetypes = ['text/x-bbcode']
tokens = {
'root': [
(r'[^[]+', Text),
# tag/end tag begin
(r'\[/?\w+', Keyword, 'tag'),
# stray bracket
(r'\[', Text),
],
'tag': [
(r'\s+', Text),
# attribute with value
(r'(\w+)(=)("?[^\s"\]]+"?)',
bygroups(Name.Attribute, Operator, String)),
# tag argument (a la [color=green])
(r'(=)("?[^\s"\]]+"?)',
bygroups(Operator, String)),
# tag end
(r'\]', Keyword, '#pop'),
],
}
class TexLexer(RegexLexer):
"""
Lexer for the TeX and LaTeX typesetting languages.
"""
name = 'TeX'
aliases = ['tex', 'latex']
filenames = ['*.tex', '*.aux', '*.toc']
mimetypes = ['text/x-tex', 'text/x-latex']
tokens = {
'general': [
(r'%.*?\n', Comment),
(r'[{}]', Name.Builtin),
(r'[&_^]', Name.Builtin),
],
'root': [
(r'\\\[', String.Backtick, 'displaymath'),
(r'\\\(', String, 'inlinemath'),
(r'\$\$', String.Backtick, 'displaymath'),
(r'\$', String, 'inlinemath'),
(r'\\([a-zA-Z]+|.)', Keyword, 'command'),
include('general'),
(r'[^\\$%&_^{}]+', Text),
],
'math': [
(r'\\([a-zA-Z]+|.)', Name.Variable),
include('general'),
(r'[0-9]+', Number),
(r'[-=!+*/()\[\]]', Operator),
(r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
],
'inlinemath': [
(r'\\\)', String, '#pop'),
(r'\$', String, '#pop'),
include('math'),
],
'displaymath': [
(r'\\\]', String, '#pop'),
(r'\$\$', String, '#pop'),
(r'\$', Name.Builtin),
include('math'),
],
'command': [
(r'\[.*?\]', Name.Attribute),
(r'\*', Keyword),
(r'', Text, '#pop'),
],
}
def analyse_text(text):
for start in ("\\documentclass", "\\input", "\\documentstyle",
"\\relax"):
if text[:len(start)] == start:
return True
class GroffLexer(RegexLexer):
"""
Lexer for the (g)roff typesetting language, supporting groff
extensions. Mainly useful for highlighting manpage sources.
*New in Pygments 0.6.*
"""
name = 'Groff'
aliases = ['groff', 'nroff', 'man']
filenames = ['*.[1234567]', '*.man']
mimetypes = ['application/x-troff', 'text/troff']
tokens = {
'root': [
(r'(?i)(\.)(\w+)', bygroups(Text, Keyword), 'request'),
(r'\.', Punctuation, 'request'),
# Regular characters, slurp till we find a backslash or newline
(r'[^\\\n]*', Text, 'textline'),
],
'textline': [
include('escapes'),
(r'[^\\\n]+', Text),
(r'\n', Text, '#pop'),
],
'escapes': [
# groff has many ways to write escapes.
(r'\\"[^\n]*', Comment),
(r'\\[fn]\w', String.Escape),
(r'\\\(..', String.Escape),
(r'\\.\[.*\]', String.Escape),
(r'\\.', String.Escape),
(r'\\\n', Text, 'request'),
],
'request': [
(r'\n', Text, '#pop'),
include('escapes'),
(r'"[^\n"]+"', String.Double),
(r'\d+', Number),
(r'\S+', String),
(r'\s+', Text),
],
}
def analyse_text(text):
if text[:1] != '.':
return False
if text[:3] == '.\\"':
return True
if text[:4] == '.TH ':
return True
if text[1:3].isalnum() and text[3].isspace():
return 0.9
class ApacheConfLexer(RegexLexer):
"""
Lexer for configuration files following the Apache config file
format.
*New in Pygments 0.6.*
"""
name = 'ApacheConf'
aliases = ['apacheconf', 'aconf', 'apache']
filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
mimetypes = ['text/x-apacheconf']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'(#.*?)$', Comment),
(r'(<[^\s>]+)(?:(\s+)(.*?))?(>)',
bygroups(Name.Tag, Text, String, Name.Tag)),
(r'([a-zA-Z][a-zA-Z0-9]*)(\s+)',
bygroups(Name.Builtin, Text), 'value'),
(r'\.+', Text),
],
'value': [
(r'$', Text, '#pop'),
(r'[^\S\n]+', Text),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'\d+', Number),
(r'/([a-zA-Z0-9][a-zA-Z0-9_./-]+)', String.Other),
(r'(on|off|none|any|all|double|email|dns|min|minimal|'
r'os|productonly|full|emerg|alert|crit|error|warn|'
r'notice|info|debug|registry|script|inetd|standalone|'
r'user|group)\b', Keyword),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'[^\s"]+', Text)
]
}
class MoinWikiLexer(RegexLexer):
"""
For MoinMoin (and Trac) Wiki markup.
*New in Pygments 0.7.*
"""
name = 'MoinMoin/Trac Wiki markup'
aliases = ['trac-wiki', 'moin']
filenames = []
mimetypes = ['text/x-trac-wiki']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'^#.*$', Comment),
(r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next
# Titles
(r'^(=+)([^=]+)(=+)(\s*#.+)?$',
bygroups(Generic.Heading, using(this), Generic.Heading, String)),
# Literal code blocks, with optional shebang
(r'({{{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'),
(r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting
# Lists
(r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)),
(r'^( +)([a-zivx]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)),
# Other Formatting
(r'\[\[\w+.*?\]\]', Keyword), # Macro
(r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])',
bygroups(Keyword, String, Keyword)), # Link
(r'^----+$', Keyword), # Horizontal rules
(r'[^\n\'\[{!_~^,|]+', Text),
(r'\n', Text),
(r'.', Text),
],
'codeblock': [
(r'}}}', Name.Builtin, '#pop'),
# these blocks are allowed to be nested in Trac, but not MoinMoin
(r'{{{', Text, '#push'),
(r'[^{}]+', Comment.Preproc), # slurp boring text
(r'.', Comment.Preproc), # allow loose { or }
],
}
class RstLexer(RegexLexer):
"""
For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup.
*New in Pygments 0.7.*
Additional options accepted:
`handlecodeblocks`
        Highlight the contents of ``.. sourcecode:: language`` and
``.. code:: language`` directives with a lexer for the given
language (default: ``True``). *New in Pygments 0.8.*
"""
name = 'reStructuredText'
aliases = ['rst', 'rest', 'restructuredtext']
filenames = ['*.rst', '*.rest']
mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"]
flags = re.MULTILINE
def _handle_sourcecode(self, match):
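        # Callback for (source)code directives: emit the directive's own
        # tokens first, then re-lex the indented body with the named lexer
        # when one is available (see handlecodeblocks).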
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound
# section header
yield match.start(1), Punctuation, match.group(1)
yield match.start(2), Text, match.group(2)
yield match.start(3), Operator.Word, match.group(3)
yield match.start(4), Punctuation, match.group(4)
yield match.start(5), Text, match.group(5)
yield match.start(6), Keyword, match.group(6)
yield match.start(7), Text, match.group(7)
# lookup lexer if wanted and existing
lexer = None
if self.handlecodeblocks:
try:
lexer = get_lexer_by_name(match.group(6).strip())
except ClassNotFound:
pass
indention = match.group(8)
indention_size = len(indention)
code = (indention + match.group(9) + match.group(10) + match.group(11))
# no lexer for this language. handle it like it was a code block
if lexer is None:
yield match.start(8), String, code
return
# highlight the lines with the lexer.
ins = []
codelines = code.splitlines(True)
code = ''
for line in codelines:
if len(line) > indention_size:
ins.append((len(code), [(0, Text, line[:indention_size])]))
code += line[indention_size:]
else:
code += line
for item in do_insertions(ins, lexer.get_tokens_unprocessed(code)):
yield item
tokens = {
'root': [
# Heading with overline
(r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
r'(.+)(\n)(\1)(\n)',
bygroups(Generic.Heading, Text, Generic.Heading,
Text, Generic.Heading, Text)),
# Plain heading
(r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|'
r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)',
bygroups(Generic.Heading, Text, Generic.Heading, Text)),
# Bulleted lists
(r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered lists
(r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered, but keep words at BOL from becoming lists
(r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
# Line blocks
(r'^(\s*)(\|)( .+\n(?:\| .+\n)*)',
bygroups(Text, Operator, using(this, state='inline'))),
# Sourcecode directives
(r'^( *\.\.)(\s*)((?:source)?code)(::)([ \t]*)([^\n]+)'
r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)',
_handle_sourcecode),
# A directive
(r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
using(this, state='inline'))),
# A reference target
(r'^( *\.\.)(\s*)([\w\t ]+:)(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A footnote target
(r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A substitution def
(r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
Punctuation, Text, using(this, state='inline'))),
# Comments
(r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
# Field list
(r'^( *)(:[a-zA-Z-]+:)(\s*)$', bygroups(Text, Name.Class, Text)),
(r'^( *)(:.*?:)([ \t]+)(.*?)$',
bygroups(Text, Name.Class, Text, Name.Function)),
# Definition list
(r'^([^ ].*(?<!::)\n)((?:(?: +.*)\n)+)',
bygroups(using(this, state='inline'), using(this, state='inline'))),
# Code blocks
(r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*|)\n)+)',
bygroups(String.Escape, Text, String, String, Text, String)),
include('inline'),
],
'inline': [
(r'\\.', Text), # escape
(r'``', String, 'literal'), # code
(r'(`.+?)(<.+?>)(`__?)', # reference with inline target
bygroups(String, String.Interpol, String)),
(r'`.+?`__?', String), # reference
(r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
bygroups(Name.Variable, Name.Attribute)), # role
(r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
bygroups(Name.Attribute, Name.Variable)), # role (content first)
(r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis
(r'\*.+?\*', Generic.Emph), # Emphasis
(r'\[.*?\]_', String), # Footnote or citation
(r'<.+?>', Name.Tag), # Hyperlink
(r'[^\\\n\[*`:]+', Text),
(r'.', Text),
],
'literal': [
(r'[^`\\]+', String),
(r'\\.', String),
(r'``', String, '#pop'),
(r'[`\\]', String),
]
}
def __init__(self, **options):
self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
RegexLexer.__init__(self, **options)
def analyse_text(text):
if text[:2] == '..' and text[2:3] != '.':
return 0.3
p1 = text.find("\n")
p2 = text.find("\n", p1 + 1)
if (p2 > -1 and # has two lines
p1 * 2 + 1 == p2 and # they are the same length
text[p1+1] in '-=' and # the next line both starts and ends with
text[p1+1] == text[p2-1]): # ...a sufficiently high header
return 0.5
class VimLexer(RegexLexer):
"""
Lexer for VimL script files.
*New in Pygments 0.8.*
"""
name = 'VimL'
aliases = ['vim']
filenames = ['*.vim', '.vimrc']
mimetypes = ['text/x-vim']
flags = re.MULTILINE
tokens = {
'root': [
# Who decided that doublequote was a good comment character??
(r'^\s*".*', Comment),
(r'(?<=\s)"[^\-:.%#=*].*', Comment),
(r'[ \t]+', Text),
# TODO: regexes can have other delims
(r'/(\\\\|\\/|[^\n/])*/', String.Regex),
(r'"(\\\\|\\"|[^\n"])*"', String.Double),
(r"'(\\\\|\\'|[^\n'])*'", String.Single),
(r'-?\d+', Number),
(r'#[0-9a-f]{6}', Number.Hex),
(r'^:', Punctuation),
(r'[()<>+=!|,~-]', Punctuation), # Inexact list. Looks decent.
(r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b',
Keyword),
(r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin),
(r'\b\w+\b', Name.Other), # These are postprocessed below
(r'.', Text),
],
}
def __init__(self, **options):
from pygments.lexers._vimbuiltins import command, option, auto
self._cmd = command
self._opt = option
self._aut = auto
RegexLexer.__init__(self, **options)
def is_in(self, w, mapping):
r"""
It's kind of difficult to decide if something might be a keyword
in VimL because it allows you to abbreviate them. In fact,
'ab[breviate]' is a good example. :ab, :abbre, or :abbreviate are
valid ways to call it so rather than making really awful regexps
like::
\bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b
we match `\b\w+\b` and then call is_in() on those tokens. See
`scripts/get_vimkw.py` for how the lists are extracted.
"""
p = bisect(mapping, (w,))
if p > 0:
if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \
mapping[p-1][1][:len(w)] == w: return True
if p < len(mapping):
return mapping[p][0] == w[:len(mapping[p][0])] and \
mapping[p][1][:len(w)] == w
return False
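
    # Illustrative check (hypothetical mapping entry): with ('ab',
    # 'abbreviate') in the mapping, is_in('abbre', mapping) is True,
    # since 'abbre' starts with the minimal form 'ab' and is itself a
    # prefix of 'abbreviate'.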
def get_tokens_unprocessed(self, text):
# TODO: builtins are only subsequent tokens on lines
# and 'keywords' only happen at the beginning except
# for :au ones
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name.Other:
if self.is_in(value, self._cmd):
yield index, Keyword, value
elif self.is_in(value, self._opt) or \
self.is_in(value, self._aut):
yield index, Name.Builtin, value
else:
yield index, Text, value
else:
yield index, token, value
class GettextLexer(RegexLexer):
"""
Lexer for Gettext catalog files.
*New in Pygments 0.9.*
"""
name = 'Gettext Catalog'
aliases = ['pot', 'po']
filenames = ['*.pot', '*.po']
mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext']
tokens = {
'root': [
(r'^#,\s.*?$', Keyword.Type),
(r'^#:\s.*?$', Keyword.Declaration),
#(r'^#$', Comment),
(r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single),
(r'^(")([\w-]*:)(.*")$',
bygroups(String, Name.Property, String)),
(r'^".*"$', String),
(r'^(msgid|msgid_plural|msgstr)(\s+)(".*")$',
bygroups(Name.Variable, Text, String)),
(r'^(msgstr\[)(\d)(\])(\s+)(".*")$',
bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)),
]
}
class SquidConfLexer(RegexLexer):
"""
Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.
*New in Pygments 0.9.*
"""
name = 'SquidConf'
aliases = ['squidconf', 'squid.conf', 'squid']
filenames = ['squid.conf']
mimetypes = ['text/x-squidconf']
flags = re.IGNORECASE
keywords = [ "acl", "always_direct", "announce_host",
"announce_period", "announce_port", "announce_to",
"anonymize_headers", "append_domain", "as_whois_server",
"auth_param_basic", "authenticate_children",
"authenticate_program", "authenticate_ttl", "broken_posts",
"buffered_logs", "cache_access_log", "cache_announce",
"cache_dir", "cache_dns_program", "cache_effective_group",
"cache_effective_user", "cache_host", "cache_host_acl",
"cache_host_domain", "cache_log", "cache_mem",
"cache_mem_high", "cache_mem_low", "cache_mgr",
"cachemgr_passwd", "cache_peer", "cache_peer_access",
"cahce_replacement_policy", "cache_stoplist",
"cache_stoplist_pattern", "cache_store_log", "cache_swap",
"cache_swap_high", "cache_swap_log", "cache_swap_low",
"client_db", "client_lifetime", "client_netmask",
"connect_timeout", "coredump_dir", "dead_peer_timeout",
"debug_options", "delay_access", "delay_class",
"delay_initial_bucket_level", "delay_parameters",
"delay_pools", "deny_info", "dns_children", "dns_defnames",
"dns_nameservers", "dns_testnames", "emulate_httpd_log",
"err_html_text", "fake_user_agent", "firewall_ip",
"forwarded_for", "forward_snmpd_port", "fqdncache_size",
"ftpget_options", "ftpget_program", "ftp_list_width",
"ftp_passive", "ftp_user", "half_closed_clients",
"header_access", "header_replace", "hierarchy_stoplist",
"high_response_time_warning", "high_page_fault_warning",
"htcp_port", "http_access", "http_anonymizer", "httpd_accel",
"httpd_accel_host", "httpd_accel_port",
"httpd_accel_uses_host_header", "httpd_accel_with_proxy",
"http_port", "http_reply_access", "icp_access",
"icp_hit_stale", "icp_port", "icp_query_timeout",
"ident_lookup", "ident_lookup_access", "ident_timeout",
"incoming_http_average", "incoming_icp_average",
"inside_firewall", "ipcache_high", "ipcache_low",
"ipcache_size", "local_domain", "local_ip", "logfile_rotate",
"log_fqdn", "log_icp_queries", "log_mime_hdrs",
"maximum_object_size", "maximum_single_addr_tries",
"mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
"mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
"memory_pools_limit", "memory_replacement_policy",
"mime_table", "min_http_poll_cnt", "min_icp_poll_cnt",
"minimum_direct_hops", "minimum_object_size",
"minimum_retry_timeout", "miss_access", "negative_dns_ttl",
"negative_ttl", "neighbor_timeout", "neighbor_type_domain",
"netdb_high", "netdb_low", "netdb_ping_period",
"netdb_ping_rate", "never_direct", "no_cache",
"passthrough_proxy", "pconn_timeout", "pid_filename",
"pinger_program", "positive_dns_ttl", "prefer_direct",
"proxy_auth", "proxy_auth_realm", "query_icmp", "quick_abort",
"quick_abort", "quick_abort_max", "quick_abort_min",
"quick_abort_pct", "range_offset_limit", "read_timeout",
"redirect_children", "redirect_program",
"redirect_rewrites_host_header", "reference_age",
"reference_age", "refresh_pattern", "reload_into_ims",
"request_body_max_size", "request_size", "request_timeout",
"shutdown_lifetime", "single_parent_bypass",
"siteselect_timeout", "snmp_access", "snmp_incoming_address",
"snmp_port", "source_ping", "ssl_proxy",
"store_avg_object_size", "store_objects_per_bucket",
"strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
"tcp_incoming_address", "tcp_outgoing_address",
"tcp_recv_bufsize", "test_reachability", "udp_hit_obj",
"udp_hit_obj_size", "udp_incoming_address",
"udp_outgoing_address", "unique_hostname", "unlinkd_program",
"uri_whitespace", "useragent_log", "visible_hostname",
"wais_relay", "wais_relay_host", "wais_relay_port",
]
opts = [ "proxy-only", "weight", "ttl", "no-query", "default",
"round-robin", "multicast-responder", "on", "off", "all",
"deny", "allow", "via", "parent", "no-digest", "heap", "lru",
"realm", "children", "credentialsttl", "none", "disable",
"offline_toggle", "diskd", "q1", "q2",
]
actions = [ "shutdown", "info", "parameter", "server_list",
"client_list", r'squid\.conf',
]
actions_stats = [ "objects", "vm_objects", "utilization",
"ipcache", "fqdncache", "dns", "redirector", "io",
"reply_headers", "filedescriptors", "netdb",
]
actions_log = [ "status", "enable", "disable", "clear"]
acls = [ "url_regex", "urlpath_regex", "referer_regex", "port",
"proto", "req_mime_type", "rep_mime_type", "method",
"browser", "user", "src", "dst", "time", "dstdomain", "ident",
"snmp_community",
]
ip_re = r'\b(?:\d{1,3}\.){3}\d{1,3}\b'
    def makelistre(words):
        return r'\b(?:' + '|'.join(words) + r')\b'
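    # e.g. makelistre(["on", "off"]) -> r'\b(?:on|off)\b', i.e. one
    # word-bounded alternation instead of a separate regex per keyword.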
tokens = {
'root': [
(r'\s+', Text),
(r'#', Comment, 'comment'),
(makelistre(keywords), Keyword),
(makelistre(opts), Name.Constant),
# Actions
(makelistre(actions), String),
(r'stats/'+makelistre(actions), String),
(r'log/'+makelistre(actions)+r'=', String),
(makelistre(acls), Keyword),
(ip_re+r'(?:/(?:'+ip_re+r')|\d+)?', Number),
(r'\b\d+\b', Number),
(r'\S+', Text),
],
'comment': [
(r'\s*TAG:.*', String.Escape, '#pop'),
(r'.*', Comment, '#pop'),
],
}
class DebianControlLexer(RegexLexer):
"""
Lexer for Debian ``control`` files and ``apt-cache show <pkg>`` outputs.
*New in Pygments 0.9.*
"""
name = 'Debian Control file'
aliases = ['control']
filenames = ['control']
tokens = {
'root': [
(r'^(Description)', Keyword, 'description'),
(r'^(Maintainer)(:\s*)', bygroups(Keyword, Text), 'maintainer'),
(r'^((Build-)?Depends)', Keyword, 'depends'),
(r'^((?:Python-)?Version)(:\s*)([^\s]+)$',
bygroups(Keyword, Text, Number)),
(r'^((?:Installed-)?Size)(:\s*)([^\s]+)$',
bygroups(Keyword, Text, Number)),
(r'^(MD5Sum|SHA1|SHA256)(:\s*)([^\s]+)$',
bygroups(Keyword, Text, Number)),
(r'^([a-zA-Z\-0-9\.]*?)(:\s*)(.*?)$',
bygroups(Keyword, Whitespace, String)),
],
'maintainer': [
(r'<[^>]+>', Generic.Strong),
(r'<[^>]+>$', Generic.Strong, '#pop'),
(r',\n?', Text),
(r'.', Text),
],
'description': [
(r'(.*)(Homepage)(: )([^\s]+)', bygroups(Text, String, Name, Name.Class)),
(r':.*\n', Generic.Strong),
(r' .*\n', Text),
('', Text, '#pop'),
],
'depends': [
(r':\s*', Text),
(r'(\$)(\{)(\w+\s*:\s*\w+)', bygroups(Operator, Text, Name.Entity)),
(r'\(', Text, 'depend_vers'),
(r',', Text),
(r'\|', Operator),
(r'[\s]+', Text),
(r'[}\)]\s*$', Text, '#pop'),
(r'[}]', Text),
(r'[^,]$', Name.Function, '#pop'),
(r'([\+\.a-zA-Z0-9-][\s\n]*)', Name.Function),
(r'\[.*?\]', Name.Entity),
],
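        # Informal walk-through on a hypothetical field such as
        # 'Depends: libc6 (>= 2.14) [amd64], foo | bar': package-name
        # characters become Name.Function, '(' enters 'depend_vers' where
        # '>= 2.14' is split into Operator/Text/Number, '[amd64]' is
        # Name.Entity and '|' is the alternation Operator.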
'depend_vers': [
(r'\),', Text, '#pop'),
(r'\)[^,]', Text, '#pop:2'),
(r'([><=]+)(\s*)([^\)]+)', bygroups(Operator, Text, Number))
]
}
class YamlLexerContext(LexerContext):
"""Indentation context for the YAML lexer."""
def __init__(self, *args, **kwds):
super(YamlLexerContext, self).__init__(*args, **kwds)
self.indent_stack = []
self.indent = -1
self.next_indent = 0
self.block_scalar_indent = None
class YamlLexer(ExtendedRegexLexer):
"""
Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
language.
*New in Pygments 0.11.*
"""
name = 'YAML'
aliases = ['yaml']
filenames = ['*.yaml', '*.yml']
mimetypes = ['text/x-yaml']
def something(token_class):
"""Do not produce empty tokens."""
def callback(lexer, match, context):
text = match.group()
if not text:
return
yield match.start(), token_class, text
context.pos = match.end()
return callback
def reset_indent(token_class):
"""Reset the indentation levels."""
def callback(lexer, match, context):
text = match.group()
context.indent_stack = []
context.indent = -1
context.next_indent = 0
context.block_scalar_indent = None
yield match.start(), token_class, text
context.pos = match.end()
return callback
def save_indent(token_class, start=False):
"""Save a possible indentation level."""
def callback(lexer, match, context):
text = match.group()
extra = ''
if start:
context.next_indent = len(text)
if context.next_indent < context.indent:
while context.next_indent < context.indent:
context.indent = context.indent_stack.pop()
if context.next_indent > context.indent:
extra = text[context.indent:]
text = text[:context.indent]
else:
context.next_indent += len(text)
if text:
yield match.start(), token_class, text
if extra:
yield match.start()+len(text), token_class.Error, extra
context.pos = match.end()
return callback
def set_indent(token_class, implicit=False):
"""Set the previously saved indentation level."""
def callback(lexer, match, context):
text = match.group()
if context.indent < context.next_indent:
context.indent_stack.append(context.indent)
context.indent = context.next_indent
if not implicit:
context.next_indent += len(text)
yield match.start(), token_class, text
context.pos = match.end()
return callback
def set_block_scalar_indent(token_class):
"""Set an explicit indentation level for a block scalar."""
def callback(lexer, match, context):
text = match.group()
context.block_scalar_indent = None
if not text:
return
increment = match.group(1)
if increment:
current_indent = max(context.indent, 0)
increment = int(increment)
context.block_scalar_indent = current_indent + increment
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_block_scalar_empty_line(indent_token_class, content_token_class):
"""Process an empty line in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if (context.block_scalar_indent is None or
len(text) <= context.block_scalar_indent):
if text:
yield match.start(), indent_token_class, text
else:
indentation = text[:context.block_scalar_indent]
content = text[context.block_scalar_indent:]
yield match.start(), indent_token_class, indentation
yield (match.start()+context.block_scalar_indent,
content_token_class, content)
context.pos = match.end()
return callback
def parse_block_scalar_indent(token_class):
"""Process indentation spaces in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if context.block_scalar_indent is None:
if len(text) <= max(context.indent, 0):
context.stack.pop()
context.stack.pop()
return
context.block_scalar_indent = len(text)
else:
if len(text) < context.block_scalar_indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_plain_scalar_indent(token_class):
"""Process indentation spaces in a plain scalar."""
def callback(lexer, match, context):
text = match.group()
if len(text) <= context.indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
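    # How the indentation callbacks cooperate (informal sketch): for
    #
    #   key:
    #     - item
    #
    # save_indent(..., start=True) records the two leading spaces of the
    # second line as context.next_indent, set_indent() commits that level
    # when the '-' indicator is matched (pushing the old level onto
    # indent_stack so a later dedent can pop back), and the scalar parsers
    # above leave their states once a line is indented at or below
    # context.indent.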
tokens = {
# the root rules
'root': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# the '%YAML' directive
(r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
# the %TAG directive
(r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
# document start and document end indicators
(r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
'block-line'),
# indentation spaces
(r'[ ]*(?![ \t\n\r\f\v]|$)', save_indent(Text, start=True),
('block-line', 'indentation')),
],
# trailing whitespaces after directives or a block scalar indicator
'ignored-line': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# line break
(r'\n', Text, '#pop:2'),
],
# the %YAML directive
'yaml-directive': [
# the version number
(r'([ ]+)([0-9]+\.[0-9]+)',
bygroups(Text, Number), 'ignored-line'),
],
        # the %TAG directive
'tag-directive': [
# a tag handle and the corresponding prefix
(r'([ ]+)(!|![0-9A-Za-z_-]*!)'
r'([ ]+)(!|!?[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)',
bygroups(Text, Keyword.Type, Text, Keyword.Type),
'ignored-line'),
],
# block scalar indicators and indentation spaces
'indentation': [
# trailing whitespaces are ignored
(r'[ ]*$', something(Text), '#pop:2'),
            # whitespaces preceding block collection indicators
(r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),
# block collection indicators
(r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
            # the beginning of a block line
(r'[ ]*', save_indent(Text), '#pop'),
],
# an indented line in the block context
'block-line': [
# the line end
(r'[ ]*(?=#|$)', something(Text), '#pop'),
# whitespaces separating tokens
(r'[ ]+', Text),
# tags, anchors and aliases,
include('descriptors'),
# block collections and scalars
include('block-nodes'),
# flow collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`-]|[?:-][^ \t\n\r\f\v])',
something(Name.Variable),
'plain-scalar-in-block-context'),
],
# tags, anchors, aliases
'descriptors' : [
# a full-form tag
(r'!<[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+>', Keyword.Type),
# a tag in the form '!', '!suffix' or '!handle!suffix'
(r'!(?:[0-9A-Za-z_-]+)?'
r'(?:![0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)?', Keyword.Type),
# an anchor
(r'&[0-9A-Za-z_-]+', Name.Label),
# an alias
(r'\*[0-9A-Za-z_-]+', Name.Variable),
],
# block collections and scalars
'block-nodes': [
# implicit key
(r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
# literal and folded scalars
(r'[|>]', Punctuation.Indicator,
('block-scalar-content', 'block-scalar-header')),
],
# flow collections and quoted scalars
'flow-nodes': [
# a flow sequence
(r'\[', Punctuation.Indicator, 'flow-sequence'),
# a flow mapping
(r'\{', Punctuation.Indicator, 'flow-mapping'),
# a single-quoted scalar
(r'\'', String, 'single-quoted-scalar'),
# a double-quoted scalar
(r'\"', String, 'double-quoted-scalar'),
],
# the content of a flow collection
'flow-collection': [
# whitespaces
(r'[ ]+', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# simple indicators
(r'[?:,]', Punctuation.Indicator),
# tags, anchors and aliases
include('descriptors'),
# nested collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`])',
something(Name.Variable),
'plain-scalar-in-flow-context'),
],
# a flow sequence indicated by '[' and ']'
'flow-sequence': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\]', Punctuation.Indicator, '#pop'),
],
# a flow mapping indicated by '{' and '}'
'flow-mapping': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\}', Punctuation.Indicator, '#pop'),
],
# block scalar lines
'block-scalar-content': [
# line break
(r'\n', Text),
# empty line
(r'^[ ]+$',
parse_block_scalar_empty_line(Text, Name.Constant)),
# indentation spaces (we may leave the state here)
(r'^[ ]*', parse_block_scalar_indent(Text)),
# line content
(r'[^\n\r\f\v]+', Name.Constant),
],
# the content of a literal or folded scalar
'block-scalar-header': [
# indentation indicator followed by chomping flag
(r'([1-9])?[+-]?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
# chomping flag followed by indentation indicator
(r'[+-]?([1-9])?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
],
# ignored and regular whitespaces in quoted scalars
'quoted-scalar-whitespaces': [
# leading and trailing whitespaces are ignored
(r'^[ ]+|[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
],
# single-quoted scalars
'single-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of the quote character
(r'\'\'', String.Escape),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v\']+', String),
# the closing quote
(r'\'', String, '#pop'),
],
# double-quoted scalars
'double-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of special characters
(r'\\[0abt\tn\nvfre "\\N_LP]', String),
# escape codes
(r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
String.Escape),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v\"\\]+', String),
# the closing quote
(r'"', String, '#pop'),
],
# the beginning of a new line while scanning a plain scalar
'plain-scalar-in-block-context-new-line': [
# empty lines
(r'^[ ]+$', Text),
# line breaks
(r'\n+', Text),
# document start and document end indicators
(r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
# indentation spaces (we may leave the block line state here)
(r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'),
],
# a plain scalar in the block context
'plain-scalar-in-block-context': [
# the scalar ends with the ':' indicator
(r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'),
# the scalar ends with whitespaces followed by a comment
(r'[ ]+(?=#)', Text, '#pop'),
# trailing whitespaces are ignored
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
# other whitespaces are a part of the value
(r'[ ]+', Literal.Scalar.Plain),
# regular non-whitespace characters
(r'(?::(?![ \t\n\r\f\v])|[^ \t\n\r\f\v:])+', Literal.Scalar.Plain),
],
        # a plain scalar in the flow context
'plain-scalar-in-flow-context': [
# the scalar ends with an indicator character
(r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'),
# the scalar ends with a comment
(r'[ ]+(?=#)', Text, '#pop'),
# leading and trailing whitespaces are ignored
(r'^[ ]+|[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v,:?\[\]{}]+', Name.Variable),
],
}
def get_tokens_unprocessed(self, text=None, context=None):
if context is None:
context = YamlLexerContext(text, 0)
return super(YamlLexer, self).get_tokens_unprocessed(text, context)
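# A minimal usage sketch (assumes Pygments is importable; stream abbreviated):
#
#   from pygments import lex
#   for tok, val in lex('foo: [1, 2]\n', YamlLexer()):
#       print(tok, repr(val))
#   # 'foo' -> Literal.Scalar.Plain; ':', '[', ',' and ']' ->
#   # Punctuation.Indicator; '1' and '2' -> Name.Variable (plain scalars
#   # in the flow context)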
class LighttpdConfLexer(RegexLexer):
"""
Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.
*New in Pygments 0.11.*
"""
name = 'Lighttpd configuration file'
aliases = ['lighty', 'lighttpd']
filenames = []
mimetypes = ['text/x-lighttpd-conf']
tokens = {
'root': [
(r'#.*\n', Comment.Single),
(r'/\S*', Name), # pathname
(r'[a-zA-Z._-]+', Keyword),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'[0-9]+', Number),
(r'=>|=~|\+=|==|=|\+', Operator),
(r'\$[A-Z]+', Name.Builtin),
(r'[(){}\[\],]', Punctuation),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'\s+', Text),
],
}
class NginxConfLexer(RegexLexer):
"""
Lexer for `Nginx <http://nginx.net/>`_ configuration files.
*New in Pygments 0.11.*
"""
name = 'Nginx configuration file'
aliases = ['nginx']
filenames = []
mimetypes = ['text/x-nginx-conf']
tokens = {
'root': [
(r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)),
(r'[^\s;#]+', Keyword, 'stmt'),
include('base'),
],
'block': [
(r'}', Punctuation, '#pop:2'),
(r'[^\s;#]+', Keyword.Namespace, 'stmt'),
include('base'),
],
'stmt': [
(r'{', Punctuation, 'block'),
(r';', Punctuation, '#pop'),
include('base'),
],
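        # e.g. for 'server { listen 80; }': 'server' (Keyword) pushes 'stmt',
        # '{' pushes 'block', 'listen' (Keyword.Namespace) pushes 'stmt'
        # again, ';' pops it, and '}' pops both 'block' and the enclosing
        # 'stmt' via '#pop:2'.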
'base': [
(r'#.*\n', Comment.Single),
(r'on|off', Name.Constant),
(r'\$[^\s;#()]+', Name.Variable),
(r'([a-z0-9.-]+)(:)([0-9]+)',
bygroups(Name, Punctuation, Number.Integer)),
(r'[a-z-]+/[a-z-+]+', String), # mimetype
#(r'[a-zA-Z._-]+', Keyword),
(r'[0-9]+[km]?\b', Number.Integer),
(r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)),
(r'[:=~]', Punctuation),
(r'[^\s;#{}$]+', String), # catch all
(r'/[^\s;#]*', Name), # pathname
(r'\s+', Text),
(r'[$;]', Text), # leftover characters
],
}
class CMakeLexer(RegexLexer):
"""
Lexer for `CMake <http://cmake.org/Wiki/CMake>`_ files.
*New in Pygments 1.2.*
"""
name = 'CMake'
aliases = ['cmake']
filenames = ['*.cmake']
mimetypes = ['text/x-cmake']
tokens = {
'root': [
#(r'(ADD_CUSTOM_COMMAND|ADD_CUSTOM_TARGET|ADD_DEFINITIONS|'
# r'ADD_DEPENDENCIES|ADD_EXECUTABLE|ADD_LIBRARY|ADD_SUBDIRECTORY|'
# r'ADD_TEST|AUX_SOURCE_DIRECTORY|BUILD_COMMAND|BUILD_NAME|'
# r'CMAKE_MINIMUM_REQUIRED|CONFIGURE_FILE|CREATE_TEST_SOURCELIST|'
# r'ELSE|ELSEIF|ENABLE_LANGUAGE|ENABLE_TESTING|ENDFOREACH|'
# r'ENDFUNCTION|ENDIF|ENDMACRO|ENDWHILE|EXEC_PROGRAM|'
# r'EXECUTE_PROCESS|EXPORT_LIBRARY_DEPENDENCIES|FILE|FIND_FILE|'
# r'FIND_LIBRARY|FIND_PACKAGE|FIND_PATH|FIND_PROGRAM|FLTK_WRAP_UI|'
# r'FOREACH|FUNCTION|GET_CMAKE_PROPERTY|GET_DIRECTORY_PROPERTY|'
# r'GET_FILENAME_COMPONENT|GET_SOURCE_FILE_PROPERTY|'
# r'GET_TARGET_PROPERTY|GET_TEST_PROPERTY|IF|INCLUDE|'
# r'INCLUDE_DIRECTORIES|INCLUDE_EXTERNAL_MSPROJECT|'
# r'INCLUDE_REGULAR_EXPRESSION|INSTALL|INSTALL_FILES|'
# r'INSTALL_PROGRAMS|INSTALL_TARGETS|LINK_DIRECTORIES|'
# r'LINK_LIBRARIES|LIST|LOAD_CACHE|LOAD_COMMAND|MACRO|'
# r'MAKE_DIRECTORY|MARK_AS_ADVANCED|MATH|MESSAGE|OPTION|'
# r'OUTPUT_REQUIRED_FILES|PROJECT|QT_WRAP_CPP|QT_WRAP_UI|REMOVE|'
# r'REMOVE_DEFINITIONS|SEPARATE_ARGUMENTS|SET|'
# r'SET_DIRECTORY_PROPERTIES|SET_SOURCE_FILES_PROPERTIES|'
# r'SET_TARGET_PROPERTIES|SET_TESTS_PROPERTIES|SITE_NAME|'
# r'SOURCE_GROUP|STRING|SUBDIR_DEPENDS|SUBDIRS|'
# r'TARGET_LINK_LIBRARIES|TRY_COMPILE|TRY_RUN|UNSET|'
# r'USE_MANGLED_MESA|UTILITY_SOURCE|VARIABLE_REQUIRES|'
# r'VTK_MAKE_INSTANTIATOR|VTK_WRAP_JAVA|VTK_WRAP_PYTHON|'
# r'VTK_WRAP_TCL|WHILE|WRITE_FILE|'
# r'COUNTARGS)\b', Name.Builtin, 'args'),
(r'\b([A-Za-z_]+)([ \t]*)(\()', bygroups(Name.Builtin, Text,
Punctuation), 'args'),
include('keywords'),
include('ws')
],
'args': [
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop'),
(r'(\${)(.+?)(})', bygroups(Operator, Name.Variable, Operator)),
(r'(?s)".*?"', String.Double),
(r'\\\S+', String),
(r'[^\)$"# \t\n]+', String),
(r'\n', Text), # explicitly legal
include('keywords'),
include('ws')
],
'string': [
],
'keywords': [
(r'\b(WIN32|UNIX|APPLE|CYGWIN|BORLAND|MINGW|MSVC|MSVC_IDE|MSVC60|'
r'MSVC70|MSVC71|MSVC80|MSVC90)\b', Keyword),
],
'ws': [
(r'[ \t]+', Text),
(r'#.+\n', Comment),
]
}
|
danalec/dotfiles | refs/heads/master | sublime/.config/sublime-text-3/Packages/SublimeCodeIntel/libs/chardet/euctwprober.py | 2993 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTWSMModel
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCTWSMModel)
self._mDistributionAnalyzer = EUCTWDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-TW"
|
jleclanche/fireplace | refs/heads/master | fireplace/cards/gvg/priest.py | 1 | from ..utils import *
##
# Minions
class GVG_009:
"""Shadowbomber"""
play = Hit(ALL_HEROES, 3)
class GVG_011:
"""Shrinkmeister"""
requirements = {PlayReq.REQ_MINION_TARGET: 0, PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Buff(TARGET, "GVG_011a")
GVG_011a = buff(atk=-2)
class GVG_014:
"""Vol'jin"""
requirements = {PlayReq.REQ_MINION_TARGET: 0, PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = SwapHealth(SELF, TARGET, "GVG_014a")
class GVG_014a:
max_health = lambda self, i: self.health
class GVG_072:
"""Shadowboxer"""
events = Heal(ALL_MINIONS).on(Hit(RANDOM_ENEMY_CHARACTER, 1))
class GVG_083:
"""Upgraded Repair Bot"""
requirements = {
PlayReq.REQ_FRIENDLY_TARGET: 0,
PlayReq.REQ_MINION_TARGET: 0,
PlayReq.REQ_TARGET_IF_AVAILABLE: 0,
PlayReq.REQ_TARGET_WITH_RACE: 17}
# The Enchantment ID is correct
play = Buff(TARGET, "GVG_069a")
GVG_069a = buff(health=4)
##
# Spells
class GVG_008:
"""Lightbomb"""
def play(self):
for target in self.game.board:
yield Hit(target, target.atk)
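    # e.g. with a 3/2 and a 5/5 in play, the generator yields Hit(<3/2>, 3)
    # then Hit(<5/5>, 5): each minion is damaged for its own Attack.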
class GVG_010:
"""Velen's Chosen"""
requirements = {PlayReq.REQ_MINION_TARGET: 0, PlayReq.REQ_TARGET_TO_PLAY: 0}
play = Buff(TARGET, "GVG_010b")
GVG_010b = buff(+2, +4, spellpower=1)
class GVG_012:
"""Light of the Naaru"""
requirements = {PlayReq.REQ_TARGET_TO_PLAY: 0}
play = Heal(TARGET, 3), (DAMAGE(TARGET) >= 1) & Summon(CONTROLLER, "EX1_001")
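# GVG_012 note: the play tuple runs in order -- heal the target for 3 first,
# then the conditional summons a Lightwarden ("EX1_001") only if the target
# is still damaged afterwards.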
|
auto-mat/klub | refs/heads/diakonie | apps/aklub/migrations/0048_auto_20191022_1528.py | 1 | # Generated by Django 2.2.6 on 2019-10-22 13:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('aklub', '0047_auto_20190919_1514'),
]
operations = [
migrations.AddField(
model_name='donorpaymentchannel',
name='SS',
field=models.CharField(blank=True, help_text='Specific symbol', max_length=30, null=True, verbose_name='SS'),
),
migrations.AlterField(
model_name='accountstatements',
name='type',
field=models.CharField(choices=[('account', 'Account statement - Fio Banka'), ('account_cs', 'Account statement - Česká spořitelna'), ('account_kb', 'Account statement - Komerční Banka'), ('account_csob', 'Account statement - ČSOB'), ('account_sberbank', 'Account statement - Sberbank'), ('darujme', 'Darujme.cz')], max_length=20),
),
]
|
mahak/ansible | refs/heads/devel | test/units/executor/__init__.py | 12133432 | |
JianyuWang/neutron | refs/heads/master | neutron/tests/unit/db/metering/__init__.py | 12133432 | |
tiagoantao/mega-analysis | refs/heads/master | haploStats/statIHSBin.py | 1 | from __future__ import print_function
import sys
import math
from MEGA.haplo import parseHaplo, standartizeIHS, doSlidingWindow
import scipy
maxChro = 23
slide = 200000
size = 200000
iHSThr = 2
uihs = {}
inff=open("inf", "w")
for k in range(1, maxChro+1):
f=open("%d.uiHS" %(k))
for res in parseHaplo(f):
myuihs = res["uiHS"]
if myuihs in [float("inf"),float("-inf")] or math.isnan(myuihs):
inff.write("%d\t%s\t%d\n" % (k, res["snp"], res["pos"]))
else:
bin = int(res["freq"]*100)/5
uihs.setdefault(bin,[]).append(myuihs)
f.close()
inff.close()
STAT = open("iHSstats", "w")
freqs = list(uihs.keys())
freqs.sort()
means = {}
stds = {}
for freq in freqs:
means[freq] = scipy.mean(uihs[freq])
stds[freq] = scipy.std(uihs[freq])
print(freq, means[freq], stds[freq], file=STAT)
#print freq, means[freq], stds[freq]
STAT.close()
chroCalc={}
myBins = {}
for k in range(1, maxChro+1):
w=open("%d.iHS" %(k,), "w")
pos, val = [], []
f=open("%d.uiHS" %(k,))
for res in standartizeIHS(f, means, stds):
pos.append(res["pos"])
val.append(res["iHS"])
#print res["freq"], "Y", res["iHS"], "X", res["uiHS"], means[res["freq"]],stds[res["freq"]]
w.write("%s\t%d\t%f\n" % (res["snp"], res["pos"], res["iHS"]))
f.close()
buckets, buckPos = doSlidingWindow(pos, val, slide, size)
posResult = {}
chroCalc[k] = posResult
for i in range(len(buckPos)):
myMin, myMax = buckPos[i]
vals = buckets[i]
above = [x for x in vals if abs(x)>iHSThr]
absVals = [abs(x) for x in vals]
#print vals
if len(vals)>0:
            bin = len(vals)//20  # floor division keeps the bucket key an int on Python 3
            iHSCut = float(len(above))/len(vals)
            myBins.setdefault(bin, []).append(iHSCut)
            xpEHHCut = max(absVals)
            posResult[(myMin+myMax)//2] = len(vals), iHSCut, sum(absVals)/len(vals), bin
#print len(vals), iHSCut, sum(absVals)/len(vals), bin
else:
#posResult[(myMin+myMax)/2] = None
pass
def getP(lst, val):
for li in range(len(lst)):
if val<lst[li]:
return 1.0-float(li)/len(lst)
return 0.0
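# Example: getP([0.1, 0.2, 0.5, 0.9], 0.5) returns 1 - 3/4 = 0.25, i.e. the
# empirical fraction of bucket values strictly greater than the observed
# statistic, and 0.0 when nothing in the bucket exceeds it.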
wf=open("ihs.bucket", "w")
for bin in myBins:
myBins[bin].sort()
wf.write("%d\t%d\n" % (bin,len(myBins[bin])))
wf.close()
empP = {}
for k in range(1, maxChro+1):
posResult = chroCalc[k]
poses = list(posResult.keys())
for pos in poses:
ihs = posResult[pos][1]
bin = posResult[pos][3]
empP[(bin,ihs)] = getP(myBins[bin], ihs)
wf=open("ihs.window", "w")
for k in range(1, maxChro+1):
posResult = chroCalc[k]
poses = list(posResult.keys())
poses.sort()
for pos in poses:
myLen = posResult[pos][0]
ihs = posResult[pos][1]
mean = posResult[pos][2]
bin = posResult[pos][3]
wf.write("%d\t%d\t%d\t%f\t%f\t%d\t%f\n" % (k, pos, myLen, ihs, mean, bin, empP[bin,ihs]))
wf.close()
|
acsone/project-service | refs/heads/8.0 | project_sla/__init__.py | 23 | # -*- coding: utf-8 -*-
from . import project_sla
from . import analytic_account
from . import project_sla_control
from . import project_issue
from . import project_task
from . import report
|
tecip-nes/pyot | refs/heads/master | manage.py | 1 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pyot.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
brokenjacobs/ansible | refs/heads/devel | lib/ansible/modules/cloud/cloudstack/cs_user.py | 60 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_user
short_description: Manages users on Apache CloudStack based clouds.
description:
- Create, update, disable, lock, enable and remove users.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
username:
description:
- Username of the user.
required: true
account:
description:
- Account the user will be created under.
- Required on C(state=present).
required: false
default: null
password:
description:
- Password of the user to be created.
- Required on C(state=present).
- Only considered on creation and will not be updated if user exists.
required: false
default: null
first_name:
description:
- First name of the user.
- Required on C(state=present).
required: false
default: null
last_name:
description:
- Last name of the user.
- Required on C(state=present).
required: false
default: null
email:
description:
- Email of the user.
- Required on C(state=present).
required: false
default: null
timezone:
description:
- Timezone of the user.
required: false
default: null
domain:
description:
- Domain the user is related to.
required: false
default: 'ROOT'
state:
description:
- State of the user.
- C(unlocked) is an alias for C(enabled).
required: false
default: 'present'
choices: [ 'present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked' ]
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create a user in domain 'CUSTOMERS'
local_action:
module: cs_user
account: developers
username: johndoe
password: S3Cur3
last_name: Doe
first_name: John
email: john.doe@example.com
domain: CUSTOMERS
# Lock an existing user in domain 'CUSTOMERS'
local_action:
module: cs_user
username: johndoe
domain: CUSTOMERS
state: locked
# Disable an existing user in domain 'CUSTOMERS'
local_action:
module: cs_user
username: johndoe
domain: CUSTOMERS
state: disabled
# Enable/unlock an existing user in domain 'CUSTOMERS'
local_action:
module: cs_user
username: johndoe
domain: CUSTOMERS
state: enabled
# Remove a user in domain 'CUSTOMERS'
local_action:
module: cs_user
  username: customer_xy
domain: CUSTOMERS
state: absent
'''
RETURN = '''
---
id:
description: UUID of the user.
returned: success
type: string
sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
username:
description: Username of the user.
returned: success
type: string
sample: johndoe
first_name:
description: First name of the user.
returned: success
type: string
sample: John
last_name:
description: Last name of the user.
returned: success
type: string
sample: Doe
email:
  description: Email of the user.
returned: success
type: string
sample: john.doe@example.com
api_key:
description: API key of the user.
returned: success
type: string
sample: JLhcg8VWi8DoFqL2sSLZMXmGojcLnFrOBTipvBHJjySODcV4mCOo29W2duzPv5cALaZnXj5QxDx3xQfaQt3DKg
api_secret:
description: API secret of the user.
returned: success
type: string
sample: FUELo3LB9fa1UopjTLPdqLv_6OXQMJZv9g9N4B_Ao3HFz8d6IGFCV9MbPFNM8mwz00wbMevja1DoUNDvI8C9-g
account:
description: Account name of the user.
returned: success
type: string
sample: developers
account_type:
description: Type of the account.
returned: success
type: string
sample: user
timezone:
description: Timezone of the user.
returned: success
type: string
  sample: America/New_York
created:
description: Date the user was created.
returned: success
type: string
  sample: 2015-05-03T15:05:51+0200
state:
description: State of the user.
returned: success
type: string
sample: enabled
domain:
description: Domain the user is related.
returned: success
type: string
sample: ROOT
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackUser(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackUser, self).__init__(module)
self.returns = {
'username': 'username',
'firstname': 'first_name',
'lastname': 'last_name',
'email': 'email',
'secretkey': 'api_secret',
'apikey': 'api_key',
'timezone': 'timezone',
}
self.account_types = {
'user': 0,
'root_admin': 1,
'domain_admin': 2,
}
self.user = None
def get_account_type(self):
account_type = self.module.params.get('account_type')
return self.account_types[account_type]
def get_user(self):
if not self.user:
args = {}
args['domainid'] = self.get_domain('id')
users = self.cs.listUsers(**args)
if users:
user_name = self.module.params.get('username')
for u in users['user']:
if user_name.lower() == u['username'].lower():
self.user = u
break
return self.user
def enable_user(self):
user = self.get_user()
if not user:
user = self.present_user()
if user['state'].lower() != 'enabled':
self.result['changed'] = True
args = {}
args['id'] = user['id']
if not self.module.check_mode:
res = self.cs.enableUser(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
user = res['user']
return user
def lock_user(self):
user = self.get_user()
if not user:
user = self.present_user()
# we need to enable the user to lock it.
if user['state'].lower() == 'disabled':
user = self.enable_user()
if user['state'].lower() != 'locked':
self.result['changed'] = True
args = {}
args['id'] = user['id']
if not self.module.check_mode:
res = self.cs.lockUser(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
user = res['user']
return user
def disable_user(self):
user = self.get_user()
if not user:
user = self.present_user()
if user['state'].lower() != 'disabled':
self.result['changed'] = True
args = {}
args['id'] = user['id']
if not self.module.check_mode:
user = self.cs.disableUser(**args)
if 'errortext' in user:
self.module.fail_json(msg="Failed: '%s'" % user['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
user = self.poll_job(user, 'user')
return user
def present_user(self):
missing_params = []
for required_params in [
'account',
'email',
'password',
'first_name',
'last_name',
]:
if not self.module.params.get(required_params):
missing_params.append(required_params)
if missing_params:
self.module.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
user = self.get_user()
if user:
user = self._update_user(user)
else:
user = self._create_user(user)
return user
def _create_user(self, user):
self.result['changed'] = True
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain('id')
args['username'] = self.module.params.get('username')
args['password'] = self.module.params.get('password')
args['firstname'] = self.module.params.get('first_name')
args['lastname'] = self.module.params.get('last_name')
args['email'] = self.module.params.get('email')
args['timezone'] = self.module.params.get('timezone')
if not self.module.check_mode:
res = self.cs.createUser(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
user = res['user']
# register user api keys
res = self.cs.registerUserKeys(id=user['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
user.update(res['userkeys'])
return user
def _update_user(self, user):
args = {}
args['id'] = user['id']
args['firstname'] = self.module.params.get('first_name')
args['lastname'] = self.module.params.get('last_name')
args['email'] = self.module.params.get('email')
args['timezone'] = self.module.params.get('timezone')
if self.has_changed(args, user):
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.updateUser(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
user = res['user']
# register user api keys
if 'apikey' not in user:
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.registerUserKeys(id=user['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
user.update(res['userkeys'])
return user
def absent_user(self):
user = self.get_user()
if user:
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.deleteUser(id=user['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
return user
def get_result(self, user):
super(AnsibleCloudStackUser, self).get_result(user)
if user:
if 'accounttype' in user:
for key,value in self.account_types.items():
if value == user['accounttype']:
self.result['account_type'] = key
break
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
username = dict(required=True),
account = dict(default=None),
state = dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked'], default='present'),
domain = dict(default='ROOT'),
email = dict(default=None),
first_name = dict(default=None),
last_name = dict(default=None),
password = dict(default=None, no_log=True),
timezone = dict(default=None),
poll_async = dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
try:
acs_acc = AnsibleCloudStackUser(module)
state = module.params.get('state')
if state in ['absent']:
user = acs_acc.absent_user()
elif state in ['enabled', 'unlocked']:
user = acs_acc.enable_user()
elif state in ['disabled']:
user = acs_acc.disable_user()
elif state in ['locked']:
user = acs_acc.lock_user()
else:
user = acs_acc.present_user()
result = acs_acc.get_result(user)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
henryyang42/NTHU_Course | refs/heads/master | crawler/management/commands/crawl_course.py | 1 | from django.core.management.base import BaseCommand
from crawler.crawler import crawl_course, crawl_dept
from crawler.course import get_cou_codes
try:
from crawler.decaptcha import Entrance, DecaptchaFailure
except ImportError:
Entrance = None
from data_center.models import Course, Department
from utils.config import get_config
def get_auth_pair(url):
if Entrance is not None:
try:
return Entrance(url).get_ticket()
except DecaptchaFailure:
print('Automated decaptcha failed.')
else:
print('crawler.decaptcha not available (requires tesseract >= 3.03).')
print('Please provide valid ACIXSTORE and auth_num from')
print(url)
ACIXSTORE = input('ACIXSTORE: ')
auth_num = input('auth_num: ')
return ACIXSTORE, auth_num
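# A hedged illustration (values are made up): get_auth_pair(url) returns a
# pair such as ('4a9f...', '1234'), the ACIXSTORE session token and the
# decoded captcha number, preferring OCR through Entrance and falling back
# to manual input when decaptcha is unavailable or fails.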
class Command(BaseCommand):
args = ''
    help = 'Crawl the course data from NTHU.'
def handle(self, *args, **kwargs):
if len(args) == 0:
import time
start_time = time.time()
cou_codes = get_cou_codes()
for ys in [get_config('crawler', 'semester')]:
ACIXSTORE, auth_num = get_auth_pair(
'https://www.ccxp.nthu.edu.tw/ccxp/INQUIRE'
'/JH/6/6.2/6.2.9/JH629001.php'
)
print('Crawling course for ' + ys)
crawl_course(ACIXSTORE, auth_num, cou_codes, ys)
ACIXSTORE, auth_num = get_auth_pair(
'https://www.ccxp.nthu.edu.tw/ccxp/INQUIRE'
'/JH/6/6.2/6.2.3/JH623001.php'
)
print('Crawling dept for ' + ys)
crawl_dept(ACIXSTORE, auth_num, cou_codes, ys)
print('===============================\n')
elapsed_time = time.time() - start_time
print('Total %.3f second used.' % elapsed_time)
if len(args) == 1:
if args[0] == 'clear':
Course.objects.all().delete()
Department.objects.all().delete()
|
abstract-open-solutions/OCB | refs/heads/8.0 | addons/gamification/models/badge.py | 287 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF
from openerp.tools.translate import _
from datetime import date
import logging
_logger = logging.getLogger(__name__)
class gamification_badge_user(osv.Model):
"""User having received a badge"""
_name = 'gamification.badge.user'
_description = 'Gamification user badge'
_order = "create_date desc"
_rec_name = "badge_name"
_columns = {
'user_id': fields.many2one('res.users', string="User", required=True, ondelete="cascade"),
'sender_id': fields.many2one('res.users', string="Sender", help="The user who has send the badge"),
'badge_id': fields.many2one('gamification.badge', string='Badge', required=True, ondelete="cascade"),
'challenge_id': fields.many2one('gamification.challenge', string='Challenge originating', help="If this badge was rewarded through a challenge"),
'comment': fields.text('Comment'),
'badge_name': fields.related('badge_id', 'name', type="char", string="Badge Name"),
'create_date': fields.datetime('Created', readonly=True),
'create_uid': fields.many2one('res.users', string='Creator', readonly=True),
}
def _send_badge(self, cr, uid, ids, context=None):
"""Send a notification to a user for receiving a badge
        Does not verify constraints on badge granting.
The users are added to the owner_ids (create badge_user if needed)
The stats counters are incremented
:param ids: list(int) of badge users that will receive the badge
"""
res = True
temp_obj = self.pool.get('email.template')
user_obj = self.pool.get('res.users')
template_id = self.pool['ir.model.data'].get_object(cr, uid, 'gamification', 'email_template_badge_received', context)
for badge_user in self.browse(cr, uid, ids, context=context):
body_html = temp_obj.render_template(cr, uid, template_id.body_html, 'gamification.badge.user', badge_user.id, context=context)
res = user_obj.message_post(
cr, uid, badge_user.user_id.id,
body=body_html,
subtype='gamification.mt_badge_granted',
partner_ids=[badge_user.user_id.partner_id.id],
context=context)
return res
def create(self, cr, uid, vals, context=None):
self.pool.get('gamification.badge').check_granting(cr, uid, badge_id=vals.get('badge_id'), context=context)
return super(gamification_badge_user, self).create(cr, uid, vals, context=context)
class gamification_badge(osv.Model):
"""Badge object that users can send and receive"""
CAN_GRANT = 1
NOBODY_CAN_GRANT = 2
USER_NOT_VIP = 3
BADGE_REQUIRED = 4
TOO_MANY = 5
_name = 'gamification.badge'
_description = 'Gamification badge'
_inherit = ['mail.thread']
def _get_owners_info(self, cr, uid, ids, name, args, context=None):
"""Return:
the list of unique res.users ids having received this badge
            the total number of times this badge was granted
the total number of users this badge was granted to
"""
result = dict((res_id, {'stat_count': 0, 'stat_count_distinct': 0, 'unique_owner_ids': []}) for res_id in ids)
cr.execute("""
SELECT badge_id, count(user_id) as stat_count,
count(distinct(user_id)) as stat_count_distinct,
array_agg(distinct(user_id)) as unique_owner_ids
FROM gamification_badge_user
WHERE badge_id in %s
GROUP BY badge_id
""", (tuple(ids),))
for (badge_id, stat_count, stat_count_distinct, unique_owner_ids) in cr.fetchall():
result[badge_id] = {
'stat_count': stat_count,
'stat_count_distinct': stat_count_distinct,
'unique_owner_ids': unique_owner_ids,
}
return result
def _get_badge_user_stats(self, cr, uid, ids, name, args, context=None):
"""Return stats related to badge users"""
result = dict.fromkeys(ids, False)
badge_user_obj = self.pool.get('gamification.badge.user')
first_month_day = date.today().replace(day=1).strftime(DF)
for bid in ids:
result[bid] = {
'stat_my': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('user_id', '=', uid)], context=context, count=True),
'stat_this_month': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('create_date', '>=', first_month_day)], context=context, count=True),
'stat_my_this_month': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('user_id', '=', uid), ('create_date', '>=', first_month_day)], context=context, count=True),
'stat_my_monthly_sending': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('create_uid', '=', uid), ('create_date', '>=', first_month_day)], context=context, count=True)
}
return result
def _remaining_sending_calc(self, cr, uid, ids, name, args, context=None):
"""Computes the number of badges remaining the user can send
0 if not allowed or no remaining
integer if limited sending
-1 if infinite (should not be displayed)
"""
result = dict.fromkeys(ids, False)
for badge in self.browse(cr, uid, ids, context=context):
if self._can_grant_badge(cr, uid, badge.id, context) != 1:
# if the user cannot grant this badge at all, result is 0
result[badge.id] = 0
elif not badge.rule_max:
# if there is no limitation, -1 is returned which means 'infinite'
result[badge.id] = -1
else:
result[badge.id] = badge.rule_max_number - badge.stat_my_monthly_sending
return result
_columns = {
'name': fields.char('Badge', required=True, translate=True),
'description': fields.text('Description'),
'image': fields.binary("Image", help="This field holds the image used for the badge, limited to 256x256"),
'rule_auth': fields.selection([
('everyone', 'Everyone'),
('users', 'A selected list of users'),
('having', 'People having some badges'),
('nobody', 'No one, assigned through challenges'),
],
string="Allowance to Grant",
help="Who can grant this badge",
required=True),
'rule_auth_user_ids': fields.many2many('res.users', 'rel_badge_auth_users',
string='Authorized Users',
help="Only these people can give this badge"),
'rule_auth_badge_ids': fields.many2many('gamification.badge',
'gamification_badge_rule_badge_rel', 'badge1_id', 'badge2_id',
string='Required Badges',
help="Only the people having these badges can give this badge"),
'rule_max': fields.boolean('Monthly Limited Sending',
help="Check to set a monthly limit per person of sending this badge"),
'rule_max_number': fields.integer('Limitation Number',
help="The maximum number of time this badge can be sent per month per person."),
'stat_my_monthly_sending': fields.function(_get_badge_user_stats,
type="integer",
string='My Monthly Sending Total',
multi='badge_users',
help="The number of time the current user has sent this badge this month."),
'remaining_sending': fields.function(_remaining_sending_calc, type='integer',
            string='Remaining Sending Allowed', help="If a maximum is set"),
'challenge_ids': fields.one2many('gamification.challenge', 'reward_id',
string="Reward of Challenges"),
'goal_definition_ids': fields.many2many('gamification.goal.definition', 'badge_unlocked_definition_rel',
string='Rewarded by',
help="The users that have succeeded theses goals will receive automatically the badge."),
'owner_ids': fields.one2many('gamification.badge.user', 'badge_id',
string='Owners', help='The list of instances of this badge granted to users'),
'active': fields.boolean('Active'),
'unique_owner_ids': fields.function(_get_owners_info,
string='Unique Owners',
help="The list of unique users having received this badge.",
multi='unique_users',
type="many2many", relation="res.users"),
'stat_count': fields.function(_get_owners_info, string='Total',
type="integer",
multi='unique_users',
help="The number of time this badge has been received."),
'stat_count_distinct': fields.function(_get_owners_info,
type="integer",
string='Number of users',
multi='unique_users',
help="The number of time this badge has been received by unique users."),
'stat_this_month': fields.function(_get_badge_user_stats,
type="integer",
string='Monthly total',
multi='badge_users',
help="The number of time this badge has been received this month."),
'stat_my': fields.function(_get_badge_user_stats, string='My Total',
type="integer",
multi='badge_users',
help="The number of time the current user has received this badge."),
'stat_my_this_month': fields.function(_get_badge_user_stats,
type="integer",
string='My Monthly Total',
multi='badge_users',
help="The number of time the current user has received this badge this month."),
}
_defaults = {
'rule_auth': 'everyone',
'active': True,
}
def check_granting(self, cr, uid, badge_id, context=None):
"""Check the user 'uid' can grant the badge 'badge_id' and raise the appropriate exception
if not
Do not check for SUPERUSER_ID
"""
status_code = self._can_grant_badge(cr, uid, badge_id, context=context)
if status_code == self.CAN_GRANT:
return True
elif status_code == self.NOBODY_CAN_GRANT:
raise osv.except_osv(_('Warning!'), _('This badge can not be sent by users.'))
elif status_code == self.USER_NOT_VIP:
raise osv.except_osv(_('Warning!'), _('You are not in the user allowed list.'))
elif status_code == self.BADGE_REQUIRED:
raise osv.except_osv(_('Warning!'), _('You do not have the required badges.'))
elif status_code == self.TOO_MANY:
raise osv.except_osv(_('Warning!'), _('You have already sent this badge too many time this month.'))
else:
_logger.exception("Unknown badge status code: %d" % int(status_code))
return False
def _can_grant_badge(self, cr, uid, badge_id, context=None):
"""Check if a user can grant a badge to another user
:param uid: the id of the res.users trying to send the badge
:param badge_id: the granted badge id
:return: integer representing the permission.
"""
if uid == SUPERUSER_ID:
return self.CAN_GRANT
badge = self.browse(cr, uid, badge_id, context=context)
if badge.rule_auth == 'nobody':
return self.NOBODY_CAN_GRANT
elif badge.rule_auth == 'users' and uid not in [user.id for user in badge.rule_auth_user_ids]:
return self.USER_NOT_VIP
elif badge.rule_auth == 'having':
all_user_badges = self.pool.get('gamification.badge.user').search(cr, uid, [('user_id', '=', uid)], context=context)
for required_badge in badge.rule_auth_badge_ids:
if required_badge.id not in all_user_badges:
return self.BADGE_REQUIRED
if badge.rule_max and badge.stat_my_monthly_sending >= badge.rule_max_number:
return self.TOO_MANY
# badge.rule_auth == 'everyone' -> no check
return self.CAN_GRANT
def check_progress(self, cr, uid, context=None):
try:
model, res_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'gamification', 'badge_hidden')
except ValueError:
return True
badge_user_obj = self.pool.get('gamification.badge.user')
if not badge_user_obj.search(cr, uid, [('user_id', '=', uid), ('badge_id', '=', res_id)], context=context):
values = {
'user_id': uid,
'badge_id': res_id,
}
badge_user_obj.create(cr, SUPERUSER_ID, values, context=context)
return True
|
nbari/zunzuncito | refs/heads/master | zunzuncito/request.py | 1 | """
handles the request
"""
class Request(object):
def __init__(self, logger, request_id, environ):
self.log = logger
self.request_id = request_id
self.environ = environ
self.URI = '/'
self.host = None
self.method = environ['REQUEST_METHOD']
self.path = []
self.py_mod = None
self.resource = None
self.version = None
self.vroot = 'default'
"""
set the HOST
"""
if 'HTTP_HOST' in environ:
self.host = environ['HTTP_HOST'].split(':')[0]
"""
set the request URI
"""
if 'REQUEST_URI' in environ:
self.URI = environ['REQUEST_URI']
elif 'PATH_INFO' in environ:
self.URI = environ['PATH_INFO']
@property
def host_url(self):
"""
The URL through the host (no path)
"""
e = self.environ
scheme = e.get('wsgi.url_scheme')
url = scheme + '://'
host = e.get('HTTP_HOST')
if host is not None:
if ':' in host:
host, port = host.split(':', 1)
else:
port = None
else:
host = e.get('SERVER_NAME')
port = e.get('SERVER_PORT')
if scheme == 'https':
if port == '443':
port = None
elif scheme == 'http':
if port == '80':
port = None
url += host
if port:
url += ':%s' % port
return url
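        # e.g. environ {'wsgi.url_scheme': 'https', 'HTTP_HOST': 'example.com:443'}
        # yields 'https://example.com' (default ports are dropped), while
        # 'example.com:8443' yields 'https://example.com:8443'.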
def is_secure(self):
return 'wsgi.url_scheme' in self.environ \
and self.environ['wsgi.url_scheme'] == 'https'
|
alaski/nova | refs/heads/master | nova/tests/functional/db/test_request_spec.py | 1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova import context
from nova.db.sqlalchemy import api as db
from nova.db.sqlalchemy import api_models
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import request_spec
from nova import test
from nova.tests import fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
from nova.tests.unit import fake_request_spec
CONF = cfg.CONF
class RequestSpecTestCase(test.NoDBTestCase):
USES_DB_SELF = True
def setUp(self):
super(RequestSpecTestCase, self).setUp()
self.useFixture(fixtures.Database(database='api'))
self.context = context.RequestContext('fake-user', 'fake-project')
self.spec_obj = request_spec.RequestSpec()
self.instance_uuid = None
def _create_spec(self):
args = fake_request_spec.fake_db_spec()
args.pop('id', None)
self.instance_uuid = args['instance_uuid']
request_spec.RequestSpec._from_db_object(self.context, self.spec_obj,
self.spec_obj._create_in_db(self.context, args))
return self.spec_obj
def test_get_by_instance_uuid_not_found(self):
self.assertRaises(exception.RequestSpecNotFound,
self.spec_obj._get_by_instance_uuid_from_db, self.context,
self.instance_uuid)
def test_get_by_uuid(self):
spec = self._create_spec()
db_spec = self.spec_obj.get_by_instance_uuid(self.context,
self.instance_uuid)
self.assertTrue(obj_base.obj_equal_prims(spec, db_spec))
def test_save_in_db(self):
spec = self._create_spec()
old_az = spec.availability_zone
spec.availability_zone = '%s-new' % old_az
spec.save()
db_spec = self.spec_obj.get_by_instance_uuid(self.context,
spec.instance_uuid)
self.assertTrue(obj_base.obj_equal_prims(spec, db_spec))
self.assertNotEqual(old_az, db_spec.availability_zone)
def test_double_create(self):
spec = self._create_spec()
self.assertRaises(exception.ObjectActionError, spec.create)
def test_destroy(self):
spec = self._create_spec()
spec.destroy()
self.assertRaises(
exception.RequestSpecNotFound,
self.spec_obj._get_by_instance_uuid_from_db, self.context,
self.instance_uuid)
def test_destroy_not_found(self):
spec = self._create_spec()
spec.destroy()
self.assertRaises(exception.RequestSpecNotFound, spec.destroy)
@db.api_context_manager.writer
def _delete_request_spec(context, instance_uuid):
"""Deletes a RequestSpec by the instance_uuid."""
context.session.query(api_models.RequestSpec).filter_by(
instance_uuid=instance_uuid).delete()
class RequestSpecInstanceMigrationTestCase(
integrated_helpers._IntegratedTestBase):
api_major_version = 'v2.1'
_image_ref_parameter = 'imageRef'
_flavor_ref_parameter = 'flavorRef'
def setUp(self):
super(RequestSpecInstanceMigrationTestCase, self).setUp()
self.context = context.get_admin_context()
fake_network.set_stub_network_methods(self)
def _create_instances(self, old=2, total=5):
request = self._build_minimal_create_server_request()
# Create all instances that would set a RequestSpec object
request.update({'max_count': total})
self.api.post_server({'server': request})
self.instances = objects.InstanceList.get_all(self.context)
# Make sure that we have all the needed instances
self.assertEqual(total, len(self.instances))
        # Fake the legacy behaviour by removing the RequestSpec for some old instances.
for i in range(0, old):
_delete_request_spec(self.context, self.instances[i].uuid)
# Just add a deleted instance to make sure we don't create
# a RequestSpec object for it.
del request['max_count']
server = self.api.post_server({'server': request})
self.api.delete_server(server['id'])
# Make sure we have the deleted instance only soft-deleted in DB
deleted_instances = objects.InstanceList.get_by_filters(
self.context, filters={'deleted': True})
self.assertEqual(1, len(deleted_instances))
def test_migration(self):
self._create_instances(old=2, total=5)
match, done = request_spec.migrate_instances_add_request_spec(
self.context, 2)
self.assertEqual(2, match)
self.assertEqual(2, done)
        # Run the migration again to make sure that we don't process
        # the same instances twice
match, done = request_spec.migrate_instances_add_request_spec(
self.context, 3)
self.assertEqual(3, match)
self.assertEqual(0, done)
# Make sure we ran over all the instances
match, done = request_spec.migrate_instances_add_request_spec(
self.context, 50)
self.assertEqual(0, match)
self.assertEqual(0, done)
# Make sure all instances have now a related RequestSpec
for uuid in [instance.uuid for instance in self.instances]:
try:
objects.RequestSpec.get_by_instance_uuid(self.context, uuid)
except exception.RequestSpecNotFound:
self.fail("RequestSpec not found for instance UUID :%s ", uuid)
def test_migration_with_none_old(self):
self._create_instances(old=0, total=5)
# Make sure no migrations can be found
match, done = request_spec.migrate_instances_add_request_spec(
self.context, 50)
self.assertEqual(5, match)
self.assertEqual(0, done)
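# Hedged reading of the contract exercised above (inferred from the tests,
# not from the implementation): migrate_instances_add_request_spec(ctxt, n)
# examines up to n not-yet-checked instances and returns (match, done), where
# `match` counts the instances examined in this batch and `done` counts those
# that were missing a RequestSpec and had one created for them.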
|
hieukypc/ERP | refs/heads/master | openerp/addons/website_mass_mailing/controllers/main.py | 18 | # -*- coding: utf-8 -*-
from openerp import http, SUPERUSER_ID
from openerp.addons.mass_mailing.controllers.main import MassMailController
from openerp.http import request
class MassMailController(MassMailController):
@http.route(['/mail/mailing/<int:mailing_id>/unsubscribe'], type='http', website=True, auth='public')
def mailing(self, mailing_id, email=None, res_id=None, **post):
mailing = request.env['mail.mass_mailing'].sudo().browse(mailing_id)
if mailing.exists():
if mailing.mailing_model == 'mail.mass_mailing.contact':
contacts = request.env['mail.mass_mailing.contact'].sudo().search([('email', '=', email)])
return request.website.render('website_mass_mailing.page_unsubscribe', {
'contacts': contacts,
'email': email,
'mailing_id': mailing_id})
else:
super(MassMailController, self).mailing(mailing_id, email=email, res_id=res_id, **post)
return request.website.render('website_mass_mailing.page_unsubscribed')
@http.route(['/mail/mailing/unsubscribe'], type='json', auth='none')
def unsubscribe(self, mailing_id, opt_in_ids, opt_out_ids, email):
mailing = request.env['mail.mass_mailing'].sudo().browse(mailing_id)
if mailing.exists():
mailing.update_opt_out(mailing_id, email, opt_in_ids, False)
mailing.update_opt_out(mailing_id, email, opt_out_ids, True)
@http.route('/website_mass_mailing/is_subscriber', type='json', website=True, auth="public")
def is_subscriber(self, list_id, **post):
cr, uid, context = request.cr, request.uid, request.context
Contacts = request.registry['mail.mass_mailing.contact']
Users = request.registry['res.users']
is_subscriber = False
email = None
if uid != request.website.user_id.id:
email = Users.browse(cr, SUPERUSER_ID, uid, context).email
elif request.session.get('mass_mailing_email'):
email = request.session['mass_mailing_email']
if email:
contact_ids = Contacts.search(cr, SUPERUSER_ID, [('list_id', '=', int(list_id)), ('email', '=', email), ('opt_out', '=', False)], context=context)
is_subscriber = len(contact_ids) > 0
return {'is_subscriber': is_subscriber, 'email': email}
@http.route('/website_mass_mailing/subscribe', type='json', website=True, auth="public")
def subscribe(self, list_id, email, **post):
cr, uid, context = request.cr, request.uid, request.context
Contacts = request.registry['mail.mass_mailing.contact']
parsed_email = Contacts.get_name_email(email, context=context)[1]
contact_ids = Contacts.search_read(
cr, SUPERUSER_ID,
[('list_id', '=', int(list_id)), ('email', '=', parsed_email)],
['opt_out'], context=context)
if not contact_ids:
Contacts.add_to_list(cr, SUPERUSER_ID, email, int(list_id), context=context)
else:
if contact_ids[0]['opt_out']:
Contacts.write(cr, SUPERUSER_ID, [contact_ids[0]['id']], {'opt_out': False}, context=context)
# add email to session
request.session['mass_mailing_email'] = email
return True
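    # Illustrative JSON-RPC call for the subscribe route above (endpoint and
    # parameter names come from the decorator and signature; the values are
    # hypothetical):
    #   POST /website_mass_mailing/subscribe
    #   {"jsonrpc": "2.0", "method": "call",
    #    "params": {"list_id": 3, "email": "someone@example.com"}}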
@http.route(['/website_mass_mailing/get_content'], type='json', website=True, auth="public")
def get_mass_mailing_content(self, newsletter_id, **post):
data = self.is_subscriber(newsletter_id, **post)
mass_mailing_list = request.registry['mail.mass_mailing.list'].browse(request.cr, SUPERUSER_ID, int(newsletter_id), request.context)
data.update({
'content': mass_mailing_list.popup_content,
'redirect_url': mass_mailing_list.popup_redirect_url
})
return data
|
vlegoff/tsunami | refs/heads/master | src/secondaires/systeme/contextes/systeme.py | 1 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le contexte 'systeme'"""
import sys
import traceback
from code import InteractiveConsole
from primaires.format.constantes import ponctuations_finales
from primaires.interpreteur.contexte import Contexte
class Systeme(Contexte):
"""Contexte permettant d'entrer du code Python.
"""
nom = "systeme:python_console"
def __init__(self, pere):
"""Constructeur du contexte"""
Contexte.__init__(self, pere)
self.opts.prompt_prf = ""
self.opts.prompt_clr = ""
self.opts.nl = False
self.espace = {}
self.console = InteractiveConsole(self.espace)
self.py_prompt = ">>> "
def __getstate__(self):
attrs = Contexte.__getstate__(self)
attrs["espace"] = {}
attrs["py_prompt"] = ">>> "
if "console" in attrs:
del attrs["console"]
return attrs
def get_prompt(self):
"""Retourne le prompt"""
return self.py_prompt
def accueil(self):
"""Message d'accueil du contexte"""
res = "|tit|Console Python|ff|\n\n" \
"Vous pouvez entrer ici du code Python et voir le résultat " \
"des instructions\nque vous entrez.\n" \
"Vous pouvez utilisez la variable |cmd|importeur|ff| " \
"qui contient, comme\nson nom l'indique, l'importeur et par " \
"extension, une bonne partie de Kassie.\n" \
"|att|Tapez |ff||cmd|/q|ff||att| pour quitter.|ff|\n\n" \
"|tit|Python {}|ff|\n\n".format(sys.version)
return res
def interpreter(self, msg):
"""Méthode d'interprétation du contexte"""
self.espace["importeur"] = type(self).importeur
self.espace["joueur"] = self.pere.joueur
if msg.startswith("/"):
msg = msg[1:]
if msg == "q":
self.fermer()
self.pere << "Fermeture de la console Python."
else:
self.pere << "|err|Option inconnue.|ff|"
else:
            # Execute the code
sys.stdin = self.pere
sys.stdout = self.pere
sys.stderr = self.pere
ret = False
nb_msg = self.pere.nb_msg
try:
ret = self.console.push(msg)
self.py_prompt = "... " if ret else ">>> "
except Exception:
self.pere << traceback.format_exc()
else:
if nb_msg == self.pere.nb_msg:
self.pere.envoyer("")
finally:
sys.stdin = sys.__stdin__
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
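# Illustrative session (hypothetical input/output) matching the push() logic
# above: a complete statement makes console.push() return False and the
# prompt stays '>>> '; an opening compound statement such as 'def f():' makes
# push() return True and the prompt switches to '... ' until the block is
# closed, mirroring the standard interactive interpreter.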
|
jeffery9/mixprint_addons | refs/heads/master | portal_event/__init__.py | 53 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import event
|
whereismyjetpack/ansible | refs/heads/devel | lib/ansible/modules/cloud/azure/azure_rm_storageaccount.py | 50 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
---
module: azure_rm_storageaccount
version_added: "2.1"
short_description: Manage Azure storage accounts.
description:
- Create, update or delete a storage account.
options:
resource_group:
description:
- Name of the resource group to use.
required: true
name:
description:
- Name of the storage account to update or create.
required: false
default: null
state:
description:
- Assert the state of the storage account. Use 'present' to create or update a storage account and
'absent' to delete an account.
default: present
required: false
choices:
- absent
- present
location:
description:
- Valid azure location. Defaults to location of the resource group.
required: false
default: resource_group location
account_type:
description:
- "Type of storage account. Required when creating a storage account. NOTE: Standard_ZRS and Premium_LRS
accounts cannot be changed to other account types, and other account types cannot be changed to
Standard_ZRS or Premium_LRS."
required: false
default: null
choices:
- Premium_LRS
- Standard_GRS
- Standard_LRS
- Standard_RAGRS
- Standard_ZRS
aliases:
- type
custom_domain:
description:
- User domain assigned to the storage account. Must be a dictionary with 'name' and 'use_sub_domain'
keys where 'name' is the CNAME source. Only one custom domain is supported per storage account at this
time. To clear the existing custom domain, use an empty string for the custom domain name property.
- Can be added to an existing storage account. Will be ignored during storage account creation.
required: false
default: null
kind:
description:
- The 'kind' of storage.
required: false
default: 'Storage'
choices:
- Storage
        - BlobStorage
version_added: "2.2"
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: remove account, if it exists
azure_rm_storageaccount:
resource_group: Testing
name: clh0002
state: absent
- name: create an account
azure_rm_storageaccount:
resource_group: Testing
name: clh0002
type: Standard_RAGRS
tags:
      testing: testing
      delete: on-exit
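# Hedged illustration (hypothetical values): attach a custom domain to an
# existing account; the DNS CNAME for the domain must already point at the
# account's blob endpoint.
- name: add a custom domain
  azure_rm_storageaccount:
    resource_group: Testing
    name: clh0002
    custom_domain: { name: www.contoso.com, use_sub_domain: false }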
'''
RETURN = '''
state:
description: Current state of the storage account.
returned: always
type: dict
sample: {
"account_type": "Standard_RAGRS",
"custom_domain": null,
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/testing/providers/Microsoft.Storage/storageAccounts/clh0003",
"location": "eastus2",
"name": "clh0003",
"primary_endpoints": {
"blob": "https://clh0003.blob.core.windows.net/",
"queue": "https://clh0003.queue.core.windows.net/",
"table": "https://clh0003.table.core.windows.net/"
},
"primary_location": "eastus2",
"provisioning_state": "Succeeded",
"resource_group": "Testing",
"secondary_endpoints": {
"blob": "https://clh0003-secondary.blob.core.windows.net/",
"queue": "https://clh0003-secondary.queue.core.windows.net/",
"table": "https://clh0003-secondary.table.core.windows.net/"
},
"secondary_location": "centralus",
"status_of_primary": "Available",
"status_of_secondary": "Available",
"tags": null,
"type": "Microsoft.Storage/storageAccounts"
}
'''
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from msrestazure.azure_exceptions import CloudError
from azure.storage.cloudstorageaccount import CloudStorageAccount
from azure.common import AzureMissingResourceHttpError, AzureHttpError
from azure.mgmt.storage.models.storage_management_client_enums import ProvisioningState, SkuName, SkuTier, Kind
from azure.mgmt.storage.models import StorageAccountUpdateParameters, CustomDomain, \
StorageAccountCreateParameters, Sku
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMStorageAccount(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
account_type=dict(type='str', choices=[], aliases=['type']),
custom_domain=dict(type='dict'),
location=dict(type='str'),
name=dict(type='str', required=True),
resource_group=dict(required=True, type='str'),
state=dict(default='present', choices=['present', 'absent']),
force=dict(type='bool', default=False),
tags=dict(type='dict'),
kind=dict(type='str', default='Storage', choices=['Storage', 'BlobStorage'])
)
for key in SkuName:
self.module_arg_spec['account_type']['choices'].append(getattr(key, 'value'))
self.results = dict(
changed=False,
state=dict()
)
self.account_dict = None
self.resource_group = None
self.name = None
self.state = None
self.location = None
self.account_type = None
self.custom_domain = None
self.tags = None
self.force = None
self.kind = None
super(AzureRMStorageAccount, self).__init__(self.module_arg_spec,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec.keys() + ['tags']:
setattr(self, key, kwargs[key])
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
# Set default location
self.location = resource_group.location
if len(self.name) < 3 or len(self.name) > 24:
self.fail("Parameter error: name length must be between 3 and 24 characters.")
if self.custom_domain:
if self.custom_domain.get('name', None) is None:
self.fail("Parameter error: expecting custom_domain to have a name attribute of type string.")
if self.custom_domain.get('use_sub_domain', None) is None:
self.fail("Parameter error: expecting custom_domain to have a use_sub_domain "
"attribute of type boolean.")
self.account_dict = self.get_account()
if self.state == 'present' and self.account_dict and \
                self.account_dict['provisioning_state'] != AZURE_SUCCESS_STATE:
self.fail("Error: storage account {0} has not completed provisioning. State is {1}. Expecting state "
"to be {2}.".format(self.name, self.account_dict['provisioning_state'], AZURE_SUCCESS_STATE))
if self.account_dict is not None:
self.results['state'] = self.account_dict
else:
self.results['state'] = dict()
if self.state == 'present':
if not self.account_dict:
self.results['state'] = self.create_account()
else:
self.update_account()
elif self.state == 'absent' and self.account_dict:
self.delete_account()
self.results['state'] = dict(Status='Deleted')
return self.results
def check_name_availability(self):
self.log('Checking name availability for {0}'.format(self.name))
try:
response = self.storage_client.storage_accounts.check_name_availability(self.name)
except AzureHttpError as e:
self.log('Error attempting to validate name.')
self.fail("Error checking name availability: {0}".format(str(e)))
if not response.name_available:
self.log('Error name not available.')
self.fail("{0} - {1}".format(response.message, response.reason))
def get_account(self):
self.log('Get properties for account {0}'.format(self.name))
account_obj = None
account_dict = None
try:
account_obj = self.storage_client.storage_accounts.get_properties(self.resource_group, self.name)
except CloudError:
pass
if account_obj:
account_dict = self.account_obj_to_dict(account_obj)
return account_dict
def account_obj_to_dict(self, account_obj):
account_dict = dict(
id=account_obj.id,
name=account_obj.name,
location=account_obj.location,
resource_group=self.resource_group,
type=account_obj.type,
sku_tier=account_obj.sku.tier.value,
sku_name=account_obj.sku.name.value,
provisioning_state=account_obj.provisioning_state.value,
secondary_location=account_obj.secondary_location,
status_of_primary=(account_obj.status_of_primary.value
if account_obj.status_of_primary is not None else None),
status_of_secondary=(account_obj.status_of_secondary.value
if account_obj.status_of_secondary is not None else None),
primary_location=account_obj.primary_location
)
account_dict['custom_domain'] = None
if account_obj.custom_domain:
account_dict['custom_domain'] = dict(
name=account_obj.custom_domain.name,
use_sub_domain=account_obj.custom_domain.use_sub_domain
)
account_dict['primary_endpoints'] = None
if account_obj.primary_endpoints:
account_dict['primary_endpoints'] = dict(
blob=account_obj.primary_endpoints.blob,
queue=account_obj.primary_endpoints.queue,
table=account_obj.primary_endpoints.table
)
account_dict['secondary_endpoints'] = None
if account_obj.secondary_endpoints:
account_dict['secondary_endpoints'] = dict(
blob=account_obj.secondary_endpoints.blob,
queue=account_obj.secondary_endpoints.queue,
table=account_obj.secondary_endpoints.table
)
account_dict['tags'] = None
if account_obj.tags:
account_dict['tags'] = account_obj.tags
return account_dict
def update_account(self):
self.log('Update storage account {0}'.format(self.name))
if self.account_type:
if self.account_type != self.account_dict['sku_name']:
# change the account type
if self.account_dict['sku_name'] in [SkuName.premium_lrs, SkuName.standard_zrs]:
self.fail("Storage accounts of type {0} and {1} cannot be changed.".format(
SkuName.premium_lrs, SkuName.standard_zrs))
if self.account_type in [SkuName.premium_lrs, SkuName.standard_zrs]:
self.fail("Storage account of type {0} cannot be changed to a type of {1} or {2}.".format(
self.account_dict['sku_name'], SkuName.premium_lrs, SkuName.standard_zrs))
self.results['changed'] = True
self.account_dict['sku_name'] = self.account_type
if self.results['changed'] and not self.check_mode:
# Perform the update. The API only allows changing one attribute per call.
try:
self.log("sku_name: %s" % self.account_dict['sku_name'])
self.log("sku_tier: %s" % self.account_dict['sku_tier'])
sku = Sku(SkuName(self.account_dict['sku_name']))
sku.tier = SkuTier(self.account_dict['sku_tier'])
parameters = StorageAccountUpdateParameters(sku=sku)
self.storage_client.storage_accounts.update(self.resource_group,
self.name,
parameters)
except Exception as exc:
self.fail("Failed to update account type: {0}".format(str(exc)))
if self.custom_domain:
            if not self.account_dict['custom_domain'] or \
               self.account_dict['custom_domain'] != self.custom_domain:
self.results['changed'] = True
self.account_dict['custom_domain'] = self.custom_domain
if self.results['changed'] and not self.check_mode:
new_domain = CustomDomain(name=self.custom_domain['name'],
use_sub_domain=self.custom_domain['use_sub_domain'])
parameters = StorageAccountUpdateParameters(custom_domain=new_domain)
try:
self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters)
except Exception as exc:
self.fail("Failed to update custom domain: {0}".format(str(exc)))
update_tags, self.account_dict['tags'] = self.update_tags(self.account_dict['tags'])
if update_tags:
self.results['changed'] = True
if not self.check_mode:
parameters = StorageAccountUpdateParameters(tags=self.account_dict['tags'])
try:
self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters)
except Exception as exc:
self.fail("Failed to update tags: {0}".format(str(exc)))
def create_account(self):
self.log("Creating account {0}".format(self.name))
if not self.location:
self.fail('Parameter error: location required when creating a storage account.')
if not self.account_type:
self.fail('Parameter error: account_type required when creating a storage account.')
self.check_name_availability()
self.results['changed'] = True
if self.check_mode:
account_dict = dict(
location=self.location,
account_type=self.account_type,
name=self.name,
resource_group=self.resource_group,
tags=dict()
)
if self.tags:
account_dict['tags'] = self.tags
return account_dict
sku = Sku(SkuName(self.account_type))
sku.tier = SkuTier.standard if 'Standard' in self.account_type else SkuTier.premium
parameters = StorageAccountCreateParameters(sku, self.kind, self.location, tags=self.tags)
self.log(str(parameters))
try:
poller = self.storage_client.storage_accounts.create(self.resource_group, self.name, parameters)
self.get_poller_result(poller)
except AzureHttpError as e:
self.log('Error creating storage account.')
self.fail("Failed to create account: {0}".format(str(e)))
# the poller doesn't actually return anything
return self.get_account()
def delete_account(self):
        if self.account_dict['provisioning_state'] == ProvisioningState.succeeded.value and \
           not self.force and self.account_has_blob_containers():
            self.fail("Account contains blob containers. Is it in use? Use the force option to attempt deletion.")
self.log('Delete storage account {0}'.format(self.name))
self.results['changed'] = True
if not self.check_mode:
try:
status = self.storage_client.storage_accounts.delete(self.resource_group, self.name)
self.log("delete status: ")
self.log(str(status))
except AzureHttpError as e:
self.fail("Failed to delete the account: {0}".format(str(e)))
return True
def account_has_blob_containers(self):
'''
If there are blob containers, then there are likely VMs depending on this account and it should
not be deleted.
'''
self.log('Checking for existing blob containers')
blob_service = self.get_blob_client(self.resource_group, self.name)
try:
response = blob_service.list_containers()
except AzureMissingResourceHttpError:
# No blob storage available?
return False
if len(response.items) > 0:
return True
return False
def main():
AzureRMStorageAccount()
if __name__ == '__main__':
main()
|
jakesdavis/ionic-boilerplate-template | refs/heads/master | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/input.py | 713 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = [
'executable',
'shared_library',
'loadable_module',
'mac_kernel_extension',
]
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
'destination',
'files',
'include_dirs',
'inputs',
'libraries',
'outputs',
'sources',
]
path_sections = set()
# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
# If section ends in one of the '=+?!' characters, it's applied to a section
# without the trailing characters. '/' is notably absent from this list,
# because there's no way for a regular expression to be treated as a path.
while section and section[-1:] in '=+?!':
section = section[:-1]
if section in path_sections:
return True
  # Sections matching the regexp '_(dir|file|path)s?$' are also
# considered PathSections. Using manual string matching since that
# is much faster than the regexp and this can be called hundreds of
# thousands of times so micro performance matters.
if "_" in section:
tail = section[-6:]
if tail[-1] == 's':
tail = tail[:-1]
if tail[-5:] in ('_file', '_path'):
return True
return tail[-4:] == '_dir'
return False
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
# Sections that must exist inside targets and not configurations.
'actions',
'configurations',
'copies',
'default_configuration',
'dependencies',
'dependencies_original',
'libraries',
'postbuilds',
'product_dir',
'product_extension',
'product_name',
'product_prefix',
'rules',
'run_as',
'sources',
'standalone_static_library',
'suppress_wildcard',
'target_name',
'toolset',
'toolsets',
'type',
# Sections that can be found inside targets or configurations, but that
# should not be propagated from targets into their configurations.
'variables',
]
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
# toplevel,
# qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
"""Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
"""
if included == None:
included = []
if build_file_path in included:
return included
included.append(build_file_path)
for included_build_file in aux_data[build_file_path].get('included', []):
GetIncludedBuildFiles(included_build_file, aux_data, included)
return included
def CheckedEval(file_contents):
"""Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
"""
ast = compiler.parse(file_contents)
assert isinstance(ast, Module)
c1 = ast.getChildren()
assert c1[0] is None
assert isinstance(c1[1], Stmt)
c2 = c1[1].getChildren()
assert isinstance(c2[0], Discard)
c3 = c2[0].getChildren()
assert len(c3) == 1
return CheckNode(c3[0], [])
def CheckNode(node, keypath):
if isinstance(node, Dict):
c = node.getChildren()
dict = {}
for n in range(0, len(c), 2):
assert isinstance(c[n], Const)
key = c[n].getChildren()[0]
if key in dict:
raise GypError("Key '" + key + "' repeated at level " +
repr(len(keypath) + 1) + " with key path '" +
'.'.join(keypath) + "'")
kp = list(keypath) # Make a copy of the list for descending this node.
kp.append(key)
dict[key] = CheckNode(c[n + 1], kp)
return dict
elif isinstance(node, List):
c = node.getChildren()
children = []
for index, child in enumerate(c):
kp = list(keypath) # Copy list.
kp.append(repr(index))
children.append(CheckNode(child, kp))
return children
elif isinstance(node, Const):
return node.getChildren()[0]
else:
raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
"': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
build_file_contents = open(build_file_path).read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError, e:
e.filename = build_file_path
raise
except Exception, e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
if type(build_file_data) is not dict:
raise GypError("%s does not evaluate to a dictionary." % build_file_path)
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
if ('skip_includes' not in build_file_data or
not build_file_data['skip_includes']):
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
includes, check):
includes_list = []
if includes != None:
includes_list.extend(includes)
if 'includes' in subdict:
for include in subdict['includes']:
# "include" is specified relative to subdict_path, so compute the real
# path to include by appending the provided "include" to the directory
# in which subdict_path resides.
relative_include = \
os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
includes_list.append(relative_include)
# Unhook the includes list, it's no longer needed.
del subdict['includes']
# Merge in the included files.
for include in includes_list:
if not 'included' in aux_data[subdict_path]:
aux_data[subdict_path]['included'] = []
aux_data[subdict_path]['included'].append(include)
gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
MergeDicts(subdict,
LoadOneBuildFile(include, data, aux_data, None, False, check),
subdict_path, include)
# Recurse into subdictionaries.
for k, v in subdict.iteritems():
if type(v) is dict:
LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
None, check)
elif type(v) is list:
LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
for item in sublist:
if type(item) is dict:
LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
None, check)
elif type(item) is list:
LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
if 'targets' in data:
target_list = data['targets']
new_target_list = []
for target in target_list:
# If this target already has an explicit 'toolset', and no 'toolsets'
# list, don't modify it further.
if 'toolset' in target and 'toolsets' not in target:
new_target_list.append(target)
continue
if multiple_toolsets:
toolsets = target.get('toolsets', ['target'])
else:
toolsets = ['target']
# Make sure this 'toolsets' definition is only processed once.
if 'toolsets' in target:
del target['toolsets']
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
new_target = gyp.simple_copy.deepcopy(target)
new_target['toolset'] = build
new_target_list.append(new_target)
target['toolset'] = toolsets[0]
new_target_list.append(target)
data['targets'] = new_target_list
if 'conditions' in data:
for condition in data['conditions']:
if type(condition) is list:
for condition_dict in condition[1:]:
if type(condition_dict) is dict:
ProcessToolsetsInDict(condition_dict)
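# Illustrative before/after for the expansion above (assumes
# multiple_toolsets is True; the target dict is hypothetical):
#
#   data = {'targets': [{'target_name': 'a', 'toolsets': ['host', 'target']}]}
#   ProcessToolsetsInDict(data)
#   # data['targets'] now holds one deep copy per extra toolset (copies
#   # first, then the original):
#   #   [{'target_name': 'a', 'toolset': 'target'},
#   #    {'target_name': 'a', 'toolset': 'host'}]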
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
depth, check, load_dependencies):
# If depth is set, predefine the DEPTH variable to be a relative path from
# this build file's directory to the directory identified by depth.
if depth:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
if d == '':
variables['DEPTH'] = '.'
else:
variables['DEPTH'] = d.replace('\\', '/')
# The 'target_build_files' key is only set when loading target build files in
# the non-parallel code path, where LoadTargetBuildFile is called
# recursively. In the parallel code path, we don't need to check whether the
# |build_file_path| has already been loaded, because the 'scheduled' set in
# ParallelState guarantees that we never load the same |build_file_path|
# twice.
if 'target_build_files' in data:
if build_file_path in data['target_build_files']:
# Already loaded.
return False
data['target_build_files'].add(build_file_path)
gyp.DebugOutput(gyp.DEBUG_INCLUDES,
"Loading Target Build File '%s'", build_file_path)
build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
includes, True, check)
# Store DEPTH for later use in generators.
build_file_data['_DEPTH'] = depth
# Set up the included_files key indicating which .gyp files contributed to
# this target dict.
if 'included_files' in build_file_data:
raise GypError(build_file_path + ' must not contain included_files key')
included = GetIncludedBuildFiles(build_file_path, aux_data)
build_file_data['included_files'] = []
for included_file in included:
# included_file is relative to the current directory, but it needs to
# be made relative to build_file_path's directory.
included_relative = \
gyp.common.RelativePath(included_file,
os.path.dirname(build_file_path))
build_file_data['included_files'].append(included_relative)
# Do a first round of toolsets expansion so that conditions can be defined
# per toolset.
ProcessToolsetsInDict(build_file_data)
# Apply "pre"/"early" variable expansions and condition evaluations.
ProcessVariablesAndConditionsInDict(
build_file_data, PHASE_EARLY, variables, build_file_path)
# Since some toolsets might have been defined conditionally, perform
# a second round of toolsets expansion now.
ProcessToolsetsInDict(build_file_data)
# Look at each project's target_defaults dict, and merge settings into
# targets.
if 'target_defaults' in build_file_data:
if 'targets' not in build_file_data:
raise GypError("Unable to find targets in build file %s" %
build_file_path)
index = 0
while index < len(build_file_data['targets']):
# This procedure needs to give the impression that target_defaults is
# used as defaults, and the individual targets inherit from that.
# The individual targets need to be merged into the defaults. Make
# a deep copy of the defaults for each target, merge the target dict
# as found in the input file into that copy, and then hook up the
# copy with the target-specific data merged into it as the replacement
# target dict.
old_target_dict = build_file_data['targets'][index]
new_target_dict = gyp.simple_copy.deepcopy(
build_file_data['target_defaults'])
MergeDicts(new_target_dict, old_target_dict,
build_file_path, build_file_path)
build_file_data['targets'][index] = new_target_dict
index += 1
# No longer needed.
del build_file_data['target_defaults']
# Look for dependencies. This means that dependency resolution occurs
# after "pre" conditionals and variable expansion, but before "post" -
# in other words, you can't put a "dependencies" section inside a "post"
# conditional within a target.
dependencies = []
if 'targets' in build_file_data:
for target_dict in build_file_data['targets']:
if 'dependencies' not in target_dict:
continue
for dependency in target_dict['dependencies']:
dependencies.append(
gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
if load_dependencies:
for dependency in dependencies:
try:
LoadTargetBuildFile(dependency, data, aux_data, variables,
includes, depth, check, load_dependencies)
except Exception, e:
gyp.common.ExceptionAppend(
e, 'while loading dependencies of %s' % build_file_path)
raise
else:
return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
build_file_path, variables,
includes, depth, check,
generator_input_info):
"""Wrapper around LoadTargetBuildFile for parallel processing.
This wrapper is used when LoadTargetBuildFile is executed in
a worker process.
"""
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Apply globals so that the worker process behaves the same.
for key, value in global_flags.iteritems():
globals()[key] = value
SetGeneratorGlobals(generator_input_info)
result = LoadTargetBuildFile(build_file_path, per_process_data,
per_process_aux_data, variables,
includes, depth, check, False)
if not result:
return result
(build_file_path, dependencies) = result
# We can safely pop the build_file_data from per_process_data because it
# will never be referenced by this process again, so we don't need to keep
# it in the cache.
build_file_data = per_process_data.pop(build_file_path)
# This gets serialized and sent back to the main process via a pipe.
# It's handled in LoadTargetBuildFileCallback.
return (build_file_path,
build_file_data,
dependencies)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return None
except Exception, e:
print >>sys.stderr, 'Exception:', e
print >>sys.stderr, traceback.format_exc()
return None
class ParallelProcessingError(Exception):
pass
class ParallelState(object):
"""Class to keep track of state when processing input files in parallel.
If build files are loaded in parallel, use this to keep track of
state during farming out and processing parallel jobs. It's stored
in a global so that the callback function can have access to it.
"""
def __init__(self):
# The multiprocessing pool.
self.pool = None
# The condition variable used to protect this object and notify
# the main loop when there might be more data to process.
self.condition = None
# The "data" dict that was passed to LoadTargetBuildFileParallel
self.data = None
# The number of parallel calls outstanding; decremented when a response
# was received.
self.pending = 0
# The set of all build files that have been scheduled, so we don't
# schedule the same one twice.
self.scheduled = set()
# A list of dependency build file paths that haven't been scheduled yet.
self.dependencies = []
# Flag to indicate if there was an error in a child process.
self.error = False
def LoadTargetBuildFileCallback(self, result):
"""Handle the results of running LoadTargetBuildFile in another process.
"""
self.condition.acquire()
if not result:
self.error = True
self.condition.notify()
self.condition.release()
return
(build_file_path0, build_file_data0, dependencies0) = result
self.data[build_file_path0] = build_file_data0
self.data['target_build_files'].add(build_file_path0)
for new_dependency in dependencies0:
if new_dependency not in self.scheduled:
self.scheduled.add(new_dependency)
self.dependencies.append(new_dependency)
self.pending -= 1
self.condition.notify()
self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info):
parallel_state = ParallelState()
parallel_state.condition = threading.Condition()
# Make copies of the build_files argument that we can modify while working.
parallel_state.dependencies = list(build_files)
parallel_state.scheduled = set(build_files)
parallel_state.pending = 0
parallel_state.data = data
try:
parallel_state.condition.acquire()
while parallel_state.dependencies or parallel_state.pending:
if parallel_state.error:
break
if not parallel_state.dependencies:
parallel_state.condition.wait()
continue
dependency = parallel_state.dependencies.pop()
parallel_state.pending += 1
global_flags = {
'path_sections': globals()['path_sections'],
'non_configuration_keys': globals()['non_configuration_keys'],
'multiple_toolsets': globals()['multiple_toolsets']}
if not parallel_state.pool:
parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
parallel_state.pool.apply_async(
CallLoadTargetBuildFile,
args = (global_flags, dependency,
variables, includes, depth, check, generator_input_info),
callback = parallel_state.LoadTargetBuildFileCallback)
except KeyboardInterrupt, e:
parallel_state.pool.terminate()
raise e
parallel_state.condition.release()
parallel_state.pool.close()
parallel_state.pool.join()
parallel_state.pool = None
if parallel_state.error:
sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS = set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
stack = []
start = -1
for index, char in enumerate(input_str):
if char in LBRACKETS:
stack.append(char)
if start == -1:
start = index
elif char in BRACKETS:
if not stack:
return (-1, -1)
if stack.pop() != BRACKETS[char]:
return (-1, -1)
if not stack:
return (start, index + 1)
return (-1, -1)
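# Hedged examples for the matcher above, derived from the comment's own case:
#
#   FindEnclosingBracketGroup('<(foo <(bar)) blah')  => (1, 13)
#   FindEnclosingBracketGroup('no brackets at all')  => (-1, -1)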
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
if type(string) is str:
# This function is called a lot so for maximum performance, avoid
# involving regexps which would otherwise make the code much
# shorter. Regexps would need twice the time of this function.
if string:
if string == "0":
return True
if string[0] == "-":
string = string[1:]
if not string:
return False
if '1' <= string[0] <= '9':
return string.isdigit()
return False
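# Illustrative checks against the canonical-form definition above:
#
#   IsStrCanonicalInt('37')   => True
#   IsStrCanonicalInt('-5')   => True
#   IsStrCanonicalInt('037')  => False  (leading zero is not canonical)
#   IsStrCanonicalInt('-0')   => False  (str(int('-0')) is '0', not '-0')
#   IsStrCanonicalInt(37)     => False  (only str instances qualify)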
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
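# Hedged example of what the regexps above capture (standard re semantics);
# early_variable_re matched against '<!@(pwd)' yields groups:
#   replace        => '<!@(pwd)'
#   type           => '<!@'   ('!' runs a command, '@' expands to a list)
#   command_string => None
#   is_array       => ''
#   content        => 'pwd'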
# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
if sys.platform == 'win32':
if type(cmd) is list:
cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
else:
cmd = re.sub('^cat ', 'type ', cmd)
return cmd
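# Illustrative behaviour: on win32, FixupPlatformCommand('cat foo.txt')
# returns 'type foo.txt'; on every other platform the command is returned
# unchanged.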
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
# Look for the pattern that gets expanded into variables
if phase == PHASE_EARLY:
variable_re = early_variable_re
expansion_symbol = '<'
elif phase == PHASE_LATE:
variable_re = late_variable_re
expansion_symbol = '>'
elif phase == PHASE_LATELATE:
variable_re = latelate_variable_re
expansion_symbol = '^'
else:
assert False
input_str = str(input)
if IsStrCanonicalInt(input_str):
return int(input_str)
# Do a quick scan to determine if an expensive regex search is warranted.
if expansion_symbol not in input_str:
return input_str
# Get the entire list of matches as a list of MatchObject instances.
# (using findall here would return strings instead of MatchObjects).
matches = list(variable_re.finditer(input_str))
if not matches:
return input_str
output = input_str
# Reverse the list of matches so that replacements are done right-to-left.
# That ensures that earlier replacements won't mess up the string in a
# way that causes later calls to find the earlier substituted text instead
# of what's intended for replacement.
matches.reverse()
for match_group in matches:
match = match_group.groupdict()
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
# match['replace'] is the substring to look for, match['type']
# is the character code for the replacement type (< > <! >! <| >| <@
# >@ <!@ >!@), match['is_array'] contains a '[' for command
# arrays, and match['content'] is the name of the variable (< >)
# or command to run (<! >!). match['command_string'] is an optional
# command string. Currently, only 'pymod_do_main' is supported.
# run_command is true if a ! variant is used.
run_command = '!' in match['type']
command_string = match['command_string']
# file_list is true if a | variant is used.
file_list = '|' in match['type']
# Capture these now so we can adjust them later.
replace_start = match_group.start('replace')
replace_end = match_group.end('replace')
# Find the ending paren, and re-evaluate the contained string.
(c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
# Adjust the replacement range to match the entire command
# found by FindEnclosingBracketGroup (since the variable_re
# probably doesn't match the entire command if it contained
# nested variables).
replace_end = replace_start + c_end
# Find the "real" replacement, matching the appropriate closing
# paren, and adjust the replacement start and end.
replacement = input_str[replace_start:replace_end]
# Figure out what the contents of the variable parens are.
contents_start = replace_start + c_start + 1
contents_end = replace_end - 1
contents = input_str[contents_start:contents_end]
# Do filter substitution now for <|().
# Admittedly, this is different than the evaluation order in other
# contexts. However, since filtration has no chance to run on <|(),
# this seems like the only obvious way to give them access to filters.
if file_list:
processed_variables = gyp.simple_copy.deepcopy(variables)
ProcessListFiltersInDict(contents, processed_variables)
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase,
processed_variables, build_file)
else:
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase, variables, build_file)
# Strip off leading/trailing whitespace so that variable matches are
# simpler below (and because they are rarely needed).
contents = contents.strip()
# expand_to_list is true if an @ variant is used. In that case,
# the expansion should result in a list. Note that the caller
    # must then be expecting a list in return, though not all callers are,
# because not all are working in list context. Also, for list
# expansions, there can be no other text besides the variable
# expansion in the input string.
expand_to_list = '@' in match['type'] and input_str == replacement
if run_command or file_list:
# Find the build file's directory, so commands can be run or file lists
# generated relative to it.
build_file_dir = os.path.dirname(build_file)
if build_file_dir == '' and not file_list:
# If build_file is just a leaf filename indicating a file in the
# current directory, build_file_dir might be an empty string. Set
# it to None to signal to subprocess.Popen that it should run the
# command in the current directory.
build_file_dir = None
# Support <|(listfile.txt ...) which generates a file
# containing items from a gyp list, generated at gyp time.
# This works around actions/rules which have more inputs than will
# fit on the command line.
if file_list:
if type(contents) is list:
contents_list = contents
else:
contents_list = contents.split(' ')
replacement = contents_list[0]
if os.path.isabs(replacement):
raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
if not generator_filelist_paths:
path = os.path.join(build_file_dir, replacement)
else:
if os.path.isabs(build_file_dir):
toplevel = generator_filelist_paths['toplevel']
rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
else:
rel_build_file_dir = build_file_dir
qualified_out_dir = generator_filelist_paths['qualified_out_dir']
path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
gyp.common.EnsureDirExists(path)
replacement = gyp.common.RelativePath(path, build_file_dir)
f = gyp.common.WriteOnDiff(path)
for i in contents_list[1:]:
f.write('%s\n' % i)
f.close()
elif run_command:
use_shell = True
if match['is_array']:
contents = eval(contents)
use_shell = False
# Check for a cached value to avoid executing commands, or generating
# file lists more than once. The cache key contains the command to be
# run as well as the directory to run it from, to account for commands
# that depend on their current directory.
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
# arises, the syntax should be extended to support no caching off a
# command's output so it is run every time.
cache_key = (str(contents), build_file_dir)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Executing command '%s' in directory '%s'",
contents, build_file_dir)
replacement = ''
if command_string == 'pymod_do_main':
# <!pymod_do_main(modulename param eters) loads |modulename| as a
# python module and then calls that module's DoMain() function,
# passing ["param", "eters"] as a single list argument. For modules
# that don't load quickly, this can be faster than
# <!(python modulename param eters). Do this in |build_file_dir|.
oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir.
if build_file_dir: # build_file_dir may be None (see above).
os.chdir(build_file_dir)
try:
parsed_contents = shlex.split(contents)
try:
py_module = __import__(parsed_contents[0])
except ImportError as e:
raise GypError("Error importing pymod_do_main"
"module (%s): %s" % (parsed_contents[0], e))
replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
finally:
os.chdir(oldwd)
assert replacement != None
elif command_string:
raise GypError("Unknown command string '%s' in '%s'." %
(command_string, contents))
else:
# Fix up command with platform specific workarounds.
contents = FixupPlatformCommand(contents)
try:
p = subprocess.Popen(contents, shell=use_shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=build_file_dir)
except Exception, e:
raise GypError("%s while executing command '%s' in %s" %
(e, contents, build_file))
p_stdout, p_stderr = p.communicate('')
if p.wait() != 0 or p_stderr:
sys.stderr.write(p_stderr)
# Simulate check_call behavior, since check_call only exists
# in python 2.5 and later.
raise GypError("Call to '%s' returned exit status %d while in %s." %
(contents, p.returncode, build_file))
replacement = p_stdout.rstrip()
cached_command_results[cache_key] = replacement
else:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Had cache value for command '%s' in directory '%s'",
                      contents, build_file_dir)
replacement = cached_value
else:
if not contents in variables:
if contents[-1] in ['!', '/']:
# In order to allow cross-compiles (nacl) to happen more naturally,
# we will allow references to >(sources/) etc. to resolve to
          # an empty list if undefined. This allows actions to:
# 'action!': [
# '>@(_sources!)',
# ],
# 'action/': [
# '>@(_sources/)',
# ],
replacement = []
else:
raise GypError('Undefined variable ' + contents +
' in ' + build_file)
else:
replacement = variables[contents]
if type(replacement) is list:
for item in replacement:
if not contents[-1] == '/' and type(item) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'list contains a ' +
item.__class__.__name__)
# Run through the list and handle variable expansions in it. Since
# the list is guaranteed not to contain dicts, this won't do anything
# with conditions sections.
ProcessVariablesAndConditionsInList(replacement, phase, variables,
build_file)
elif type(replacement) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'found a ' + replacement.__class__.__name__)
if expand_to_list:
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
if type(replacement) is list:
# If it's already a list, make a copy.
output = replacement[:]
else:
# Split it the same way sh would split arguments.
output = shlex.split(str(replacement))
else:
# Expanding in string context.
encoded_replacement = ''
if type(replacement) is list:
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
# TODO(mark): This isn't completely correct. This should
# call a generator-provided function that observes the
# proper list-to-argument quoting rules on a specific
# platform instead of just calling the POSIX encoding
# routine.
encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
else:
encoded_replacement = replacement
output = output[:replace_start] + str(encoded_replacement) + \
output[replace_end:]
# Prepare for the next match iteration.
input_str = output
if output == input:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Found only identity matches on %r, avoiding infinite "
"recursion.",
output)
else:
# Look for more matches now that we've replaced some, to deal with
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
if type(output) is list:
if output and type(output[0]) is list:
# Leave output alone if it's a list of lists.
# We don't want such lists to be stringified.
pass
else:
new_output = []
for item in output:
new_output.append(
ExpandVariables(item, phase, variables, build_file))
output = new_output
else:
output = ExpandVariables(output, phase, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
if type(output) is list:
for index in xrange(0, len(output)):
if IsStrCanonicalInt(output[index]):
output[index] = int(output[index])
elif IsStrCanonicalInt(output):
output = int(output)
return output
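# Hedged examples of PHASE_EARLY expansion (the variables dicts are
# hypothetical):
#
#   ExpandVariables('<(DEPTH)/foo', PHASE_EARLY, {'DEPTH': '..'}, 'a.gyp')
#     => '../foo'
#   ExpandVariables('<@(defines)', PHASE_EARLY,
#                   {'defines': ['A', 'B']}, 'a.gyp')
#     => ['A', 'B']   (a list: '@' variant and the input is exactly the one
#                      expansion)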
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
"""Returns the dict that should be used or None if the result was
that nothing should be used."""
if type(condition) is not list:
raise GypError(conditions_key + ' must be a list')
if len(condition) < 2:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(conditions_key + ' ' + condition[0] +
' must be at least length 2, not ' + str(len(condition)))
i = 0
result = None
while i < len(condition):
cond_expr = condition[i]
true_dict = condition[i + 1]
if type(true_dict) is not dict:
raise GypError('{} {} must be followed by a dictionary, not {}'.format(
conditions_key, cond_expr, type(true_dict)))
if len(condition) > i + 2 and type(condition[i + 2]) is dict:
false_dict = condition[i + 2]
i = i + 3
if i != len(condition):
raise GypError('{} {} has {} unexpected trailing items'.format(
conditions_key, cond_expr, len(condition) - i))
else:
false_dict = None
i = i + 2
if result == None:
result = EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file)
return result
def EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file):
"""Returns true_dict if cond_expr evaluates to true, and false_dict
otherwise."""
  # Do expansions on the condition itself. Since the condition can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
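  # For example (illustrative), a cond_expr such as 'OS=="mac" and
  # chromeos==0' is expanded, compiled to an AST (which is cached), and then
  # evaluated with |variables| as the namespace.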
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
build_file)
if type(cond_expr_expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + cond_expr_expanded.__class__.__name__)
try:
if cond_expr_expanded in cached_conditions_asts:
ast_code = cached_conditions_asts[cond_expr_expanded]
else:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
cached_conditions_asts[cond_expr_expanded] = ast_code
if eval(ast_code, {'__builtins__': None}, variables):
return true_dict
return false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on phase.
# early -> conditions
# late -> target_conditions
# latelate -> no conditions
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to phase, immediately
# prior to being merged.
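  # For example (illustrative), with phase == PHASE_EARLY and the_dict
  # containing:
  #   {'conditions': [['OS=="mac"', {'defines': ['OS_MAC']},
  #                                 {'defines': ['OS_OTHER']}]]}
  # the matching true_dict or false_dict is processed and merged into
  # the_dict, and the 'conditions' key is removed.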
if phase == PHASE_EARLY:
conditions_key = 'conditions'
elif phase == PHASE_LATE:
conditions_key = 'target_conditions'
elif phase == PHASE_LATELATE:
return
else:
assert False
if not conditions_key in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
merge_dict = EvalCondition(condition, conditions_key, phase, variables,
build_file)
    if merge_dict is not None:
      # Expand variables and nested conditionals in the merge_dict before
      # merging it.
ProcessVariablesAndConditionsInDict(merge_dict, phase,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
# Any keys with plain string values in the_dict become automatic variables.
# The variable name is the key name with a "_" character prepended.
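  # For example, a dict containing {'target_name': 'foo'} yields an automatic
  # variable '_target_name' with value 'foo'.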
for key, value in the_dict.iteritems():
if type(value) in (str, int, list):
variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
# Any keys in the_dict's "variables" dict, if it has one, becomes a
# variable. The variable name is the key name in the "variables" dict.
# Variables that end with the % character are set only if they are unset in
# the variables dict. the_dict_key is the name of the key that accesses
# the_dict in the_dict's parent dict. If the_dict's parent is not a dict
# (it could be a list or it could be parentless because it is a root dict),
# the_dict_key will be None.
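  # For example, {'variables': {'use_feature%': 0}} sets 'use_feature' to 0
  # only if nothing else (say, the command line) has already defined it.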
for key, value in the_dict.get('variables', {}).iteritems():
if type(value) not in (str, int, list):
continue
if key.endswith('%'):
variable_name = key[:-1]
if variable_name in variables:
# If the variable is already set, don't set it.
continue
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
value = the_dict[variable_name]
else:
variable_name = key
variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
build_file, the_dict_key=None):
"""Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
"""
# Make a copy of the variables_in dict that can be modified during the
# loading of automatics and the loading of the variables dict.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
if 'variables' in the_dict:
# Make sure all the local variables are added to the variables
# list before we process them so that you can reference one
# variable from another. They will be fully expanded by recursion
# in ExpandVariables.
for key, value in the_dict['variables'].iteritems():
variables[key] = value
# Handle the associated variables dict first, so that any variable
# references within can be resolved prior to using them as variables.
# Pass a copy of the variables dict to avoid having it be tainted.
# Otherwise, it would have extra automatics added for everything that
# should just be an ordinary variable in this scope.
ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
variables, build_file, 'variables')
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
for key, value in the_dict.iteritems():
# Skip "variables", which was already processed if present.
if key != 'variables' and type(value) is str:
expanded = ExpandVariables(value, phase, variables, build_file)
if type(expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__ + ' for ' + key)
the_dict[key] = expanded
# Variable expansion may have resulted in changes to automatics. Reload.
# TODO(mark): Optimization: only reload if no changes were made.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Process conditions in this dict. This is done after variable expansion
# so that conditions may take advantage of expanded variables. For example,
# if the_dict contains:
# {'type': '<(library_type)',
# 'conditions': [['_type=="static_library"', { ... }]]},
# _type, as used in the condition, will only be set to the value of
# library_type if variable expansion is performed before condition
# processing. However, condition processing should occur prior to recursion
# so that variables (both automatic and "variables" dict type) may be
# adjusted by conditions sections, merged into the_dict, and have the
# intended impact on contained dicts.
#
# This arrangement means that a "conditions" section containing a "variables"
# section will only have those variables effective in subdicts, not in
# the_dict. The workaround is to put a "conditions" section within a
# "variables" section. For example:
# {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will not result in "IS_MAC" being appended to the "defines" list in the
# current scope but would result in it being appended to the "defines" list
# within "my_subdict". By comparison:
# {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will append "IS_MAC" to both "defines" lists.
# Evaluate conditions sections, allowing variable expansions within them
# as well as nested conditionals. This will process a 'conditions' or
# 'target_conditions' section, perform appropriate merging and recursive
# conditional and variable processing, and then remove the conditions section
# from the_dict if it is present.
ProcessConditionsInDict(the_dict, phase, variables, build_file)
# Conditional processing may have resulted in changes to automatics or the
# variables dict. Reload.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Recurse into child dicts, or process child lists which may result in
# further recursion into descendant dicts.
for key, value in the_dict.iteritems():
# Skip "variables" and string values, which were already processed if
# present.
if key == 'variables' or type(value) is str:
continue
if type(value) is dict:
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(value, phase, variables,
build_file, key)
elif type(value) is list:
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, phase, variables,
build_file)
elif type(value) is not int:
raise TypeError('Unknown type ' + value.__class__.__name__ + \
' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
build_file):
# Iterate using an index so that new values can be assigned into the_list.
index = 0
while index < len(the_list):
item = the_list[index]
if type(item) is dict:
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
elif type(item) is list:
ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
elif type(item) is str:
expanded = ExpandVariables(item, phase, variables, build_file)
if type(expanded) in (str, int):
the_list[index] = expanded
elif type(expanded) is list:
the_list[index:index+1] = expanded
index += len(expanded)
# index now identifies the next item to examine. Continue right now
# without falling into the index increment below.
continue
else:
raise ValueError(
'Variable expansion in this context permits strings and ' + \
'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
            str(index))
elif type(item) is not int:
raise TypeError('Unknown type ' + item.__class__.__name__ + \
                      ' at index ' + str(index))
index = index + 1
def BuildTargetsDict(data):
"""Builds a dict mapping fully-qualified target names to their target dicts.
|data| is a dict mapping loaded build files by pathname relative to the
current directory. Values in |data| are build file contents. For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
"""
targets = {}
for build_file in data['target_build_files']:
for target in data[build_file].get('targets', []):
target_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if target_name in targets:
raise GypError('Duplicate target definitions for ' + target_name)
targets[target_name] = target
return targets
def QualifyDependencies(targets):
"""Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
"""
all_dependency_sections = [dep + op
for dep in dependency_sections
for op in ('', '!', '/')]
for target, target_dict in targets.iteritems():
target_build_file = gyp.common.BuildFile(target)
toolset = target_dict['toolset']
for dependency_key in all_dependency_sections:
dependencies = target_dict.get(dependency_key, [])
for index in xrange(0, len(dependencies)):
dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
target_build_file, dependencies[index], toolset)
if not multiple_toolsets:
# Ignore toolset specification in the dependency if it is specified.
dep_toolset = toolset
dependency = gyp.common.QualifiedTarget(dep_file,
dep_target,
dep_toolset)
dependencies[index] = dependency
# Make sure anything appearing in a list other than "dependencies" also
# appears in the "dependencies" list.
if dependency_key != 'dependencies' and \
dependency not in target_dict['dependencies']:
raise GypError('Found ' + dependency + ' in ' + dependency_key +
' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
"""Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
"""
for target, target_dict in targets.iteritems():
toolset = target_dict['toolset']
target_build_file = gyp.common.BuildFile(target)
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
# Loop this way instead of "for dependency in" or "for index in xrange"
# because the dependencies list will be modified within the loop body.
index = 0
while index < len(dependencies):
(dependency_build_file, dependency_target, dependency_toolset) = \
gyp.common.ParseQualifiedTarget(dependencies[index])
if dependency_target != '*' and dependency_toolset != '*':
# Not a wildcard. Keep it moving.
index = index + 1
continue
if dependency_build_file == target_build_file:
# It's an error for a target to depend on all other targets in
# the same file, because a target cannot depend on itself.
raise GypError('Found wildcard in ' + dependency_key + ' of ' +
target + ' referring to same build file')
# Take the wildcard out and adjust the index so that the next
# dependency in the list will be processed the next time through the
# loop.
del dependencies[index]
index = index - 1
# Loop through the targets in the other build file, adding them to
# this target's list of dependencies in place of the removed
# wildcard.
dependency_target_dicts = data[dependency_build_file]['targets']
for dependency_target_dict in dependency_target_dicts:
if int(dependency_target_dict.get('suppress_wildcard', False)):
continue
dependency_target_name = dependency_target_dict['target_name']
if (dependency_target != '*' and
dependency_target != dependency_target_name):
continue
dependency_target_toolset = dependency_target_dict['toolset']
if (dependency_toolset != '*' and
dependency_toolset != dependency_target_toolset):
continue
dependency = gyp.common.QualifiedTarget(dependency_build_file,
dependency_target_name,
dependency_target_toolset)
index = index + 1
dependencies.insert(index, dependency)
index = index + 1
def Unify(l):
"""Removes duplicate elements from l, keeping the first element."""
seen = {}
return [seen.setdefault(e, e) for e in l if e not in seen]
def RemoveDuplicateDependencies(targets):
"""Makes sure every dependency appears only once in all targets's dependency
lists."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
"""Removes item from l."""
res = {}
return [res.setdefault(e, e) for e in l if e != item]
def RemoveSelfDependencies(targets):
"""Remove self dependencies from targets that have the prune_self_dependency
variable set."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if t == target_name:
if targets[t].get('variables', {}).get('prune_self_dependency', 0):
target_dict[dependency_key] = Filter(dependencies, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
"""Remove dependencies having the 'link_dependency' attribute from the 'none'
targets."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if target_dict.get('type', None) == 'none':
if targets[t].get('variables', {}).get('link_dependency', 0):
target_dict[dependency_key] = \
Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
class CircularException(GypError):
pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
def __repr__(self):
return '<DependencyGraphNode: %r>' % self.ref
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
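    # This is essentially Kahn's algorithm for topological sorting.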
flat_list = OrderedSet()
# in_degree_zeros is the list of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
# of this node, because when the graph was built, nodes with no
# dependencies were made implicit dependents of the root node.
in_degree_zeros = set(self.dependents[:])
while in_degree_zeros:
# Nodes in in_degree_zeros have no dependencies not in flat_list, so they
# can be appended to flat_list. Take these nodes out of in_degree_zeros
      # as they are processed; since in_degree_zeros is a set, the order in
      # which nodes are popped is arbitrary.
node = in_degree_zeros.pop()
flat_list.add(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
# TODO: We want to check through the
# node_dependent.dependencies list but if it's long and we
# always start at the beginning, then we get O(n^2) behaviour.
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list. There
# will be more chances to add it to flat_list when examining
# it again as a dependent of those other dependencies, provided
# that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros.add(node_dependent)
return list(flat_list)
def FindCycles(self):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
results = []
visited = set()
def Visit(node, path):
for child in node.dependents:
if child in path:
results.append([child] + path[:path.index(child) + 1])
elif not child in visited:
visited.add(child)
Visit(child, [child] + path)
visited.add(self)
Visit(self, [self])
return results
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
    if dependencies is None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
    This method is not terribly useful on its own; it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
    if dependencies is None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependency.DeepDependencies(dependencies)
dependencies.add(dependency.ref)
return dependencies
def _LinkDependenciesInternal(self, targets, include_shared_libraries,
dependencies=None, initial=True):
"""Returns an OrderedSet of dependency targets that are linked
into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
# Check for None, corresponding to the root node.
if self.ref is None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if 'target_name' not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if 'type' not in targets[self.ref]:
raise GypError("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
target_type = targets[self.ref]['type']
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
dependencies.add(self.ref)
return dependencies
# Executables, mac kernel extensions and loadable modules are already fully
# and finally linked. Nothing else can be a link dependency of them, there
# can only be dependencies in the sense that a dependent target might run
# an executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module',
'mac_kernel_extension'):
return dependencies
# Shared libraries are already fully linked. They should only be included
# in |dependencies| when adjusting static library dependencies (in order to
# link against the shared_library's import lib), but should not be included
# in |dependencies| when propagating link_settings.
# The |include_shared_libraries| flag controls which of these two cases we
# are handling.
if (not initial and target_type == 'shared_library' and
not include_shared_libraries):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.add(self.ref)
if initial or not is_linkable:
# If this is a subsequent target and it's linkable, don't look any
# further for linkable dependencies, as they'll already be linked into
      # this linkable target. Always look at dependencies of the initial
# target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency._LinkDependenciesInternal(targets,
include_shared_libraries,
dependencies, False)
return dependencies
def DependenciesForLinkSettings(self, targets):
"""
Returns a list of dependency targets whose link_settings should be merged
into this target.
"""
# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
# link_settings are propagated. So for now, we will allow it, unless the
# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
# False. Once chrome is fixed, we can remove this flag.
include_shared_libraries = \
targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
return self._LinkDependenciesInternal(targets, include_shared_libraries)
def DependenciesToLinkAgainst(self, targets):
"""
Returns a list of dependency targets that are linked into this target.
"""
return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
# Create a DependencyGraphNode for each target. Put it into a dict for easy
# access.
dependency_nodes = {}
for target, spec in targets.iteritems():
if target not in dependency_nodes:
dependency_nodes[target] = DependencyGraphNode(target)
# Set up the dependency links. Targets that have no dependencies are treated
# as dependent on root_node.
root_node = DependencyGraphNode(None)
for target, spec in targets.iteritems():
target_node = dependency_nodes[target]
target_build_file = gyp.common.BuildFile(target)
dependencies = spec.get('dependencies')
if not dependencies:
target_node.dependencies = [root_node]
root_node.dependents.append(target_node)
else:
for dependency in dependencies:
dependency_node = dependency_nodes.get(dependency)
if not dependency_node:
raise GypError("Dependency '%s' not found while "
"trying to load target %s" % (dependency, target))
target_node.dependencies.append(dependency_node)
dependency_node.dependents.append(target_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(targets):
if not root_node.dependents:
# If all targets have dependencies, add the first target as a dependent
# of root_node so that the cycle can be discovered from root_node.
target = targets.keys()[0]
target_node = dependency_nodes[target]
target_node.dependencies.append(root_node)
root_node.dependents.append(target_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in dependency graph detected:\n' + '\n'.join(cycles))
return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
for target in targets.iterkeys():
build_file = gyp.common.BuildFile(target)
if not build_file in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.iteritems():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get('dependencies', [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
except GypError, e:
gyp.common.ExceptionAppend(
e, 'while computing dependencies of .gyp file %s' % build_file)
raise
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes.get(dependency_build_file)
if not dependency_node:
raise GypError("Dependancy '%s' not found" % dependency_build_file)
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
for build_file_node in dependency_nodes.itervalues():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
if not root_node.dependents:
# If all files have dependencies, add the first file as a dependent
# of root_node so that the cycle can be discovered from root_node.
file_node = dependency_nodes.values()[0]
file_node.dependencies.append(root_node)
root_node.dependents.append(file_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
# key should be one of all_dependent_settings, direct_dependent_settings,
# or link_settings.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
if key == 'all_dependent_settings':
dependencies = dependency_nodes[target].DeepDependencies()
elif key == 'direct_dependent_settings':
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
elif key == 'link_settings':
dependencies = \
dependency_nodes[target].DependenciesForLinkSettings(targets)
else:
raise GypError("DoDependentSettings doesn't know how to determine "
'dependencies for ' + key)
for dependency in dependencies:
dependency_dict = targets[dependency]
if not key in dependency_dict:
continue
dependency_build_file = gyp.common.BuildFile(dependency)
MergeDicts(target_dict, dependency_dict[key],
build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
sort_dependencies):
# Recompute target "dependencies" properties. For each static library
# target, remove "dependencies" entries referring to other static libraries,
# unless the dependency has the "hard_dependency" attribute set. For each
# linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
for target in flat_list:
target_dict = targets[target]
target_type = target_dict['type']
if target_type == 'static_library':
if not 'dependencies' in target_dict:
continue
target_dict['dependencies_original'] = target_dict.get(
'dependencies', [])[:]
# A static library should not depend on another static library unless
# the dependency relationship is "hard," which should only be done when
# a dependent relies on some side effect other than just the build
# product, like a rule or action output. Further, if a target has a
# non-hard dependency, but that dependency exports a hard dependency,
# the non-hard dependency can safely be removed, but the exported hard
# dependency must be added to the target to keep the same dependency
# ordering.
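      # For example (illustrative), if static library A lists static library
      # B in 'dependencies' and B is not a hard dependency, B is removed from
      # A's list here; final linkable targets still link B because it appears
      # in their DependenciesToLinkAgainst() results below.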
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Remove every non-hard static library dependency and remove every
# non-static library dependency that isn't a direct dependency.
if (dependency_dict['type'] == 'static_library' and \
not dependency_dict.get('hard_dependency', False)) or \
(dependency_dict['type'] != 'static_library' and \
not dependency in target_dict['dependencies']):
# Take the dependency out of the list, and don't increment index
# because the next dependency to analyze will shift into the index
# formerly occupied by the one being removed.
del dependencies[index]
else:
index = index + 1
# Update the dependencies. If the dependencies list is empty, it's not
# needed, so unhook it.
if len(dependencies) > 0:
target_dict['dependencies'] = dependencies
else:
del target_dict['dependencies']
elif target_type in linkable_types:
# Get a list of dependency targets that should be linked into this
# target. Add them to the dependencies list if they're not already
# present.
link_dependencies = \
dependency_nodes[target].DependenciesToLinkAgainst(targets)
for dependency in link_dependencies:
if dependency == target:
continue
if not 'dependencies' in target_dict:
target_dict['dependencies'] = []
if not dependency in target_dict['dependencies']:
target_dict['dependencies'].append(dependency)
# Sort the dependencies list in the order from dependents to dependencies.
# e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
# Note: flat_list is already sorted in the order from dependencies to
# dependents.
if sort_dependencies and 'dependencies' in target_dict:
target_dict['dependencies'] = [dep for dep in reversed(flat_list)
if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
# If item is a relative path, it's relative to the build file dict that it's
# coming from. Fix it up to make it relative to the build file dict that
# it's going into.
# Exception: any |item| that begins with these special characters is
# returned without modification.
# / Used when a path is already absolute (shortcut optimization;
# such paths would be returned as absolute anyway)
# $ Used for build environment variables
# - Used for some build environment flags (such as -lapr-1 in a
# "libraries" section)
# < Used for our own variable and command expansions (see ExpandVariables)
# > Used for our own variable and command expansions (see ExpandVariables)
# ^ Used for our own variable and command expansions (see ExpandVariables)
#
# "/' Used when a value is quoted. If these are present, then we
# check the second character instead.
#
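  # For example (illustrative), an item 'sub/file.cc' coming from 'a/a.gyp'
  # and merged into 'c/c.gyp' becomes '../a/sub/file.cc'.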
if to_file == fro_file or exception_re.match(item):
return item
else:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
ret = os.path.normpath(os.path.join(
gyp.common.RelativePath(os.path.dirname(fro_file),
os.path.dirname(to_file)),
item)).replace('\\', '/')
if item[-1] == '/':
ret += '/'
return ret
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  # The Python documentation recommends that objects which do not support
  # hashing set __hash__ to None. Python library objects follow this rule.
is_hashable = lambda val: val.__hash__
# If x is hashable, returns whether x is in s. Else returns whether x is in l.
def is_in_set_or_list(x, s, l):
if is_hashable(x):
return x in s
return x in l
prepend_index = 0
# Make membership testing of hashables in |to| (in particular, strings)
# faster.
hashable_to_set = set(x for x in to if is_hashable(x))
for item in fro:
singleton = False
if type(item) in (str, int):
# The cheap and easy case.
if is_paths:
to_item = MakePathRelative(to_file, fro_file, item)
else:
to_item = item
if not (type(item) is str and item.startswith('-')):
# Any string that doesn't begin with a "-" is a singleton - it can
# only appear once in a list, to be enforced by the list merge append
# or prepend.
singleton = True
elif type(item) is dict:
# Make a copy of the dictionary, continuing to look for paths to fix.
# The other intelligent aspects of merge processing won't apply because
# item is being merged into an empty dict.
to_item = {}
MergeDicts(to_item, item, to_file, fro_file)
elif type(item) is list:
# Recurse, making a copy of the list. If the list contains any
# descendant dicts, path fixing will occur. Note that here, custom
# values for is_paths and append are dropped; those are only to be
# applied to |to| and |fro|, not sublists of |fro|. append shouldn't
# matter anyway because the new |to_item| list is empty.
to_item = []
MergeLists(to_item, item, to_file, fro_file)
else:
raise TypeError(
'Attempt to merge list item of unsupported type ' + \
item.__class__.__name__)
if append:
# If appending a singleton that's already in the list, don't append.
# This ensures that the earliest occurrence of the item will stay put.
if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
to.append(to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
else:
# If prepending a singleton that's already in the list, remove the
# existing instance and proceed with the prepend. This ensures that the
# item appears at the earliest possible position in the list.
while singleton and to_item in to:
to.remove(to_item)
# Don't just insert everything at index 0. That would prepend the new
# items to the list in reverse order, which would be an unwelcome
# surprise.
to.insert(prepend_index, to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
# I wanted to name the parameter "from" but it's a Python keyword...
for k, v in fro.iteritems():
# It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
# copy semantics. Something else may want to merge from the |fro| dict
# later, and having the same dict ref pointed to twice in the tree isn't
# what anyone wants considering that the dicts may subsequently be
# modified.
if k in to:
bad_merge = False
if type(v) in (str, int):
if type(to[k]) not in (str, int):
bad_merge = True
elif type(v) is not type(to[k]):
bad_merge = True
if bad_merge:
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[k].__class__.__name__ + \
' for key ' + k)
if type(v) in (str, int):
# Overwrite the existing value, if any. Cheap and easy.
is_path = IsPathSection(k)
if is_path:
to[k] = MakePathRelative(to_file, fro_file, v)
else:
to[k] = v
elif type(v) is dict:
# Recurse, guaranteeing copies will be made of objects that require it.
if not k in to:
to[k] = {}
MergeDicts(to[k], v, to_file, fro_file)
elif type(v) is list:
# Lists in dicts can be merged with different policies, depending on
# how the key in the "from" dict (k, the from-key) is written.
#
# If the from-key has ...the to-list will have this action
# this character appended:... applied when receiving the from-list:
# = replace
# + prepend
# ? set, only if to-list does not yet exist
# (none) append
#
# This logic is list-specific, but since it relies on the associated
# dict key, it's checked in this dict-oriented function.
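      # For example, merging {'defines+': ['A']} into {'defines': ['B']}
      # yields {'defines': ['A', 'B']} (prepend), while {'defines=': ['A']}
      # would yield {'defines': ['A']} (replace).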
ext = k[-1]
append = True
if ext == '=':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '?']
to[list_base] = []
elif ext == '+':
list_base = k[:-1]
lists_incompatible = [list_base + '=', list_base + '?']
append = False
elif ext == '?':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '=', list_base + '+']
else:
list_base = k
lists_incompatible = [list_base + '=', list_base + '?']
# Some combinations of merge policies appearing together are meaningless.
# It's stupid to replace and append simultaneously, for example. Append
# and prepend are the only policies that can coexist.
for list_incompatible in lists_incompatible:
if list_incompatible in fro:
raise GypError('Incompatible list policies ' + k + ' and ' +
list_incompatible)
if list_base in to:
if ext == '?':
# If the key ends in "?", the list will only be merged if it doesn't
# already exist.
continue
elif type(to[list_base]) is not list:
# This may not have been checked above if merging in a list with an
# extension character.
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[list_base].__class__.__name__ + \
            ' for key ' + list_base + ' (' + k + ')')
else:
to[list_base] = []
# Call MergeLists, which will make copies of objects that require it.
# MergeLists can recurse back into MergeDicts, although this will be
# to make copies of dicts (with paths fixed), there will be no
# subsequent dict "merging" once entering a list because lists are
# always replaced, appended to, or prepended to.
is_paths = IsPathSection(list_base)
MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
else:
raise TypeError(
'Attempt to merge dict value of unsupported type ' + \
v.__class__.__name__ + ' for key ' + k)
def MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, visited):
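  # For example (illustrative), given configurations
  #   {'Common': {'abstract': 1, 'defines': ['COMMON']},
  #    'Debug': {'inherit_from': ['Common'], 'defines': ['DEBUG']}}
  # merging 'Debug' pulls in 'Common' first, so the resulting 'defines' list
  # is ['COMMON', 'DEBUG'], and the inherited 'abstract' key is dropped.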
  # Skip if previously visited.
if configuration in visited:
return
# Look at this configuration.
configuration_dict = target_dict['configurations'][configuration]
# Merge in parents.
for parent in configuration_dict.get('inherit_from', []):
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, parent, visited + [configuration])
# Merge it into the new config.
MergeDicts(new_configuration_dict, configuration_dict,
build_file, build_file)
# Drop abstract.
if 'abstract' in new_configuration_dict:
del new_configuration_dict['abstract']
def SetUpConfigurations(target, target_dict):
# key_suffixes is a list of key suffixes that might appear on key names.
# These suffixes are handled in conditional evaluations (for =, +, and ?)
# and rules/exclude processing (for ! and /). Keys with these suffixes
# should be treated the same as keys without.
key_suffixes = ['=', '+', '?', '!', '/']
build_file = gyp.common.BuildFile(target)
# Provide a single configuration by default if none exists.
# TODO(mark): Signal an error if default_configurations exists but
# configurations does not.
if not 'configurations' in target_dict:
target_dict['configurations'] = {'Default': {}}
if not 'default_configuration' in target_dict:
concrete = [i for (i, config) in target_dict['configurations'].iteritems()
if not config.get('abstract')]
target_dict['default_configuration'] = sorted(concrete)[0]
merged_configurations = {}
configs = target_dict['configurations']
for (configuration, old_configuration_dict) in configs.iteritems():
# Skip abstract configurations (saves work only).
if old_configuration_dict.get('abstract'):
continue
# Configurations inherit (most) settings from the enclosing target scope.
# Get the inheritance relationship right by making a copy of the target
# dict.
new_configuration_dict = {}
for (key, target_val) in target_dict.iteritems():
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
# Merge in configuration (with all its parents first).
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, [])
merged_configurations[configuration] = new_configuration_dict
# Put the new configurations back into the target dict as a configuration.
for configuration in merged_configurations.keys():
target_dict['configurations'][configuration] = (
merged_configurations[configuration])
# Now drop all the abstract ones.
for configuration in target_dict['configurations'].keys():
old_configuration_dict = target_dict['configurations'][configuration]
if old_configuration_dict.get('abstract'):
del target_dict['configurations'][configuration]
# Now that all of the target's configurations have been built, go through
# the target dict's keys and remove everything that's been moved into a
# "configurations" section.
delete_keys = []
for key in target_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del target_dict[key]
# Check the configurations to see if they contain invalid keys.
for configuration in target_dict['configurations'].keys():
configuration_dict = target_dict['configurations'][configuration]
for key in configuration_dict.keys():
if key in invalid_configuration_keys:
raise GypError('%s not allowed in the %s configuration, found in '
'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
  main list, which in this example would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
# needed first, looking for the lists that they operate on, and assemble
  # them into |lists|. This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.iteritems():
operation = key[-1]
if operation != '!' and operation != '/':
continue
if type(value) is not list:
raise ValueError(name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__)
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if type(the_dict[list_key]) is not list:
value = the_dict[list_key]
raise ValueError(name + ' key ' + list_key + \
' must be list, not ' + \
value.__class__.__name__ + ' when applying ' + \
{'!': 'exclusion', '/': 'regex'}[operation])
if not list_key in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
list_actions = list((-1,) * len(the_list))
exclude_key = list_key + '!'
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index in xrange(0, len(the_list)):
if exclude_item == the_list[index]:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + '/'
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
if action == 'exclude':
# This item matches an exclude regex, so set its value to 0 (exclude).
action_value = 0
elif action == 'include':
# This item matches an include regex, so set its value to 1 (include).
action_value = 1
else:
# This is an action that doesn't make any sense.
raise ValueError('Unrecognized action ' + action + ' in ' + name + \
' key ' + regex_key)
for index in xrange(0, len(the_list)):
list_item = the_list[index]
if list_actions[index] == action_value:
# Even if the regex matches, nothing will change so continue (regex
# searches are expensive).
continue
if pattern_re.search(list_item):
# Regular expression match.
list_actions[index] = action_value
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + '_excluded'
if excluded_key in the_dict:
raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior to applying '
                     'exclusion/regex filters for ' + list_key)
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in xrange(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.iteritems():
if type(value) is dict:
ProcessListFiltersInDict(key, value)
elif type(value) is list:
ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
for item in the_list:
if type(item) is dict:
ProcessListFiltersInDict(name, item)
elif type(item) is list:
ProcessListFiltersInList(name, item)
def ValidateTargetType(target, target_dict):
"""Ensures the 'type' field on the target is one of the known types.
Arguments:
target: string, name of target.
target_dict: dict, target spec.
Raises an exception on error.
"""
VALID_TARGET_TYPES = ('executable', 'loadable_module',
'static_library', 'shared_library',
'mac_kernel_extension', 'none')
target_type = target_dict.get('type', None)
if target_type not in VALID_TARGET_TYPES:
raise GypError("Target %s has an invalid target type '%s'. "
"Must be one of %s." %
(target, target_type, '/'.join(VALID_TARGET_TYPES)))
if (target_dict.get('standalone_static_library', 0) and
not target_type == 'static_library'):
raise GypError('Target %s has type %s but standalone_static_library flag is'
' only valid for static_library type.' % (target,
target_type))
def ValidateSourcesInTarget(target, target_dict, build_file,
duplicate_basename_check):
if not duplicate_basename_check:
return
if target_dict.get('type', None) != 'static_library':
return
sources = target_dict.get('sources', [])
basenames = {}
for source in sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
target + error + 'libtool on Mac cannot handle that. Use '
'--no-duplicate-basename-check to disable this validation.')
raise GypError('Duplicate basenames in sources section, see list above')
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
"""Ensures that the rules sections in target_dict are valid and consistent,
and determines which sources they apply to.
Arguments:
target: string, name of target.
target_dict: dict, target spec containing "rules" and "sources" lists.
extra_sources_for_rules: a list of keys to scan for rule matches in
addition to 'sources'.
"""
# Dicts to map between values found in rules' 'rule_name' and 'extension'
# keys and the rule dicts themselves.
rule_names = {}
rule_extensions = {}
rules = target_dict.get('rules', [])
for rule in rules:
# Make sure that there's no conflict among rule names and extensions.
rule_name = rule['rule_name']
if rule_name in rule_names:
raise GypError('rule %s exists in duplicate, target %s' %
(rule_name, target))
rule_names[rule_name] = rule
rule_extension = rule['extension']
if rule_extension.startswith('.'):
rule_extension = rule_extension[1:]
if rule_extension in rule_extensions:
raise GypError(('extension %s associated with multiple rules, ' +
'target %s rules %s and %s') %
(rule_extension, target,
rule_extensions[rule_extension]['rule_name'],
rule_name))
rule_extensions[rule_extension] = rule
# Make sure rule_sources isn't already there. It's going to be
# created below if needed.
if 'rule_sources' in rule:
raise GypError(
'rule_sources must not exist in input, target %s rule %s' %
(target, rule_name))
rule_sources = []
source_keys = ['sources']
source_keys.extend(extra_sources_for_rules)
for source_key in source_keys:
for source in target_dict.get(source_key, []):
(source_root, source_extension) = os.path.splitext(source)
if source_extension.startswith('.'):
source_extension = source_extension[1:]
if source_extension == rule_extension:
rule_sources.append(source)
if len(rule_sources) > 0:
rule['rule_sources'] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
target_name = target_dict.get('target_name')
run_as = target_dict.get('run_as')
if not run_as:
return
if type(run_as) is not dict:
raise GypError("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
action = run_as.get('action')
if not action:
raise GypError("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
if type(action) is not list:
raise GypError("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
if working_directory and type(working_directory) is not str:
raise GypError("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
if environment and type(environment) is not dict:
raise GypError("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
'''Validates the inputs to the actions in a target.'''
target_name = target_dict.get('target_name')
actions = target_dict.get('actions', [])
for action in actions:
action_name = action.get('action_name')
if not action_name:
raise GypError("Anonymous action in target %s. "
"An action must have an 'action_name' field." %
target_name)
inputs = action.get('inputs', None)
if inputs is None:
raise GypError('Action in target %s has no inputs.' % target_name)
action_command = action.get('action')
if action_command and not action_command[0]:
raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
"""Given dict the_dict, recursively converts all integers into strings.
"""
# Use items instead of iteritems because there's no need to try to look at
# reinserted keys and their associated values.
for k, v in the_dict.items():
if type(v) is int:
v = str(v)
the_dict[k] = v
elif type(v) is dict:
TurnIntIntoStrInDict(v)
elif type(v) is list:
TurnIntIntoStrInList(v)
if type(k) is int:
del the_dict[k]
the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
"""Given list the_list, recursively converts all integers into strings.
"""
for index in xrange(0, len(the_list)):
item = the_list[index]
if type(item) is int:
the_list[index] = str(item)
elif type(item) is dict:
TurnIntIntoStrInDict(item)
elif type(item) is list:
TurnIntIntoStrInList(item)
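# Hedged illustration (added; not in the original file): a minimal sketch of
# the in-place conversion performed by the two helpers above. The helper is
# never called.
def _ExampleTurnIntIntoStr():
  """Sketch only: integer keys and values become strings, recursively."""
  d = {1: 2, 'deps': [3, {'flag': 4}]}
  TurnIntIntoStrInDict(d)
  assert d == {'1': '2', 'deps': ['3', {'flag': '4'}]}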
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
data):
"""Return only the targets that are deep dependencies of |root_targets|."""
qualified_root_targets = []
for target in root_targets:
target = target.strip()
qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
if not qualified_targets:
raise GypError("Could not find target %s" % target)
qualified_root_targets.extend(qualified_targets)
wanted_targets = {}
for target in qualified_root_targets:
wanted_targets[target] = targets[target]
for dependency in dependency_nodes[target].DeepDependencies():
wanted_targets[dependency] = targets[dependency]
wanted_flat_list = [t for t in flat_list if t in wanted_targets]
# Prune unwanted targets from each build_file's data dict.
for build_file in data['target_build_files']:
    if 'targets' not in data[build_file]:
continue
new_targets = []
for target in data[build_file]['targets']:
qualified_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if qualified_name in wanted_targets:
new_targets.append(target)
data[build_file]['targets'] = new_targets
return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
"""Verify that no two targets in the same directory share the same name.
Arguments:
targets: A list of targets in the form 'path/to/file.gyp:target_name'.
"""
# Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
used = {}
for target in targets:
# Separate out 'path/to/file.gyp, 'target_name' from
# 'path/to/file.gyp:target_name'.
path, name = target.rsplit(':', 1)
# Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
subdir, gyp = os.path.split(path)
# Use '.' for the current directory '', so that the error messages make
# more sense.
if not subdir:
subdir = '.'
# Prepare a key like 'path/to:target_name'.
key = subdir + ':' + name
if key in used:
# Complain if this target is already used.
raise GypError('Duplicate target name "%s" in directory "%s" used both '
'in "%s" and "%s".' % (name, subdir, gyp, used[key]))
used[key] = gyp
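# Hedged illustration (added; not in the original file): two hypothetical
# targets that collide under the check above because they share a directory
# and a target name, even though they live in different .gyp files.
def _ExampleCollidingTargets():
  """Sketch only: both entries reduce to the key 'chrome:base'."""
  try:
    VerifyNoCollidingTargets(['chrome/a.gyp:base', 'chrome/b.gyp:base'])
  except GypError:
    pass  # expected: duplicate target name "base" in directory "chrome"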
def SetGeneratorGlobals(generator_input_info):
# Set up path_sections and non_configuration_keys with the default data plus
# the generator-specific data.
global path_sections
path_sections = set(base_path_sections)
path_sections.update(generator_input_info['path_sections'])
global non_configuration_keys
non_configuration_keys = base_non_configuration_keys[:]
non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
global multiple_toolsets
multiple_toolsets = generator_input_info[
'generator_supports_multiple_toolsets']
global generator_filelist_paths
generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
circular_check, duplicate_basename_check, parallel, root_targets):
SetGeneratorGlobals(generator_input_info)
# A generator can have other lists (in addition to sources) be processed
# for rules.
extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
# Load build files. This loads every target-containing build file into
# the |data| dictionary such that the keys to |data| are build file names,
# and the values are the entire build file contents after "early" or "pre"
# processing has been done and includes have been resolved.
# NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
# well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
# track of the keys corresponding to "target" files.
data = {'target_build_files': set()}
# Normalize paths everywhere. This is important because paths will be
# used as keys to the data dict and for references between input files.
build_files = set(map(os.path.normpath, build_files))
if parallel:
LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info)
else:
aux_data = {}
for build_file in build_files:
try:
LoadTargetBuildFile(build_file, data, aux_data,
variables, includes, depth, check, True)
except Exception, e:
gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
raise
# Build a dict to access each target's subdict by qualified name.
targets = BuildTargetsDict(data)
# Fully qualify all dependency links.
QualifyDependencies(targets)
# Remove self-dependencies from targets that have 'prune_self_dependencies'
# set to 1.
RemoveSelfDependencies(targets)
# Expand dependencies specified as build_file:*.
ExpandWildcardDependencies(targets, data)
# Remove all dependencies marked as 'link_dependency' from the targets of
# type 'none'.
RemoveLinkDependenciesFromNoneTargets(targets)
# Apply exclude (!) and regex (/) list filters only for dependency_sections.
for target_name, target_dict in targets.iteritems():
tmp_dict = {}
for key_base in dependency_sections:
for op in ('', '!', '/'):
key = key_base + op
if key in target_dict:
tmp_dict[key] = target_dict[key]
del target_dict[key]
ProcessListFiltersInDict(target_name, tmp_dict)
# Write the results back to |target_dict|.
for key in tmp_dict:
target_dict[key] = tmp_dict[key]
# Make sure every dependency appears at most once.
RemoveDuplicateDependencies(targets)
if circular_check:
# Make sure that any targets in a.gyp don't contain dependencies in other
# .gyp files that further depend on a.gyp.
VerifyNoGYPFileCircularDependencies(targets)
[dependency_nodes, flat_list] = BuildDependencyList(targets)
if root_targets:
# Remove, from |targets| and |flat_list|, the targets that are not deep
# dependencies of the targets specified in |root_targets|.
targets, flat_list = PruneUnwantedTargets(
targets, flat_list, dependency_nodes, root_targets, data)
# Check that no two targets in the same directory have the same name.
VerifyNoCollidingTargets(flat_list)
# Handle dependent settings of various types.
for settings_type in ['all_dependent_settings',
'direct_dependent_settings',
'link_settings']:
DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
# Take out the dependent settings now that they've been published to all
# of the targets that require them.
for target in flat_list:
if settings_type in targets[target]:
del targets[target][settings_type]
# Make sure static libraries don't declare dependencies on other static
# libraries, but that linkables depend on all unlinked static libraries
# that they need so that their link steps will be correct.
gii = generator_input_info
if gii['generator_wants_static_library_dependencies_adjusted']:
AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
gii['generator_wants_sorted_dependencies'])
# Apply "post"/"late"/"target" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATE, variables, build_file)
# Move everything that can go into a "configurations" section into one.
for target in flat_list:
target_dict = targets[target]
SetUpConfigurations(target, target_dict)
# Apply exclude (!) and regex (/) list filters.
for target in flat_list:
target_dict = targets[target]
ProcessListFiltersInDict(target, target_dict)
# Apply "latelate" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATELATE, variables, build_file)
# Make sure that the rules make sense, and build up rule_sources lists as
# needed. Not all generators will need to use the rule_sources lists, but
# some may, and it seems best to build the list in a common spot.
# Also validate actions and run_as elements in targets.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ValidateTargetType(target, target_dict)
ValidateSourcesInTarget(target, target_dict, build_file,
duplicate_basename_check)
ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
ValidateRunAsInTarget(target, target_dict, build_file)
ValidateActionsInTarget(target, target_dict, build_file)
# Generators might not expect ints. Turn them into strs.
TurnIntIntoStrInDict(data)
# TODO(mark): Return |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
return [flat_list, targets, data]
|
oshanz/Learn-Django | refs/heads/master | tests/generic_views/test_detail.py | 53 | from __future__ import absolute_import
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.views.generic.base import View
from .models import Artist, Author, Page
class DetailViewTest(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_simple_object(self):
res = self.client.get('/detail/obj/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], {'foo': 'bar'})
self.assertIsInstance(res.context['view'], View)
self.assertTemplateUsed(res, 'generic_views/detail.html')
def test_detail_by_pk(self):
res = self.client.get('/detail/author/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=1))
self.assertEqual(res.context['author'], Author.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_custom_pk(self):
res = self.client.get('/detail/author/bycustompk/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=1))
self.assertEqual(res.context['author'], Author.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_slug(self):
res = self.client.get('/detail/author/byslug/scott-rosenberg/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(slug='scott-rosenberg'))
self.assertEqual(res.context['author'], Author.objects.get(slug='scott-rosenberg'))
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_custom_slug(self):
res = self.client.get('/detail/author/bycustomslug/scott-rosenberg/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(slug='scott-rosenberg'))
self.assertEqual(res.context['author'], Author.objects.get(slug='scott-rosenberg'))
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_verbose_name(self):
res = self.client.get('/detail/artist/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Artist.objects.get(pk=1))
self.assertEqual(res.context['artist'], Artist.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/artist_detail.html')
def test_template_name(self):
res = self.client.get('/detail/author/1/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=1))
self.assertEqual(res.context['author'], Author.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/about.html')
def test_template_name_suffix(self):
res = self.client.get('/detail/author/1/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=1))
self.assertEqual(res.context['author'], Author.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/author_view.html')
def test_template_name_field(self):
res = self.client.get('/detail/page/1/field/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Page.objects.get(pk=1))
self.assertEqual(res.context['page'], Page.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/page_template.html')
def test_context_object_name(self):
res = self.client.get('/detail/author/1/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=1))
self.assertEqual(res.context['thingy'], Author.objects.get(pk=1))
self.assertFalse('author' in res.context)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_duplicated_context_object_name(self):
res = self.client.get('/detail/author/1/dupe_context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=1))
self.assertFalse('author' in res.context)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_invalid_url(self):
self.assertRaises(AttributeError, self.client.get, '/detail/author/invalid/url/')
def test_invalid_queryset(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/detail/author/invalid/qs/')
def test_non_model_object_with_meta(self):
res = self.client.get('/detail/nonmodel/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'].id, "non_model_1")
|
viccuad/gomoduino | refs/heads/master | docs/d-bus/python-dbus-doc_examples/example-signal-recipient.py | 1 | #!/usr/bin/env python
usage = """Usage:
python example-signal-emitter.py &
python example-signal-recipient.py
python example-signal-recipient.py --exit-service
"""
# Copyright (C) 2004-2006 Red Hat Inc. <http://www.redhat.com/>
# Copyright (C) 2005-2007 Collabora Ltd. <http://www.collabora.co.uk/>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import traceback
from gi.repository import GLib
import dbus
import dbus.mainloop.glib
def handle_reply(msg):
print msg
def handle_error(e):
print str(e)
def emit_signal():
#call the emitHelloSignal method
object.emitHelloSignal(dbus_interface="com.example.TestService")
#reply_handler = handle_reply, error_handler = handle_error)
# exit after waiting a short time for the signal
GLib.timeout_add(2000, loop.quit)
if sys.argv[1:] == ['--exit-service']:
object.Exit(dbus_interface='com.example.TestService')
return False
def hello_signal_handler(hello_string):
print ("Received signal (by connecting using remote object) and it says: "
+ hello_string)
def catchall_signal_handler(*args, **kwargs):
print ("Caught signal (in catchall handler) "
+ kwargs['dbus_interface'] + "." + kwargs['member'])
for arg in args:
print " " + str(arg)
def catchall_hello_signals_handler(hello_string):
print "Received a hello signal and it says " + hello_string
def catchall_testservice_interface_handler(hello_string, dbus_message):
print "com.example.TestService interface says " + hello_string + " when it sent signal " + dbus_message.get_member()
if __name__ == '__main__':
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SessionBus()
try:
object = bus.get_object("com.example.TestService","/com/example/TestService/object")
object.connect_to_signal("HelloSignal", hello_signal_handler, dbus_interface="com.example.TestService", arg0="Hello")
except dbus.DBusException:
traceback.print_exc()
print usage
sys.exit(1)
#lets make a catchall
bus.add_signal_receiver(catchall_signal_handler, interface_keyword='dbus_interface', member_keyword='member')
bus.add_signal_receiver(catchall_hello_signals_handler, dbus_interface = "com.example.TestService", signal_name = "HelloSignal")
bus.add_signal_receiver(catchall_testservice_interface_handler, dbus_interface = "com.example.TestService", message_keyword='dbus_message')
# Tell the remote object to emit the signal after a short delay
GLib.timeout_add(2000, emit_signal)
loop = GLib.MainLoop()
loop.run()
|
promulo/machinery | refs/heads/master | inspect_helpers/yum_repositories.py | 2 | #!/usr/bin/python
# Copyright (c) 2013-2016 SUSE LLC
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, contact SUSE LLC.
#
# To contact SUSE about this file by physical or electronic mail,
# you may find current contact information at www.suse.com
import yum
try:
    import json
except ImportError:
    import simplejson as json
yb = yum.YumBase()
repositories = []
for repo in yb.repos.sort():
repo_dict = dict()
repo_dict["alias"] = repo.id
repo_dict["name"] = repo.name
repo_dict["type"] = "rpm-md"
repo_dict["url"] = repo.baseurl or []
repo_dict["mirrorlist"] = repo.mirrorlist or ""
repo_dict["enabled"] = repo.enabled
repo_dict["gpgcheck"] = repo.gpgcheck
repo_dict["gpgkey"] = repo.gpgkey
repositories.append(repo_dict)
print(json.dumps(repositories))
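# Hedged illustration (added; not part of the original helper): the printed
# document is a JSON list with one dict per repository. A hypothetical
# example of the output:
#
# [{"alias": "base", "name": "CentOS Base", "type": "rpm-md",
#   "url": ["http://mirror.centos.org/centos/7/os/x86_64/"],
#   "mirrorlist": "", "enabled": true, "gpgcheck": true,
#   "gpgkey": ["file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7"]}]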
|
bburgdave/fabric | refs/heads/master | bddtests/steps/chaincode_rbac_impl.py | 43 | import os
import re
import time
import copy
import base64
from datetime import datetime, timedelta
import sys, requests, json
import bdd_test_util
import bdd_grpc_util
from grpc.beta import implementations
import fabric_pb2
import chaincode_pb2
import devops_pb2
LAST_REQUESTED_TCERT="lastRequestedTCert"
@when(u'user "{enrollId}" requests a new application TCert')
def step_impl(context, enrollId):
assert 'users' in context, "users not found in context. Did you register a user?"
(channel, userRegistration) = bdd_grpc_util.getGRPCChannelAndUser(context, enrollId)
stub = devops_pb2.beta_create_Devops_stub(channel)
secret = bdd_grpc_util.getSecretForUserRegistration(userRegistration)
response = stub.EXP_GetApplicationTCert(secret,2)
assert response.status == fabric_pb2.Response.SUCCESS, 'Failure getting TCert from {0}, for user "{1}": {2}'.format(userRegistration.composeService,enrollId, response.msg)
tcert = response.msg
userRegistration.lastResult = tcert
@when(u'user "{enrollId}" stores their last result as "{tagName}"')
def step_impl(context, enrollId, tagName):
assert 'users' in context, "users not found in context. Did you register a user?"
# Retrieve the userRegistration from the context
userRegistration = bdd_test_util.getUserRegistration(context, enrollId)
userRegistration.tags[tagName] = userRegistration.lastResult
@when(u'user "{enrollId}" sets metadata to their stored value "{tagName}"')
def step_impl(context, enrollId, tagName):
assert 'users' in context, "users not found in context. Did you register a user?"
# Retrieve the userRegistration from the context
userRegistration = bdd_test_util.getUserRegistration(context, enrollId)
assert tagName in userRegistration.tags, 'Tag "{0}" not found in user "{1}" tags'.format(tagName, enrollId)
context.metadata = userRegistration.tags[tagName]
@when(u'user "{enrollId}" deploys chaincode "{chaincodePath}" aliased as "{ccAlias}" with ctor "{ctor}" and args')
def step_impl(context, enrollId, chaincodePath, ccAlias, ctor):
bdd_grpc_util.deployChaincode(context, enrollId, chaincodePath, ccAlias, ctor)
@when(u'user "{enrollId}" gives stored value "{tagName}" to "{recipientEnrollId}"')
def step_impl(context, enrollId, tagName, recipientEnrollId):
assert 'users' in context, "users not found in context. Did you register a user?"
# Retrieve the userRegistration from the context
userRegistration = bdd_test_util.getUserRegistration(context, enrollId)
recipientUserRegistration = bdd_test_util.getUserRegistration(context, recipientEnrollId)
# Copy value from target to recipient
recipientUserRegistration.tags[tagName] = userRegistration.tags[tagName]
@when(u'"{enrollId}" uses application TCert "{assignerAppTCert}" to assign role "{role}" to application TCert "{assigneeAppTCert}"')
def step_impl(context, enrollId, assignerAppTCert, role, assigneeAppTCert):
assert 'users' in context, "users not found in context. Did you register a user?"
assert 'compose_containers' in context, "compose_containers not found in context"
(channel, userRegistration) = bdd_grpc_util.getGRPCChannelAndUser(context, enrollId)
stub = devops_pb2.beta_create_Devops_stub(channel)
# First get binding with EXP_PrepareForTx
secret = bdd_grpc_util.getSecretForUserRegistration(userRegistration)
response = stub.EXP_PrepareForTx(secret,2)
assert response.status == fabric_pb2.Response.SUCCESS, 'Failure getting Binding from {0}, for user "{1}": {2}'.format(userRegistration.composeService,enrollId, response.msg)
binding = response.msg
# Now produce the sigma EXP_ProduceSigma
chaincodeInput = chaincode_pb2.ChaincodeInput(function = "addRole", args = (base64.b64encode(userRegistration.tags[assigneeAppTCert]), role) )
chaincodeInputRaw = chaincodeInput.SerializeToString()
appTCert = userRegistration.tags[assignerAppTCert]
sigmaInput = devops_pb2.SigmaInput(secret = secret, appTCert = appTCert, data = chaincodeInputRaw + binding)
response = stub.EXP_ProduceSigma(sigmaInput,2)
    assert response.status == fabric_pb2.Response.SUCCESS, 'Failure producing sigma from {0}, for user "{1}": {2}'.format(userRegistration.composeService, enrollId, response.msg)
sigmaOutputBytes = response.msg
# Parse the msg bytes as a SigmaOutput message
sigmaOutput = devops_pb2.SigmaOutput()
sigmaOutput.ParseFromString(sigmaOutputBytes)
print('Length of sigma = {0}'.format(len(sigmaOutput.sigma)))
# Now execute the transaction with the saved binding, EXP_ExecuteWithBinding
assert "grpcChaincodeSpec" in context, "grpcChaincodeSpec NOT found in context"
newChaincodeSpec = chaincode_pb2.ChaincodeSpec()
newChaincodeSpec.CopyFrom(context.grpcChaincodeSpec)
newChaincodeSpec.metadata = sigmaOutput.asn1Encoding
newChaincodeSpec.ctorMsg.CopyFrom(chaincodeInput)
ccInvocationSpec = chaincode_pb2.ChaincodeInvocationSpec(chaincodeSpec = newChaincodeSpec)
executeWithBinding = devops_pb2.ExecuteWithBinding(chaincodeInvocationSpec = ccInvocationSpec, binding = binding)
response = stub.EXP_ExecuteWithBinding(executeWithBinding,60)
assert response.status == fabric_pb2.Response.SUCCESS, 'Failure executeWithBinding from {0}, for user "{1}": {2}'.format(userRegistration.composeService,enrollId, response.msg)
context.response = response
context.transactionID = response.msg
@then(u'"{enrollId}"\'s last transaction should have failed with message that contains "{msg}"')
def step_impl(context, enrollId, msg):
assert 'users' in context, "users not found in context. Did you register a user?"
assert 'compose_containers' in context, "compose_containers not found in context"
txResult = bdd_grpc_util.getTxResult(context, enrollId)
assert txResult.errorCode > 0, "Expected failure (errorCode > 0), instead found errorCode={0}".format(txResult.errorCode)
    assert msg in txResult.error, "Expected error to contain '{0}', instead found '{1}'".format(msg, txResult.error)
@then(u'"{enrollId}"\'s last transaction should have succeeded')
def step_impl(context, enrollId):
txResult = bdd_grpc_util.getTxResult(context, enrollId)
assert txResult.errorCode == 0, "Expected success (errorCode == 0), instead found errorCode={0}, error={1}".format(txResult.errorCode, txResult.error)
@when(u'user "{enrollId}" invokes chaincode "{ccAlias}" function name "{functionName}" with args')
def step_impl(context, enrollId, ccAlias, functionName):
response = bdd_grpc_util.invokeChaincode(context, enrollId, ccAlias, functionName)
context.response = response
context.transactionID = response.msg
#assert response.status == fabric_pb2.Response.SUCCESS, 'Failure invoking chaincode {0} on {1}, for user "{2}": {3}'.format(ccAlias, userRegistration.composeService,enrollId, response.msg)
@given(u'user "{enrollId}" stores a reference to chaincode "{ccAlias}" as "{tagName}"')
def step_impl(context, enrollId, ccAlias, tagName):
# Retrieve the userRegistration from the context
userRegistration = bdd_test_util.getUserRegistration(context, enrollId)
deployedCcSpec = bdd_grpc_util.getDeployment(context, ccAlias)
    assert deployedCcSpec is not None, "Deployment NOT found for chaincode alias '{0}'".format(ccAlias)
userRegistration.tags[tagName] = deployedCcSpec.chaincodeID.name
|
madmax983/h2o-3 | refs/heads/master | py2/testdir_single_jvm/test_summary2_NY0.py | 21 | import unittest, time, sys, random, math, getpass
sys.path.extend(['.','..','../..','py'])
import h2o2 as h2o
import h2o_cmd, h2o_import as h2i, h2o_util, h2o_print as h2p
def write_syn_dataset(csvPathname, rowCount, colCount, SEED, choices):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
naCnt = [0 for j in range(colCount)]
for i in range(rowCount):
rowData = []
for j in range(colCount):
            ri = r1.choice(choices)  # use the seeded RNG (r1) so rows are reproducible for a given SEED
if ri=='0' or ri==' 0':
naCnt[j] += 1
rowData.append(ri)
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
return naCnt
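# Hedged usage sketch (added; not in the original test): a hypothetical call
# shape for the generator above.
#   naCnt = write_syn_dataset('/tmp/syn.csv', 3, 4, 42, ('N', 'Y', '0'))
#   # one row might read "N,Y,0,Y"; naCnt[j] counts how many rows drew
#   # '0' (or ' 0') in column j, later compared against missing_count.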
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_summary2_NY0(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
choicesList = [
('N', 'Y', '0'),
('n', 'y', '0'),
('F', 'T', '0'),
('f', 't', '0'),
(' N', ' Y', ' 0'),
(' n', ' y', ' 0'),
(' F', ' T', ' 0'),
(' f', ' t', ' 0'),
]
# white space is stripped
expectedList = [
('N', 'Y', '0'),
('n', 'y', '0'),
('F', 'T', '0'),
('f', 't', '0'),
('N', 'Y', '0'),
('n', 'y', '0'),
('F', 'T', '0'),
('f', 't', '0'),
]
tryList = [
# colname, (min, 25th, 50th, 75th, max)
(100, 200, 'x.hex', choicesList[4], expectedList[4]),
(100, 200, 'x.hex', choicesList[5], expectedList[5]),
(100, 200, 'x.hex', choicesList[6], expectedList[6]),
(100, 200, 'x.hex', choicesList[7], expectedList[7]),
(100, 200, 'x.hex', choicesList[3], expectedList[3]),
(1000, 200, 'x.hex', choicesList[2], expectedList[2]),
(10000, 200, 'x.hex', choicesList[1], expectedList[1]),
(100000, 200, 'x.hex', choicesList[0], expectedList[0]),
]
timeoutSecs = 10
trial = 1
n = h2o.nodes[0]
lenNodes = len(h2o.nodes)
x = 0
timeoutSecs = 60
for (rowCount, colCount, hex_key, choices, expected) in tryList:
# max error = half the bin size?
SEEDPERFILE = random.randint(0, sys.maxint)
x += 1
csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
csvPathnameFull = h2i.find_folder_and_filename(None, csvPathname, returnFullPath=True)
print "Creating random", csvPathname
expectedNaCnt = write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE, choices)
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key,
timeoutSecs=10, doSummary=False)
pA = h2o_cmd.ParseObj(parseResult, expectedNumRows=rowCount, expectedNumCols=colCount)
print pA.numRows, pA.numCols, pA.parse_key
iA = h2o_cmd.InspectObj(pA.parse_key,
expectedNumRows=rowCount, expectedNumCols=colCount, expectedMissinglist=[])
print iA.missingList, iA.labelList, iA.numRows, iA.numCols
for i in range(colCount):
# walks across the columns triggering a summary on the col desired
# runSummary returns a column object now. inspect and parse don't. They return json.
# maybe eventually will make them return object? But I also pass expected stuff to them
# should I pass expected to summary? no, more complex?
co = h2o_cmd.runSummary(key=hex_key, column=i)
print co.label, co.type, co.missing_count, co.domain, sum(co.histogram_bins)
print "\nComparing column %s to expected" % i
            self.assertEqual(expectedNaCnt[i], co.missing_count,
                "Column %s: expected missing count %s, got %s" % (i, expectedNaCnt[i], co.missing_count))
self.assertEqual(rowCount - expectedNaCnt[i], sum(co.histogram_bins))
h2p.green_print("\nDone with trial", trial)
trial += 1
h2i.delete_keys_at_all_nodes()
if __name__ == '__main__':
h2o.unit_main()
|
thenewguy/wagtail | refs/heads/tng_master | wagtail/wagtailimages/views/images.py | 1 | import json
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.decorators import permission_required
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext as _
from django.views.decorators.vary import vary_on_headers
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import HttpResponse
from wagtail.wagtailcore.models import Site
from wagtail.wagtailadmin.forms import SearchForm
from wagtail.wagtailimages.models import get_image_model, Filter
from wagtail.wagtailimages.forms import get_image_form, URLGeneratorForm
from wagtail.wagtailimages.utils.crypto import generate_signature
@permission_required('wagtailimages.add_image')
@vary_on_headers('X-Requested-With')
def index(request):
Image = get_image_model()
# Get images
images = Image.objects.order_by('-created_at')
# Permissions
if not request.user.has_perm('wagtailimages.change_image'):
# restrict to the user's own images
images = images.filter(uploaded_by_user=request.user)
# Search
query_string = None
if 'q' in request.GET:
form = SearchForm(request.GET, placeholder=_("Search images"))
if form.is_valid():
query_string = form.cleaned_data['q']
if not request.user.has_perm('wagtailimages.change_image'):
# restrict to the user's own images
images = Image.search(query_string, filters={'uploaded_by_user_id': request.user.id})
else:
images = Image.search(query_string)
else:
form = SearchForm(placeholder=_("Search images"))
# Pagination
p = request.GET.get('p', 1)
paginator = Paginator(images, 20)
try:
images = paginator.page(p)
except PageNotAnInteger:
images = paginator.page(1)
except EmptyPage:
images = paginator.page(paginator.num_pages)
# Create response
if request.is_ajax():
return render(request, 'wagtailimages/images/results.html', {
'images': images,
'query_string': query_string,
'is_searching': bool(query_string),
})
else:
return render(request, 'wagtailimages/images/index.html', {
'images': images,
'query_string': query_string,
'is_searching': bool(query_string),
'search_form': form,
'popular_tags': Image.popular_tags(),
})
@permission_required('wagtailadmin.access_admin') # more specific permission tests are applied within the view
def edit(request, image_id):
Image = get_image_model()
ImageForm = get_image_form()
image = get_object_or_404(Image, id=image_id)
if not image.is_editable_by_user(request.user):
raise PermissionDenied
if request.POST:
original_file = image.file
form = ImageForm(request.POST, request.FILES, instance=image)
if form.is_valid():
if 'file' in form.changed_data:
# if providing a new image file, delete the old one and all renditions.
# NB Doing this via original_file.delete() clears the file field,
# which definitely isn't what we want...
original_file.storage.delete(original_file.name)
image.renditions.all().delete()
form.save()
messages.success(request, _("Image '{0}' updated.").format(image.title))
return redirect('wagtailimages_index')
else:
messages.error(request, _("The image could not be saved due to errors."))
else:
form = ImageForm(instance=image)
# Check if we should enable the frontend url generator
try:
reverse('wagtailimages_serve', args=('foo', '1', 'bar'))
url_generator_enabled = True
except NoReverseMatch:
url_generator_enabled = False
return render(request, "wagtailimages/images/edit.html", {
'image': image,
'form': form,
'url_generator_enabled': url_generator_enabled,
})
@permission_required('wagtailadmin.access_admin') # more specific permission tests are applied within the view
def url_generator(request, image_id):
image = get_object_or_404(get_image_model(), id=image_id)
if not image.is_editable_by_user(request.user):
raise PermissionDenied
form = URLGeneratorForm(initial={
'filter_method': 'original',
'width': image.width,
'height': image.height,
})
return render(request, "wagtailimages/images/url_generator.html", {
'image': image,
'form': form,
})
def json_response(document, status=200):
return HttpResponse(json.dumps(document), content_type='application/json', status=status)
@permission_required('wagtailadmin.access_admin')
def generate_url(request, image_id, filter_spec):
# Get the image
Image = get_image_model()
try:
image = Image.objects.get(id=image_id)
except Image.DoesNotExist:
return json_response({
'error': "Cannot find image."
}, status=404)
# Check if this user has edit permission on this image
if not image.is_editable_by_user(request.user):
return json_response({
'error': "You do not have permission to generate a URL for this image."
}, status=403)
# Parse the filter spec to make sure its valid
if not Filter(spec=filter_spec).is_valid():
return json_response({
'error': "Invalid filter spec."
}, status=400)
# Generate url
signature = generate_signature(image_id, filter_spec)
url = reverse('wagtailimages_serve', args=(signature, image_id, filter_spec))
# Get site root url
try:
site_root_url = Site.objects.get(is_default_site=True).root_url
except Site.DoesNotExist:
site_root_url = Site.objects.first().root_url
return json_response({'url': site_root_url + url, 'local_url': url}, status=200)
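# Hedged illustration (added; not part of the original view): a successful
# generate_url response body has this shape; both URL values below are
# hypothetical.
#
# {"url": "http://example.com/images/<signature>/1/<filter_spec>/",
#  "local_url": "/images/<signature>/1/<filter_spec>/"}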
@permission_required('wagtailadmin.access_admin') # more specific permission tests are applied within the view
def delete(request, image_id):
image = get_object_or_404(get_image_model(), id=image_id)
if not image.is_editable_by_user(request.user):
raise PermissionDenied
if request.POST:
image.delete()
messages.success(request, _("Image '{0}' deleted.").format(image.title))
return redirect('wagtailimages_index')
return render(request, "wagtailimages/images/confirm_delete.html", {
'image': image,
})
@permission_required('wagtailimages.add_image')
def add(request):
ImageForm = get_image_form()
ImageModel = get_image_model()
if request.POST:
image = ImageModel(uploaded_by_user=request.user)
form = ImageForm(request.POST, request.FILES, instance=image)
if form.is_valid():
form.save()
messages.success(request, _("Image '{0}' added.").format(image.title))
return redirect('wagtailimages_index')
else:
messages.error(request, _("The image could not be created due to errors."))
else:
form = ImageForm()
return render(request, "wagtailimages/images/add.html", {
'form': form,
})
@permission_required('wagtailadmin.access_admin')
def usage(request, image_id):
image = get_object_or_404(get_image_model(), id=image_id)
# Pagination
p = request.GET.get('p', 1)
paginator = Paginator(image.get_usage(), 20)
try:
used_by = paginator.page(p)
except PageNotAnInteger:
used_by = paginator.page(1)
except EmptyPage:
used_by = paginator.page(paginator.num_pages)
return render(request, "wagtailimages/images/usage.html", {
'image': image,
'used_by': used_by
})
|
cldershem/osf.io | refs/heads/develop | website/addons/box/tests/factories.py | 23 | # -*- coding: utf-8 -*-
"""Factory boy factories for the Box addon."""
import mock
from datetime import datetime
from framework.auth import Auth
from factory import SubFactory, Sequence, post_generation
from tests.factories import ModularOdmFactory, UserFactory, ProjectFactory
from website.addons.box.model import (
BoxOAuthSettings, BoxUserSettings,
BoxNodeSettings, BoxFile
)
# TODO(sloria): make an abstract UserSettingsFactory that just includes the owner field
class BoxOAuthSettingsFactory(ModularOdmFactory):
FACTORY_FOR = BoxOAuthSettings
username = 'Den'
user_id = 'b4rn311'
expires_at = datetime(2045, 1, 1)
access_token = Sequence(lambda n: 'abcdef{0}'.format(n))
refresh_token = Sequence(lambda n: 'abcdef{0}'.format(n))
class BoxUserSettingsFactory(ModularOdmFactory):
FACTORY_FOR = BoxUserSettings
owner = SubFactory(UserFactory)
oauth_settings = SubFactory(BoxOAuthSettingsFactory)
class BoxNodeSettingsFactory(ModularOdmFactory):
FACTORY_FOR = BoxNodeSettings
owner = SubFactory(ProjectFactory)
user_settings = SubFactory(BoxUserSettingsFactory)
with mock.patch('website.addons.box.model.BoxNodeSettings.fetch_folder_name') as mock_folder:
mock_folder.return_value = 'Camera Uploads'
class BoxFileFactory(ModularOdmFactory):
FACTORY_FOR = BoxFile
node = SubFactory(ProjectFactory)
path = 'foo.txt'
@post_generation
def add_box_addon(self, created, extracted):
self.node.add_addon('box', auth=Auth(user=self.node.creator))
self.node.save()
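# Hedged usage sketch (added; not in the original factories): creating a
# BoxFile also enables the box addon on its project through the
# post_generation hook above.
#   box_file = BoxFileFactory()
#   assert box_file.node.get_addon('box') is not None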
|
rven/odoo | refs/heads/14.0-fix-partner-merge-mail-activity | addons/payment_ingenico/data/ogone.py | 107 | # -*- coding: utf-8 -*-
OGONE_ERROR_MAP = {
'0020001001': "Authorization failed, please retry",
'0020001002': "Authorization failed, please retry",
'0020001003': "Authorization failed, please retry",
'0020001004': "Authorization failed, please retry",
'0020001005': "Authorization failed, please retry",
'0020001006': "Authorization failed, please retry",
'0020001007': "Authorization failed, please retry",
'0020001008': "Authorization failed, please retry",
'0020001009': "Authorization failed, please retry",
'0020001010': "Authorization failed, please retry",
'0030001999': "Our payment system is currently under maintenance, please try later",
'0050001005': "Expiration Date error",
'0050001007': "Requested Operation code not allowed",
'0050001008': "Invalid delay value",
'0050001010': "Input date in invalid format",
'0050001013': "Unable to parse socket input stream",
'0050001014': "Error in parsing stream content",
'0050001015': "Currency error",
'0050001016': "Transaction still posted at end of wait",
'0050001017': "Sync value not compatible with delay value",
'0050001019': "Transaction duplicate of a pre-existing transaction",
'0050001020': "Acceptation code empty while required for the transaction",
'0050001024': "Maintenance acquirer differs from original transaction acquirer",
'0050001025': "Maintenance merchant differs from original transaction merchant",
'0050001028': "Maintenance operation not accurate for the original transaction",
'0050001031': "Host application unknown for the transaction",
'0050001032': "Unable to perform requested operation with requested currency",
'0050001033': "Maintenance card number differs from original transaction card number",
'0050001034': "Operation code not allowed",
'0050001035': "Exception occurred in socket input stream treatment",
'0050001036': "Card length does not correspond to an acceptable value for the brand",
'0050001036': "Card length does not correspond to an acceptable value for the brand",
'0050001068': "A technical problem occurred, please contact helpdesk",
'0050001069': "Invalid check for CardID and Brand",
'0050001070': "A technical problem occurred, please contact helpdesk",
'0050001116': "Unknown origin IP",
'0050001117': "No origin IP detected",
'0050001118': "Merchant configuration problem, please contact support",
'10001001': "Communication failure",
'10001002': "Communication failure",
'10001003': "Communication failure",
'10001004': "Communication failure",
'10001005': "Communication failure",
'20001001': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001002': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001003': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001004': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001005': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001006': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001007': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001008': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001009': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001010': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001101': "A technical problem occurred, please contact helpdesk",
'20001105': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001111': "A technical problem occurred, please contact helpdesk",
'20002001': "Origin for the response of the bank can not be checked",
'20002002': "Beneficiary account number has been modified during processing",
'20002003': "Amount has been modified during processing",
'20002004': "Currency has been modified during processing",
'20002005': "No feedback from the bank server has been detected",
'30001001': "Payment refused by the acquirer",
'30001002': "Duplicate request",
'30001010': "A technical problem occurred, please contact helpdesk",
'30001011': "A technical problem occurred, please contact helpdesk",
'30001012': "Card black listed - Contact acquirer",
'30001015': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30001051': "A technical problem occurred, please contact helpdesk",
'30001054': "A technical problem occurred, please contact helpdesk",
'30001057': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30001058': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
    '30001060': "Acquirer indicates that a failure occurred during payment processing",
'30001070': "RATEPAY Invalid Response Type (Failure)",
'30001071': "RATEPAY Missing Mandatory status code field (failure)",
'30001072': "RATEPAY Missing Mandatory Result code field (failure)",
'30001073': "RATEPAY Response parsing Failed",
'30001090': "CVC check required by front end and returned invalid by acquirer",
'30001091': "ZIP check required by front end and returned invalid by acquirer",
'30001092': "Address check required by front end and returned as invalid by acquirer.",
'30001100': "Unauthorized buyer's country",
'30001101': "IP country <> card country",
'30001102': "Number of different countries too high",
'30001103': "unauthorized card country",
'30001104': "unauthorized ip address country",
'30001105': "Anonymous proxy",
'30001110': "If the problem persists, please contact Support, or go to paysafecard's card balance page (https://customer.cc.at.paysafecard.com/psccustomer/GetWelcomePanelServlet?language=en) to see when the amount reserved on your card will be available again.",
'30001120': "IP address in merchant's black list",
'30001130': "BIN in merchant's black list",
'30001131': "Wrong BIN for 3xCB",
'30001140': "Card in merchant's card blacklist",
'30001141': "Email in blacklist",
'30001142': "Passenger name in blacklist",
'30001143': "Card holder name in blacklist",
'30001144': "Passenger name different from owner name",
'30001145': "Time to departure too short",
'30001149': "Card Configured in Card Supplier Limit for another relation (CSL)",
'30001150': "Card not configured in the system for this customer (CSL)",
'30001151': "REF1 not allowed for this relationship (Contract number",
'30001152': "Card/Supplier Amount limit reached (CSL)",
'30001153': "Card not allowed for this supplier (Date out of contract bounds)",
'30001154': "You have reached the usage limit allowed",
'30001155': "You have reached the usage limit allowed",
'30001156': "You have reached the usage limit allowed",
'30001157': "Unauthorized IP country for itinerary",
'30001158': "email usage limit reached",
'30001159': "Unauthorized card country/IP country combination",
'30001160': "Postcode in highrisk group",
'30001161': "generic blacklist match",
'30001162': "Billing Address is a PO Box",
'30001180': "maximum scoring reached",
'30001997': "Authorization canceled by simulation",
'30001998': "A technical problem occurred, please try again.",
'30001999': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30002001': "Payment refused by the financial institution",
'30021001': "Call acquirer support call number.",
'30022001': "Payment must be approved by the acquirer before execution.",
'30031001': "Invalid merchant number.",
'30041001': "Retain card.",
'30051001': "Authorization declined",
'30071001': "Retain card - special conditions.",
'30121001': "Invalid transaction",
'30131001': "Invalid amount",
'30131002': "You have reached the total amount allowed",
'30141001': "Invalid card number",
'30151001': "Unknown acquiring institution.",
'30171001': "Payment method cancelled by the buyer",
'30171002': "The maximum time allowed is elapsed.",
'30191001': "Try again later.",
'30201001': "A technical problem occurred, please contact helpdesk",
'30301001': "Invalid format",
'30311001': "Unknown acquirer ID.",
'30331001': "Card expired.",
'30341001': "Suspicion of fraud.",
'30341002': "Suspicion of fraud (3rdMan)",
'30341003': "Suspicion of fraud (Perseuss)",
'30341004': "Suspicion of fraud (ETHOCA)",
'30381001': "A technical problem occurred, please contact helpdesk",
'30401001': "Invalid function.",
'30411001': "Lost card.",
'30431001': "Stolen card, pick up",
'30511001': "Insufficient funds.",
'30521001': "No Authorization. Contact the issuer of your card.",
'30541001': "Card expired.",
'30551001': "Invalid PIN.",
'30561001': "Card not in authorizer's database.",
'30571001': "Transaction not permitted on card.",
'30581001': "Transaction not allowed on this terminal",
'30591001': "Suspicion of fraud.",
'30601001': "The merchant must contact the acquirer.",
'30611001': "Amount exceeds card ceiling.",
'30621001': "Restricted card.",
'30631001': "Security policy not respected.",
'30641001': "Amount changed from ref. trn.",
'30681001': "Tardy response.",
'30751001': "PIN entered incorrectly too often",
'30761001': "Card holder already contesting.",
'30771001': "PIN entry required.",
'30811001': "Message flow error.",
'30821001': "Authorization center unavailable",
'30831001': "Authorization center unavailable",
'30901001': "Temporary system shutdown.",
'30911001': "Acquirer unavailable.",
'30921001': "Invalid card type for acquirer.",
'30941001': "Duplicate transaction",
'30961001': "Processing temporarily not possible",
'30971001': "A technical problem occurred, please contact helpdesk",
'30981001': "A technical problem occurred, please contact helpdesk",
'31011001': "Unknown acceptance code",
'31021001': "Invalid currency",
'31031001': "Acceptance code missing",
'31041001': "Inactive card",
'31051001': "Merchant not active",
'31061001': "Invalid expiration date",
'31071001': "Interrupted host communication",
'31081001': "Card refused",
'31091001': "Invalid password",
    '31101001': "Transaction ceiling (including bonus) exceeded",
    '31111001': "Monthly ceiling (including bonus) exceeded",
    '31121001': "Billing center ceiling exceeded",
    '31131001': "Company ceiling exceeded",
    '31141001': "Supplier MCC code not authorized for this card",
    '31151001': "Supplier SIRET number not authorized for this card",
'31161001': "This is not a valid online banking account",
'32001004': "A technical problem occurred, please try again.",
    '34011001': "Payment with RatePAY not possible.",
'39991001': "A technical problem occurred, please contact the helpdesk of your acquirer",
'40001001': "A technical problem occurred, please try again.",
'40001002': "A technical problem occurred, please try again.",
'40001003': "A technical problem occurred, please try again.",
'40001004': "A technical problem occurred, please try again.",
'40001005': "A technical problem occurred, please try again.",
'40001006': "A technical problem occurred, please try again.",
'40001007': "A technical problem occurred, please try again.",
'40001008': "A technical problem occurred, please try again.",
'40001009': "A technical problem occurred, please try again.",
'40001010': "A technical problem occurred, please try again.",
'40001011': "A technical problem occurred, please contact helpdesk",
'40001012': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'40001013': "A technical problem occurred, please contact helpdesk",
'40001016': "A technical problem occurred, please contact helpdesk",
'40001018': "A technical problem occurred, please try again.",
'40001019': "Sorry, an error occurred during processing. Please retry the operation (use back button of the browser). If problem persists, contact your merchant's helpdesk.",
'40001020': "Sorry, an error occurred during processing. Please retry the operation (use back button of the browser). If problem persists, contact your merchant's helpdesk.",
'40001050': "A technical problem occurred, please contact helpdesk",
'40001133': "Authentication failed, the signature of your bank access control server is incorrect",
'40001134': "Authentication failed, please retry or cancel.",
'40001135': "Authentication temporary unavailable, please retry or cancel.",
'40001136': "Technical problem with your browser, please retry or cancel",
'40001137': "Your bank access control server is temporary unavailable, please retry or cancel",
'40001998': "Temporary technical problem. Please retry a little bit later.",
'50001001': "Unknown card type",
'50001002': "Card number format check failed for given card number.",
'50001003': "Merchant data error",
'50001004': "Merchant identification missing",
'50001005': "Expiration Date error",
'50001006': "Amount is not a number",
'50001007': "A technical problem occurred, please contact helpdesk",
'50001008': "A technical problem occurred, please contact helpdesk",
'50001009': "A technical problem occurred, please contact helpdesk",
'50001010': "A technical problem occurred, please contact helpdesk",
'50001011': "Brand not supported for that merchant",
'50001012': "A technical problem occurred, please contact helpdesk",
'50001013': "A technical problem occurred, please contact helpdesk",
'50001014': "A technical problem occurred, please contact helpdesk",
'50001015': "Invalid currency code",
'50001016': "A technical problem occurred, please contact helpdesk",
'50001017': "A technical problem occurred, please contact helpdesk",
'50001018': "A technical problem occurred, please contact helpdesk",
'50001019': "A technical problem occurred, please contact helpdesk",
'50001020': "A technical problem occurred, please contact helpdesk",
'50001021': "A technical problem occurred, please contact helpdesk",
'50001022': "A technical problem occurred, please contact helpdesk",
'50001023': "A technical problem occurred, please contact helpdesk",
'50001024': "A technical problem occurred, please contact helpdesk",
'50001025': "A technical problem occurred, please contact helpdesk",
'50001026': "A technical problem occurred, please contact helpdesk",
'50001027': "A technical problem occurred, please contact helpdesk",
'50001028': "A technical problem occurred, please contact helpdesk",
'50001029': "A technical problem occurred, please contact helpdesk",
'50001030': "A technical problem occurred, please contact helpdesk",
'50001031': "A technical problem occurred, please contact helpdesk",
'50001032': "A technical problem occurred, please contact helpdesk",
'50001033': "A technical problem occurred, please contact helpdesk",
'50001034': "A technical problem occurred, please contact helpdesk",
'50001035': "A technical problem occurred, please contact helpdesk",
'50001036': "Card length does not correspond to an acceptable value for the brand",
'50001037': "Purchasing card number for a regular merchant",
'50001038': "Non Purchasing card for a Purchasing card merchant",
'50001039': "Details sent for a non-Purchasing card merchant, please contact helpdesk",
'50001040': "Details not sent for a Purchasing card transaction, please contact helpdesk",
'50001041': "Payment detail validation failed",
    '50001042': "Given transaction amounts (tax, discount, shipping, net, etc.) do not compute correctly together",
'50001043': "A technical problem occurred, please contact helpdesk",
'50001044': "No acquirer configured for this operation",
'50001045': "No UID configured for this operation",
'50001046': "Operation not allowed for the merchant",
'50001047': "A technical problem occurred, please contact helpdesk",
'50001048': "A technical problem occurred, please contact helpdesk",
'50001049': "A technical problem occurred, please contact helpdesk",
'50001050': "A technical problem occurred, please contact helpdesk",
'50001051': "A technical problem occurred, please contact helpdesk",
'50001052': "A technical problem occurred, please contact helpdesk",
'50001053': "A technical problem occurred, please contact helpdesk",
'50001054': "Card number incorrect or incompatible",
'50001055': "A technical problem occurred, please contact helpdesk",
'50001056': "A technical problem occurred, please contact helpdesk",
'50001057': "A technical problem occurred, please contact helpdesk",
'50001058': "A technical problem occurred, please contact helpdesk",
'50001059': "A technical problem occurred, please contact helpdesk",
'50001060': "A technical problem occurred, please contact helpdesk",
'50001061': "A technical problem occurred, please contact helpdesk",
'50001062': "A technical problem occurred, please contact helpdesk",
'50001063': "Card Issue Number does not correspond to range or not present",
'50001064': "Start Date not valid or not present",
'50001066': "Format of CVC code invalid",
'50001067': "The merchant is not enrolled for 3D-Secure",
'50001068': "The card number or account number (PAN) is invalid",
'50001069': "Invalid check for CardID and Brand",
'50001070': "The ECI value given is either not supported, or in conflict with other data in the transaction",
'50001071': "Incomplete TRN demat",
'50001072': "Incomplete PAY demat",
'50001073': "No demat APP",
'50001074': "Authorisation too old",
'50001075': "VERRes was an error message",
'50001076': "DCP amount greater than authorisation amount",
'50001077': "Details negative amount",
'50001078': "Details negative quantity",
'50001079': "Could not decode/decompress received PARes (3D-Secure)",
'50001080': "Received PARes was an error message from ACS (3D-Secure)",
'50001081': "Received PARes format was invalid according to the 3DS specifications (3D-Secure)",
'50001082': "PAReq/PARes reconciliation failure (3D-Secure)",
'50001084': "Maximum amount reached",
'50001087': "The transaction type requires authentication, please check with your bank.",
'50001090': "CVC missing at input, but CVC check asked",
'50001091': "ZIP missing at input, but ZIP check asked",
'50001092': "Address missing at input, but Address check asked",
'50001095': "Invalid date of birth",
'50001096': "Invalid commodity code",
'50001097': "The requested currency and brand are incompatible.",
'50001111': "Data validation error",
'50001113': "This order has already been processed",
'50001114': "Error pre-payment check page access",
'50001115': "Request not received in secure mode",
'50001116': "Unknown IP address origin",
'50001117': "No IP address origin",
'50001118': "Pspid not found or not correct",
'50001119': "Password incorrect or disabled due to numbers of errors",
'50001120': "Invalid currency",
'50001121': "Invalid number of decimals for the currency",
'50001122': "Currency not accepted by the merchant",
'50001123': "Card type not active",
'50001124': "Number of lines does not match the number of payments",
'50001125': "Format validation error",
'50001126': "Overflow in data capture requests for the original order",
'50001127': "The original order is not in a correct status",
'50001128': "Missing authorization code for unauthorized order",
'50001129': "Overflow in refunds requests",
'50001130': "Error access to original order",
'50001131': "Error access to original history item",
'50001132': "The Selected Catalog is empty",
'50001133': "Duplicate request",
'50001134': "Authentication failed, please retry or cancel.",
'50001135': "Authentication temporarily unavailable, please retry or cancel.",
'50001136': "Technical problem with your browser, please retry or cancel.",
'50001137': "Your bank's access control server is temporarily unavailable, please retry or cancel.",
'50001150': "Fraud detection: technical error (IP not valid)",
'50001151': "Fraud detection: technical error (IPCTY unknown or error)",
'50001152': "Fraud detection: technical error (CCCTY unknown or error)",
'50001153': "Overflow in redo-authorisation requests",
'50001170': "Dynamic BIN check failed",
'50001171': "Dynamic country check failed",
'50001172': "Error in Amadeus signature",
'50001174': "Card Holder Name is too long",
'50001175': "Name contains invalid characters",
'50001176': "Card number is too long",
'50001177': "Card number contains non-numeric info",
'50001178': "Card Number Empty",
'50001179': "CVC too long",
'50001180': "CVC contains non-numeric info",
'50001181': "Expiration date contains non-numeric info",
'50001182': "Invalid expiration month",
'50001183': "Expiration date must be in the future",
'50001184': "SHA Mismatch",
'50001205': "Missing mandatory fields for billing address.",
'50001206': "Missing mandatory field date of birth.",
'50001207': "Missing required shopping basket details.",
'50001208': "Missing social security number",
'50001209': "Invalid country code",
'50001210': "Missing yearly salary",
'50001211': "Missing gender",
'50001212': "Missing email",
'50001213': "Missing IP address",
'50001214': "Missing part payment campaign ID",
'50001215': "Missing invoice number",
'50001216': "The alias must be different than the card number",
'60000001': "account number unknown",
'60000003': "not credited dd-mm-yy",
'60000005': "name/number do not correspond",
'60000007': "account number blocked",
'60000008': "specific direct debit block",
'60000009': "account number WKA",
'60000010': "administrative reason",
'60000011': "account number expired",
'60000012': "no direct debit authorisation given",
'60000013': "debit not approved",
'60000014': "double payment",
'60000018': "name/address/city not entered",
'60001001': "no original direct debit for revocation",
'60001002': "payer’s account number format error",
'60001004': "payer’s account at different bank",
'60001005': "payee’s account at different bank",
'60001006': "payee’s account number format error",
'60001007': "payer’s account number blocked",
'60001008': "payer’s account number expired",
'60001009': "payee’s account number expired",
'60001010': "direct debit not possible",
'60001011': "creditor payment not possible",
'60001012': "payer’s account number unknown WKA-number",
'60001013': "payee’s account number unknown WKA-number",
'60001014': "impermissible WKA transaction",
'60001015': "period for revocation expired",
'60001017': "reason for revocation not correct",
'60001018': "original run number not numeric",
'60001019': "payment ID incorrect",
'60001020': "amount not numeric",
'60001021': "amount zero not permitted",
'60001022': "negative amount not permitted",
'60001023': "payer and payee giro account number",
'60001025': "processing code (verwerkingscode) incorrect",
'60001028': "revocation not permitted",
'60001029': "guaranteed direct debit on giro account number",
'60001030': "NBC transaction type incorrect",
'60001031': "description too large",
'60001032': "book account number not issued",
'60001034': "book account number incorrect",
'60001035': "payer’s account number not numeric",
'60001036': "payer’s account number not eleven-proof",
'60001037': "payer’s account number not issued",
'60001039': "payer’s account number of DNB/BGC/BLA",
'60001040': "payee’s account number not numeric",
'60001041': "payee’s account number not eleven-proof",
'60001042': "payee’s account number not issued",
'60001044': "payee’s account number unknown",
'60001050': "payee’s name missing",
'60001051': "indicate payee’s bank account number instead of 3102",
'60001052': "no direct debit contract",
'60001053': "amount beyond bounds",
'60001054': "selective direct debit block",
'60001055': "original run number unknown",
'60001057': "payer’s name missing",
'60001058': "payee’s account number missing",
'60001059': "restore not permitted",
'60001060': "bank’s reference (navraaggegeven) missing",
'60001061': "BEC/GBK number incorrect",
'60001062': "BEC/GBK code incorrect",
'60001087': "book account number not numeric",
'60001090': "cancelled on request",
'60001091': "cancellation order executed",
'60001092': "cancelled instead of bended",
'60001093': "book account number is a shortened account number",
'60001094': "instructing party account number not identical with payer",
'60001095': "payee unknown GBK acceptor",
'60001097': "instructing party account number not identical with payee",
'60001099': "clearing not permitted",
'60001101': "payer’s account number not spaces",
'60001102': "PAN length not numeric",
'60001103': "PAN length outside limits",
'60001104': "track number not numeric",
'60001105': "track number not valid",
'60001106': "PAN sequence number not numeric",
'60001107': "domestic PAN not numeric",
'60001108': "domestic PAN not eleven-proof",
'60001109': "domestic PAN not issued",
'60001110': "foreign PAN not numeric",
'60001111': "card valid date not numeric",
'60001112': "book period number (boekperiodenr) not numeric",
'60001113': "transaction number not numeric",
'60001114': "transaction time not numeric",
'60001115': "transaction no valid time",
'60001116': "transaction date not numeric",
'60001117': "transaction no valid date",
'60001118': "STAN not numeric",
'60001119': "instructing party’s name missing",
'60001120': "foreign amount (bedrag-vv) not numeric",
'60001122': "rate (verrekenkoers) not numeric",
'60001125': "number of decimals (aantaldecimalen) incorrect",
'60001126': "tariff (tarifering) not B/O/S",
'60001127': "domestic costs (kostenbinnenland) not numeric",
'60001128': "domestic costs (kostenbinnenland) not higher than zero",
'60001129': "foreign costs (kostenbuitenland) not numeric",
'60001130': "foreign costs (kostenbuitenland) not higher than zero",
'60001131': "domestic costs (kostenbinnenland) not zero",
'60001132': "foreign costs (kostenbuitenland) not zero",
'60001134': "Euro record not fully filled in",
'60001135': "Client currency incorrect",
'60001136': "Amount NLG not numeric",
'60001137': "Amount NLG not higher than zero",
'60001138': "Amount NLG not equal to Amount",
'60001139': "Amount NLG incorrectly converted",
'60001140': "Amount EUR not numeric",
'60001141': "Amount EUR not greater than zero",
'60001142': "Amount EUR not equal to Amount",
'60001143': "Amount EUR incorrectly converted",
'60001144': "Client currency not NLG",
'60001145': "rate euro-vv (Koerseuro-vv) not numeric",
'60001146': "comma rate euro-vv (Kommakoerseuro-vv) incorrect",
'60001147': "acceptgiro distributor not valid",
'60001148': "Original run number and/or BRN are missing",
'60001149': "Amount/Account number/BRN different",
'60001150': "Direct debit already revoked/restored",
'60001151': "Direct debit already reversed/revoked/restored",
'60001153': "Payer’s account number not known",
}
DATA_VALIDATION_ERROR = '50001111'
def retryable(error):
return error in [
'0020001001', '0020001002', '0020001003', '0020001004', '0020001005',
'0020001006', '0020001007', '0020001008', '0020001009', '0020001010',
'30001010', '30001011', '30001015',
'30001057', '30001058',
'30001998', '30001999',
#'30611001', # amount exceeds card limit
'30961001',
'40001001', '40001002', '40001003', '40001004', '40001005',
'40001006', '40001007', '40001008', '40001009', '40001010',
'40001012',
'40001018', '40001019', '40001020',
'40001134', '40001135', '40001136', '40001137',
#'50001174', # cardholder name too long
]
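# Illustrative sketch (assumption, not part of the original module): a caller
# could consult retryable() to decide whether a failed payment attempt is
# worth repeating. `charge` is a hypothetical callable that returns an error
# code string, or None on success.
def charge_with_retries(charge, max_attempts=3):
    for _ in range(max_attempts):
        error = charge()
        if error is None:
            return True  # payment went through
        if not retryable(error):
            break  # permanent failure, e.g. DATA_VALIDATION_ERROR
    return False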
|
thundernet8/WRGameVideos-API | refs/heads/master | venv/lib/python2.7/site-packages/requests/packages/urllib3/util/request.py | 780 | from __future__ import absolute_import
from base64 import b64encode
from ..packages.six import b
ACCEPT_ENCODING = 'gzip,deflate'
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
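# Illustrative usage (not part of urllib3's source): basic_auth produces a
# base64-encoded Authorization header value.
# >>> make_headers(basic_auth='user:secret')
# {'authorization': 'Basic dXNlcjpzZWNyZXQ='}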
|
saurabh6790/pow-lib | refs/heads/master | core/doctype/event_user/event_user.py | 578 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes
class DocType:
def __init__(self, d, dl):
self.doc, self.doclist = d, dl |
mglukhikh/intellij-community | refs/heads/master | python/testData/inspections/PyAttributeOutsideInitInspection/fromSuperClassWithoutInit.py | 166 | __author__ = 'ktisha'
class Base(object):
def __init__(self):
self.my = 1
class Child(Base):
def f(self):
self.my = 1 |
hezhenghao/GroundHog | refs/heads/master | groundhog/layers/__init__.py | 13 | from rec_layers import \
LSTMLayer, \
RecurrentLayer, \
RecurrentMultiLayer, \
RecurrentMultiLayerInp, \
RecurrentMultiLayerShortPath, \
RecurrentMultiLayerShortPathInp, \
RecurrentMultiLayerShortPathInpAll
from rconv_layers import RecursiveConvolutionalLayer
from ff_layers import DropOp
from ff_layers import MultiLayer, LastState, UnaryOp,\
MaxPooling, Shift, BinaryOp, GaussianNoise, Concatenate
from ff_layers import maxpool, maxpool_ntimes, minpool, minpool_ntimes, \
last, last_ntimes, \
tanh, sigmoid, rectifier, hard_sigmoid, hard_tanh
from cost_layers import SoftmaxLayer, SigmoidLayer, HierarchicalSoftmaxLayer
from basic import Layer, Operator
|
caiges/populous | refs/heads/master | populous/weather/management/commands/everyday.py | 1 | from django.core.management.base import BaseCommand, CommandError
from weather.utils import location
class Command(BaseCommand):
help = "Updates the long range forecasts. Run daily."
def handle(self, *args, **options):
location.get_forecast(location.get_location())
|
bilgili/Voreen | refs/heads/master | modules/python/ext/python27/modules/stat.py | 319 | """Constants/functions for interpreting results of os.stat() and os.lstat().
Suggested usage: from stat import *
"""
# Indices for stat struct members in the tuple returned by os.stat()
ST_MODE = 0
ST_INO = 1
ST_DEV = 2
ST_NLINK = 3
ST_UID = 4
ST_GID = 5
ST_SIZE = 6
ST_ATIME = 7
ST_MTIME = 8
ST_CTIME = 9
# Extract bits from the mode
def S_IMODE(mode):
return mode & 07777
def S_IFMT(mode):
return mode & 0170000
# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems)
S_IFDIR = 0040000
S_IFCHR = 0020000
S_IFBLK = 0060000
S_IFREG = 0100000
S_IFIFO = 0010000
S_IFLNK = 0120000
S_IFSOCK = 0140000
# Functions to test for each file type
def S_ISDIR(mode):
return S_IFMT(mode) == S_IFDIR
def S_ISCHR(mode):
return S_IFMT(mode) == S_IFCHR
def S_ISBLK(mode):
return S_IFMT(mode) == S_IFBLK
def S_ISREG(mode):
return S_IFMT(mode) == S_IFREG
def S_ISFIFO(mode):
return S_IFMT(mode) == S_IFIFO
def S_ISLNK(mode):
return S_IFMT(mode) == S_IFLNK
def S_ISSOCK(mode):
return S_IFMT(mode) == S_IFSOCK
# Names for permission bits
S_ISUID = 04000
S_ISGID = 02000
S_ENFMT = S_ISGID
S_ISVTX = 01000
S_IREAD = 00400
S_IWRITE = 00200
S_IEXEC = 00100
S_IRWXU = 00700
S_IRUSR = 00400
S_IWUSR = 00200
S_IXUSR = 00100
S_IRWXG = 00070
S_IRGRP = 00040
S_IWGRP = 00020
S_IXGRP = 00010
S_IRWXO = 00007
S_IROTH = 00004
S_IWOTH = 00002
S_IXOTH = 00001
# Names for file flags
UF_NODUMP = 0x00000001
UF_IMMUTABLE = 0x00000002
UF_APPEND = 0x00000004
UF_OPAQUE = 0x00000008
UF_NOUNLINK = 0x00000010
UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed
UF_HIDDEN = 0x00008000 # OS X: file should not be displayed
SF_ARCHIVED = 0x00010000
SF_IMMUTABLE = 0x00020000
SF_APPEND = 0x00040000
SF_NOUNLINK = 0x00100000
SF_SNAPSHOT = 0x00200000
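# Illustrative usage (not part of the module): classify a path and print its
# permission bits with the helpers above.
# import os
# mode = os.lstat('/tmp').st_mode
# if S_ISDIR(mode):
#     print 'directory, mode %s' % oct(S_IMODE(mode))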
|
joshbruning/selenium | refs/heads/master | py/test/selenium/webdriver/common/quit_tests.py | 30 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
@pytest.mark.no_driver_after_test
def test_quit(driver, pages):
driver.quit()
with pytest.raises(Exception):
pages.load('simpleTest.html')
|
XDATA-Year-3/EntityAlignLarge | refs/heads/master | service/listseeds.py | 4 | import bson
import pymongo
import json
from bson import ObjectId
from pymongo import MongoClient
import string
import tangelo
def run(host, database):
# Create an empty response object.
response = {}
collectionNames = ['select a dataset']
# look through the collections in the ivaan database and return the names of all
# collections that match the naming profile for seed lists. This is matching to
# see if the collection name begins with "seeds_"
client = MongoClient(host, 27017)
db = client[database]
# get a list of all collections (excluding system collections)
collection_list = db.collection_names(False)
for coll in collection_list:
# include only the seeds collections
if coll[:6] == 'seeds_':
#print "found seeds:", coll
collectionNames.append(coll)
client.close()
# Pack the results into the response object, and return it.
response['result'] = collectionNames
# Return the response object.
tangelo.log(str(response))
return json.dumps(response)
|
tralamazza/micropython | refs/heads/master | tests/import/import1a.py | 104 | import import1b
print(import1b.var)
|
riteshshrv/django | refs/heads/master | django/contrib/admin/__init__.py | 562 | # ACTION_CHECKBOX_NAME is unused, but should stay since its import from here
# has been referenced in documentation.
from django.contrib.admin.decorators import register
from django.contrib.admin.filters import (
AllValuesFieldListFilter, BooleanFieldListFilter, ChoicesFieldListFilter,
DateFieldListFilter, FieldListFilter, ListFilter, RelatedFieldListFilter,
RelatedOnlyFieldListFilter, SimpleListFilter,
)
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.contrib.admin.options import (
HORIZONTAL, VERTICAL, ModelAdmin, StackedInline, TabularInline,
)
from django.contrib.admin.sites import AdminSite, site
from django.utils.module_loading import autodiscover_modules
__all__ = [
"register", "ACTION_CHECKBOX_NAME", "ModelAdmin", "HORIZONTAL", "VERTICAL",
"StackedInline", "TabularInline", "AdminSite", "site", "ListFilter",
"SimpleListFilter", "FieldListFilter", "BooleanFieldListFilter",
"RelatedFieldListFilter", "ChoicesFieldListFilter", "DateFieldListFilter",
"AllValuesFieldListFilter", "RelatedOnlyFieldListFilter", "autodiscover",
]
def autodiscover():
autodiscover_modules('admin', register_to=site)
default_app_config = 'django.contrib.admin.apps.AdminConfig'
|
grehx/spark-tk | refs/heads/master | regression-tests/sparktkregtests/testcases/models/gmm_test.py | 1 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test guassian mixture models against known values"""
import unittest
from collections import Counter
from numpy.testing import assert_almost_equal
from sparktkregtests.lib import sparktk_test
class GMMModelTest(sparktk_test.SparkTKTestCase):
def setUp(self):
data_file = self.get_file("gmm_data.csv")
self.frame = self.context.frame.import_csv(
data_file, schema=[("x1", float), ("x2", float)])
def test_train(self):
""" Verify that model operates as expected in straightforward case"""
model = self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"],
column_scalings=[1.0, 1.0],
k=5,
max_iterations=500,
seed=20,
convergence_tol=0.0001)
actual_mu = [g.mu for g in model.gaussians]
actual_sigma = [g.sigma for g in model.gaussians]
expected_mu = \
[[7.0206, -10.1706],
[7.8322, -10.2383],
[-1.3816, 6.7215],
[-0.04184, 5.8039],
[-4.1743, 8.5564]]
expected_sigma = \
[[[0.2471, -0.3325],
[-0.3325, 0.5828]],
[[2.3005, 0.6906],
[0.6906, 2.1103]],
[[1.5941, -3.5325],
[-3.5325, 7.8424]],
[[0.9849, 0.04328],
[0.04328, 0.3736]],
[[0.1168, 0.1489],
[0.1489, 0.9757]]]
assert_almost_equal(actual_mu, expected_mu, decimal=3)
assert_almost_equal(actual_sigma, expected_sigma, decimal=3)
def test_predict(self):
""" Tests output of predict """
model = self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"],
column_scalings=[1.0, 1.0],
k=3,
max_iterations=100,
seed=15)
predicted_frame = model.predict(self.frame)
results_df = predicted_frame.to_pandas(self.frame.count())
actual_cluster_sizes = Counter(
results_df["predicted_cluster"].tolist())
expected_cluster_sizes = {2: 27, 0: 17, 1: 6}
self.assertItemsEqual(actual_cluster_sizes, expected_cluster_sizes)
def test_gmm_1_cluster(self):
"""Test gmm doesn't error on k=1"""
self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"], [1.0, 1.0], k=1)
def test_gmm_1_iteration(self):
"""Train on 1 iteration only, shouldn't throw exception"""
self.context.models.clustering.gmm.train(
self.frame, ["x1"], column_scalings=[1.0],
max_iterations=1)
def test_gmm_high_convergence(self):
"""Train on high convergence, should not throw exception"""
self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"], column_scalings=[1.0, 1.0],
convergence_tol=1e6)
def test_gmm_negative_seed(self):
"""Train on negative seed, shouldn't throw exception"""
self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"], column_scalings=[1.0, 1.0],
seed=-20)
def test_gmm_0_scalings(self):
"""all-zero column scalings, shouldn't throw exception"""
self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"], column_scalings=[0.0, 0.0])
def test_gmm_negative_scalings(self):
"""negative column scalings, shouldn't throw exception"""
self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"], column_scalings=[-1.0, -1.0])
def test_gmm_empty_frame(self):
""" Verify that model operates as expected in straightforward case"""
# Train on an empty frame
block_data = []
frame = self.context.frame.create(
block_data,
[("x1", float)])
with self.assertRaisesRegexp(
Exception, "empty collection"):
self.context.models.clustering.gmm.train(
frame, ["x1"], column_scalings=[1.0])
def test_0_classes_errors(self):
"""Train on 0 classes, should error"""
with self.assertRaisesRegexp(
Exception, "k must be at least 1"):
self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"], column_scalings=[1.0, 1.0], k=0)
def test_negative_classes(self):
"""Train on negative classes, should error"""
with self.assertRaisesRegexp(
Exception, "k must be at least 1"):
self.context.models.clustering.gmm.train(
self.frame, ["x1"], column_scalings=[1.0], k=-5)
def test_0_iterations(self):
"""Train on 0 iterations, should error"""
with self.assertRaisesRegexp(
Exception, "maxIterations must be a positive value"):
self.context.models.clustering.gmm.train(
self.frame, ["x1"], column_scalings=[1.0],
max_iterations=0)
def test_negative_iterations(self):
"""Train on negative iterations, should error"""
with self.assertRaisesRegexp(
Exception, "maxIterations must be a positive value"):
self.context.models.clustering.gmm.train(
self.frame, ["x1"], column_scalings=[1.0],
max_iterations=-20)
def test_wrong_column_scalings(self):
"""Insufficient column scalings, should error"""
with self.assertRaisesRegexp(
Exception, "columnWeights must not be null or empty"):
self.context.models.clustering.gmm.train(
self.frame, ["x1"], column_scalings=[])
def test_too_many_column_scalings(self):
"""Extra column scalings, should error"""
with self.assertRaisesRegexp(
Exception,
"Length of columnWeights and observationColumns.*"):
self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"], column_scalings=[1.0, 1.0, 1.0])
def test_missing_column_scalings(self):
"""Missing column scalings, should error"""
with self.assertRaisesRegexp(
TypeError, "train\(\) takes at least 3 arguments.*"):
self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"], k=2)
if __name__ == "__main__":
unittest.main()
|
azureplus/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/tests/select_related_regress/tests.py | 44 | from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from django.utils import six
from .models import (Building, Child, Device, Port, Item, Country, Connection,
ClientStatus, State, Client, SpecialClient, TUser, Person, Student,
Organizer, Class, Enrollment, Hen, Chick)
class SelectRelatedRegressTests(TestCase):
def test_regression_7110(self):
"""
Regression test for bug #7110.
When using select_related(), we must query the
Device and Building tables using two different aliases (each) in order to
differentiate the start and end Connection fields. The net result is that
both the "connections = ..." queries here should give the same results
without pulling in more than the absolute minimum number of tables
(history has shown that it's easy to make a mistake in the implementation
and include some unnecessary bonus joins).
"""
b = Building.objects.create(name='101')
dev1 = Device.objects.create(name="router", building=b)
dev2 = Device.objects.create(name="switch", building=b)
dev3 = Device.objects.create(name="server", building=b)
port1 = Port.objects.create(port_number='4', device=dev1)
port2 = Port.objects.create(port_number='7', device=dev2)
port3 = Port.objects.create(port_number='1', device=dev3)
c1 = Connection.objects.create(start=port1, end=port2)
c2 = Connection.objects.create(start=port2, end=port3)
connections = Connection.objects.filter(start__device__building=b, end__device__building=b).order_by('id')
self.assertEqual([(c.id, six.text_type(c.start), six.text_type(c.end)) for c in connections],
[(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])
connections = Connection.objects.filter(start__device__building=b, end__device__building=b).select_related().order_by('id')
self.assertEqual([(c.id, six.text_type(c.start), six.text_type(c.end)) for c in connections],
[(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])
# This final query should only have seven tables (port, device and building
# twice each, plus connection once). Thus, 6 joins plus the FROM table.
self.assertEqual(str(connections.query).count(" JOIN "), 6)
def test_regression_8106(self):
"""
Regression test for bug #8106.
Same sort of problem as the previous test, but this time there are
more extra tables to pull in as part of the select_related() and some
of them could potentially clash (so need to be kept separate).
"""
us = TUser.objects.create(name="std")
usp = Person.objects.create(user=us)
uo = TUser.objects.create(name="org")
uop = Person.objects.create(user=uo)
s = Student.objects.create(person=usp)
o = Organizer.objects.create(person=uop)
c = Class.objects.create(org=o)
e = Enrollment.objects.create(std=s, cls=c)
e_related = Enrollment.objects.all().select_related()[0]
self.assertEqual(e_related.std.person.user.name, "std")
self.assertEqual(e_related.cls.org.person.user.name, "org")
def test_regression_8036(self):
"""
Regression test for bug #8036
the first related model in the tests below
("state") is empty and we try to select the more remotely related
state__country. The regression here was not skipping the empty column results
for country before getting status.
"""
australia = Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
client = Client.objects.create(name='client', status=active)
self.assertEqual(client.status, active)
self.assertEqual(Client.objects.select_related()[0].status, active)
self.assertEqual(Client.objects.select_related('state')[0].status, active)
self.assertEqual(Client.objects.select_related('state', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('status')[0].status, active)
def test_multi_table_inheritance(self):
""" Exercising select_related() with multi-table model inheritance. """
c1 = Child.objects.create(name="child1", value=42)
i1 = Item.objects.create(name="item1", child=c1)
i2 = Item.objects.create(name="item2")
self.assertQuerysetEqual(
Item.objects.select_related("child").order_by("name"),
["<Item: item1>", "<Item: item2>"]
)
def test_regression_12851(self):
"""
Regression for #12851
Deferred fields are used correctly if you select_related a subset
of fields.
"""
australia = Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
wa = State.objects.create(name="Western Australia", country=australia)
c1 = Client.objects.create(name='Brian Burke', state=wa, status=active)
burke = Client.objects.select_related('state').defer('state__name').get(name='Brian Burke')
self.assertEqual(burke.name, 'Brian Burke')
self.assertEqual(burke.state.name, 'Western Australia')
# Still works if we're dealing with an inherited class
sc1 = SpecialClient.objects.create(name='Troy Buswell', state=wa, status=active, value=42)
troy = SpecialClient.objects.select_related('state').defer('state__name').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
# Still works if we defer an attribute on the inherited class
troy = SpecialClient.objects.select_related('state').defer('value', 'state__name').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
# Also works if you use only, rather than defer
troy = SpecialClient.objects.select_related('state').only('name', 'state').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
def test_null_join_promotion(self):
australia = Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
wa = State.objects.create(name="Western Australia", country=australia)
bob = Client.objects.create(name='Bob', status=active)
jack = Client.objects.create(name='Jack', status=active, state=wa)
qs = Client.objects.filter(state=wa).select_related('state')
with self.assertNumQueries(1):
self.assertEqual(list(qs), [jack])
self.assertEqual(qs[0].state, wa)
# The select_related join wasn't promoted as there was already an
# existing (even if trimmed) inner join to state.
self.assertFalse('LEFT OUTER' in str(qs.query))
qs = Client.objects.select_related('state').order_by('name')
with self.assertNumQueries(1):
self.assertEqual(list(qs), [bob, jack])
self.assertIs(qs[0].state, None)
self.assertEqual(qs[1].state, wa)
# The select_related join was promoted as there is already an
# existing join.
self.assertTrue('LEFT OUTER' in str(qs.query))
def test_regression_19870(self):
"""
Regression for #19870
"""
hen = Hen.objects.create(name='Hen')
chick = Chick.objects.create(name='Chick', mother=hen)
self.assertEqual(Chick.objects.all()[0].mother.name, 'Hen')
self.assertEqual(Chick.objects.select_related()[0].mother.name, 'Hen')
def test_regression_22508(self):
building = Building.objects.create(name='101')
device = Device.objects.create(name="router", building=building)
Port.objects.create(port_number='1', device=device)
device = Device.objects.get()
port = device.port_set.select_related('device__building').get()
with self.assertNumQueries(0):
port.device.building
|
hmgaudecker/econ-project-templates | refs/heads/master | docs/bld/example/r/r_example/.mywaflib/waflib/Task.py | 5 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2018 (ita)
"""
Tasks represent atomic operations such as processes.
"""
import os, re, sys, tempfile, traceback
from waflib import Utils, Logs, Errors
# task states
NOT_RUN = 0
"""The task was not executed yet"""
MISSING = 1
"""The task has been executed but the files have not been created"""
CRASHED = 2
"""The task execution returned a non-zero exit status"""
EXCEPTION = 3
"""An exception occurred in the task execution"""
CANCELED = 4
"""A dependency for the task is missing so it was cancelled"""
SKIPPED = 8
"""The task did not have to be executed"""
SUCCESS = 9
"""The task was successfully executed"""
ASK_LATER = -1
"""The task is not ready to be executed"""
SKIP_ME = -2
"""The task does not need to be executed"""
RUN_ME = -3
"""The task must be executed"""
CANCEL_ME = -4
"""The task cannot be executed because of a dependency problem"""
COMPILE_TEMPLATE_SHELL = '''
def f(tsk):
env = tsk.env
gen = tsk.generator
bld = gen.bld
cwdx = tsk.get_cwd()
p = env.get_flat
tsk.last_cmd = cmd = \'\'\' %s \'\'\' % s
return tsk.exec_command(cmd, cwd=cwdx, env=env.env or None)
'''
COMPILE_TEMPLATE_NOSHELL = '''
def f(tsk):
env = tsk.env
gen = tsk.generator
bld = gen.bld
cwdx = tsk.get_cwd()
def to_list(xx):
if isinstance(xx, str): return [xx]
return xx
def merge(lst1, lst2):
if lst1 and lst2:
return lst1[:-1] + [lst1[-1] + lst2[0]] + lst2[1:]
return lst1 + lst2
lst = []
%s
if '' in lst:
lst = [x for x in lst if x]
tsk.last_cmd = lst
return tsk.exec_command(lst, cwd=cwdx, env=env.env or None)
'''
classes = {}
"""
The metaclass :py:class:`waflib.Task.store_task_type` stores all class tasks
created by user scripts or Waf tools to this dict. It maps class names to class objects.
"""
class store_task_type(type):
"""
Metaclass: store the task classes into the dict pointed by the
class attribute 'register' which defaults to :py:const:`waflib.Task.classes`,
The attribute 'run_str' is compiled into a method 'run' bound to the task class.
"""
def __init__(cls, name, bases, dict):
super(store_task_type, cls).__init__(name, bases, dict)
name = cls.__name__
if name != 'evil' and name != 'Task':
if getattr(cls, 'run_str', None):
# if a string is provided, convert it to a method
(f, dvars) = compile_fun(cls.run_str, cls.shell)
cls.hcode = Utils.h_cmd(cls.run_str)
cls.orig_run_str = cls.run_str
# change the name of run_str or it is impossible to subclass with a function
cls.run_str = None
cls.run = f
cls.vars = list(set(cls.vars + dvars))
cls.vars.sort()
elif getattr(cls, 'run', None) and not 'hcode' in cls.__dict__:
# getattr(cls, 'hcode') would look in the upper classes
cls.hcode = Utils.h_cmd(cls.run)
# be creative
getattr(cls, 'register', classes)[name] = cls
evil = store_task_type('evil', (object,), {})
"Base class provided to avoid writing a metaclass, so the code can run in python 2.6 and 3.x unmodified"
class Task(evil):
"""
This class deals with the filesystem (:py:class:`waflib.Node.Node`). The method :py:class:`waflib.Task.Task.runnable_status`
uses a hash value (from :py:class:`waflib.Task.Task.signature`) which is persistent from build to build. When the value changes,
the task has to be executed. The method :py:class:`waflib.Task.Task.post_run` will assign the task signature to the output
nodes (if present).
"""
vars = []
"""ConfigSet variables that should trigger a rebuild (class attribute used for :py:meth:`waflib.Task.Task.sig_vars`)"""
always_run = False
"""Specify whether task instances must always be executed or not (class attribute)"""
shell = False
"""Execute the command with the shell (class attribute)"""
color = 'GREEN'
"""Color for the console display, see :py:const:`waflib.Logs.colors_lst`"""
ext_in = []
"""File extensions that objects of this task class may use"""
ext_out = []
"""File extensions that objects of this task class may create"""
before = []
"""List of task class names to execute before instances of this class"""
after = []
"""List of task class names to execute after instances of this class"""
hcode = Utils.SIG_NIL
"""String representing an additional hash for the class representation"""
keep_last_cmd = False
"""Whether to keep the last command executed on the instance after execution.
This may be useful for certain extensions but it can use a lot of memory.
"""
weight = 0
"""Optional weight to tune the priority for task instances.
The higher, the earlier. The weight only applies to single task objects."""
tree_weight = 0
"""Optional weight to tune the priority of task instances and whole subtrees.
The higher, the earlier."""
prio_order = 0
"""Priority order set by the scheduler on instances during the build phase.
You most likely do not need to set it.
"""
__slots__ = ('hasrun', 'generator', 'env', 'inputs', 'outputs', 'dep_nodes', 'run_after')
def __init__(self, *k, **kw):
self.hasrun = NOT_RUN
try:
self.generator = kw['generator']
except KeyError:
self.generator = self
self.env = kw['env']
""":py:class:`waflib.ConfigSet.ConfigSet` object (make sure to provide one)"""
self.inputs = []
"""List of input nodes, which represent the files used by the task instance"""
self.outputs = []
"""List of output nodes, which represent the files created by the task instance"""
self.dep_nodes = []
"""List of additional nodes to depend on"""
self.run_after = set()
"""Set of tasks that must be executed before this one"""
def __lt__(self, other):
return self.priority() > other.priority()
def __le__(self, other):
return self.priority() >= other.priority()
def __gt__(self, other):
return self.priority() < other.priority()
def __ge__(self, other):
return self.priority() <= other.priority()
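# Note: the rich comparisons above are intentionally inverted, so that a task
# with a higher priority() compares as "smaller" and therefore sorts first
# when task lists are ordered by the scheduler.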
def get_cwd(self):
"""
:return: current working directory
:rtype: :py:class:`waflib.Node.Node`
"""
bld = self.generator.bld
ret = getattr(self, 'cwd', None) or getattr(bld, 'cwd', bld.bldnode)
if isinstance(ret, str):
if os.path.isabs(ret):
ret = bld.root.make_node(ret)
else:
ret = self.generator.path.make_node(ret)
return ret
def quote_flag(self, x):
"""
Surround a process argument by quotes so that a list of arguments can be written to a file
:param x: flag
:type x: string
:return: quoted flag
:rtype: string
"""
old = x
if '\\' in x:
x = x.replace('\\', '\\\\')
if '"' in x:
x = x.replace('"', '\\"')
if old != x or ' ' in x or '\t' in x or "'" in x:
x = '"%s"' % x
return x
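# Illustrative examples (not part of waf):
# quote_flag('plain') -> 'plain'
# quote_flag('a b')   -> '"a b"'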
def priority(self):
"""
Priority of execution; the higher, the earlier
:return: the priority value
:rtype: a tuple of numeric values
"""
return (self.weight + self.prio_order, - getattr(self.generator, 'tg_idx_count', 0))
def split_argfile(self, cmd):
"""
Splits a list of process commands into the executable part and its list of arguments
:return: a tuple containing the executable first and then the rest of arguments
:rtype: tuple
"""
return ([cmd[0]], [self.quote_flag(x) for x in cmd[1:]])
def exec_command(self, cmd, **kw):
"""
Wrapper for :py:meth:`waflib.Context.Context.exec_command`.
This version set the current working directory (``build.variant_dir``),
applies PATH settings (if self.env.PATH is provided), and can run long
commands through a temporary ``@argfile``.
:param cmd: process command to execute
:type cmd: list of string (best) or string (process will use a shell)
:return: the return code
:rtype: int
Optional parameters:
#. cwd: current working directory (Node or string)
#. stdout: set to None to prevent waf from capturing the process standard output
#. stderr: set to None to prevent waf from capturing the process standard error
#. timeout: timeout value (Python 3)
"""
if not 'cwd' in kw:
kw['cwd'] = self.get_cwd()
if hasattr(self, 'timeout'):
kw['timeout'] = self.timeout
if self.env.PATH:
env = kw['env'] = dict(kw.get('env') or self.env.env or os.environ)
env['PATH'] = self.env.PATH if isinstance(self.env.PATH, str) else os.pathsep.join(self.env.PATH)
if hasattr(self, 'stdout'):
kw['stdout'] = self.stdout
if hasattr(self, 'stderr'):
kw['stderr'] = self.stderr
# workaround for command line length limit:
# http://support.microsoft.com/kb/830473
if not isinstance(cmd, str) and (len(repr(cmd)) >= 8192 if Utils.is_win32 else len(cmd) > 200000):
cmd, args = self.split_argfile(cmd)
try:
(fd, tmp) = tempfile.mkstemp()
os.write(fd, '\r\n'.join(args).encode())
os.close(fd)
if Logs.verbose:
Logs.debug('argfile: @%r -> %r', tmp, args)
return self.generator.bld.exec_command(cmd + ['@' + tmp], **kw)
finally:
try:
os.remove(tmp)
except OSError:
# anti-virus and indexers can keep files open -_-
pass
else:
return self.generator.bld.exec_command(cmd, **kw)
def process(self):
"""
Runs the task and handles errors
:return: 0 or None if everything is fine
:rtype: integer
"""
# remove the task signature immediately before it is executed
# so that the task will be executed again in case of failure
try:
del self.generator.bld.task_sigs[self.uid()]
except KeyError:
pass
try:
ret = self.run()
except Exception:
self.err_msg = traceback.format_exc()
self.hasrun = EXCEPTION
else:
if ret:
self.err_code = ret
self.hasrun = CRASHED
else:
try:
self.post_run()
except Errors.WafError:
pass
except Exception:
self.err_msg = traceback.format_exc()
self.hasrun = EXCEPTION
else:
self.hasrun = SUCCESS
if self.hasrun != SUCCESS and self.scan:
# rescan dependencies on next run
try:
del self.generator.bld.imp_sigs[self.uid()]
except KeyError:
pass
def log_display(self, bld):
"Writes the execution status on the context logger"
if self.generator.bld.progress_bar == 3:
return
s = self.display()
if s:
if bld.logger:
logger = bld.logger
else:
logger = Logs
if self.generator.bld.progress_bar == 1:
c1 = Logs.colors.cursor_off
c2 = Logs.colors.cursor_on
logger.info(s, extra={'stream': sys.stderr, 'terminator':'', 'c1': c1, 'c2' : c2})
else:
logger.info(s, extra={'terminator':'', 'c1': '', 'c2' : ''})
def display(self):
"""
Returns an execution status for the console, the progress bar, or the IDE output.
:rtype: string
"""
col1 = Logs.colors(self.color)
col2 = Logs.colors.NORMAL
master = self.generator.bld.producer
def cur():
# the current task position, computed as late as possible
return master.processed - master.ready.qsize()
if self.generator.bld.progress_bar == 1:
return self.generator.bld.progress_line(cur(), master.total, col1, col2)
if self.generator.bld.progress_bar == 2:
ela = str(self.generator.bld.timer)
try:
ins = ','.join([n.name for n in self.inputs])
except AttributeError:
ins = ''
try:
outs = ','.join([n.name for n in self.outputs])
except AttributeError:
outs = ''
return '|Total %s|Current %s|Inputs %s|Outputs %s|Time %s|\n' % (master.total, cur(), ins, outs, ela)
s = str(self)
if not s:
return None
total = master.total
n = len(str(total))
fs = '[%%%dd/%%%dd] %%s%%s%%s%%s\n' % (n, n)
kw = self.keyword()
if kw:
kw += ' '
return fs % (cur(), total, kw, col1, s, col2)
def hash_constraints(self):
"""
Identifies a task type for all the constraints relevant for the scheduler: precedence, file production
:return: a hash value
:rtype: string
"""
return (tuple(self.before), tuple(self.after), tuple(self.ext_in), tuple(self.ext_out), self.__class__.__name__, self.hcode)
def format_error(self):
"""
Returns an error message to display the build failure reasons
:rtype: string
"""
if Logs.verbose:
msg = ': %r\n%r' % (self, getattr(self, 'last_cmd', ''))
else:
msg = ' (run with -v to display more information)'
name = getattr(self.generator, 'name', '')
if getattr(self, "err_msg", None):
return self.err_msg
elif not self.hasrun:
return 'task in %r was not executed for some reason: %r' % (name, self)
elif self.hasrun == CRASHED:
try:
return ' -> task in %r failed with exit status %r%s' % (name, self.err_code, msg)
except AttributeError:
return ' -> task in %r failed%s' % (name, msg)
elif self.hasrun == MISSING:
return ' -> missing files in %r%s' % (name, msg)
elif self.hasrun == CANCELED:
return ' -> %r canceled because of missing dependencies' % name
else:
return 'invalid status for task in %r: %r' % (name, self.hasrun)
def colon(self, var1, var2):
"""
Enable scriptlet expressions of the form ${FOO_ST:FOO}
If the first variable (FOO_ST) is empty, then an empty list is returned
The results will be slightly different if FOO_ST is a list, for example::
env.FOO = ['p1', 'p2']
env.FOO_ST = '-I%s'
# ${FOO_ST:FOO} returns
['-Ip1', '-Ip2']
env.FOO_ST = ['-a', '-b']
# ${FOO_ST:FOO} returns
['-a', '-b', 'p1', '-a', '-b', 'p2']
"""
tmp = self.env[var1]
if not tmp:
return []
if isinstance(var2, str):
it = self.env[var2]
else:
it = var2
if isinstance(tmp, str):
return [tmp % x for x in it]
else:
lst = []
for y in it:
lst.extend(tmp)
lst.append(y)
return lst
def __str__(self):
"string to display to the user"
name = self.__class__.__name__
if self.outputs:
if name.endswith(('lib', 'program')) or not self.inputs:
node = self.outputs[0]
return node.path_from(node.ctx.launch_node())
if not (self.inputs or self.outputs):
return self.__class__.__name__
if len(self.inputs) == 1:
node = self.inputs[0]
return node.path_from(node.ctx.launch_node())
src_str = ' '.join([a.path_from(a.ctx.launch_node()) for a in self.inputs])
tgt_str = ' '.join([a.path_from(a.ctx.launch_node()) for a in self.outputs])
if self.outputs:
sep = ' -> '
else:
sep = ''
return '%s: %s%s%s' % (self.__class__.__name__, src_str, sep, tgt_str)
def keyword(self):
"Display keyword used to prettify the console outputs"
name = self.__class__.__name__
if name.endswith(('lib', 'program')):
return 'Linking'
if len(self.inputs) == 1 and len(self.outputs) == 1:
return 'Compiling'
if not self.inputs:
if self.outputs:
return 'Creating'
else:
return 'Running'
return 'Processing'
def __repr__(self):
"for debugging purposes"
try:
ins = ",".join([x.name for x in self.inputs])
outs = ",".join([x.name for x in self.outputs])
except AttributeError:
ins = ",".join([str(x) for x in self.inputs])
outs = ",".join([str(x) for x in self.outputs])
return "".join(['\n\t{task %r: ' % id(self), self.__class__.__name__, " ", ins, " -> ", outs, '}'])
def uid(self):
"""
Returns an identifier used to determine if tasks are up-to-date. Since the
identifier will be stored between executions, it must be:
- unique for a task: no two tasks return the same value (for a given build context)
- the same for a given task instance
By default, the node paths, the class name, and the function are used
as inputs to compute a hash.
The pointer to the object (python built-in 'id') will change between build executions,
and must be avoided in such hashes.
:return: hash value
:rtype: string
"""
try:
return self.uid_
except AttributeError:
m = Utils.md5(self.__class__.__name__)
up = m.update
for x in self.inputs + self.outputs:
up(x.abspath())
self.uid_ = m.digest()
return self.uid_
def set_inputs(self, inp):
"""
Appends the nodes to the *inputs* list
:param inp: input nodes
:type inp: node or list of nodes
"""
if isinstance(inp, list):
self.inputs += inp
else:
self.inputs.append(inp)
def set_outputs(self, out):
"""
Appends the nodes to the *outputs* list
:param out: output nodes
:type out: node or list of nodes
"""
if isinstance(out, list):
self.outputs += out
else:
self.outputs.append(out)
def set_run_after(self, task):
"""
Run this task only after the given *task*.
:param task: task
:type task: :py:class:`waflib.Task.Task`
"""
assert isinstance(task, Task)
self.run_after.add(task)
def signature(self):
"""
Task signatures are stored between build executions; they are used to track the changes
made to the input nodes (not to the outputs!). The signature hashes data from various sources:
* explicit dependencies: files listed in the inputs (list of node objects) :py:meth:`waflib.Task.Task.sig_explicit_deps`
* implicit dependencies: list of nodes returned by scanner methods (when present) :py:meth:`waflib.Task.Task.sig_implicit_deps`
* hashed data: variables/values read from task.vars/task.env :py:meth:`waflib.Task.Task.sig_vars`
If the signature is expected to give a different result, clear the cache kept in ``self.cache_sig``::
from waflib import Task
class cls(Task.Task):
def signature(self):
sig = super(cls, self).signature()
delattr(self, 'cache_sig')
return super(cls, self).signature()
:return: the signature value
:rtype: string or bytes
"""
try:
return self.cache_sig
except AttributeError:
pass
self.m = Utils.md5(self.hcode)
# explicit deps
self.sig_explicit_deps()
# env vars
self.sig_vars()
# implicit deps / scanner results
if self.scan:
try:
self.sig_implicit_deps()
except Errors.TaskRescan:
return self.signature()
ret = self.cache_sig = self.m.digest()
return ret
def runnable_status(self):
"""
Returns the Task status
:return: a task state in :py:const:`waflib.Task.RUN_ME`,
:py:const:`waflib.Task.SKIP_ME`, :py:const:`waflib.Task.CANCEL_ME` or :py:const:`waflib.Task.ASK_LATER`.
:rtype: int
"""
bld = self.generator.bld
if bld.is_install < 0:
return SKIP_ME
for t in self.run_after:
if not t.hasrun:
return ASK_LATER
elif t.hasrun < SKIPPED:
# a dependency has an error
return CANCEL_ME
# first compute the signature
try:
new_sig = self.signature()
except Errors.TaskNotReady:
return ASK_LATER
# compare the signature to a signature computed previously
key = self.uid()
try:
prev_sig = bld.task_sigs[key]
except KeyError:
Logs.debug('task: task %r must run: it was never run before or the task code changed', self)
return RUN_ME
if new_sig != prev_sig:
Logs.debug('task: task %r must run: the task signature changed', self)
return RUN_ME
# compare the signatures of the outputs
for node in self.outputs:
sig = bld.node_sigs.get(node)
if not sig:
Logs.debug('task: task %r must run: an output node has no signature', self)
return RUN_ME
if sig != key:
Logs.debug('task: task %r must run: an output node was produced by another task', self)
return RUN_ME
if not node.exists():
Logs.debug('task: task %r must run: an output node does not exist', self)
return RUN_ME
return (self.always_run and RUN_ME) or SKIP_ME
def post_run(self):
"""
Called after successful execution to record that the task has run by
updating the entry in :py:attr:`waflib.Build.BuildContext.task_sigs`.
"""
bld = self.generator.bld
for node in self.outputs:
if not node.exists():
self.hasrun = MISSING
self.err_msg = '-> missing file: %r' % node.abspath()
raise Errors.WafError(self.err_msg)
bld.node_sigs[node] = self.uid() # make sure this task produced the files in question
bld.task_sigs[self.uid()] = self.signature()
if not self.keep_last_cmd:
try:
del self.last_cmd
except AttributeError:
pass
def sig_explicit_deps(self):
"""
Used by :py:meth:`waflib.Task.Task.signature`; it hashes :py:attr:`waflib.Task.Task.inputs`
and :py:attr:`waflib.Task.Task.dep_nodes` signatures.
"""
bld = self.generator.bld
upd = self.m.update
# the inputs
for x in self.inputs + self.dep_nodes:
upd(x.get_bld_sig())
# manual dependencies, they can slow down the builds
if bld.deps_man:
additional_deps = bld.deps_man
for x in self.inputs + self.outputs:
try:
d = additional_deps[x]
except KeyError:
continue
for v in d:
try:
v = v.get_bld_sig()
except AttributeError:
if hasattr(v, '__call__'):
v = v() # dependency is a function, call it
upd(v)
def sig_deep_inputs(self):
"""
Enable rebuilds based on the task signatures of input files. Not used by default.
Example: hashes of output programs can be unchanged after being re-linked,
despite the libraries being different. This method can thus prevent stale unit test
results (waf_unit_test.py).
Hashing input file timestamps is another possibility for the implementation.
This may cause unnecessary rebuilds when input tasks are frequently executed.
Here is an implementation example::
lst = []
for node in self.inputs + self.dep_nodes:
st = os.stat(node.abspath())
lst.append(st.st_mtime)
lst.append(st.st_size)
self.m.update(Utils.h_list(lst))
The downside of the implementation is that it absolutely requires all build directory
files to be declared within the current build.
"""
bld = self.generator.bld
lst = [bld.task_sigs[bld.node_sigs[node]] for node in (self.inputs + self.dep_nodes) if node.is_bld()]
self.m.update(Utils.h_list(lst))
def sig_vars(self):
"""
Used by :py:meth:`waflib.Task.Task.signature`; it hashes :py:attr:`waflib.Task.Task.env` variables/values
"""
sig = self.generator.bld.hash_env_vars(self.env, self.vars)
self.m.update(sig)
scan = None
"""
This method, when provided, returns a tuple containing:
* a list of nodes corresponding to real files
* a list of names for files not found in path_lst
For example::
from waflib.Task import Task
class mytask(Task):
def scan(self, node):
return ([], [])
The first and second lists in the tuple are stored in :py:attr:`waflib.Build.BuildContext.node_deps` and
:py:attr:`waflib.Build.BuildContext.raw_deps` respectively.
"""
def sig_implicit_deps(self):
"""
Used by :py:meth:`waflib.Task.Task.signature`; it hashes node signatures
obtained by scanning for dependencies (:py:meth:`waflib.Task.Task.scan`).
The exception :py:class:`waflib.Errors.TaskRescan` is thrown
when a file has changed. In this case, the method :py:meth:`waflib.Task.Task.signature` is called
once again; it returns here to call :py:meth:`waflib.Task.Task.scan` and search for dependencies.
"""
bld = self.generator.bld
# get the task signatures from previous runs
key = self.uid()
prev = bld.imp_sigs.get(key, [])
# for issue #379
if prev:
try:
if prev == self.compute_sig_implicit_deps():
return prev
except Errors.TaskNotReady:
raise
except EnvironmentError:
# when a file was renamed, remove the stale nodes (headers in folders without source files)
# this will break the order calculation for headers created during the build in the source directory (should be uncommon)
# the behaviour will differ when top != out
for x in bld.node_deps.get(self.uid(), []):
if not x.is_bld() and not x.exists():
try:
del x.parent.children[x.name]
except KeyError:
pass
del bld.imp_sigs[key]
raise Errors.TaskRescan('rescan')
# no previous run or the signature of the dependencies has changed, rescan the dependencies
(bld.node_deps[key], bld.raw_deps[key]) = self.scan()
if Logs.verbose:
Logs.debug('deps: scanner for %s: %r; unresolved: %r', self, bld.node_deps[key], bld.raw_deps[key])
# recompute the signature and return it
try:
bld.imp_sigs[key] = self.compute_sig_implicit_deps()
except EnvironmentError:
for k in bld.node_deps.get(self.uid(), []):
if not k.exists():
Logs.warn('Dependency %r for %r is missing: check the task declaration and the build order!', k, self)
raise
def compute_sig_implicit_deps(self):
"""
Used by :py:meth:`waflib.Task.Task.sig_implicit_deps` for computing the actual hash of the
:py:class:`waflib.Node.Node` returned by the scanner.
:return: a hash value for the implicit dependencies
:rtype: string or bytes
"""
upd = self.m.update
self.are_implicit_nodes_ready()
# scanner returns a node that does not have a signature
# just *ignore* the error and let them figure out from the compiler output
# waf -k behaviour
for k in self.generator.bld.node_deps.get(self.uid(), []):
upd(k.get_bld_sig())
return self.m.digest()
def are_implicit_nodes_ready(self):
"""
For each node returned by the scanner, see if there is a task that creates it,
and infer the build order
This has a low performance impact on null builds (1.86s->1.66s) thanks to caching (28s->1.86s)
"""
bld = self.generator.bld
try:
cache = bld.dct_implicit_nodes
except AttributeError:
bld.dct_implicit_nodes = cache = {}
# one cache per build group
try:
dct = cache[bld.current_group]
except KeyError:
dct = cache[bld.current_group] = {}
for tsk in bld.cur_tasks:
for x in tsk.outputs:
dct[x] = tsk
modified = False
for x in bld.node_deps.get(self.uid(), []):
if x in dct:
self.run_after.add(dct[x])
modified = True
if modified:
for tsk in self.run_after:
if not tsk.hasrun:
#print "task is not ready..."
raise Errors.TaskNotReady('not ready')
if sys.hexversion > 0x3000000:
def uid(self):
try:
return self.uid_
except AttributeError:
m = Utils.md5(self.__class__.__name__.encode('latin-1', 'xmlcharrefreplace'))
up = m.update
for x in self.inputs + self.outputs:
up(x.abspath().encode('latin-1', 'xmlcharrefreplace'))
self.uid_ = m.digest()
return self.uid_
uid.__doc__ = Task.uid.__doc__
Task.uid = uid
def is_before(t1, t2):
"""
Returns a non-zero value if task t1 is to be executed before task t2::
t1.ext_out = '.h'
t2.ext_in = '.h'
t2.after = ['t1']
t1.before = ['t2']
waflib.Task.is_before(t1, t2) # True
:param t1: Task object
:type t1: :py:class:`waflib.Task.Task`
:param t2: Task object
:type t2: :py:class:`waflib.Task.Task`
"""
to_list = Utils.to_list
for k in to_list(t2.ext_in):
if k in to_list(t1.ext_out):
return 1
if t1.__class__.__name__ in to_list(t2.after):
return 1
if t2.__class__.__name__ in to_list(t1.before):
return 1
return 0
def set_file_constraints(tasks):
"""
Updates the ``run_after`` attribute of all tasks based on the task inputs and outputs
:param tasks: tasks
:type tasks: list of :py:class:`waflib.Task.Task`
"""
ins = Utils.defaultdict(set)
outs = Utils.defaultdict(set)
for x in tasks:
for a in x.inputs:
ins[a].add(x)
for a in x.dep_nodes:
ins[a].add(x)
for a in x.outputs:
outs[a].add(x)
links = set(ins.keys()).intersection(outs.keys())
for k in links:
for a in ins[k]:
a.run_after.update(outs[k])
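# Illustrative sketch (not in the upstream module): if task a produces 'x.o'
# and task b lists that node among its inputs, then after
# set_file_constraints([a, b]) the scheduler finds a in b.run_after and only
# runs b once a has finished.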
class TaskGroup(object):
"""
Wrap nxm task order constraints into a single object
to prevent the creation of large list/set objects
This is an optimization
"""
def __init__(self, prev, next):
self.prev = prev
self.next = next
self.done = False
def get_hasrun(self):
for k in self.prev:
if not k.hasrun:
return NOT_RUN
return SUCCESS
hasrun = property(get_hasrun, None)
def set_precedence_constraints(tasks):
"""
Updates the ``run_after`` attribute of all tasks based on the after/before/ext_out/ext_in attributes
:param tasks: tasks
:type tasks: list of :py:class:`waflib.Task.Task`
"""
cstr_groups = Utils.defaultdict(list)
for x in tasks:
h = x.hash_constraints()
cstr_groups[h].append(x)
keys = list(cstr_groups.keys())
maxi = len(keys)
# this list should be short
for i in range(maxi):
t1 = cstr_groups[keys[i]][0]
for j in range(i + 1, maxi):
t2 = cstr_groups[keys[j]][0]
# add the constraints based on the comparisons
if is_before(t1, t2):
a = i
b = j
elif is_before(t2, t1):
a = j
b = i
else:
continue
a = cstr_groups[keys[a]]
b = cstr_groups[keys[b]]
if len(a) < 2 or len(b) < 2:
for x in b:
x.run_after.update(a)
else:
group = TaskGroup(set(a), set(b))
for x in b:
x.run_after.add(group)
def funex(c):
"""
Compiles a scriptlet expression into a Python function
:param c: function to compile
:type c: string
:return: the function 'f' declared in the input string
:rtype: function
"""
dc = {}
exec(c, dc)
return dc['f']
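# Illustrative sketch (not in the upstream module): funex() expects the source
# of a function literally named 'f', so a hypothetical scriptlet compiles and
# runs like this:
#
#   run = funex("def f(tsk):\n\treturn 0")
#   assert run(None) == 0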
re_cond = re.compile(r'(?P<var>\w+)|(?P<or>\|)|(?P<and>&)')
re_novar = re.compile(r'^(SRC|TGT)\W+.*?$')
reg_act = re.compile(r'(?P<backslash>\\)|(?P<dollar>\$\$)|(?P<subst>\$\{(?P<var>\w+)(?P<code>.*?)\})', re.M)
def compile_fun_shell(line):
"""
Creates a compiled function to execute a process through a sub-shell
"""
extr = []
def repl(match):
g = match.group
if g('dollar'):
return "$"
elif g('backslash'):
return '\\\\'
elif g('subst'):
extr.append((g('var'), g('code')))
return "%s"
return None
line = reg_act.sub(repl, line) or line
dvars = []
def replc(m):
# performs substitutions and populates dvars
if m.group('and'):
return ' and '
elif m.group('or'):
return ' or '
else:
x = m.group('var')
if x not in dvars:
dvars.append(x)
return 'env[%r]' % x
parm = []
app = parm.append
for (var, meth) in extr:
if var == 'SRC':
if meth:
app('tsk.inputs%s' % meth)
else:
app('" ".join([a.path_from(cwdx) for a in tsk.inputs])')
elif var == 'TGT':
if meth:
app('tsk.outputs%s' % meth)
else:
app('" ".join([a.path_from(cwdx) for a in tsk.outputs])')
elif meth:
if meth.startswith(':'):
if var not in dvars:
dvars.append(var)
m = meth[1:]
if m == 'SRC':
m = '[a.path_from(cwdx) for a in tsk.inputs]'
elif m == 'TGT':
m = '[a.path_from(cwdx) for a in tsk.outputs]'
				elif re_novar.match(m):
					# distinguish ${FOO:SRC...} from ${FOO:TGT...}; the second
					# branch of the original elif chain was unreachable
					if m.startswith('SRC'):
						m = '[tsk.inputs%s]' % m[3:]
					else:
						m = '[tsk.outputs%s]' % m[3:]
elif m[:3] not in ('tsk', 'gen', 'bld'):
dvars.append(meth[1:])
m = '%r' % m
app('" ".join(tsk.colon(%r, %s))' % (var, m))
elif meth.startswith('?'):
# In A?B|C output env.A if one of env.B or env.C is non-empty
expr = re_cond.sub(replc, meth[1:])
app('p(%r) if (%s) else ""' % (var, expr))
else:
app('%s%s' % (var, meth))
else:
if var not in dvars:
dvars.append(var)
app("p('%s')" % var)
if parm:
parm = "%% (%s) " % (',\n\t\t'.join(parm))
else:
parm = ''
c = COMPILE_TEMPLATE_SHELL % (line, parm)
Logs.debug('action: %s', c.strip().splitlines())
return (funex(c), dvars)
reg_act_noshell = re.compile(r"(?P<space>\s+)|(?P<subst>\$\{(?P<var>\w+)(?P<code>.*?)\})|(?P<text>([^$ \t\n\r\f\v]|\$\$)+)", re.M)
def compile_fun_noshell(line):
"""
Creates a compiled function to execute a process without a sub-shell
"""
buf = []
dvars = []
merge = False
app = buf.append
def replc(m):
# performs substitutions and populates dvars
if m.group('and'):
return ' and '
elif m.group('or'):
return ' or '
else:
x = m.group('var')
if x not in dvars:
dvars.append(x)
return 'env[%r]' % x
for m in reg_act_noshell.finditer(line):
if m.group('space'):
merge = False
continue
elif m.group('text'):
app('[%r]' % m.group('text').replace('$$', '$'))
elif m.group('subst'):
var = m.group('var')
code = m.group('code')
if var == 'SRC':
if code:
app('[tsk.inputs%s]' % code)
else:
app('[a.path_from(cwdx) for a in tsk.inputs]')
elif var == 'TGT':
if code:
app('[tsk.outputs%s]' % code)
else:
app('[a.path_from(cwdx) for a in tsk.outputs]')
elif code:
if code.startswith(':'):
# a composed variable ${FOO:OUT}
if not var in dvars:
dvars.append(var)
m = code[1:]
if m == 'SRC':
m = '[a.path_from(cwdx) for a in tsk.inputs]'
elif m == 'TGT':
m = '[a.path_from(cwdx) for a in tsk.outputs]'
					elif re_novar.match(m):
						# distinguish ${FOO:SRC...} from ${FOO:TGT...}; the second
						# branch of the original elif chain was unreachable
						if m.startswith('SRC'):
							m = '[tsk.inputs%s]' % m[3:]
						else:
							m = '[tsk.outputs%s]' % m[3:]
elif m[:3] not in ('tsk', 'gen', 'bld'):
dvars.append(m)
m = '%r' % m
app('tsk.colon(%r, %s)' % (var, m))
elif code.startswith('?'):
# In A?B|C output env.A if one of env.B or env.C is non-empty
expr = re_cond.sub(replc, code[1:])
app('to_list(env[%r] if (%s) else [])' % (var, expr))
else:
# plain code such as ${tsk.inputs[0].abspath()}
app('gen.to_list(%s%s)' % (var, code))
else:
				# a plain variable such as ${AR}
app('to_list(env[%r])' % var)
if not var in dvars:
dvars.append(var)
if merge:
tmp = 'merge(%s, %s)' % (buf[-2], buf[-1])
del buf[-1]
buf[-1] = tmp
merge = True # next turn
buf = ['lst.extend(%s)' % x for x in buf]
fun = COMPILE_TEMPLATE_NOSHELL % "\n\t".join(buf)
Logs.debug('action: %s', fun.strip().splitlines())
return (funex(fun), dvars)
def compile_fun(line, shell=False):
"""
Parses a string expression such as '${CC} ${SRC} -o ${TGT}' and returns a pair containing:
* The function created (compiled) for use as :py:meth:`waflib.Task.Task.run`
* The list of variables that must cause rebuilds when *env* data is modified
for example::
from waflib.Task import compile_fun
		compile_fun('${CXX} -o ${TGT[0]} ${SRC} -I ${SRC[0].parent.bldpath()}')
def build(bld):
bld(source='wscript', rule='echo "foo\\${SRC[0].name}\\bar"')
The env variables (CXX, ..) on the task must not hold dicts so as to preserve a consistent order.
The reserved keywords ``TGT`` and ``SRC`` represent the task input and output nodes
"""
if isinstance(line, str):
if line.find('<') > 0 or line.find('>') > 0 or line.find('&&') > 0:
shell = True
else:
dvars_lst = []
funs_lst = []
for x in line:
if isinstance(x, str):
fun, dvars = compile_fun(x, shell)
dvars_lst += dvars
funs_lst.append(fun)
else:
# assume a function to let through
funs_lst.append(x)
def composed_fun(task):
for x in funs_lst:
ret = x(task)
if ret:
return ret
return None
return composed_fun, dvars_lst
if shell:
return compile_fun_shell(line)
else:
return compile_fun_noshell(line)
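# Illustrative sketch (not in the upstream module): the rule string below has
# no shell metacharacters, so compile_fun() takes the noshell path; it returns
# the run function plus the env variables that must trigger rebuilds:
#
#   fun, dvars = compile_fun('${CC} ${SRC} -o ${TGT}')
#   # dvars == ['CC']; SRC and TGT are special-cased and never listed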
def task_factory(name, func=None, vars=None, color='GREEN', ext_in=[], ext_out=[], before=[], after=[], shell=False, scan=None):
"""
Returns a new task subclass with the function ``run`` compiled from the line given.
:param func: method run
:type func: string or function
:param vars: list of variables to hash
:type vars: list of string
:param color: color to use
:type color: string
:param shell: when *func* is a string, enable/disable the use of the shell
:type shell: bool
:param scan: method scan
:type scan: function
:rtype: :py:class:`waflib.Task.Task`
"""
params = {
'vars': vars or [], # function arguments are static, and this one may be modified by the class
'color': color,
'name': name,
'shell': shell,
'scan': scan,
}
if isinstance(func, str) or isinstance(func, tuple):
params['run_str'] = func
else:
params['run'] = func
cls = type(Task)(name, (Task,), params)
classes[name] = cls
if ext_in:
cls.ext_in = Utils.to_list(ext_in)
if ext_out:
cls.ext_out = Utils.to_list(ext_out)
if before:
cls.before = Utils.to_list(before)
if after:
cls.after = Utils.to_list(after)
return cls
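# Illustrative sketch (not in the upstream module): 'copy' is a hypothetical
# task name and COPY would have to be defined in the task env:
#
#   cls = task_factory('copy', '${COPY} ${SRC} ${TGT}', vars=['COPY'], color='BLUE')
#   assert classes['copy'] is cls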
def deep_inputs(cls):
"""
Task class decorator to enable rebuilds on input files task signatures
"""
def sig_explicit_deps(self):
Task.sig_explicit_deps(self)
Task.sig_deep_inputs(self)
cls.sig_explicit_deps = sig_explicit_deps
return cls
TaskBase = Task
"Provided for compatibility reasons, TaskBase should not be used"
|
tchellomello/home-assistant | refs/heads/dev | tests/components/homekit/test_type_sensors.py | 9 | """Test different accessory types: Sensors."""
from homeassistant.components.homekit import get_accessory
from homeassistant.components.homekit.const import (
DEVICE_CLASS_MOTION,
PROP_CELSIUS,
THRESHOLD_CO,
THRESHOLD_CO2,
)
from homeassistant.components.homekit.type_sensors import (
BINARY_SENSOR_SERVICE_MAP,
AirQualitySensor,
BinarySensor,
CarbonDioxideSensor,
CarbonMonoxideSensor,
HumiditySensor,
LightSensor,
TemperatureSensor,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_UNIT_OF_MEASUREMENT,
EVENT_HOMEASSISTANT_START,
PERCENTAGE,
STATE_HOME,
STATE_NOT_HOME,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import CoreState
from homeassistant.helpers import entity_registry
async def test_temperature(hass, hk_driver):
"""Test if accessory is updated after state change."""
entity_id = "sensor.temperature"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = TemperatureSensor(hass, hk_driver, "Temperature", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 10 # Sensor
assert acc.char_temp.value == 0.0
for key, value in PROP_CELSIUS.items():
assert acc.char_temp.properties[key] == value
hass.states.async_set(
entity_id, STATE_UNKNOWN, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
await hass.async_block_till_done()
assert acc.char_temp.value == 0.0
hass.states.async_set(entity_id, "20", {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS})
await hass.async_block_till_done()
assert acc.char_temp.value == 20
hass.states.async_set(
entity_id, "75.2", {ATTR_UNIT_OF_MEASUREMENT: TEMP_FAHRENHEIT}
)
await hass.async_block_till_done()
assert acc.char_temp.value == 24
async def test_humidity(hass, hk_driver):
"""Test if accessory is updated after state change."""
entity_id = "sensor.humidity"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = HumiditySensor(hass, hk_driver, "Humidity", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 10 # Sensor
assert acc.char_humidity.value == 0
hass.states.async_set(entity_id, STATE_UNKNOWN)
await hass.async_block_till_done()
assert acc.char_humidity.value == 0
hass.states.async_set(entity_id, "20")
await hass.async_block_till_done()
assert acc.char_humidity.value == 20
async def test_air_quality(hass, hk_driver):
"""Test if accessory is updated after state change."""
entity_id = "sensor.air_quality"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = AirQualitySensor(hass, hk_driver, "Air Quality", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 10 # Sensor
assert acc.char_density.value == 0
assert acc.char_quality.value == 0
hass.states.async_set(entity_id, STATE_UNKNOWN)
await hass.async_block_till_done()
assert acc.char_density.value == 0
assert acc.char_quality.value == 0
hass.states.async_set(entity_id, "34")
await hass.async_block_till_done()
assert acc.char_density.value == 34
assert acc.char_quality.value == 1
hass.states.async_set(entity_id, "200")
await hass.async_block_till_done()
assert acc.char_density.value == 200
assert acc.char_quality.value == 5
async def test_co(hass, hk_driver):
"""Test if accessory is updated after state change."""
entity_id = "sensor.co"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = CarbonMonoxideSensor(hass, hk_driver, "CO", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 10 # Sensor
assert acc.char_level.value == 0
assert acc.char_peak.value == 0
assert acc.char_detected.value == 0
hass.states.async_set(entity_id, STATE_UNKNOWN)
await hass.async_block_till_done()
assert acc.char_level.value == 0
assert acc.char_peak.value == 0
assert acc.char_detected.value == 0
value = 32
assert value > THRESHOLD_CO
hass.states.async_set(entity_id, str(value))
await hass.async_block_till_done()
assert acc.char_level.value == 32
assert acc.char_peak.value == 32
assert acc.char_detected.value == 1
value = 10
assert value < THRESHOLD_CO
hass.states.async_set(entity_id, str(value))
await hass.async_block_till_done()
assert acc.char_level.value == 10
assert acc.char_peak.value == 32
assert acc.char_detected.value == 0
async def test_co2(hass, hk_driver):
"""Test if accessory is updated after state change."""
entity_id = "sensor.co2"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = CarbonDioxideSensor(hass, hk_driver, "CO2", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 10 # Sensor
assert acc.char_level.value == 0
assert acc.char_peak.value == 0
assert acc.char_detected.value == 0
hass.states.async_set(entity_id, STATE_UNKNOWN)
await hass.async_block_till_done()
assert acc.char_level.value == 0
assert acc.char_peak.value == 0
assert acc.char_detected.value == 0
value = 1100
assert value > THRESHOLD_CO2
hass.states.async_set(entity_id, str(value))
await hass.async_block_till_done()
assert acc.char_level.value == 1100
assert acc.char_peak.value == 1100
assert acc.char_detected.value == 1
value = 800
assert value < THRESHOLD_CO2
hass.states.async_set(entity_id, str(value))
await hass.async_block_till_done()
assert acc.char_level.value == 800
assert acc.char_peak.value == 1100
assert acc.char_detected.value == 0
async def test_light(hass, hk_driver):
"""Test if accessory is updated after state change."""
entity_id = "sensor.light"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = LightSensor(hass, hk_driver, "Light", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 10 # Sensor
assert acc.char_light.value == 0.0001
hass.states.async_set(entity_id, STATE_UNKNOWN)
await hass.async_block_till_done()
assert acc.char_light.value == 0.0001
hass.states.async_set(entity_id, "300")
await hass.async_block_till_done()
assert acc.char_light.value == 300
async def test_binary(hass, hk_driver):
"""Test if accessory is updated after state change."""
entity_id = "binary_sensor.opening"
hass.states.async_set(entity_id, STATE_UNKNOWN, {ATTR_DEVICE_CLASS: "opening"})
await hass.async_block_till_done()
acc = BinarySensor(hass, hk_driver, "Window Opening", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 10 # Sensor
assert acc.char_detected.value == 0
hass.states.async_set(entity_id, STATE_ON, {ATTR_DEVICE_CLASS: "opening"})
await hass.async_block_till_done()
assert acc.char_detected.value == 1
hass.states.async_set(entity_id, STATE_OFF, {ATTR_DEVICE_CLASS: "opening"})
await hass.async_block_till_done()
assert acc.char_detected.value == 0
hass.states.async_set(entity_id, STATE_HOME, {ATTR_DEVICE_CLASS: "opening"})
await hass.async_block_till_done()
assert acc.char_detected.value == 1
hass.states.async_set(entity_id, STATE_NOT_HOME, {ATTR_DEVICE_CLASS: "opening"})
await hass.async_block_till_done()
assert acc.char_detected.value == 0
hass.states.async_remove(entity_id)
await hass.async_block_till_done()
assert acc.char_detected.value == 0
async def test_motion_uses_bool(hass, hk_driver):
"""Test if accessory is updated after state change."""
entity_id = "binary_sensor.motion"
hass.states.async_set(
entity_id, STATE_UNKNOWN, {ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION}
)
await hass.async_block_till_done()
acc = BinarySensor(hass, hk_driver, "Motion Sensor", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 10 # Sensor
assert acc.char_detected.value is False
hass.states.async_set(entity_id, STATE_ON, {ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION})
await hass.async_block_till_done()
assert acc.char_detected.value is True
hass.states.async_set(
entity_id, STATE_OFF, {ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION}
)
await hass.async_block_till_done()
assert acc.char_detected.value is False
hass.states.async_set(
entity_id, STATE_HOME, {ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION}
)
await hass.async_block_till_done()
assert acc.char_detected.value is True
hass.states.async_set(
entity_id, STATE_NOT_HOME, {ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION}
)
await hass.async_block_till_done()
assert acc.char_detected.value is False
hass.states.async_remove(entity_id)
await hass.async_block_till_done()
assert acc.char_detected.value is False
async def test_binary_device_classes(hass, hk_driver):
"""Test if services and characteristics are assigned correctly."""
entity_id = "binary_sensor.demo"
for device_class, (service, char, _) in BINARY_SENSOR_SERVICE_MAP.items():
hass.states.async_set(entity_id, STATE_OFF, {ATTR_DEVICE_CLASS: device_class})
await hass.async_block_till_done()
acc = BinarySensor(hass, hk_driver, "Binary Sensor", entity_id, 2, None)
assert acc.get_service(service).display_name == service
assert acc.char_detected.display_name == char
async def test_sensor_restore(hass, hk_driver, events):
"""Test setting up an entity from state in the event registry."""
hass.state = CoreState.not_running
registry = await entity_registry.async_get_registry(hass)
registry.async_get_or_create(
"sensor",
"generic",
"1234",
suggested_object_id="temperature",
device_class="temperature",
)
registry.async_get_or_create(
"sensor",
"generic",
"12345",
suggested_object_id="humidity",
device_class="humidity",
unit_of_measurement=PERCENTAGE,
)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START, {})
await hass.async_block_till_done()
acc = get_accessory(hass, hk_driver, hass.states.get("sensor.temperature"), 2, {})
assert acc.category == 10
acc = get_accessory(hass, hk_driver, hass.states.get("sensor.humidity"), 2, {})
assert acc.category == 10
|
seanwestfall/django | refs/heads/master | django/contrib/sites/migrations/0002_alter_domain_unique.py | 170 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.sites.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='site',
name='domain',
field=models.CharField(max_length=100, unique=True, validators=[django.contrib.sites.models._simple_domain_name_validator], verbose_name='domain name'),
),
]
|
CopeX/odoo | refs/heads/8.0 | addons/mrp/company.py | 381 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
class company(osv.osv):
_inherit = 'res.company'
_columns = {
'manufacturing_lead': fields.float('Manufacturing Lead Time', required=True,
help="Security days for each manufacturing operation."),
}
_defaults = {
'manufacturing_lead': lambda *a: 1.0,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
harshilasu/LinkurApp | refs/heads/master | y/google-cloud-sdk/.install/.backup/lib/jinja2/sandbox.py | 637 | # -*- coding: utf-8 -*-
"""
jinja2.sandbox
~~~~~~~~~~~~~~
Adds a sandbox layer to Jinja as it was the default behavior in the old
Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
default behavior is easier to use.
The behavior can be changed by subclassing the environment.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
import operator
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2._compat import string_types, function_type, method_type, \
traceback_type, code_type, frame_type, generator_type, PY2
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: attributes of function objects that are considered unsafe.
UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
'func_defaults', 'func_globals'])
#: unsafe method attributes. function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])
# On versions > python 2 the special attributes on functions are gone,
# but they remain on methods and generators for whatever reason.
if not PY2:
UNSAFE_FUNCTION_ATTRIBUTES = set()
import warnings
# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
module='jinja2.sandbox')
from collections import deque
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)
# on python 2.x we can register the user collection types
try:
from UserDict import UserDict, DictMixin
from UserList import UserList
_mutable_mapping_types += (UserDict, DictMixin)
    _mutable_sequence_types += (UserList,)
except ImportError:
pass
# if sets is still available, register the mutable set from there as well
try:
from sets import Set
_mutable_set_types += (Set,)
except ImportError:
pass
#: register Python 2.6 abstract base classes
try:
from collections import MutableSet, MutableMapping, MutableSequence
_mutable_set_types += (MutableSet,)
_mutable_mapping_types += (MutableMapping,)
_mutable_sequence_types += (MutableSequence,)
except ImportError:
pass
_mutable_spec = (
(_mutable_set_types, frozenset([
'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
'symmetric_difference_update', 'update'
])),
(_mutable_mapping_types, frozenset([
'clear', 'pop', 'popitem', 'setdefault', 'update'
])),
(_mutable_sequence_types, frozenset([
'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
])),
(deque, frozenset([
'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
'popleft', 'remove', 'rotate'
]))
)
def safe_range(*args):
"""A range that can't generate ranges with a length of more than
MAX_RANGE items.
"""
rng = range(*args)
if len(rng) > MAX_RANGE:
raise OverflowError('range too big, maximum size for range is %d' %
MAX_RANGE)
return rng
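# Illustrative sketch (not in the upstream module):
#
#   safe_range(10)       # works like range(10)
#   safe_range(10 ** 9)  # raises OverflowError, refusing oversized ranges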
def unsafe(f):
"""Marks a function or method as unsafe.
::
@unsafe
def delete(self):
pass
"""
f.unsafe_callable = True
return f
def is_internal_attribute(obj, attr):
"""Test if the attribute given is an internal python attribute. For
example this function returns `True` for the `func_code` attribute of
python objects. This is useful if the environment method
:meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
>>> from jinja2.sandbox import is_internal_attribute
>>> is_internal_attribute(lambda: None, "func_code")
True
>>> is_internal_attribute((lambda x:x).func_code, 'co_code')
True
>>> is_internal_attribute(str, "upper")
False
"""
if isinstance(obj, function_type):
if attr in UNSAFE_FUNCTION_ATTRIBUTES:
return True
elif isinstance(obj, method_type):
if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
attr in UNSAFE_METHOD_ATTRIBUTES:
return True
elif isinstance(obj, type):
if attr == 'mro':
return True
elif isinstance(obj, (code_type, traceback_type, frame_type)):
return True
elif isinstance(obj, generator_type):
if attr in UNSAFE_GENERATOR_ATTRIBUTES:
return True
return attr.startswith('__')
def modifies_known_mutable(obj, attr):
"""This function checks if an attribute on a builtin mutable object
(list, dict, set or deque) would modify it if called. It also supports
the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
with Python 2.6 onwards the abstract base classes `MutableSet`,
`MutableMapping`, and `MutableSequence`.
>>> modifies_known_mutable({}, "clear")
True
>>> modifies_known_mutable({}, "keys")
False
>>> modifies_known_mutable([], "append")
True
>>> modifies_known_mutable([], "index")
False
If called with an unsupported object (such as unicode) `False` is
returned.
>>> modifies_known_mutable("foo", "upper")
False
"""
for typespec, unsafe in _mutable_spec:
if isinstance(obj, typespec):
return attr in unsafe
return False
class SandboxedEnvironment(Environment):
"""The sandboxed environment. It works like the regular environment but
tells the compiler to generate sandboxed code. Additionally subclasses of
this environment may override the methods that tell the runtime what
attributes or functions are safe to access.
If the template tries to access insecure code a :exc:`SecurityError` is
    raised.  However, other exceptions may occur during the rendering, so
    the caller has to ensure that all exceptions are caught.
"""
sandboxed = True
#: default callback table for the binary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`binop_table`
default_binop_table = {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod
}
#: default callback table for the unary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`unop_table`
default_unop_table = {
'+': operator.pos,
'-': operator.neg
}
#: a set of binary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_binop` method that will perform the operator. The default
#: operator callback is specified by :attr:`binop_table`.
#:
#: The following binary operators are interceptable:
#: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
#:
    #: The default operation from the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_binops = frozenset()
#: a set of unary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_unop` method that will perform the operator. The default
#: operator callback is specified by :attr:`unop_table`.
#:
#: The following unary operators are interceptable: ``+``, ``-``
#:
    #: The default operation from the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_unops = frozenset()
def intercept_unop(self, operator):
"""Called during template compilation with the name of a unary
operator to check if it should be intercepted at runtime. If this
        method returns `True`, :meth:`call_unop` is executed for this unary
operator. The default implementation of :meth:`call_unop` will use
the :attr:`unop_table` dictionary to perform the operator with the
same logic as the builtin one.
The following unary operators are interceptable: ``+`` and ``-``
Intercepted calls are always slower than the native operator call,
so make sure only to intercept the ones you are interested in.
.. versionadded:: 2.6
"""
return False
def __init__(self, *args, **kwargs):
Environment.__init__(self, *args, **kwargs)
self.globals['range'] = safe_range
self.binop_table = self.default_binop_table.copy()
self.unop_table = self.default_unop_table.copy()
def is_safe_attribute(self, obj, attr, value):
"""The sandboxed environment will call this method to check if the
attribute of an object is safe to access. Per default all attributes
starting with an underscore are considered private as well as the
special attributes of internal python objects as returned by the
:func:`is_internal_attribute` function.
"""
return not (attr.startswith('_') or is_internal_attribute(obj, attr))
def is_safe_callable(self, obj):
"""Check if an object is safely callable. Per default a function is
considered safe unless the `unsafe_callable` attribute exists and is
True. Override this method to alter the behavior, but this won't
affect the `unsafe` decorator from this module.
"""
return not (getattr(obj, 'unsafe_callable', False) or
getattr(obj, 'alters_data', False))
def call_binop(self, context, operator, left, right):
"""For intercepted binary operator calls (:meth:`intercepted_binops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.binop_table[operator](left, right)
def call_unop(self, context, operator, arg):
"""For intercepted unary operator calls (:meth:`intercepted_unops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.unop_table[operator](arg)
def getitem(self, obj, argument):
"""Subscribe an object from sandboxed code."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, string_types):
try:
attr = str(argument)
except Exception:
pass
else:
try:
value = getattr(obj, attr)
except AttributeError:
pass
else:
if self.is_safe_attribute(obj, argument, value):
return value
return self.unsafe_undefined(obj, argument)
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Subscribe an object from sandboxed code and prefer the
attribute. The attribute passed *must* be a bytestring.
"""
try:
value = getattr(obj, attribute)
except AttributeError:
try:
return obj[attribute]
except (TypeError, LookupError):
pass
else:
if self.is_safe_attribute(obj, attribute, value):
return value
return self.unsafe_undefined(obj, attribute)
return self.undefined(obj=obj, name=attribute)
def unsafe_undefined(self, obj, attribute):
"""Return an undefined object for unsafe attributes."""
return self.undefined('access to attribute %r of %r '
'object is unsafe.' % (
attribute,
obj.__class__.__name__
), name=attribute, obj=obj, exc=SecurityError)
def call(__self, __context, __obj, *args, **kwargs):
"""Call an object from sandboxed code."""
# the double prefixes are to avoid double keyword argument
# errors when proxying the call.
if not __self.is_safe_callable(__obj):
raise SecurityError('%r is not safely callable' % (__obj,))
return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
"""Works exactly like the regular `SandboxedEnvironment` but does not
permit modifications on the builtin mutable objects `list`, `set`, and
`dict` by using the :func:`modifies_known_mutable` function.
"""
def is_safe_attribute(self, obj, attr, value):
if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
return False
return not modifies_known_mutable(obj, attr)
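# Illustrative sketch (not in the upstream module): untrusted templates go
# through the sandbox; probing internal attributes raises SecurityError
# instead of exposing the type hierarchy:
#
#   env = SandboxedEnvironment()
#   env.from_string('{{ 2 + 2 }}').render()                 # -> '4'
#   env.from_string("{{ ''.__class__.__mro__ }}").render()  # SecurityError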
|
rossumai/keras-multi-gpu | refs/heads/master | setup.py | 1 | from setuptools import setup
setup(name='keras_tf_multigpu',
version='0.1',
description='Multi-GPU data-parallel training in Keras/TensorFlow',
url='https://github.com/rossumai/keras-multi-gpu',
author='Bohumir Zamecnik',
author_email='bohumir.zamecnik@gmail.com',
license='MIT',
packages=['keras_tf_multigpu'],
zip_safe=False,
install_requires=[
'h5py',
'Keras>=2.0.8',
'numpy',
'tensorflow-gpu>=1.3',
],
setup_requires=['setuptools-markdown'],
long_description_markdown_filename='README.md',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Operating System :: POSIX :: Linux',
])
|
turon/openthread | refs/heads/master | tools/harness-automation/cases/leader_5_5_7.py | 1 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from autothreadharness.harness_case import HarnessCase
import unittest
class Leader_5_5_7(HarnessCase):
role = HarnessCase.ROLE_LEADER
case = '5 5 7'
golden_devices_required = 3
def on_dialog(self, dialog, title):
if title.startswith('Reset DUT'):
self.dut.stop()
return False
elif title.startswith('Rejoin Now'):
self.dut.start()
return False
if __name__ == '__main__':
unittest.main()
|
DamCB/tyssue | refs/heads/master | tyssue/behaviors/monolayer/actions.py | 2 | """
Basic monolayer event module
============================
"""
import logging
logger = logging.getLogger(__name__)
from ..sheet.actions import merge_vertices, detach_vertices  # noqa
def grow(monolayer, cell, grow_rate):
"""Multiplies the equilibrium volume of face by a factor (1+shrink_rate)."""
factor = 1 + grow_rate
monolayer.cell_df.loc[cell, "prefered_vol"] *= factor
monolayer.cell_df.loc[cell, "prefered_area"] *= factor ** (2 / 3)
def shrink(monolayer, cell, shrink_rate):
"""Divides the equilibrium volume of the cell
by a factor (1+shrink_rate) and its equilibrium area
by (1+shrink_rate)^2/3
"""
factor = 1 + shrink_rate
monolayer.cell_df.loc[cell, "prefered_vol"] /= factor
monolayer.cell_df.loc[cell, "prefered_area"] /= factor ** (2 / 3)
def contract(
monolayer,
face,
contractile_increase,
multiply=False,
contraction_column="contractility",
):
"""
Contract the face by increasing the 'contractility' parameter
by contractile_increase
"""
if multiply:
monolayer.face_df.loc[face, contraction_column] *= contractile_increase
else:
monolayer.face_df.loc[face, contraction_column] += contractile_increase
def relax(monolayer, face, contractile_decrease, contraction_column="contractility"):
initial_contractility = 1.12
new_contractility = (
monolayer.face_df.loc[face, contraction_column] / contractile_decrease
)
if new_contractility >= (initial_contractility / 2):
monolayer.face_df.loc[face, contraction_column] = new_contractility
monolayer.face_df.loc[face, "prefered_area"] *= contractile_decrease
def contract_apical_face(
monolayer,
face_id,
contractile_increase=1.0,
critical_area=1e-2,
max_contractility=50,
multiply=False,
contraction_column="contractility",
):
"""Single step contraction event for apical face only."""
face = monolayer.idx_lookup(face_id, "face")
if face is None:
return
if (
(monolayer.face_df.loc[face, "segment"] != "apical")
or (monolayer.face_df.loc[face, "area"] < critical_area)
or (monolayer.face_df.loc[face, contraction_column] > max_contractility)
):
return
contract(monolayer, face, contractile_increase, multiply, contraction_column)
def ab_pull(monolayer, cell, radial_tension, distributed=False):
"""Adds a linear tension to the apical-to-basal edges
of a cell
"""
cell_edges = monolayer.edge_df[monolayer.edge_df["cell"] == cell]
lateral_edges = cell_edges[cell_edges["segment"] == "lateral"]
srce_segment = monolayer.upcast_srce(monolayer.vert_df["segment"]).loc[
lateral_edges.index
]
trgt_segment = monolayer.upcast_trgt(monolayer.vert_df["segment"]).loc[
lateral_edges.index
]
ab_edges = lateral_edges[
(srce_segment == "apical") & (trgt_segment == "basal")
].index
ba_edges = lateral_edges[
(trgt_segment == "apical") & (srce_segment == "basal")
].index
if distributed:
new_tension = radial_tension / (len(ab_edges) + len(ba_edges))
else:
new_tension = radial_tension
monolayer.edge_df.loc[ab_edges, "line_tension"] += new_tension
monolayer.edge_df.loc[ba_edges, "line_tension"] += new_tension
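# Illustrative sketch (not in the original module): pulling radially on cell 3
# with the total tension shared across its apical-to-basal edges:
#
#   ab_pull(monolayer, cell=3, radial_tension=1.0, distributed=True)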
def ab_pull_edge(monolayer, cell_edges, radial_tension, distributed=False):
"""Adds a linear tension to the apical-to-basal edges
of a cell
"""
if distributed:
new_tension = radial_tension / (len(cell_edges))
else:
new_tension = radial_tension
monolayer.edge_df.loc[cell_edges, "line_tension"] += new_tension
|
andela-landia/adventures | refs/heads/develop | setup.py | 1 | from distutils.core import setup
setup(
name='adventures',
description='Checkpoint 3: Django Powered Bucketlist Application',
author='Loice Andia',
author_email='loice.andia@andela.com',
url='https://github.com/andela-landia/adventures',
version='1.0.0'
)
|
siddhika1889/Pydev-Editor | refs/heads/master | pysrc/pydevd_attach_to_process/winappdbg/win32/version.py | 102 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Detect the current architecture and operating system.
Some functions here are really from kernel32.dll, others from version.dll.
"""
__revision__ = "$Id$"
from winappdbg.win32.defines import *
#==============================================================================
# This is used later on to calculate the list of exported symbols.
_all = None
_all = set(vars().keys())
#==============================================================================
#--- NTDDI version ------------------------------------------------------------
NTDDI_WIN8 = 0x06020000
NTDDI_WIN7SP1 = 0x06010100
NTDDI_WIN7 = 0x06010000
NTDDI_WS08 = 0x06000100
NTDDI_VISTASP1 = 0x06000100
NTDDI_VISTA = 0x06000000
NTDDI_LONGHORN = NTDDI_VISTA
NTDDI_WS03SP2 = 0x05020200
NTDDI_WS03SP1 = 0x05020100
NTDDI_WS03 = 0x05020000
NTDDI_WINXPSP3 = 0x05010300
NTDDI_WINXPSP2 = 0x05010200
NTDDI_WINXPSP1 = 0x05010100
NTDDI_WINXP = 0x05010000
NTDDI_WIN2KSP4 = 0x05000400
NTDDI_WIN2KSP3 = 0x05000300
NTDDI_WIN2KSP2 = 0x05000200
NTDDI_WIN2KSP1 = 0x05000100
NTDDI_WIN2K = 0x05000000
NTDDI_WINNT4 = 0x04000000
OSVERSION_MASK = 0xFFFF0000
SPVERSION_MASK = 0x0000FF00
SUBVERSION_MASK = 0x000000FF
#--- OSVERSIONINFO and OSVERSIONINFOEX structures and constants ---------------
VER_PLATFORM_WIN32s = 0
VER_PLATFORM_WIN32_WINDOWS = 1
VER_PLATFORM_WIN32_NT = 2
VER_SUITE_BACKOFFICE = 0x00000004
VER_SUITE_BLADE = 0x00000400
VER_SUITE_COMPUTE_SERVER = 0x00004000
VER_SUITE_DATACENTER = 0x00000080
VER_SUITE_ENTERPRISE = 0x00000002
VER_SUITE_EMBEDDEDNT = 0x00000040
VER_SUITE_PERSONAL = 0x00000200
VER_SUITE_SINGLEUSERTS = 0x00000100
VER_SUITE_SMALLBUSINESS = 0x00000001
VER_SUITE_SMALLBUSINESS_RESTRICTED = 0x00000020
VER_SUITE_STORAGE_SERVER = 0x00002000
VER_SUITE_TERMINAL = 0x00000010
VER_SUITE_WH_SERVER = 0x00008000
VER_NT_DOMAIN_CONTROLLER = 0x0000002
VER_NT_SERVER = 0x0000003
VER_NT_WORKSTATION = 0x0000001
VER_BUILDNUMBER = 0x0000004
VER_MAJORVERSION = 0x0000002
VER_MINORVERSION = 0x0000001
VER_PLATFORMID = 0x0000008
VER_PRODUCT_TYPE = 0x0000080
VER_SERVICEPACKMAJOR = 0x0000020
VER_SERVICEPACKMINOR = 0x0000010
VER_SUITENAME = 0x0000040
VER_EQUAL = 1
VER_GREATER = 2
VER_GREATER_EQUAL = 3
VER_LESS = 4
VER_LESS_EQUAL = 5
VER_AND = 6
VER_OR = 7
# typedef struct _OSVERSIONINFO {
# DWORD dwOSVersionInfoSize;
# DWORD dwMajorVersion;
# DWORD dwMinorVersion;
# DWORD dwBuildNumber;
# DWORD dwPlatformId;
# TCHAR szCSDVersion[128];
# }OSVERSIONINFO;
class OSVERSIONINFOA(Structure):
_fields_ = [
("dwOSVersionInfoSize", DWORD),
("dwMajorVersion", DWORD),
("dwMinorVersion", DWORD),
("dwBuildNumber", DWORD),
("dwPlatformId", DWORD),
("szCSDVersion", CHAR * 128),
]
class OSVERSIONINFOW(Structure):
_fields_ = [
("dwOSVersionInfoSize", DWORD),
("dwMajorVersion", DWORD),
("dwMinorVersion", DWORD),
("dwBuildNumber", DWORD),
("dwPlatformId", DWORD),
("szCSDVersion", WCHAR * 128),
]
# typedef struct _OSVERSIONINFOEX {
# DWORD dwOSVersionInfoSize;
# DWORD dwMajorVersion;
# DWORD dwMinorVersion;
# DWORD dwBuildNumber;
# DWORD dwPlatformId;
# TCHAR szCSDVersion[128];
# WORD wServicePackMajor;
# WORD wServicePackMinor;
# WORD wSuiteMask;
# BYTE wProductType;
# BYTE wReserved;
# }OSVERSIONINFOEX, *POSVERSIONINFOEX, *LPOSVERSIONINFOEX;
class OSVERSIONINFOEXA(Structure):
_fields_ = [
("dwOSVersionInfoSize", DWORD),
("dwMajorVersion", DWORD),
("dwMinorVersion", DWORD),
("dwBuildNumber", DWORD),
("dwPlatformId", DWORD),
("szCSDVersion", CHAR * 128),
("wServicePackMajor", WORD),
("wServicePackMinor", WORD),
("wSuiteMask", WORD),
("wProductType", BYTE),
("wReserved", BYTE),
]
class OSVERSIONINFOEXW(Structure):
_fields_ = [
("dwOSVersionInfoSize", DWORD),
("dwMajorVersion", DWORD),
("dwMinorVersion", DWORD),
("dwBuildNumber", DWORD),
("dwPlatformId", DWORD),
("szCSDVersion", WCHAR * 128),
("wServicePackMajor", WORD),
("wServicePackMinor", WORD),
("wSuiteMask", WORD),
("wProductType", BYTE),
("wReserved", BYTE),
]
LPOSVERSIONINFOA = POINTER(OSVERSIONINFOA)
LPOSVERSIONINFOW = POINTER(OSVERSIONINFOW)
LPOSVERSIONINFOEXA = POINTER(OSVERSIONINFOEXA)
LPOSVERSIONINFOEXW = POINTER(OSVERSIONINFOEXW)
POSVERSIONINFOA = LPOSVERSIONINFOA
POSVERSIONINFOW = LPOSVERSIONINFOW
POSVERSIONINFOEXA = LPOSVERSIONINFOEXA
POSVERSIONINFOEXW = LPOSVERSIONINFOEXW
#--- GetSystemMetrics constants -----------------------------------------------
SM_CXSCREEN = 0
SM_CYSCREEN = 1
SM_CXVSCROLL = 2
SM_CYHSCROLL = 3
SM_CYCAPTION = 4
SM_CXBORDER = 5
SM_CYBORDER = 6
SM_CXDLGFRAME = 7
SM_CYDLGFRAME = 8
SM_CYVTHUMB = 9
SM_CXHTHUMB = 10
SM_CXICON = 11
SM_CYICON = 12
SM_CXCURSOR = 13
SM_CYCURSOR = 14
SM_CYMENU = 15
SM_CXFULLSCREEN = 16
SM_CYFULLSCREEN = 17
SM_CYKANJIWINDOW = 18
SM_MOUSEPRESENT = 19
SM_CYVSCROLL = 20
SM_CXHSCROLL = 21
SM_DEBUG = 22
SM_SWAPBUTTON = 23
SM_RESERVED1 = 24
SM_RESERVED2 = 25
SM_RESERVED3 = 26
SM_RESERVED4 = 27
SM_CXMIN = 28
SM_CYMIN = 29
SM_CXSIZE = 30
SM_CYSIZE = 31
SM_CXFRAME = 32
SM_CYFRAME = 33
SM_CXMINTRACK = 34
SM_CYMINTRACK = 35
SM_CXDOUBLECLK = 36
SM_CYDOUBLECLK = 37
SM_CXICONSPACING = 38
SM_CYICONSPACING = 39
SM_MENUDROPALIGNMENT = 40
SM_PENWINDOWS = 41
SM_DBCSENABLED = 42
SM_CMOUSEBUTTONS = 43
SM_CXFIXEDFRAME = SM_CXDLGFRAME # ;win40 name change
SM_CYFIXEDFRAME = SM_CYDLGFRAME # ;win40 name change
SM_CXSIZEFRAME = SM_CXFRAME # ;win40 name change
SM_CYSIZEFRAME = SM_CYFRAME # ;win40 name change
SM_SECURE = 44
SM_CXEDGE = 45
SM_CYEDGE = 46
SM_CXMINSPACING = 47
SM_CYMINSPACING = 48
SM_CXSMICON = 49
SM_CYSMICON = 50
SM_CYSMCAPTION = 51
SM_CXSMSIZE = 52
SM_CYSMSIZE = 53
SM_CXMENUSIZE = 54
SM_CYMENUSIZE = 55
SM_ARRANGE = 56
SM_CXMINIMIZED = 57
SM_CYMINIMIZED = 58
SM_CXMAXTRACK = 59
SM_CYMAXTRACK = 60
SM_CXMAXIMIZED = 61
SM_CYMAXIMIZED = 62
SM_NETWORK = 63
SM_CLEANBOOT = 67
SM_CXDRAG = 68
SM_CYDRAG = 69
SM_SHOWSOUNDS = 70
SM_CXMENUCHECK = 71 # Use instead of GetMenuCheckMarkDimensions()!
SM_CYMENUCHECK = 72
SM_SLOWMACHINE = 73
SM_MIDEASTENABLED = 74
SM_MOUSEWHEELPRESENT = 75
SM_XVIRTUALSCREEN = 76
SM_YVIRTUALSCREEN = 77
SM_CXVIRTUALSCREEN = 78
SM_CYVIRTUALSCREEN = 79
SM_CMONITORS = 80
SM_SAMEDISPLAYFORMAT = 81
SM_IMMENABLED = 82
SM_CXFOCUSBORDER = 83
SM_CYFOCUSBORDER = 84
SM_TABLETPC = 86
SM_MEDIACENTER = 87
SM_STARTER = 88
SM_SERVERR2 = 89
SM_MOUSEHORIZONTALWHEELPRESENT = 91
SM_CXPADDEDBORDER = 92
SM_CMETRICS = 93
SM_REMOTESESSION = 0x1000
SM_SHUTTINGDOWN = 0x2000
SM_REMOTECONTROL = 0x2001
SM_CARETBLINKINGENABLED = 0x2002
#--- SYSTEM_INFO structure, GetSystemInfo() and GetNativeSystemInfo() ---------
# Values used by Wine
# Documented values at MSDN are marked with an asterisk
PROCESSOR_ARCHITECTURE_UNKNOWN = 0xFFFF  # Unknown architecture.
PROCESSOR_ARCHITECTURE_INTEL = 0 # x86 (AMD or Intel) *
PROCESSOR_ARCHITECTURE_MIPS = 1 # MIPS
PROCESSOR_ARCHITECTURE_ALPHA = 2 # Alpha
PROCESSOR_ARCHITECTURE_PPC = 3 # Power PC
PROCESSOR_ARCHITECTURE_SHX = 4 # SHX
PROCESSOR_ARCHITECTURE_ARM = 5 # ARM
PROCESSOR_ARCHITECTURE_IA64 = 6 # Intel Itanium *
PROCESSOR_ARCHITECTURE_ALPHA64 = 7 # Alpha64
PROCESSOR_ARCHITECTURE_MSIL = 8 # MSIL
PROCESSOR_ARCHITECTURE_AMD64 = 9 # x64 (AMD or Intel) *
PROCESSOR_ARCHITECTURE_IA32_ON_WIN64 = 10 # IA32 on Win64
PROCESSOR_ARCHITECTURE_SPARC = 20 # Sparc (Wine)
# Values used by Wine
# PROCESSOR_OPTIL value found at http://code.google.com/p/ddab-lib/
# Documented values at MSDN are marked with an asterisk
PROCESSOR_INTEL_386 = 386 # Intel i386 *
PROCESSOR_INTEL_486 = 486 # Intel i486 *
PROCESSOR_INTEL_PENTIUM = 586 # Intel Pentium *
PROCESSOR_INTEL_IA64 = 2200 # Intel IA64 (Itanium) *
PROCESSOR_AMD_X8664 = 8664 # AMD X86 64 *
PROCESSOR_MIPS_R4000 = 4000 # MIPS R4000, R4101, R3910
PROCESSOR_ALPHA_21064 = 21064 # Alpha 210 64
PROCESSOR_PPC_601 = 601 # PPC 601
PROCESSOR_PPC_603 = 603 # PPC 603
PROCESSOR_PPC_604 = 604 # PPC 604
PROCESSOR_PPC_620 = 620 # PPC 620
PROCESSOR_HITACHI_SH3 = 10003 # Hitachi SH3 (Windows CE)
PROCESSOR_HITACHI_SH3E = 10004 # Hitachi SH3E (Windows CE)
PROCESSOR_HITACHI_SH4 = 10005 # Hitachi SH4 (Windows CE)
PROCESSOR_MOTOROLA_821 = 821 # Motorola 821 (Windows CE)
PROCESSOR_SHx_SH3 = 103 # SHx SH3 (Windows CE)
PROCESSOR_SHx_SH4 = 104 # SHx SH4 (Windows CE)
PROCESSOR_STRONGARM = 2577 # StrongARM (Windows CE)
PROCESSOR_ARM720 = 1824 # ARM 720 (Windows CE)
PROCESSOR_ARM820 = 2080 # ARM 820 (Windows CE)
PROCESSOR_ARM920 = 2336 # ARM 920 (Windows CE)
PROCESSOR_ARM_7TDMI = 70001 # ARM 7TDMI (Windows CE)
PROCESSOR_OPTIL = 0x494F # MSIL
# typedef struct _SYSTEM_INFO {
# union {
# DWORD dwOemId;
# struct {
# WORD wProcessorArchitecture;
# WORD wReserved;
# } ;
# } ;
# DWORD dwPageSize;
# LPVOID lpMinimumApplicationAddress;
# LPVOID lpMaximumApplicationAddress;
# DWORD_PTR dwActiveProcessorMask;
# DWORD dwNumberOfProcessors;
# DWORD dwProcessorType;
# DWORD dwAllocationGranularity;
# WORD wProcessorLevel;
# WORD wProcessorRevision;
# } SYSTEM_INFO;
class _SYSTEM_INFO_OEM_ID_STRUCT(Structure):
_fields_ = [
("wProcessorArchitecture", WORD),
("wReserved", WORD),
]
class _SYSTEM_INFO_OEM_ID(Union):
_fields_ = [
("dwOemId", DWORD),
("w", _SYSTEM_INFO_OEM_ID_STRUCT),
]
class SYSTEM_INFO(Structure):
_fields_ = [
("id", _SYSTEM_INFO_OEM_ID),
("dwPageSize", DWORD),
("lpMinimumApplicationAddress", LPVOID),
("lpMaximumApplicationAddress", LPVOID),
("dwActiveProcessorMask", DWORD_PTR),
("dwNumberOfProcessors", DWORD),
("dwProcessorType", DWORD),
("dwAllocationGranularity", DWORD),
("wProcessorLevel", WORD),
("wProcessorRevision", WORD),
]
def __get_dwOemId(self):
return self.id.dwOemId
def __set_dwOemId(self, value):
self.id.dwOemId = value
dwOemId = property(__get_dwOemId, __set_dwOemId)
def __get_wProcessorArchitecture(self):
return self.id.w.wProcessorArchitecture
def __set_wProcessorArchitecture(self, value):
self.id.w.wProcessorArchitecture = value
wProcessorArchitecture = property(__get_wProcessorArchitecture, __set_wProcessorArchitecture)
LPSYSTEM_INFO = ctypes.POINTER(SYSTEM_INFO)
# void WINAPI GetSystemInfo(
# __out LPSYSTEM_INFO lpSystemInfo
# );
def GetSystemInfo():
_GetSystemInfo = windll.kernel32.GetSystemInfo
_GetSystemInfo.argtypes = [LPSYSTEM_INFO]
_GetSystemInfo.restype = None
sysinfo = SYSTEM_INFO()
_GetSystemInfo(byref(sysinfo))
return sysinfo
# void WINAPI GetNativeSystemInfo(
# __out LPSYSTEM_INFO lpSystemInfo
# );
def GetNativeSystemInfo():
_GetNativeSystemInfo = windll.kernel32.GetNativeSystemInfo
_GetNativeSystemInfo.argtypes = [LPSYSTEM_INFO]
_GetNativeSystemInfo.restype = None
sysinfo = SYSTEM_INFO()
_GetNativeSystemInfo(byref(sysinfo))
return sysinfo
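# Illustrative sketch (not in the original module; Windows only):
#
#   si = GetNativeSystemInfo()
#   arch = si.id.w.wProcessorArchitecture  # e.g. PROCESSOR_ARCHITECTURE_AMD64
#   cpus = si.dwNumberOfProcessors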
# int WINAPI GetSystemMetrics(
# __in int nIndex
# );
def GetSystemMetrics(nIndex):
_GetSystemMetrics = windll.user32.GetSystemMetrics
_GetSystemMetrics.argtypes = [ctypes.c_int]
_GetSystemMetrics.restype = ctypes.c_int
return _GetSystemMetrics(nIndex)
# SIZE_T WINAPI GetLargePageMinimum(void);
def GetLargePageMinimum():
_GetLargePageMinimum = windll.user32.GetLargePageMinimum
_GetLargePageMinimum.argtypes = []
_GetLargePageMinimum.restype = SIZE_T
return _GetLargePageMinimum()
# HANDLE WINAPI GetCurrentProcess(void);
def GetCurrentProcess():
## return 0xFFFFFFFFFFFFFFFFL
_GetCurrentProcess = windll.kernel32.GetCurrentProcess
_GetCurrentProcess.argtypes = []
_GetCurrentProcess.restype = HANDLE
return _GetCurrentProcess()
# HANDLE WINAPI GetCurrentThread(void);
def GetCurrentThread():
## return 0xFFFFFFFFFFFFFFFEL
_GetCurrentThread = windll.kernel32.GetCurrentThread
_GetCurrentThread.argtypes = []
_GetCurrentThread.restype = HANDLE
return _GetCurrentThread()
# BOOL WINAPI IsWow64Process(
# __in HANDLE hProcess,
# __out PBOOL Wow64Process
# );
def IsWow64Process(hProcess):
_IsWow64Process = windll.kernel32.IsWow64Process
_IsWow64Process.argtypes = [HANDLE, PBOOL]
_IsWow64Process.restype = bool
_IsWow64Process.errcheck = RaiseIfZero
Wow64Process = BOOL(FALSE)
_IsWow64Process(hProcess, byref(Wow64Process))
return bool(Wow64Process)
# DWORD WINAPI GetVersion(void);
def GetVersion():
_GetVersion = windll.kernel32.GetVersion
_GetVersion.argtypes = []
_GetVersion.restype = DWORD
_GetVersion.errcheck = RaiseIfZero
# See the example code here:
# http://msdn.microsoft.com/en-us/library/ms724439(VS.85).aspx
dwVersion = _GetVersion()
dwMajorVersion = dwVersion & 0x000000FF
dwMinorVersion = (dwVersion & 0x0000FF00) >> 8
if (dwVersion & 0x80000000) == 0:
dwBuild = (dwVersion & 0x7FFF0000) >> 16
else:
dwBuild = None
return int(dwMajorVersion), int(dwMinorVersion), int(dwBuild)
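# Illustrative sketch (not in the original module; Windows only): the packed
# DWORD is already unpacked by the wrapper above:
#
#   major, minor, build = GetVersion()  # e.g. (6, 1, 7601) on Windows 7 SP1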
# BOOL WINAPI GetVersionEx(
# __inout LPOSVERSIONINFO lpVersionInfo
# );
def GetVersionExA():
_GetVersionExA = windll.kernel32.GetVersionExA
_GetVersionExA.argtypes = [POINTER(OSVERSIONINFOEXA)]
_GetVersionExA.restype = bool
_GetVersionExA.errcheck = RaiseIfZero
osi = OSVERSIONINFOEXA()
osi.dwOSVersionInfoSize = sizeof(osi)
try:
_GetVersionExA(byref(osi))
except WindowsError:
osi = OSVERSIONINFOA()
osi.dwOSVersionInfoSize = sizeof(osi)
_GetVersionExA.argtypes = [POINTER(OSVERSIONINFOA)]
_GetVersionExA(byref(osi))
return osi
def GetVersionExW():
_GetVersionExW = windll.kernel32.GetVersionExW
_GetVersionExW.argtypes = [POINTER(OSVERSIONINFOEXW)]
_GetVersionExW.restype = bool
_GetVersionExW.errcheck = RaiseIfZero
osi = OSVERSIONINFOEXW()
osi.dwOSVersionInfoSize = sizeof(osi)
try:
_GetVersionExW(byref(osi))
except WindowsError:
osi = OSVERSIONINFOW()
osi.dwOSVersionInfoSize = sizeof(osi)
_GetVersionExW.argtypes = [POINTER(OSVERSIONINFOW)]
_GetVersionExW(byref(osi))
return osi
GetVersionEx = GuessStringType(GetVersionExA, GetVersionExW)
# BOOL WINAPI GetProductInfo(
# __in DWORD dwOSMajorVersion,
# __in DWORD dwOSMinorVersion,
# __in DWORD dwSpMajorVersion,
# __in DWORD dwSpMinorVersion,
# __out PDWORD pdwReturnedProductType
# );
def GetProductInfo(dwOSMajorVersion, dwOSMinorVersion, dwSpMajorVersion, dwSpMinorVersion):
_GetProductInfo = windll.kernel32.GetProductInfo
_GetProductInfo.argtypes = [DWORD, DWORD, DWORD, DWORD, PDWORD]
_GetProductInfo.restype = BOOL
_GetProductInfo.errcheck = RaiseIfZero
dwReturnedProductType = DWORD(0)
_GetProductInfo(dwOSMajorVersion, dwOSMinorVersion, dwSpMajorVersion, dwSpMinorVersion, byref(dwReturnedProductType))
return dwReturnedProductType.value
# BOOL WINAPI VerifyVersionInfo(
# __in LPOSVERSIONINFOEX lpVersionInfo,
# __in DWORD dwTypeMask,
# __in DWORDLONG dwlConditionMask
# );
def VerifyVersionInfo(lpVersionInfo, dwTypeMask, dwlConditionMask):
if isinstance(lpVersionInfo, OSVERSIONINFOEXA):
return VerifyVersionInfoA(lpVersionInfo, dwTypeMask, dwlConditionMask)
if isinstance(lpVersionInfo, OSVERSIONINFOEXW):
return VerifyVersionInfoW(lpVersionInfo, dwTypeMask, dwlConditionMask)
raise TypeError("Bad OSVERSIONINFOEX structure")
def VerifyVersionInfoA(lpVersionInfo, dwTypeMask, dwlConditionMask):
_VerifyVersionInfoA = windll.kernel32.VerifyVersionInfoA
_VerifyVersionInfoA.argtypes = [LPOSVERSIONINFOEXA, DWORD, DWORDLONG]
_VerifyVersionInfoA.restype = bool
return _VerifyVersionInfoA(byref(lpVersionInfo), dwTypeMask, dwlConditionMask)
def VerifyVersionInfoW(lpVersionInfo, dwTypeMask, dwlConditionMask):
_VerifyVersionInfoW = windll.kernel32.VerifyVersionInfoW
_VerifyVersionInfoW.argtypes = [LPOSVERSIONINFOEXW, DWORD, DWORDLONG]
_VerifyVersionInfoW.restype = bool
return _VerifyVersionInfoW(byref(lpVersionInfo), dwTypeMask, dwlConditionMask)
# ULONGLONG WINAPI VerSetConditionMask(
# __in ULONGLONG dwlConditionMask,
# __in DWORD dwTypeBitMask,
# __in BYTE dwConditionMask
# );
def VerSetConditionMask(dwlConditionMask, dwTypeBitMask, dwConditionMask):
_VerSetConditionMask = windll.kernel32.VerSetConditionMask
_VerSetConditionMask.argtypes = [ULONGLONG, DWORD, BYTE]
_VerSetConditionMask.restype = ULONGLONG
return _VerSetConditionMask(dwlConditionMask, dwTypeBitMask, dwConditionMask)
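# Illustrative sketch (not in the original module; Windows only): the usual
# pattern chains VerSetConditionMask() once per field, then calls
# VerifyVersionInfoW(). This would test for Windows 7 (6.1) or later:
#
#   osi = OSVERSIONINFOEXW()
#   osi.dwOSVersionInfoSize = sizeof(osi)
#   osi.dwMajorVersion, osi.dwMinorVersion = 6, 1
#   mask = VerSetConditionMask(0, VER_MAJORVERSION, VER_GREATER_EQUAL)
#   mask = VerSetConditionMask(mask, VER_MINORVERSION, VER_GREATER_EQUAL)
#   ok = VerifyVersionInfoW(osi, VER_MAJORVERSION | VER_MINORVERSION, mask)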
#--- get_bits, get_arch and get_os --------------------------------------------
ARCH_UNKNOWN = "unknown"
ARCH_I386 = "i386"
ARCH_MIPS = "mips"
ARCH_ALPHA = "alpha"
ARCH_PPC = "ppc"
ARCH_SHX = "shx"
ARCH_ARM = "arm"
ARCH_ARM64 = "arm64"
ARCH_THUMB = "thumb"
ARCH_IA64 = "ia64"
ARCH_ALPHA64 = "alpha64"
ARCH_MSIL = "msil"
ARCH_AMD64 = "amd64"
ARCH_SPARC = "sparc"
# aliases
ARCH_IA32 = ARCH_I386
ARCH_X86 = ARCH_I386
ARCH_X64 = ARCH_AMD64
ARCH_ARM7 = ARCH_ARM
ARCH_ARM8 = ARCH_ARM64
ARCH_T32 = ARCH_THUMB
ARCH_AARCH32 = ARCH_ARM7
ARCH_AARCH64 = ARCH_ARM8
ARCH_POWERPC = ARCH_PPC
ARCH_HITACHI = ARCH_SHX
ARCH_ITANIUM = ARCH_IA64
# win32 constants -> our constants
_arch_map = {
PROCESSOR_ARCHITECTURE_INTEL : ARCH_I386,
PROCESSOR_ARCHITECTURE_MIPS : ARCH_MIPS,
PROCESSOR_ARCHITECTURE_ALPHA : ARCH_ALPHA,
PROCESSOR_ARCHITECTURE_PPC : ARCH_PPC,
PROCESSOR_ARCHITECTURE_SHX : ARCH_SHX,
PROCESSOR_ARCHITECTURE_ARM : ARCH_ARM,
PROCESSOR_ARCHITECTURE_IA64 : ARCH_IA64,
PROCESSOR_ARCHITECTURE_ALPHA64 : ARCH_ALPHA64,
PROCESSOR_ARCHITECTURE_MSIL : ARCH_MSIL,
PROCESSOR_ARCHITECTURE_AMD64 : ARCH_AMD64,
PROCESSOR_ARCHITECTURE_SPARC : ARCH_SPARC,
}
OS_UNKNOWN = "Unknown"
OS_NT = "Windows NT"
OS_W2K = "Windows 2000"
OS_XP = "Windows XP"
OS_XP_64 = "Windows XP (64 bits)"
OS_W2K3 = "Windows 2003"
OS_W2K3_64 = "Windows 2003 (64 bits)"
OS_W2K3R2 = "Windows 2003 R2"
OS_W2K3R2_64 = "Windows 2003 R2 (64 bits)"
OS_W2K8 = "Windows 2008"
OS_W2K8_64 = "Windows 2008 (64 bits)"
OS_W2K8R2 = "Windows 2008 R2"
OS_W2K8R2_64 = "Windows 2008 R2 (64 bits)"
OS_VISTA = "Windows Vista"
OS_VISTA_64 = "Windows Vista (64 bits)"
OS_W7 = "Windows 7"
OS_W7_64 = "Windows 7 (64 bits)"
OS_SEVEN = OS_W7
OS_SEVEN_64 = OS_W7_64
OS_WINDOWS_NT = OS_NT
OS_WINDOWS_2000 = OS_W2K
OS_WINDOWS_XP = OS_XP
OS_WINDOWS_XP_64 = OS_XP_64
OS_WINDOWS_2003 = OS_W2K3
OS_WINDOWS_2003_64 = OS_W2K3_64
OS_WINDOWS_2003_R2 = OS_W2K3R2
OS_WINDOWS_2003_R2_64 = OS_W2K3R2_64
OS_WINDOWS_2008 = OS_W2K8
OS_WINDOWS_2008_64 = OS_W2K8_64
OS_WINDOWS_2008_R2 = OS_W2K8R2
OS_WINDOWS_2008_R2_64 = OS_W2K8R2_64
OS_WINDOWS_VISTA = OS_VISTA
OS_WINDOWS_VISTA_64 = OS_VISTA_64
OS_WINDOWS_SEVEN = OS_W7
OS_WINDOWS_SEVEN_64 = OS_W7_64
def _get_bits():
"""
Determines the current integer size in bits.
This is useful to know whether we're running on a 32-bit or a 64-bit machine.
@rtype: int
@return: Returns the size of L{SIZE_T} in bits.
"""
return sizeof(SIZE_T) * 8
def _get_arch():
"""
Determines the current processor architecture.
@rtype: str
@return:
On error, returns:
- L{ARCH_UNKNOWN} (C{"unknown"}) meaning the architecture could not be detected or is not known to WinAppDbg.
On success, returns one of the following values:
- L{ARCH_I386} (C{"i386"}) for Intel 32-bit x86 processor or compatible.
- L{ARCH_AMD64} (C{"amd64"}) for Intel 64-bit x86_64 processor or compatible.
May also return one of the following values if you get both Python and
WinAppDbg to work in such machines... let me know if you do! :)
- L{ARCH_MIPS} (C{"mips"}) for MIPS compatible processors.
- L{ARCH_ALPHA} (C{"alpha"}) for Alpha processors.
- L{ARCH_PPC} (C{"ppc"}) for PowerPC compatible processors.
- L{ARCH_SHX} (C{"shx"}) for Hitachi SH processors.
- L{ARCH_ARM} (C{"arm"}) for ARM compatible processors.
- L{ARCH_IA64} (C{"ia64"}) for Intel Itanium processor or compatible.
- L{ARCH_ALPHA64} (C{"alpha64"}) for Alpha64 processors.
- L{ARCH_MSIL} (C{"msil"}) for the .NET virtual machine.
- L{ARCH_SPARC} (C{"sparc"}) for Sun Sparc processors.
Probably IronPython returns C{ARCH_MSIL} but I haven't tried it. Python
on Windows CE and Windows Mobile should return C{ARCH_ARM}. Python on
Solaris using Wine would return C{ARCH_SPARC}. Python in an Itanium
machine should return C{ARCH_IA64} both on Wine and proper Windows.
All other values should only be returned on Linux using Wine.
"""
try:
si = GetNativeSystemInfo()
except Exception:
si = GetSystemInfo()
try:
return _arch_map[si.id.w.wProcessorArchitecture]
except KeyError:
return ARCH_UNKNOWN
def _get_wow64():
"""
Determines if the current process is running in Windows-On-Windows 64 bits.
@rtype: bool
@return: C{True} if the current process is a 32 bit program running in a
64 bit version of Windows, C{False} if it's either a 32 bit program
in a 32 bit Windows or a 64 bit program in a 64 bit Windows.
"""
# Try to determine if the debugger itself is running on WOW64.
# On error assume False.
if bits == 64:
wow64 = False
else:
try:
wow64 = IsWow64Process( GetCurrentProcess() )
except Exception:
wow64 = False
return wow64
def _get_os(osvi = None):
"""
Determines the current operating system.
This function allows you to quickly tell apart major OS differences.
For more detailed information call L{GetVersionEx} instead.
@note:
Wine reports itself as Windows XP 32 bits
(even if the Linux host is 64 bits).
ReactOS may report itself as Windows 2000 or Windows XP,
depending on the version of ReactOS.
@type osvi: L{OSVERSIONINFOEXA}
@param osvi: Optional. The return value from L{GetVersionEx}.
@rtype: str
@return:
One of the following values:
- L{OS_UNKNOWN} (C{"Unknown"})
- L{OS_NT} (C{"Windows NT"})
- L{OS_W2K} (C{"Windows 2000"})
- L{OS_XP} (C{"Windows XP"})
- L{OS_XP_64} (C{"Windows XP (64 bits)"})
- L{OS_W2K3} (C{"Windows 2003"})
- L{OS_W2K3_64} (C{"Windows 2003 (64 bits)"})
- L{OS_W2K3R2} (C{"Windows 2003 R2"})
- L{OS_W2K3R2_64} (C{"Windows 2003 R2 (64 bits)"})
- L{OS_W2K8} (C{"Windows 2008"})
- L{OS_W2K8_64} (C{"Windows 2008 (64 bits)"})
- L{OS_W2K8R2} (C{"Windows 2008 R2"})
- L{OS_W2K8R2_64} (C{"Windows 2008 R2 (64 bits)"})
- L{OS_VISTA} (C{"Windows Vista"})
- L{OS_VISTA_64} (C{"Windows Vista (64 bits)"})
- L{OS_W7} (C{"Windows 7"})
- L{OS_W7_64} (C{"Windows 7 (64 bits)"})
"""
# rough port of http://msdn.microsoft.com/en-us/library/ms724429%28VS.85%29.aspx
if not osvi:
osvi = GetVersionEx()
if osvi.dwPlatformId == VER_PLATFORM_WIN32_NT and osvi.dwMajorVersion > 4:
if osvi.dwMajorVersion == 6:
if osvi.dwMinorVersion == 0:
if osvi.wProductType == VER_NT_WORKSTATION:
if bits == 64 or wow64:
return 'Windows Vista (64 bits)'
return 'Windows Vista'
else:
if bits == 64 or wow64:
return 'Windows 2008 (64 bits)'
return 'Windows 2008'
if osvi.dwMinorVersion == 1:
if osvi.wProductType == VER_NT_WORKSTATION:
if bits == 64 or wow64:
return 'Windows 7 (64 bits)'
return 'Windows 7'
else:
if bits == 64 or wow64:
return 'Windows 2008 R2 (64 bits)'
return 'Windows 2008 R2'
if osvi.dwMajorVersion == 5:
if osvi.dwMinorVersion == 2:
if GetSystemMetrics(SM_SERVERR2):
if bits == 64 or wow64:
return 'Windows 2003 R2 (64 bits)'
return 'Windows 2003 R2'
if osvi.wSuiteMask in (VER_SUITE_STORAGE_SERVER, VER_SUITE_WH_SERVER):
if bits == 64 or wow64:
return 'Windows 2003 (64 bits)'
return 'Windows 2003'
if osvi.wProductType == VER_NT_WORKSTATION and arch == ARCH_AMD64:
return 'Windows XP (64 bits)'
else:
if bits == 64 or wow64:
return 'Windows 2003 (64 bits)'
return 'Windows 2003'
if osvi.dwMinorVersion == 1:
return 'Windows XP'
if osvi.dwMinorVersion == 0:
return 'Windows 2000'
if osvi.dwMajorVersion == 4:
return 'Windows NT'
return 'Unknown'
def _get_ntddi(osvi):
"""
Determines the NTDDI version constant for the current operating system.
This packs the OS major and minor version and service pack numbers into
a single integer. For more detailed information call L{kernel32.GetVersionEx} instead.
@note:
Wine reports itself as Windows XP 32 bits
(even if the Linux host is 64 bits).
ReactOS may report itself as Windows 2000 or Windows XP,
depending on the version of ReactOS.
@type osvi: L{OSVERSIONINFOEXA}
@param osvi: Optional. The return value from L{kernel32.GetVersionEx}.
@rtype: int
@return: NTDDI version number.
"""
if not osvi:
osvi = GetVersionEx()
ntddi = 0
ntddi += (osvi.dwMajorVersion & 0xFF) << 24
ntddi += (osvi.dwMinorVersion & 0xFF) << 16
ntddi += (osvi.wServicePackMajor & 0xFF) << 8
ntddi += (osvi.wServicePackMinor & 0xFF)
return ntddi
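# Worked example: on Windows Vista SP1 (version 6.0, service pack 1.0) the
# packing above yields (6 << 24) + (0 << 16) + (1 << 8) + 0 = 0x06000100,
# which matches the NTDDI_VISTASP1 constant from the Windows SDK.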
# The order of the following definitions DOES matter!
# Current integer size in bits. See L{_get_bits} for more details.
bits = _get_bits()
# Current processor architecture. See L{_get_arch} for more details.
arch = _get_arch()
# Set to C{True} if the current process is running in WOW64. See L{_get_wow64} for more details.
wow64 = _get_wow64()
_osvi = GetVersionEx()
# Current operating system. See L{_get_os} for more details.
os = _get_os(_osvi)
# Current operating system as an NTDDI constant. See L{_get_ntddi} for more details.
NTDDI_VERSION = _get_ntddi(_osvi)
# Upper word of L{NTDDI_VERSION}, contains the OS major and minor version number.
WINVER = NTDDI_VERSION >> 16
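# For example, NTDDI_VERSION == 0x06000100 (Vista SP1) gives WINVER == 0x0600.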
#--- version.dll --------------------------------------------------------------
VS_FF_DEBUG = 0x00000001
VS_FF_PRERELEASE = 0x00000002
VS_FF_PATCHED = 0x00000004
VS_FF_PRIVATEBUILD = 0x00000008
VS_FF_INFOINFERRED = 0x00000010
VS_FF_SPECIALBUILD = 0x00000020
VOS_UNKNOWN = 0x00000000
VOS__WINDOWS16 = 0x00000001
VOS__PM16 = 0x00000002
VOS__PM32 = 0x00000003
VOS__WINDOWS32 = 0x00000004
VOS_DOS = 0x00010000
VOS_OS216 = 0x00020000
VOS_OS232 = 0x00030000
VOS_NT = 0x00040000
VOS_DOS_WINDOWS16 = 0x00010001
VOS_DOS_WINDOWS32 = 0x00010004
VOS_NT_WINDOWS32 = 0x00040004
VOS_OS216_PM16 = 0x00020002
VOS_OS232_PM32 = 0x00030003
VFT_UNKNOWN = 0x00000000
VFT_APP = 0x00000001
VFT_DLL = 0x00000002
VFT_DRV = 0x00000003
VFT_FONT = 0x00000004
VFT_VXD = 0x00000005
VFT_RESERVED = 0x00000006 # undocumented
VFT_STATIC_LIB = 0x00000007
VFT2_UNKNOWN = 0x00000000
VFT2_DRV_PRINTER = 0x00000001
VFT2_DRV_KEYBOARD = 0x00000002
VFT2_DRV_LANGUAGE = 0x00000003
VFT2_DRV_DISPLAY = 0x00000004
VFT2_DRV_MOUSE = 0x00000005
VFT2_DRV_NETWORK = 0x00000006
VFT2_DRV_SYSTEM = 0x00000007
VFT2_DRV_INSTALLABLE = 0x00000008
VFT2_DRV_SOUND = 0x00000009
VFT2_DRV_COMM = 0x0000000A
VFT2_DRV_RESERVED = 0x0000000B # undocumented
VFT2_DRV_VERSIONED_PRINTER = 0x0000000C
VFT2_FONT_RASTER = 0x00000001
VFT2_FONT_VECTOR = 0x00000002
VFT2_FONT_TRUETYPE = 0x00000003
# typedef struct tagVS_FIXEDFILEINFO {
# DWORD dwSignature;
# DWORD dwStrucVersion;
# DWORD dwFileVersionMS;
# DWORD dwFileVersionLS;
# DWORD dwProductVersionMS;
# DWORD dwProductVersionLS;
# DWORD dwFileFlagsMask;
# DWORD dwFileFlags;
# DWORD dwFileOS;
# DWORD dwFileType;
# DWORD dwFileSubtype;
# DWORD dwFileDateMS;
# DWORD dwFileDateLS;
# } VS_FIXEDFILEINFO;
class VS_FIXEDFILEINFO(Structure):
_fields_ = [
("dwSignature", DWORD),
("dwStrucVersion", DWORD),
("dwFileVersionMS", DWORD),
("dwFileVersionLS", DWORD),
("dwProductVersionMS", DWORD),
("dwProductVersionLS", DWORD),
("dwFileFlagsMask", DWORD),
("dwFileFlags", DWORD),
("dwFileOS", DWORD),
("dwFileType", DWORD),
("dwFileSubtype", DWORD),
("dwFileDateMS", DWORD),
("dwFileDateLS", DWORD),
]
PVS_FIXEDFILEINFO = POINTER(VS_FIXEDFILEINFO)
LPVS_FIXEDFILEINFO = PVS_FIXEDFILEINFO
# BOOL WINAPI GetFileVersionInfo(
# _In_ LPCTSTR lptstrFilename,
# _Reserved_ DWORD dwHandle,
# _In_ DWORD dwLen,
# _Out_ LPVOID lpData
# );
# DWORD WINAPI GetFileVersionInfoSize(
# _In_ LPCTSTR lptstrFilename,
# _Out_opt_ LPDWORD lpdwHandle
# );
def GetFileVersionInfoA(lptstrFilename):
_GetFileVersionInfoA = windll.version.GetFileVersionInfoA
_GetFileVersionInfoA.argtypes = [LPSTR, DWORD, DWORD, LPVOID]
_GetFileVersionInfoA.restype = bool
_GetFileVersionInfoA.errcheck = RaiseIfZero
_GetFileVersionInfoSizeA = windll.version.GetFileVersionInfoSizeA
_GetFileVersionInfoSizeA.argtypes = [LPSTR, LPVOID]
_GetFileVersionInfoSizeA.restype = DWORD
_GetFileVersionInfoSizeA.errcheck = RaiseIfZero
dwLen = _GetFileVersionInfoSizeA(lptstrFilename, None)
lpData = ctypes.create_string_buffer(dwLen)
_GetFileVersionInfoA(lptstrFilename, 0, dwLen, byref(lpData))
return lpData
def GetFileVersionInfoW(lptstrFilename):
_GetFileVersionInfoW = windll.version.GetFileVersionInfoW
_GetFileVersionInfoW.argtypes = [LPWSTR, DWORD, DWORD, LPVOID]
_GetFileVersionInfoW.restype = bool
_GetFileVersionInfoW.errcheck = RaiseIfZero
_GetFileVersionInfoSizeW = windll.version.GetFileVersionInfoSizeW
_GetFileVersionInfoSizeW.argtypes = [LPWSTR, LPVOID]
_GetFileVersionInfoSizeW.restype = DWORD
_GetFileVersionInfoSizeW.errcheck = RaiseIfZero
dwLen = _GetFileVersionInfoSizeW(lptstrFilename, None)
lpData = ctypes.create_string_buffer(dwLen) # not a string!
_GetFileVersionInfoW(lptstrFilename, 0, dwLen, byref(lpData))
return lpData
GetFileVersionInfo = GuessStringType(GetFileVersionInfoA, GetFileVersionInfoW)
# BOOL WINAPI VerQueryValue(
# _In_ LPCVOID pBlock,
# _In_ LPCTSTR lpSubBlock,
# _Out_ LPVOID *lplpBuffer,
# _Out_ PUINT puLen
# );
def VerQueryValueA(pBlock, lpSubBlock):
_VerQueryValueA = windll.version.VerQueryValueA
_VerQueryValueA.argtypes = [LPVOID, LPSTR, LPVOID, POINTER(UINT)]
_VerQueryValueA.restype = bool
_VerQueryValueA.errcheck = RaiseIfZero
lpBuffer = LPVOID(0)
uLen = UINT(0)
_VerQueryValueA(pBlock, lpSubBlock, byref(lpBuffer), byref(uLen))
return lpBuffer, uLen.value
def VerQueryValueW(pBlock, lpSubBlock):
_VerQueryValueW = windll.version.VerQueryValueW
_VerQueryValueW.argtypes = [LPVOID, LPWSTR, LPVOID, POINTER(UINT)]
_VerQueryValueW.restype = bool
_VerQueryValueW.errcheck = RaiseIfZero
lpBuffer = LPVOID(0)
uLen = UINT(0)
_VerQueryValueW(pBlock, lpSubBlock, byref(lpBuffer), byref(uLen))
return lpBuffer, uLen.value
VerQueryValue = GuessStringType(VerQueryValueA, VerQueryValueW)
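# Usage sketch (comment only; the ctypes.cast() step is an assumption based
# on the PVS_FIXEDFILEINFO type defined above, not code from this module):
#
# pBlock = GetFileVersionInfo(u"C:\\Windows\\System32\\kernel32.dll")
# lpBuffer, uLen = VerQueryValue(pBlock, u"\\")
# ffi = ctypes.cast(lpBuffer, PVS_FIXEDFILEINFO).contents
# version = (ffi.dwFileVersionMS >> 16, ffi.dwFileVersionMS & 0xFFFF,
#            ffi.dwFileVersionLS >> 16, ffi.dwFileVersionLS & 0xFFFF)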
#==============================================================================
# This calculates the list of exported symbols.
_all = set(vars().keys()).difference(_all)
__all__ = [_x for _x in _all if not _x.startswith('_')]
__all__.sort()
#==============================================================================
|
graik/biskit | refs/heads/master | archive_biskit2/scripts/Mod/modelling_example.py | 1 | #!/usr/bin/env python
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
from Biskit.Mod import SequenceSearcher
from Biskit.Mod import TemplateSearcher
from Biskit.Mod import TemplateCleaner
from Biskit.Mod import Aligner
from Biskit.Mod import Modeller
import Biskit.Mod.modUtils as modUtils
import Biskit.tools as tools
from Biskit import EHandler
from Biskit import LogFile
import Biskit.Trajectory as Trajectory
import Biskit.Pymoler as Pymoler
import glob
import sys
options = {'q':None,
'o':'.'}
def _use():
print """
Biskit.Mod example script that models a structure from a fasta
formatted sequence file in 4 steps:
1) Searches for homologous sequences and clusters the found
sequences to a representative set using NCBI-Tools.
2) Searches for template structures for the homology modeling.
Similar structures are removed by clustering.
3) Builds a combined sequence/structure alignment using T-Coffee.
4) Builds models using Modeller.
Syntax: modelling_example.py -q |query file| -o |outputFolder|
[-h |host| -log -view ]
Options:
-q file; fasta formatted sequence file to model
-o folder; directory in which all project files will be
written
-h host name; the quite CPU-consuming tasks of aligning
and modeling can be sent to a remote host that also
has access to the output directory
-log write stdOut messages to log file (~project/modelling.log)
-view show the superimposed models in PyMol
HINT: If you want to inspect the alignment used for modeling:
~project/t_coffee/final.score_html
Default options:"""
for key in options.keys():
print "\t-",key, "\t",options[key]
sys.exit(0)
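## Example invocation (illustrative; the file and folder names are
## placeholders):
## modelling_example.py -q target.fasta -o ~/modelling_project -log -view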
def testOptions():
options = {}
options['q'] = tools.testRoot()+ '/Mod/project/target.fasta'
options['o'] = tools.testRoot()+ '/Mod/project'
options['log'] = '1'
options['view'] = '1'
return options
###########################
# MAIN
###########################
if len(sys.argv) < 3:
_use()
options = tools.cmdDict( options )
#options = testOptions()
outFolder = tools.absfile( options['o'] )
f_target = tools.absfile( options['q'] )
f_target = f_target or outFolder + SequenceSearcher.F_FASTA_TARGET
log = None
if 'log' in options:
log = LogFile( outFolder + '/modelling.log' )
## databases used
seq_db = 'swissprot'
tmp_db = 'pdbaa'
###############
## SequenceSearcher
##
## Find homologues to the target sequence using blast against "seq_db"
## Cluster the sequences and write the result to nr.fasta
## input: target.fasta
##
## output: sequences/all.fasta
## /blast.out
## /cluster_result.out
## /nr.fasta (input for Aligner)
tools.flushPrint('Searching for homologues ...')
try:
# initiate
searcher = SequenceSearcher( outFolder=outFolder, verbose=1, log=log )
# ## local PSIBlast - not fully implemented!!
# searcher.localPSIBlast( target, seq_db, e=0.1, alignments=1000)
## local Blast
searcher.localBlast( f_target, seq_db, 'blastp', alignments=500, e=0.0001 )
## cluster blast results. Defaults: simCut=1.75, lenCut=0.9, ncpu=1
## expects all.fasta
# searcher.clusterFastaIterative( )
searcher.clusterFasta()
searcher.writeFastaClustered()
tools.flushPrint('Done.\n')
except:
EHandler.error( 'Error while searching for homologues.')
###############
## TemplateSearcher
##
## Find modelling templates, blasting the target sequence against "tmp_db"
## Cluster the sequences and download the pdbs to templates/all
## input: target.fasta
##
## output: templates/blast.out
## templates/all.fasta
## templates/cluster_result.out
## templates/nr.fasta (input for Aligner)
## templates/all/*.pdb
## templates/nr/chain_index.txt (input for TemplateCleaner)
## /*.pdb ( " " " )
tools.flushPrint('Searching for templates ...')
try:
searcher = TemplateSearcher( outFolder, verbose=1, log=log )
searcher.localBlast(f_target, tmp_db, 'blastp', alignments=200, e=0.001)
searcher.retrievePDBs()
## expects all.fasta
searcher.clusterFasta( simCut=1.75, lenCut=0.9, ncpu=1 )
searcher.writeFastaClustered()
fn = searcher.saveClustered()
tools.flushPrint('Done.\n')
except:
EHandler.error( 'Error while searching for templates')
###################
## TemplateCleaner
##
## Prepare pdb files in templates/nr for T-coffee and modeller
## (replace nonstandard residues, remove hydrogens,
## remove atoms with multiple configurations, etc.)
## input: templates/nr/*.pdb
## templates/nr/chain_index.txt
##
## output: templates/t_coffee/*.alpha (input for Alignar)
## templates/modeller/*.pdb (input for Modeller)
tools.flushPrint('Cleaning template structures...')
try:
cleaner = TemplateCleaner( outFolder, log=log )
inp_dic = modUtils.parse_tabbed_file( outFolder +
TemplateSearcher.F_NR +
TemplateSearcher.F_CHAIN_INDEX )
cleaner.process_all( inp_dic )
tools.flushPrint('Done.\n')
except:
EHandler.error( 'Error while cleaning templates')
###################
## Aligner
##
## Create a sequence-structure alignment using T-coffee.
## Convert the alignment into Modeller compatible format
## input: sequences/nr.fasta
## templates/templates.fasta
## templates/t_cofee/*.alpha
##
## output: t_coffee/fast_pair.lib
## /final.score_html
## /struct.aln
## /t_coffee.log_*
## /final.aln
## /lalign_id_pair.lib
## /struct.aln_original
## /final.phylip
## /sap_pair.lib
## /t_coffee.inp
## /final.pir_aln (input for Modeller)
## /sap_pair.lib_original
## note 1: If there are more than approximately 50 sequences overall
## t_coffee will eat all the memory and the job will not finish.
## This should be fixed in more recent versions of T-Coffee
## (v > 3.2) where T-Coffee, according to the manual "switches
## to a heuristic mode, named DPA, where DPA stands for Double
## Progressive Alignment."
## Remedy: Use clusterFastaIterative() when searching for sequences
## note 2: If there is only one template structure step 2 of T-coffee
## will not work. Solution: skip the structural alignment if
## only one template structure is provided.
## note 3: In quite some cases the sequence retrieved from the nrpdb
## sequence database is different from the sequence extracted
## from the coordinates in the pdb-file. This will sometimes
## cause t-coffee to terminate with an error (two sequences
## with the same name but with different sequences). Temporary
## solution: Choose another structure from the same cluster
## as the troublemaker.
tools.flushPrint('Creating sequence/structure alignment...')
try:
a = Aligner( outFolder, log=log )
a.align_for_modeller_inp()
if options.has_key('host'):
a.go(options['host'])
else:
a.go( )
tools.flushPrint('Done.\n')
except:
EHandler.error( 'Error while building alignments.')
###################
## Modeller
##
## Build model using Modeller.
## input: templates/modeller/*.pdb
## t_coffee/final.pir_aln
##
## output: modeller/modeller.log
## /*.B9999000?? <- models
tools.flushPrint('Building model...')
try:
m = Modeller( outFolder, log=log )
r = m.prepare_modeller( )
if options.has_key('host'):
m.go(options['host'])
else:
m.go()
m.postProcess()
tools.flushPrint('Done.\n')
except:
EHandler.error( 'Error while modelling.')
###################
##
## Superimpose the models and look at the result in Pymol
##
def __printMatrix( matrix ):
"""
Print the part to the right of the diagonal in a matrix
"""
nr = len( matrix )
for i in range(nr): print '%5i'%(i+1),
for i in range(nr):
print '\n%2i'%(i+1),
for k in range(i):
print ' '*5,
for j in range(i, nr):
print '%5.2f'%matrix[i,j],
## get filenames of all models
models = glob.glob( '%s/modeller/%s*.pdb'%(outFolder,
tools.stripFilename(f_target)) )
## create a Trajectory object with the models
traj = Trajectory( pdbs=models )
## fit the models against the average structure iteratively
traj.blockFit2ref()
## calculate and print rmsd matrix
rmsHeavy = traj.pairwiseRmsd()
print '\nHEAVY ATOM RMSD BETWEEN MODELS:'
__printMatrix( rmsHeavy )
## same thing for backbone atoms
BBMask = traj[0].maskBB()
traj.blockFit2ref( mask = BBMask )
rmsBB = traj.pairwiseRmsd( aMask = BBMask )
print '\nBACKBONE RMSD BETWEEN MODELS:'
__printMatrix( rmsBB )
if options.has_key('view'):
## show backbone superimposed structures in pymol
pm = Pymoler( )
for t in traj:
pm.addPdb( t )
pm.show()
|
helicontech/zoo | refs/heads/master | Zoocmd/core/models/__init__.py | 4 | __author__ = 'Helicon Tech'
|
hahalml/bigcouch | refs/heads/master | couchjs/scons/scons-local-2.0.1/SCons/Tool/sunf77.py | 61 | """SCons.Tool.sunf77
Tool-specific initialization for sunf77, the Sun Studio F77 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunf77.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Util
from FortranCommon import add_all_to_env
compilers = ['sunf77', 'f77']
def generate(env):
"""Add Builders and construction variables for sunf77 to an Environment."""
add_all_to_env(env)
fcomp = env.Detect(compilers) or 'f77'
env['FORTRAN'] = fcomp
env['F77'] = fcomp
env['SHFORTRAN'] = '$FORTRAN'
env['SHF77'] = '$F77'
env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
env['SHF77FLAGS'] = SCons.Util.CLVar('$F77FLAGS -KPIC')
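# Usage sketch (illustrative): in an SConstruct, request this tool
# explicitly and compile Fortran 77 sources as usual.
#
# env = Environment(tools=['sunf77', 'link'])
# env.Program('hello', 'hello.f')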
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
GorK-ChO/selenium | refs/heads/master | py/selenium/webdriver/support/color.py | 45 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
RGB_PATTERN = r"^\s*rgb\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*\)\s*$"
RGB_PCT_PATTERN = r"^\s*rgb\(\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*\)\s*$"
RGBA_PATTERN = r"^\s*rgba\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(0|1|0\.\d+)\s*\)\s*$"
RGBA_PCT_PATTERN = r"^\s*rgba\(\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(0|1|0\.\d+)\s*\)\s*$"
HEX_PATTERN = r"#([A-Fa-f0-9]{2})([A-Fa-f0-9]{2})([A-Fa-f0-9]{2})"
HEX3_PATTERN = r"#([A-Fa-f0-9])([A-Fa-f0-9])([A-Fa-f0-9])"
HSL_PATTERN = r"^\s*hsl\(\s*(\d{1,3})\s*,\s*(\d{1,3})%\s*,\s*(\d{1,3})%\s*\)\s*$"
HSLA_PATTERN = r"^\s*hsla\(\s*(\d{1,3})\s*,\s*(\d{1,3})%\s*,\s*(\d{1,3})%\s*,\s*(0|1|0\.\d+)\s*\)\s*$"
class Color(object):
"""
Color conversion support class
Example:
.. code-block:: python
from selenium.webdriver.support.color import Color
print(Color.from_string('#00ff33').rgba)
print(Color.from_string('rgb(1, 255, 3)').hex)
print(Color.from_string('blue').rgba)
"""
@staticmethod
def from_string(str_):
import re
class Matcher(object):
def __init__(self):
self.match_obj = None
def match(self, pattern, str_):
self.match_obj = re.match(pattern, str_)
return self.match_obj
@property
def groups(self):
return () if self.match_obj is None else self.match_obj.groups()
m = Matcher()
if m.match(RGB_PATTERN, str_):
return Color(*m.groups)
elif m.match(RGB_PCT_PATTERN, str_):
rgb = tuple([float(each) / 100 * 255 for each in m.groups])
return Color(*rgb)
elif m.match(RGBA_PATTERN, str_):
return Color(*m.groups)
elif m.match(RGBA_PCT_PATTERN, str_):
rgba = tuple([float(each) / 100 * 255 for each in m.groups[:3]] + [m.groups[3]])
return Color(*rgba)
elif m.match(HEX_PATTERN, str_):
rgb = tuple([int(each, 16) for each in m.groups])
return Color(*rgb)
elif m.match(HEX3_PATTERN, str_):
rgb = tuple([int(each * 2, 16) for each in m.groups])
return Color(*rgb)
elif m.match(HSL_PATTERN, str_) or m.match(HSLA_PATTERN, str_):
return Color._from_hsl(*m.groups)
elif str_.upper() in Colors.keys():
return Colors[str_.upper()]
else:
raise ValueError("Could not convert %s into color" % str_)
@staticmethod
def _from_hsl(h, s, l, a=1):
h = float(h) / 360
s = float(s) / 100
l = float(l) / 100
if s == 0:
r = l
g = r
b = r
else:
luminosity2 = l * (1 + s) if l < 0.5 else l + s - l * s
luminosity1 = 2 * l - luminosity2
def hue_to_rgb(lum1, lum2, hue):
if hue < 0.0:
hue += 1
if hue > 1.0:
hue -= 1
if hue < 1.0 / 6.0:
return (lum1 + (lum2 - lum1) * 6.0 * hue)
elif hue < 1.0 / 2.0:
return lum2
elif hue < 2.0 / 3.0:
return lum1 + (lum2 - lum1) * ((2.0 / 3.0) - hue) * 6.0
else:
return lum1
r = hue_to_rgb(luminosity1, luminosity2, h + 1.0 / 3.0)
g = hue_to_rgb(luminosity1, luminosity2, h)
b = hue_to_rgb(luminosity1, luminosity2, h - 1.0 / 3.0)
return Color(round(r * 255), round(g * 255), round(b * 255), a)
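# Worked example (sketch): hsl(120, 100%, 50%) is pure green, so
# Color._from_hsl(120, 100, 50) == Color(0, 255, 0) by the math above.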
def __init__(self, red, green, blue, alpha=1):
self.red = int(red)
self.green = int(green)
self.blue = int(blue)
self.alpha = "1" if float(alpha) == 1 else str(float(alpha) or 0)
@property
def rgb(self):
return "rgb(%d, %d, %d)" % (self.red, self.green, self.blue)
@property
def rgba(self):
return "rgba(%d, %d, %d, %s)" % (self.red, self.green, self.blue, self.alpha)
@property
def hex(self):
return "#%02x%02x%02x" % (self.red, self.green, self.blue)
def __eq__(self, other):
if isinstance(other, Color):
return self.rgba == other.rgba
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def __hash__(self):
return hash((self.red, self.green, self.blue, self.alpha))
def __repr__(self):
return "Color(red=%d, green=%d, blue=%d, alpha=%s)" % (self.red, self.green, self.blue, self.alpha)
def __str__(self):
return "Color: %s" % self.rgba
# Basic, extended and transparent colour keywords as defined by the W3C HTML4 spec
# See http://www.w3.org/TR/css3-color/#html4
Colors = {
"TRANSPARENT": Color(0, 0, 0, 0),
"ALICEBLUE": Color(240, 248, 255),
"ANTIQUEWHITE": Color(250, 235, 215),
"AQUA": Color(0, 255, 255),
"AQUAMARINE": Color(127, 255, 212),
"AZURE": Color(240, 255, 255),
"BEIGE": Color(245, 245, 220),
"BISQUE": Color(255, 228, 196),
"BLACK": Color(0, 0, 0),
"BLANCHEDALMOND": Color(255, 235, 205),
"BLUE": Color(0, 0, 255),
"BLUEVIOLET": Color(138, 43, 226),
"BROWN": Color(165, 42, 42),
"BURLYWOOD": Color(222, 184, 135),
"CADETBLUE": Color(95, 158, 160),
"CHARTREUSE": Color(127, 255, 0),
"CHOCOLATE": Color(210, 105, 30),
"CORAL": Color(255, 127, 80),
"CORNFLOWERBLUE": Color(100, 149, 237),
"CORNSILK": Color(255, 248, 220),
"CRIMSON": Color(220, 20, 60),
"CYAN": Color(0, 255, 255),
"DARKBLUE": Color(0, 0, 139),
"DARKCYAN": Color(0, 139, 139),
"DARKGOLDENROD": Color(184, 134, 11),
"DARKGRAY": Color(169, 169, 169),
"DARKGREEN": Color(0, 100, 0),
"DARKGREY": Color(169, 169, 169),
"DARKKHAKI": Color(189, 183, 107),
"DARKMAGENTA": Color(139, 0, 139),
"DARKOLIVEGREEN": Color(85, 107, 47),
"DARKORANGE": Color(255, 140, 0),
"DARKORCHID": Color(153, 50, 204),
"DARKRED": Color(139, 0, 0),
"DARKSALMON": Color(233, 150, 122),
"DARKSEAGREEN": Color(143, 188, 143),
"DARKSLATEBLUE": Color(72, 61, 139),
"DARKSLATEGRAY": Color(47, 79, 79),
"DARKSLATEGREY": Color(47, 79, 79),
"DARKTURQUOISE": Color(0, 206, 209),
"DARKVIOLET": Color(148, 0, 211),
"DEEPPINK": Color(255, 20, 147),
"DEEPSKYBLUE": Color(0, 191, 255),
"DIMGRAY": Color(105, 105, 105),
"DIMGREY": Color(105, 105, 105),
"DODGERBLUE": Color(30, 144, 255),
"FIREBRICK": Color(178, 34, 34),
"FLORALWHITE": Color(255, 250, 240),
"FORESTGREEN": Color(34, 139, 34),
"FUCHSIA": Color(255, 0, 255),
"GAINSBORO": Color(220, 220, 220),
"GHOSTWHITE": Color(248, 248, 255),
"GOLD": Color(255, 215, 0),
"GOLDENROD": Color(218, 165, 32),
"GRAY": Color(128, 128, 128),
"GREY": Color(128, 128, 128),
"GREEN": Color(0, 128, 0),
"GREENYELLOW": Color(173, 255, 47),
"HONEYDEW": Color(240, 255, 240),
"HOTPINK": Color(255, 105, 180),
"INDIANRED": Color(205, 92, 92),
"INDIGO": Color(75, 0, 130),
"IVORY": Color(255, 255, 240),
"KHAKI": Color(240, 230, 140),
"LAVENDER": Color(230, 230, 250),
"LAVENDERBLUSH": Color(255, 240, 245),
"LAWNGREEN": Color(124, 252, 0),
"LEMONCHIFFON": Color(255, 250, 205),
"LIGHTBLUE": Color(173, 216, 230),
"LIGHTCORAL": Color(240, 128, 128),
"LIGHTCYAN": Color(224, 255, 255),
"LIGHTGOLDENRODYELLOW": Color(250, 250, 210),
"LIGHTGRAY": Color(211, 211, 211),
"LIGHTGREEN": Color(144, 238, 144),
"LIGHTGREY": Color(211, 211, 211),
"LIGHTPINK": Color(255, 182, 193),
"LIGHTSALMON": Color(255, 160, 122),
"LIGHTSEAGREEN": Color(32, 178, 170),
"LIGHTSKYBLUE": Color(135, 206, 250),
"LIGHTSLATEGRAY": Color(119, 136, 153),
"LIGHTSLATEGREY": Color(119, 136, 153),
"LIGHTSTEELBLUE": Color(176, 196, 222),
"LIGHTYELLOW": Color(255, 255, 224),
"LIME": Color(0, 255, 0),
"LIMEGREEN": Color(50, 205, 50),
"LINEN": Color(250, 240, 230),
"MAGENTA": Color(255, 0, 255),
"MAROON": Color(128, 0, 0),
"MEDIUMAQUAMARINE": Color(102, 205, 170),
"MEDIUMBLUE": Color(0, 0, 205),
"MEDIUMORCHID": Color(186, 85, 211),
"MEDIUMPURPLE": Color(147, 112, 219),
"MEDIUMSEAGREEN": Color(60, 179, 113),
"MEDIUMSLATEBLUE": Color(123, 104, 238),
"MEDIUMSPRINGGREEN": Color(0, 250, 154),
"MEDIUMTURQUOISE": Color(72, 209, 204),
"MEDIUMVIOLETRED": Color(199, 21, 133),
"MIDNIGHTBLUE": Color(25, 25, 112),
"MINTCREAM": Color(245, 255, 250),
"MISTYROSE": Color(255, 228, 225),
"MOCCASIN": Color(255, 228, 181),
"NAVAJOWHITE": Color(255, 222, 173),
"NAVY": Color(0, 0, 128),
"OLDLACE": Color(253, 245, 230),
"OLIVE": Color(128, 128, 0),
"OLIVEDRAB": Color(107, 142, 35),
"ORANGE": Color(255, 165, 0),
"ORANGERED": Color(255, 69, 0),
"ORCHID": Color(218, 112, 214),
"PALEGOLDENROD": Color(238, 232, 170),
"PALEGREEN": Color(152, 251, 152),
"PALETURQUOISE": Color(175, 238, 238),
"PALEVIOLETRED": Color(219, 112, 147),
"PAPAYAWHIP": Color(255, 239, 213),
"PEACHPUFF": Color(255, 218, 185),
"PERU": Color(205, 133, 63),
"PINK": Color(255, 192, 203),
"PLUM": Color(221, 160, 221),
"POWDERBLUE": Color(176, 224, 230),
"PURPLE": Color(128, 0, 128),
"REBECCAPURPLE": Color(128, 51, 153),
"RED": Color(255, 0, 0),
"ROSYBROWN": Color(188, 143, 143),
"ROYALBLUE": Color(65, 105, 225),
"SADDLEBROWN": Color(139, 69, 19),
"SALMON": Color(250, 128, 114),
"SANDYBROWN": Color(244, 164, 96),
"SEAGREEN": Color(46, 139, 87),
"SEASHELL": Color(255, 245, 238),
"SIENNA": Color(160, 82, 45),
"SILVER": Color(192, 192, 192),
"SKYBLUE": Color(135, 206, 235),
"SLATEBLUE": Color(106, 90, 205),
"SLATEGRAY": Color(112, 128, 144),
"SLATEGREY": Color(112, 128, 144),
"SNOW": Color(255, 250, 250),
"SPRINGGREEN": Color(0, 255, 127),
"STEELBLUE": Color(70, 130, 180),
"TAN": Color(210, 180, 140),
"TEAL": Color(0, 128, 128),
"THISTLE": Color(216, 191, 216),
"TOMATO": Color(255, 99, 71),
"TURQUOISE": Color(64, 224, 208),
"VIOLET": Color(238, 130, 238),
"WHEAT": Color(245, 222, 179),
"WHITE": Color(255, 255, 255),
"WHITESMOKE": Color(245, 245, 245),
"YELLOW": Color(255, 255, 0),
"YELLOWGREEN": Color(154, 205, 50)
}
|
deepmind/dm_construction | refs/heads/master | dm_construction/__init__.py | 1 | #!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DeepMind Construction tasks."""
from dm_construction import _environment_factory
ALL_TASKS = _environment_factory.ALL_TASKS
ALL_WRAPPERS = _environment_factory.ALL_WRAPPERS
get_unity_environment = _environment_factory.get_unity_environment
get_task_environment = _environment_factory.get_task_environment
get_wrapped_environment = _environment_factory.get_wrapped_environment
get_environment = _environment_factory.get_environment
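# For example (sketch), the exported ALL_TASKS list enumerates the task
# names accepted by the factory functions above:
#
# import dm_construction
# print(dm_construction.ALL_TASKS)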
|
balister/GNU-Radio | refs/heads/adap | gr-trellis/examples/python/test_tcm.py | 13 | #!/usr/bin/env python
from gnuradio import gr
from gnuradio import trellis, digital, blocks
from gnuradio import eng_notation
import math
import sys
import random
import fsm_utils
from gnuradio.eng_option import eng_option
from optparse import OptionParser
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
def run_test (f,Kb,bitspersymbol,K,dimensionality,constellation,N0,seed):
tb = gr.top_block ()
# TX
#packet = [0]*Kb
#for i in range(Kb-1*16): # last 16 bits = 0 to drive the final state to 0
#packet[i] = random.randint(0, 1) # random 0s and 1s
#src = blocks.vector_source_s(packet,False)
src = blocks.lfsr_32k_source_s()
src_head = blocks.head(gr.sizeof_short,Kb/16) # packet size in shorts
#b2s = blocks.unpacked_to_packed_ss(1,gr.GR_MSB_FIRST) # pack bits in shorts
s2fsmi = blocks.packed_to_unpacked_ss(bitspersymbol,gr.GR_MSB_FIRST) # unpack shorts to symbols compatible with the FSM input cardinality
enc = trellis.encoder_ss(f,0) # initial state = 0
mod = digital.chunks_to_symbols_sf(constellation,dimensionality)
# CHANNEL
add = blocks.add_ff()
noise = analog.noise_source_f(analog.GR_GAUSSIAN,math.sqrt(N0/2),seed)
# RX
metrics = trellis.metrics_f(f.O(),dimensionality,constellation,digital.TRELLIS_EUCLIDEAN) # data preprocessing to generate metrics for Viterbi
va = trellis.viterbi_s(f,K,0,-1) # Put -1 if the Initial/Final states are not set.
fsmi2s = blocks.unpacked_to_packed_ss(bitspersymbol,gr.GR_MSB_FIRST) # pack FSM input symbols to shorts
#s2b = blocks.packed_to_unpacked_ss(1,gr.GR_MSB_FIRST) # unpack shorts to bits
#dst = blocks.vector_sink_s();
dst = blocks.check_lfsr_32k_s()
tb.connect (src,src_head,s2fsmi,enc,mod)
#tb.connect (src,b2s,s2fsmi,enc,mod)
tb.connect (mod,(add,0))
tb.connect (noise,(add,1))
tb.connect (add,metrics)
tb.connect (metrics,va,fsmi2s,dst)
#tb.connect (metrics,va,fsmi2s,s2b,dst)
tb.run()
# A bit of cheating: run the program once and print the
# final encoder state..
# Then put it as the last argument in the viterbi block
#print "final state = " , enc.ST()
ntotal = dst.ntotal ()
nright = dst.nright ()
runlength = dst.runlength ()
#ntotal = len(packet)
#if len(dst.data()) != ntotal:
#print "Error: not enough data\n"
#nright = 0;
#for i in range(ntotal):
#if packet[i]==dst.data()[i]:
#nright=nright+1
#else:
#print "Error in ", i
return (ntotal,ntotal-nright)
def main():
parser = OptionParser(option_class=eng_option)
parser.add_option("-f", "--fsm_file", type="string", default="fsm_files/awgn1o2_4.fsm", help="Filename containing the fsm specification, e.g. -f fsm_files/awgn1o2_4.fsm (default=fsm_files/awgn1o2_4.fsm)")
parser.add_option("-e", "--esn0", type="eng_float", default=10.0, help="Symbol energy to noise PSD level ratio in dB, e.g., -e 10.0 (default=10.0)")
parser.add_option("-r", "--repetitions", type="int", default=100, help="Number of packets to be generated for the simulation, e.g., -r 100 (default=100)")
(options, args) = parser.parse_args ()
if len(args) != 0:
parser.print_help()
raise SystemExit, 1
fname=options.fsm_file
esn0_db=float(options.esn0)
rep=int(options.repetitions)
# system parameters
f=trellis.fsm(fname) # get the FSM specification from a file
# alternatively you can specify the fsm from its generator matrix
#f=trellis.fsm(1,2,[5,7])
Kb=1024*16 # packet size in bits (make it multiple of 16 so it can be packed in a short)
bitspersymbol = int(round(math.log(f.I())/math.log(2))) # bits per FSM input symbol
K=Kb/bitspersymbol # packet size in trellis steps
modulation = fsm_utils.psk4 # see fsm_utils.py for available predefined modulations
dimensionality = modulation[0]
constellation = modulation[1]
if len(constellation)/dimensionality != f.O():
sys.stderr.write ('Incompatible FSM output cardinality and modulation size.\n')
sys.exit (1)
# calculate average symbol energy
Es = 0
for i in range(len(constellation)):
Es = Es + constellation[i]**2
Es = Es / (len(constellation)/dimensionality)
N0=Es/pow(10.0,esn0_db/10.0); # calculate noise variance
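# Worked example (assuming the psk4 constellation points lie on the unit
# circle, so Es = 1.0): with the default -e 10.0, N0 = 1.0/10**(10.0/10.0)
# = 0.1, and run_test() then draws noise with sigma = sqrt(N0/2) ~= 0.224
# per dimension.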
tot_s=0 # total number of transmitted shorts
terr_s=0 # total number of shorts in error
terr_p=0 # total number of packets in error
for i in range(rep):
(s,e)=run_test(f,Kb,bitspersymbol,K,dimensionality,constellation,N0,-long(666+i)) # run experiment with different seed to get different noise realizations
tot_s=tot_s+s
terr_s=terr_s+e
terr_p=terr_p+(terr_s!=0)
if ((i+1)%100==0) : # display progress
print i+1,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_s,terr_s, '%.2e' % ((1.0*terr_s)/tot_s)
# estimate of the (short or bit) error rate
print rep,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_s,terr_s, '%.2e' % ((1.0*terr_s)/tot_s)
if __name__ == '__main__':
main()
|
vcpe-io/vcpe-hub | refs/heads/master | qos/rest_member_register.py | 2 | """Project for REST API (Group Setting)."""
import json
from ryu.base import app_manager
from ryu.app.wsgi import ControllerBase, WSGIApplication, route
from webob import Response
from setting.db import data_collection
from setting.db import collection
from route import urls
url = '/handle_member_info/member/{memberid}'
get_member_info_instance_name = 'get_member_info_api_app'
class member_register(app_manager.RyuApp):
"""Get_Member_Info class."""
_CONTEXTS = {'wsgi': WSGIApplication}
def __init__(self, *args, **kwargs):
"""Initial Setting method."""
super(member_register, self).__init__(*args, **kwargs)
self.switches = {}
wsgi = kwargs['wsgi']
wsgi.register(member_register_rest,
{get_member_info_instance_name: self})
def save_member_to_database(self, memberid, groupid):
"""Save Member data to database method."""
if data_collection.member_list.get(memberid) is None:
print 1
member = collection.Member(memberid, groupid)
data_collection.member_list.update({memberid: member})
data_collection.group_list.get(groupid).members.append(memberid)
print member.name, member.group_id
else:
print 2
member = data_collection.member_list.get(memberid)
member.name = memberid
if member.group_id != groupid:
if member.group_id != "whole":
data_collection.group_list.get(member.group_id).members.remove(memberid)
member.group_id = groupid
data_collection.group_list.get(groupid).members.append(memberid)
print member.name, member.group_id
# curl -X PUT -d '{"group_id" : "group_1"}'
# http://127.0.0.1:8080/handle_member_info/member/user1
class member_register_rest(ControllerBase):
"""Get_Member_Info_Rest class."""
def __init__(self, req, link, data, **config):
"""Initial Setting method."""
super(member_register_rest, self).__init__(req, link, data, **config)
self.get_member_info = data[get_member_info_instance_name]
@route('member_data', url, methods=['PUT'])
def put_member_data_(self, req, **kwargs):
"""Put Member data method."""
memberid = str(kwargs['memberid'])
group_id = req.body
json_link = json.loads(group_id)
groupid = str(json_link.get('group_id'))
if data_collection.group_list.get(groupid) is None:
return Response(status=404, body=str('not ok'))
else:
self.get_member_info.save_member_to_database(memberid, groupid)
return Response(content_type='application/json',
body=str('Success'))
@route('member_list', urls.url_member_list, methods=['GET'])
def get_member_list(self, req, **kwargs):
dic = {}
for key in data_collection.member_list.keys():
member_info = {}
member_data = data_collection.member_list[key]
member_info.update({'Hostname': member_data.hostname})
member_info.update({'IP': member_data.ip})
member_info.update({'MAC': key})
member_info.update({'Group': member_data.group_id})
dic.update({key: member_info})
body = json.dumps(dic)
return Response(content_type='application/json', body=body)
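# Example GET response body (illustrative values):
# {"aa:bb:cc:dd:ee:ff": {"Hostname": "host1", "IP": "10.0.0.1",
# "MAC": "aa:bb:cc:dd:ee:ff", "Group": "group_1"}}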
|
illicitonion/givabit | refs/heads/master | lib/sdks/google_appengine_1.7.1/google_appengine/lib/django_1_2/tests/regressiontests/datatypes/models.py | 92 | """
This is a basic model to test saving and loading boolean and date-related
types, which in the past were problematic for some database backends.
"""
from django.db import models
class Donut(models.Model):
name = models.CharField(max_length=100)
is_frosted = models.BooleanField(default=False)
has_sprinkles = models.NullBooleanField()
baked_date = models.DateField(null=True)
baked_time = models.TimeField(null=True)
consumed_at = models.DateTimeField(null=True)
review = models.TextField()
class Meta:
ordering = ('consumed_at',)
def __str__(self):
return self.name
class RumBaba(models.Model):
baked_date = models.DateField(auto_now_add=True)
baked_timestamp = models.DateTimeField(auto_now_add=True)
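# Example round-trip (illustrative; standard Django ORM calls):
# import datetime
# d = Donut.objects.create(name="Frosted 2007", is_frosted=True,
#                          baked_date=datetime.date(2007, 12, 16))
# assert Donut.objects.get(pk=d.pk).baked_date == datetime.date(2007, 12, 16)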
|
Pr0jectX/O2 | refs/heads/master | opos/admin.py | 2 | from django.contrib import admin
from opos.models import ( Customers )
class CustomerAdmin (admin.ModelAdmin):
list_display = ('name', 'maxdebt', 'curdebt', )
fields = ('name', 'card', 'id', 'searchkey', ('maxdebt', 'curdebt',), )
readonly_fields = ('curdebt',)
admin.site.register (Customers, CustomerAdmin)
|
amith01994/intellij-community | refs/heads/master | python/testData/formatter/commentAfterBlock_after.py | 79 | def test():
if bar:
foo()
# comment
foobar()
|
klmitch/keystone | refs/heads/master | keystone/tests/unit/catalog/test_backends.py | 2 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
import mock
from six.moves import range
from testtools import matchers
from keystone.catalog import core
from keystone.common import driver_hints
from keystone import exception
from keystone.tests import unit
class CatalogTests(object):
_legacy_endpoint_id_in_endpoint = True
_enabled_default_to_true_when_creating_endpoint = False
def test_region_crud(self):
# create
region_id = '0' * 255
new_region = unit.new_region_ref(id=region_id)
res = self.catalog_api.create_region(new_region)
# Ensure that we don't need to have a
# parent_region_id in the original supplied
# ref dict, but that it will be returned from
# the endpoint, with None value.
expected_region = new_region.copy()
expected_region['parent_region_id'] = None
self.assertDictEqual(expected_region, res)
# Test adding another region with the one above
# as its parent. We will check below whether deleting
# the parent successfully deletes any child regions.
parent_region_id = region_id
new_region = unit.new_region_ref(parent_region_id=parent_region_id)
region_id = new_region['id']
res = self.catalog_api.create_region(new_region)
self.assertDictEqual(new_region, res)
# list
regions = self.catalog_api.list_regions()
self.assertThat(regions, matchers.HasLength(2))
region_ids = [x['id'] for x in regions]
self.assertIn(parent_region_id, region_ids)
self.assertIn(region_id, region_ids)
# update
region_desc_update = {'description': uuid.uuid4().hex}
res = self.catalog_api.update_region(region_id, region_desc_update)
expected_region = new_region.copy()
expected_region['description'] = region_desc_update['description']
self.assertDictEqual(expected_region, res)
# delete
self.catalog_api.delete_region(parent_region_id)
self.assertRaises(exception.RegionNotFound,
self.catalog_api.delete_region,
parent_region_id)
self.assertRaises(exception.RegionNotFound,
self.catalog_api.get_region,
parent_region_id)
# Ensure the child is also gone...
self.assertRaises(exception.RegionNotFound,
self.catalog_api.get_region,
region_id)
def _create_region_with_parent_id(self, parent_id=None):
new_region = unit.new_region_ref(parent_region_id=parent_id)
self.catalog_api.create_region(new_region)
return new_region
def test_list_regions_filtered_by_parent_region_id(self):
new_region = self._create_region_with_parent_id()
parent_id = new_region['id']
new_region = self._create_region_with_parent_id(parent_id)
new_region = self._create_region_with_parent_id(parent_id)
# filter by parent_region_id
hints = driver_hints.Hints()
hints.add_filter('parent_region_id', parent_id)
regions = self.catalog_api.list_regions(hints)
for region in regions:
self.assertEqual(parent_id, region['parent_region_id'])
@unit.skip_if_cache_disabled('catalog')
def test_cache_layer_region_crud(self):
new_region = unit.new_region_ref()
region_id = new_region['id']
self.catalog_api.create_region(new_region.copy())
updated_region = copy.deepcopy(new_region)
updated_region['description'] = uuid.uuid4().hex
# cache the result
self.catalog_api.get_region(region_id)
# update the region bypassing catalog_api
self.catalog_api.driver.update_region(region_id, updated_region)
self.assertDictContainsSubset(new_region,
self.catalog_api.get_region(region_id))
self.catalog_api.get_region.invalidate(self.catalog_api, region_id)
self.assertDictContainsSubset(updated_region,
self.catalog_api.get_region(region_id))
# delete the region
self.catalog_api.driver.delete_region(region_id)
# still get the old region
self.assertDictContainsSubset(updated_region,
self.catalog_api.get_region(region_id))
self.catalog_api.get_region.invalidate(self.catalog_api, region_id)
self.assertRaises(exception.RegionNotFound,
self.catalog_api.get_region, region_id)
@unit.skip_if_cache_disabled('catalog')
def test_invalidate_cache_when_updating_region(self):
new_region = unit.new_region_ref()
region_id = new_region['id']
self.catalog_api.create_region(new_region)
# cache the region
self.catalog_api.get_region(region_id)
# update the region via catalog_api
new_description = {'description': uuid.uuid4().hex}
self.catalog_api.update_region(region_id, new_description)
# assert that we can get the new region
current_region = self.catalog_api.get_region(region_id)
self.assertEqual(new_description['description'],
current_region['description'])
def test_create_region_with_duplicate_id(self):
new_region = unit.new_region_ref()
self.catalog_api.create_region(new_region)
# Create region again with duplicate id
self.assertRaises(exception.Conflict,
self.catalog_api.create_region,
new_region)
def test_get_region_returns_not_found(self):
self.assertRaises(exception.RegionNotFound,
self.catalog_api.get_region,
uuid.uuid4().hex)
def test_delete_region_returns_not_found(self):
self.assertRaises(exception.RegionNotFound,
self.catalog_api.delete_region,
uuid.uuid4().hex)
def test_create_region_invalid_parent_region_returns_not_found(self):
new_region = unit.new_region_ref(parent_region_id='nonexisting')
self.assertRaises(exception.RegionNotFound,
self.catalog_api.create_region,
new_region)
def test_avoid_creating_circular_references_in_regions_update(self):
region_one = self._create_region_with_parent_id()
# self circle: region_one->region_one
self.assertRaises(exception.CircularRegionHierarchyError,
self.catalog_api.update_region,
region_one['id'],
{'parent_region_id': region_one['id']})
# region_one->region_two->region_one
region_two = self._create_region_with_parent_id(region_one['id'])
self.assertRaises(exception.CircularRegionHierarchyError,
self.catalog_api.update_region,
region_one['id'],
{'parent_region_id': region_two['id']})
# region_one region_two->region_three->region_four->region_two
region_three = self._create_region_with_parent_id(region_two['id'])
region_four = self._create_region_with_parent_id(region_three['id'])
self.assertRaises(exception.CircularRegionHierarchyError,
self.catalog_api.update_region,
region_two['id'],
{'parent_region_id': region_four['id']})
@mock.patch.object(core.CatalogDriverV8,
"_ensure_no_circle_in_hierarchical_regions")
def test_circular_regions_can_be_deleted(self, mock_ensure_on_circle):
# turn off the enforcement so that cycles can be created for the test
mock_ensure_on_circle.return_value = None
region_one = self._create_region_with_parent_id()
# self circle: region_one->region_one
self.catalog_api.update_region(
region_one['id'],
{'parent_region_id': region_one['id']})
self.catalog_api.delete_region(region_one['id'])
self.assertRaises(exception.RegionNotFound,
self.catalog_api.get_region,
region_one['id'])
# region_one->region_two->region_one
region_one = self._create_region_with_parent_id()
region_two = self._create_region_with_parent_id(region_one['id'])
self.catalog_api.update_region(
region_one['id'],
{'parent_region_id': region_two['id']})
self.catalog_api.delete_region(region_one['id'])
self.assertRaises(exception.RegionNotFound,
self.catalog_api.get_region,
region_one['id'])
self.assertRaises(exception.RegionNotFound,
self.catalog_api.get_region,
region_two['id'])
# region_one->region_two->region_three->region_one
region_one = self._create_region_with_parent_id()
region_two = self._create_region_with_parent_id(region_one['id'])
region_three = self._create_region_with_parent_id(region_two['id'])
self.catalog_api.update_region(
region_one['id'],
{'parent_region_id': region_three['id']})
self.catalog_api.delete_region(region_two['id'])
self.assertRaises(exception.RegionNotFound,
self.catalog_api.get_region,
region_two['id'])
self.assertRaises(exception.RegionNotFound,
self.catalog_api.get_region,
region_one['id'])
self.assertRaises(exception.RegionNotFound,
self.catalog_api.get_region,
region_three['id'])
def test_service_crud(self):
# create
new_service = unit.new_service_ref()
service_id = new_service['id']
res = self.catalog_api.create_service(service_id, new_service)
self.assertDictEqual(new_service, res)
# list
services = self.catalog_api.list_services()
self.assertIn(service_id, [x['id'] for x in services])
# update
service_name_update = {'name': uuid.uuid4().hex}
res = self.catalog_api.update_service(service_id, service_name_update)
expected_service = new_service.copy()
expected_service['name'] = service_name_update['name']
self.assertDictEqual(expected_service, res)
# delete
self.catalog_api.delete_service(service_id)
self.assertRaises(exception.ServiceNotFound,
self.catalog_api.delete_service,
service_id)
self.assertRaises(exception.ServiceNotFound,
self.catalog_api.get_service,
service_id)
def _create_random_service(self):
new_service = unit.new_service_ref()
service_id = new_service['id']
return self.catalog_api.create_service(service_id, new_service)
def test_service_filtering(self):
target_service = self._create_random_service()
unrelated_service1 = self._create_random_service()
unrelated_service2 = self._create_random_service()
# filter by type
hint_for_type = driver_hints.Hints()
hint_for_type.add_filter(name="type", value=target_service['type'])
services = self.catalog_api.list_services(hint_for_type)
self.assertEqual(1, len(services))
filtered_service = services[0]
self.assertEqual(target_service['type'], filtered_service['type'])
self.assertEqual(target_service['id'], filtered_service['id'])
# filter should have been removed, since it was already used by the
# backend
self.assertEqual(0, len(hint_for_type.filters))
# the backend shouldn't filter by name, since this is handled by the
# front end
hint_for_name = driver_hints.Hints()
hint_for_name.add_filter(name="name", value=target_service['name'])
services = self.catalog_api.list_services(hint_for_name)
self.assertEqual(3, len(services))
# filter should still be there, since it wasn't used by the backend
self.assertEqual(1, len(hint_for_name.filters))
self.catalog_api.delete_service(target_service['id'])
self.catalog_api.delete_service(unrelated_service1['id'])
self.catalog_api.delete_service(unrelated_service2['id'])
@unit.skip_if_cache_disabled('catalog')
def test_cache_layer_service_crud(self):
new_service = unit.new_service_ref()
service_id = new_service['id']
res = self.catalog_api.create_service(service_id, new_service)
self.assertDictEqual(new_service, res)
self.catalog_api.get_service(service_id)
updated_service = copy.deepcopy(new_service)
updated_service['description'] = uuid.uuid4().hex
# update bypassing catalog api
self.catalog_api.driver.update_service(service_id, updated_service)
self.assertDictContainsSubset(new_service,
self.catalog_api.get_service(service_id))
self.catalog_api.get_service.invalidate(self.catalog_api, service_id)
self.assertDictContainsSubset(updated_service,
self.catalog_api.get_service(service_id))
# delete bypassing catalog api
self.catalog_api.driver.delete_service(service_id)
self.assertDictContainsSubset(updated_service,
self.catalog_api.get_service(service_id))
self.catalog_api.get_service.invalidate(self.catalog_api, service_id)
self.assertRaises(exception.ServiceNotFound,
self.catalog_api.delete_service,
service_id)
self.assertRaises(exception.ServiceNotFound,
self.catalog_api.get_service,
service_id)
@unit.skip_if_cache_disabled('catalog')
def test_invalidate_cache_when_updating_service(self):
new_service = unit.new_service_ref()
service_id = new_service['id']
self.catalog_api.create_service(service_id, new_service)
# cache the service
self.catalog_api.get_service(service_id)
# update the service via catalog api
new_type = {'type': uuid.uuid4().hex}
self.catalog_api.update_service(service_id, new_type)
# assert that we can get the new service
current_service = self.catalog_api.get_service(service_id)
self.assertEqual(new_type['type'], current_service['type'])
def test_delete_service_with_endpoint(self):
# create a service
service = unit.new_service_ref()
self.catalog_api.create_service(service['id'], service)
# create an endpoint attached to the service
endpoint = unit.new_endpoint_ref(service_id=service['id'],
region_id=None)
self.catalog_api.create_endpoint(endpoint['id'], endpoint)
# deleting the service should also delete the endpoint
self.catalog_api.delete_service(service['id'])
self.assertRaises(exception.EndpointNotFound,
self.catalog_api.get_endpoint,
endpoint['id'])
self.assertRaises(exception.EndpointNotFound,
self.catalog_api.delete_endpoint,
endpoint['id'])
def test_cache_layer_delete_service_with_endpoint(self):
service = unit.new_service_ref()
self.catalog_api.create_service(service['id'], service)
# create an endpoint attached to the service
endpoint = unit.new_endpoint_ref(service_id=service['id'],
region_id=None)
self.catalog_api.create_endpoint(endpoint['id'], endpoint)
# cache the result
self.catalog_api.get_service(service['id'])
self.catalog_api.get_endpoint(endpoint['id'])
# delete the service bypassing catalog api
self.catalog_api.driver.delete_service(service['id'])
self.assertDictContainsSubset(endpoint,
self.catalog_api.
get_endpoint(endpoint['id']))
self.assertDictContainsSubset(service,
self.catalog_api.
get_service(service['id']))
self.catalog_api.get_endpoint.invalidate(self.catalog_api,
endpoint['id'])
self.assertRaises(exception.EndpointNotFound,
self.catalog_api.get_endpoint,
endpoint['id'])
self.assertRaises(exception.EndpointNotFound,
self.catalog_api.delete_endpoint,
endpoint['id'])
# multiple endpoints associated with a service
second_endpoint = unit.new_endpoint_ref(service_id=service['id'],
region_id=None)
self.catalog_api.create_service(service['id'], service)
self.catalog_api.create_endpoint(endpoint['id'], endpoint)
self.catalog_api.create_endpoint(second_endpoint['id'],
second_endpoint)
self.catalog_api.delete_service(service['id'])
self.assertRaises(exception.EndpointNotFound,
self.catalog_api.get_endpoint,
endpoint['id'])
self.assertRaises(exception.EndpointNotFound,
self.catalog_api.delete_endpoint,
endpoint['id'])
self.assertRaises(exception.EndpointNotFound,
self.catalog_api.get_endpoint,
second_endpoint['id'])
self.assertRaises(exception.EndpointNotFound,
self.catalog_api.delete_endpoint,
second_endpoint['id'])
def test_get_service_returns_not_found(self):
self.assertRaises(exception.ServiceNotFound,
self.catalog_api.get_service,
uuid.uuid4().hex)
def test_delete_service_returns_not_found(self):
self.assertRaises(exception.ServiceNotFound,
self.catalog_api.delete_service,
uuid.uuid4().hex)
def test_create_endpoint_nonexistent_service(self):
endpoint = unit.new_endpoint_ref(service_id=uuid.uuid4().hex,
region_id=None)
self.assertRaises(exception.ValidationError,
self.catalog_api.create_endpoint,
endpoint['id'],
endpoint)
def test_update_endpoint_nonexistent_service(self):
dummy_service, enabled_endpoint, dummy_disabled_endpoint = (
self._create_endpoints())
new_endpoint = unit.new_endpoint_ref(service_id=uuid.uuid4().hex)
self.assertRaises(exception.ValidationError,
self.catalog_api.update_endpoint,
enabled_endpoint['id'],
new_endpoint)
def test_create_endpoint_nonexistent_region(self):
service = unit.new_service_ref()
self.catalog_api.create_service(service['id'], service)
endpoint = unit.new_endpoint_ref(service_id=service['id'])
self.assertRaises(exception.ValidationError,
self.catalog_api.create_endpoint,
endpoint['id'],
endpoint)
def test_update_endpoint_nonexistent_region(self):
dummy_service, enabled_endpoint, dummy_disabled_endpoint = (
self._create_endpoints())
new_endpoint = unit.new_endpoint_ref(service_id=uuid.uuid4().hex)
self.assertRaises(exception.ValidationError,
self.catalog_api.update_endpoint,
enabled_endpoint['id'],
new_endpoint)
def test_get_endpoint_returns_not_found(self):
self.assertRaises(exception.EndpointNotFound,
self.catalog_api.get_endpoint,
uuid.uuid4().hex)
def test_delete_endpoint_returns_not_found(self):
self.assertRaises(exception.EndpointNotFound,
self.catalog_api.delete_endpoint,
uuid.uuid4().hex)
def test_create_endpoint(self):
service = unit.new_service_ref()
self.catalog_api.create_service(service['id'], service)
endpoint = unit.new_endpoint_ref(service_id=service['id'],
region_id=None)
self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy())
def test_update_endpoint(self):
dummy_service_ref, endpoint_ref, dummy_disabled_endpoint_ref = (
self._create_endpoints())
res = self.catalog_api.update_endpoint(endpoint_ref['id'],
{'interface': 'private'})
expected_endpoint = endpoint_ref.copy()
expected_endpoint['enabled'] = True
expected_endpoint['interface'] = 'private'
if self._legacy_endpoint_id_in_endpoint:
expected_endpoint['legacy_endpoint_id'] = None
if self._enabled_default_to_true_when_creating_endpoint:
expected_endpoint['enabled'] = True
self.assertDictEqual(expected_endpoint, res)
def _create_endpoints(self):
# Creates a service and 2 endpoints for the service in the same region.
# The 'public' interface is enabled and the 'internal' interface is
# disabled.
def create_endpoint(service_id, region, **kwargs):
ref = unit.new_endpoint_ref(
service_id=service_id,
region_id=region,
url='http://localhost/%s' % uuid.uuid4().hex,
**kwargs)
self.catalog_api.create_endpoint(ref['id'], ref)
return ref
# Create a service for use with the endpoints.
service_ref = unit.new_service_ref()
service_id = service_ref['id']
self.catalog_api.create_service(service_id, service_ref)
region = unit.new_region_ref()
self.catalog_api.create_region(region)
# Create endpoints
enabled_endpoint_ref = create_endpoint(service_id, region['id'])
disabled_endpoint_ref = create_endpoint(
service_id, region['id'], enabled=False, interface='internal')
return service_ref, enabled_endpoint_ref, disabled_endpoint_ref
def test_list_endpoints(self):
service = unit.new_service_ref()
self.catalog_api.create_service(service['id'], service)
expected_ids = set([uuid.uuid4().hex for _ in range(3)])
for endpoint_id in expected_ids:
endpoint = unit.new_endpoint_ref(service_id=service['id'],
id=endpoint_id,
region_id=None)
self.catalog_api.create_endpoint(endpoint['id'], endpoint)
endpoints = self.catalog_api.list_endpoints()
self.assertEqual(expected_ids, set(e['id'] for e in endpoints))
def test_get_catalog_endpoint_disabled(self):
"""Get back only enabled endpoints when get the v2 catalog."""
service_ref, enabled_endpoint_ref, dummy_disabled_endpoint_ref = (
self._create_endpoints())
user_id = uuid.uuid4().hex
project_id = uuid.uuid4().hex
catalog = self.catalog_api.get_catalog(user_id, project_id)
exp_entry = {
'id': enabled_endpoint_ref['id'],
'name': service_ref['name'],
'publicURL': enabled_endpoint_ref['url'],
}
region = enabled_endpoint_ref['region_id']
self.assertEqual(exp_entry, catalog[region][service_ref['type']])
def test_get_v3_catalog_endpoint_disabled(self):
"""Get back only enabled endpoints when get the v3 catalog."""
enabled_endpoint_ref = self._create_endpoints()[1]
user_id = uuid.uuid4().hex
project_id = uuid.uuid4().hex
catalog = self.catalog_api.get_v3_catalog(user_id, project_id)
endpoint_ids = [x['id'] for x in catalog[0]['endpoints']]
self.assertEqual([enabled_endpoint_ref['id']], endpoint_ids)
@unit.skip_if_cache_disabled('catalog')
def test_invalidate_cache_when_updating_endpoint(self):
service = unit.new_service_ref()
self.catalog_api.create_service(service['id'], service)
# create an endpoint attached to the service
endpoint = unit.new_endpoint_ref(service_id=service['id'],
region_id=None)
self.catalog_api.create_endpoint(endpoint['id'], endpoint)
# cache the endpoint
self.catalog_api.get_endpoint(endpoint['id'])
# update the endpoint via catalog api
new_url = {'url': uuid.uuid4().hex}
self.catalog_api.update_endpoint(endpoint['id'], new_url)
# assert that we can get the new endpoint
current_endpoint = self.catalog_api.get_endpoint(endpoint['id'])
self.assertEqual(new_url['url'], current_endpoint['url'])
|
HRZaheri/sklearn-theano | refs/heads/master | sklearn_theano/sandbox/overfeat_wrapper.py | 9 | import numpy as np
import Image
import sys
import os
import StringIO
import subprocess
def get_overfeat_dir(overfeat_dir=None):
if overfeat_dir is None:
overfeat_dir = os.environ.get("OVERFEAT_DIR", None)
return overfeat_dir
def get_overfeat_cmd(overfeat_cmd=None, overfeat_dir=None, architecture=None):
if overfeat_cmd is None:
overfeat_cmd = os.environ.get("OVERFEAT_CMD", None)
if overfeat_cmd is None:
overfeat_dir = get_overfeat_dir(overfeat_dir)
if overfeat_dir is None or architecture is None:
raise Exception(
'Please set the environment variable OVERFEAT_CMD'
' to point to the file /path/to/overfeat/bin/(system'
')/overfeatcmd, or set OVERFEAT_DIR and provide architecture')
overfeat_cmd = os.path.join(overfeat_dir, "bin", architecture,
"overfeatcmd")
return overfeat_cmd
def get_net_weight_dir(net_weight_dir=None, overfeat_dir=None):
if net_weight_dir is None:
net_weight_dir = os.environ.get("OVERFEAT_NET_WEIGHT_DIR", None)
if net_weight_dir is None:
overfeat_dir = get_overfeat_dir(overfeat_dir)
if overfeat_dir is not None:
net_weight_dir = os.path.join(overfeat_dir, "data/default/")
else:
raise Exception("Please provide net_weight_dir or set "
"OVERFEAT_NET_WEIGHT_DIR or OVERFEAT_DIR")
return net_weight_dir
def get_net_weights(net_weight_file=None, large_net=None,
net_weight_dir=None, overfeat_dir=None):
if net_weight_file is None:
net_weight_file = os.environ.get("OVERFEAT_NET_WEIGHT_FILE", None)
if net_weight_file is None:
net_weight_dir = get_net_weight_dir(net_weight_dir, overfeat_dir)
if large_net is not None:
net_weight_file = os.path.join(net_weight_dir, "net_weight_%d"
% int(large_net))
else:
raise Exception("Please specify large_net=0/1/False/True")
return net_weight_file
def get_overfeat_output_raw(img_arr, layer_id, largenet, overfeatcmd=None,
net_weight_file=None, overfeat_dir=None,
architecture='linux_64'):
if img_arr.dtype != np.uint8:
raise ValueError('Please convert image to uint8')
if img_arr.shape[2] != 3:
raise ValueError('Last dimension must index color')
overfeatcmd = get_overfeat_cmd(overfeatcmd, overfeat_dir, architecture)
net_weight_file = get_net_weights(net_weight_file, largenet,
overfeat_dir=overfeat_dir)
image = Image.fromarray(img_arr)
buf = StringIO.StringIO()
image.save(buf, format='ppm')
buf.seek(0)
command = overfeatcmd + " " + net_weight_file + " -1 %d %d" % (
int(largenet), layer_id)
p = subprocess.Popen(
command.split(' '), stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    output = p.communicate(input=buf.getvalue())[0]
return output
def _parse_overfeat_output(output):
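    # overfeatcmd emits two lines: the first holds the whitespace-separated
    # output shape, the second the flattened feature values in row-major
    # order.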
buf = StringIO.StringIO(output)
shape_ = buf.readline()
content_ = buf.readline()
shape = map(int, shape_.strip().split(" "))
content = [map(float, content_.strip().split(" "))]
return np.array(content).reshape(shape)
def get_output(img_arr, layer_id, largenet, overfeatcmd=None,
net_weight_file=None, overfeat_dir=None,
architecture='linux_64'):
output = get_overfeat_output_raw(img_arr, layer_id, largenet,
overfeatcmd, net_weight_file,
overfeat_dir, architecture)
return _parse_overfeat_output(output)
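# A minimal, hedged usage sketch (assumes OVERFEAT_CMD or OVERFEAT_DIR points
# at a local OverFeat install with the default weights; 231x231 is the small
# network's input size and layer id 19 is an arbitrary illustrative choice):
if __name__ == '__main__':
    demo_img = np.random.randint(0, 256, size=(231, 231, 3)).astype(np.uint8)
    features = get_output(demo_img, 19, False)
    print features.shape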
|
Rambatino/CHAID | refs/heads/master | setup.py | 1 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/Rambatino/CHAID
"""
import re
from os import path
from setuptools import setup, find_packages
def get_version():
"""
Read version from __init__.py
"""
version_regex = re.compile(
'__version__\\s*=\\s*(?P<q>[\'"])(?P<version>\\d+(\\.\\d+)*(-(alpha|beta|rc)(\\.\\d+)?)?)(?P=q)'
)
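    # (?P<q>['"])...(?P=q) forces the opening and closing quote characters to
    # match; the 'version' group accepts dotted releases with an optional
    # -alpha/-beta/-rc pre-release suffix, e.g. 1.2.3-rc.1.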
here = path.abspath(path.dirname(__file__))
init_location = path.join(here, "CHAID/__init__.py")
    with open(init_location) as init_file:
        for line in init_file:
            match = version_regex.search(line)
            if match:
                return match.group('version')
    raise Exception(
        "Couldn't read version information from '{0}'".format(init_location)
    )
setup(
name='CHAID',
version=get_version(),
description='A CHAID tree building algorithm',
long_description="This package provides a python implementation of the Chi-Squared Automatic Inference Detection (CHAID) decision tree",
url='https://github.com/Rambatino/CHAID',
author='Mark Ramotowski, Richard Fitzgerald',
author_email='mark.tint.ramotowski@gmail.com',
license='Apache License 2.0',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='CHAID pandas numpy scipy statistics statistical analysis',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=[
'cython',
'numpy',
'pandas',
'treelib',
'pytest',
'scipy',
'savReaderWriter',
'graphviz',
'plotly',
'colorlover',
'enum34; python_version == "2.7"'
],
extras_require={
'test': ['codecov', 'tox', 'tox-pyenv', 'detox', 'pytest', 'pytest-cov', 'psutil'],
}
)
|
LiveZenLK/CeygateERP | refs/heads/master | openerp/report/interface.py | 49 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import os
import re
from lxml import etree
import openerp
import openerp.tools as tools
import openerp.modules
import print_xml
import render
import urllib
from openerp import SUPERUSER_ID
from openerp.report.render.rml2pdf import customfonts
#
# coerce any type to a unicode string (to preserve non-ascii characters)
# and escape XML entities
#
def toxml(value):
unicode_value = tools.ustr(value)
    return unicode_value.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
class report_int(object):
_reports = {}
def __init__(self, name, register=True):
if register:
assert openerp.conf.deprecation.allow_report_int_registration
assert name.startswith('report.'), 'Report names should start with "report.".'
assert name not in self._reports, 'The report "%s" already exists.' % name
self._reports[name] = self
else:
            # The report is instantiated at each use site, which is ok.
pass
self.__name = name
self.name = name
self.id = 0
self.name2 = '.'.join(name.split('.')[1:])
# TODO the reports have methods with a 'title' kwarg that is redundant with this attribute
self.title = None
def create(self, cr, uid, ids, datas, context=None):
return False
class report_rml(report_int):
"""
Automatically builds a document using the transformation process:
XML -> DATAS -> RML -> PDF -> HTML
using a XSL:RML transformation
"""
def __init__(self, name, table, tmpl, xsl, register=True):
super(report_rml, self).__init__(name, register=register)
self.table = table
self.internal_header=False
self.tmpl = tmpl
self.xsl = xsl
self.bin_datas = {}
self.generators = {
'pdf': self.create_pdf,
'html': self.create_html,
'raw': self.create_raw,
'sxw': self.create_sxw,
'txt': self.create_txt,
'odt': self.create_odt,
'html2html' : self.create_html2html,
'makohtml2html' :self.create_makohtml2html,
}
def create(self, cr, uid, ids, datas, context):
registry = openerp.registry(cr.dbname)
xml = self.create_xml(cr, uid, ids, datas, context)
xml = tools.ustr(xml).encode('utf8')
report_type = datas.get('report_type', 'pdf')
if report_type == 'raw':
return xml, report_type
registry['res.font'].font_scan(cr, SUPERUSER_ID, lazy=True, context=context)
rml = self.create_rml(cr, xml, uid, context)
ir_actions_report_xml_obj = registry['ir.actions.report.xml']
report_xml_ids = ir_actions_report_xml_obj.search(cr, uid, [('report_name', '=', self.name[7:])], context=context)
self.title = report_xml_ids and ir_actions_report_xml_obj.browse(cr,uid,report_xml_ids)[0].name or 'OpenERP Report'
create_doc = self.generators[report_type]
pdf = create_doc(rml, title=self.title)
return pdf, report_type
def create_xml(self, cr, uid, ids, datas, context=None):
if not context:
context={}
doc = print_xml.document(cr, uid, datas, {})
self.bin_datas.update( doc.bin_datas or {})
doc.parse(self.tmpl, ids, self.table, context)
xml = doc.xml_get()
doc.close()
return self.post_process_xml_data(cr, uid, xml, context)
def post_process_xml_data(self, cr, uid, xml, context=None):
if not context:
context={}
# find the position of the 3rd tag
# (skip the <?xml ...?> and the "root" tag)
iter = re.finditer('<[^>]*>', xml)
i = iter.next()
i = iter.next()
pos_xml = i.end()
doc = print_xml.document(cr, uid, {}, {})
tmpl_path = openerp.modules.get_module_resource('base', 'report', 'corporate_defaults.xml')
doc.parse(tmpl_path, [uid], 'res.users', context)
corporate_header = doc.xml_get()
doc.close()
# find the position of the tag after the <?xml ...?> tag
iter = re.finditer('<[^>]*>', corporate_header)
i = iter.next()
pos_header = i.end()
return xml[:pos_xml] + corporate_header[pos_header:] + xml[pos_xml:]
#
# TODO: The translation doesn't work for "<tag t="1">textext<tag> tex</tag>text</tag>"
#
def create_rml(self, cr, xml, uid, context=None):
if self.tmpl=='' and not self.internal_header:
self.internal_header=True
if not context:
context={}
registry = openerp.registry(cr.dbname)
ir_translation_obj = registry['ir.translation']
# In some case we might not use xsl ...
if not self.xsl:
return xml
stylesheet_file = tools.file_open(self.xsl)
try:
stylesheet = etree.parse(stylesheet_file)
xsl_path, _ = os.path.split(self.xsl)
for import_child in stylesheet.findall('./import'):
if 'href' in import_child.attrib:
                    imp_href = import_child.get('href')
                    imp_fd, imp_path = tools.file_open(imp_href, subdir=xsl_path, pathinfo=True)
                    import_child.set('href', urllib.quote(str(imp_path)))
                    imp_fd.close()
finally:
stylesheet_file.close()
#TODO: get all the translation in one query. That means we have to:
# * build a list of items to translate,
# * issue the query to translate them,
# * (re)build/update the stylesheet with the translated items
def translate(doc, lang):
translate_aux(doc, lang, False)
def translate_aux(doc, lang, t):
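            # A node is translated once it, or any ancestor, carries a t="1"
            # attribute; both the text and the tail of such nodes are looked
            # up in ir.translation under the 'xsl' source type.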
for node in doc:
t = t or node.get("t")
if t:
text = None
tail = None
if node.text:
text = node.text.strip().replace('\n',' ')
if node.tail:
tail = node.tail.strip().replace('\n',' ')
if text:
translation1 = ir_translation_obj._get_source(cr, uid, self.name2, 'xsl', lang, text)
if translation1:
node.text = node.text.replace(text, translation1)
if tail:
translation2 = ir_translation_obj._get_source(cr, uid, self.name2, 'xsl', lang, tail)
if translation2:
node.tail = node.tail.replace(tail, translation2)
translate_aux(node, lang, t)
if context.get('lang', False):
translate(stylesheet.iter(), context['lang'])
transform = etree.XSLT(stylesheet)
xml = etree.tostring(
transform(etree.fromstring(xml)))
return xml
def create_pdf(self, rml, localcontext = None, logo=None, title=None):
if not localcontext:
localcontext = {}
localcontext.update({'internal_header':self.internal_header})
if logo:
self.bin_datas['logo'] = logo
else:
if 'logo' in self.bin_datas:
del self.bin_datas['logo']
obj = render.rml(rml, localcontext, self.bin_datas, self._get_path(), title)
obj.render()
return obj.get()
def create_html(self, rml, localcontext = None, logo=None, title=None):
obj = render.rml2html(rml, localcontext, self.bin_datas)
obj.render()
return obj.get()
def create_txt(self, rml,localcontext, logo=None, title=None):
obj = render.rml2txt(rml, localcontext, self.bin_datas)
obj.render()
return obj.get().encode('utf-8')
def create_html2html(self, rml, localcontext = None, logo=None, title=None):
obj = render.html2html(rml, localcontext, self.bin_datas)
obj.render()
return obj.get()
def create_raw(self,rml, localcontext = None, logo=None, title=None):
obj = render.odt2odt(etree.XML(rml),localcontext)
obj.render()
return etree.tostring(obj.get())
def create_sxw(self,rml,localcontext = None):
obj = render.odt2odt(rml,localcontext)
obj.render()
return obj.get()
def create_odt(self,rml,localcontext = None):
obj = render.odt2odt(rml,localcontext)
obj.render()
return obj.get()
def create_makohtml2html(self,html,localcontext = None):
obj = render.makohtml2html(html,localcontext)
obj.render()
return obj.get()
def _get_path(self):
return [
self.tmpl.replace(os.path.sep, '/').rsplit('/', 1)[0],
'addons',
tools.config['root_path']
]
|
DimensionDataCBUSydney/libcloud | refs/heads/trunk | libcloud/utils/escape.py | 30 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'sanitize_object_name',
]
def sanitize_object_name(object_name):
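    # Object storage keys conventionally use forward slashes as separators;
    # normalizing Windows-style backslashes keeps keys consistent across
    # platforms.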
return object_name.replace('\\', '/')
|
lampitosgames/ProjectC | refs/heads/master | Python Scripting/Download Data From Crunchbase/GetCbCompNames.py | 1 | import urllib2
import json
import os
#store all company data in one big array
companies = []
wantedCompanies = []
outputFile = ""
numOfFiles = 1
numOfLines = 0
#get output location
while True:
outputFolder = raw_input("Where would you like this script to output (folder)\n")
if not os.path.exists(outputFolder):
print "Not a valid directory"
continue
outputFile = "%s\\CompanyList1.txt" % (outputFolder)
break
print "Looking for companies..."
#Create the file if it doesn't exist already
if not os.path.isfile(outputFile):
with open(outputFile, "w") as file:
file.write("")
#get all the company data
try:
print "Looking for companies at url location..."
response = urllib2.urlopen("http://api.crunchbase.com/v/2/organizations?user_key=<omitted>&page=1")
firstPage = json.load(response)
except urllib2.HTTPError, ex:
    print "Error loading data!"
    raise
numberOfPages = firstPage["data"]["paging"]["number_of_pages"]
print "Found %s pages" % (numberOfPages)
for page in range(numberOfPages):
try:
print "Looking for page " + str(page+1) + " companies..."
response = urllib2.urlopen("http://api.crunchbase.com/v/2/organizations?user_key=<omitted>&page=" + str(page+1))
thisPage = json.load(response)
except urllib2.HTTPError, ex:
print "Error loading data! Continuing to next page"
continue
for co in thisPage["data"]["items"]:
companies.append(co)
#print the length of how many companies were found
print "Found " + str(len(companies)) + " companies"
#loop through all companies, and filter out the ones the user wants
for co in companies:
print "Saving " + repr(co['path'])
#append the company's name to the array
wantedCompanies.append(co['path'])
    #check that splits the list into files of 150000 companies each
if numOfLines < 150000:
#write the company's name to a file
try:
with open(outputFile, 'a') as file:
file.write(co['path'] + "\n")
numOfLines += 1
except UnicodeEncodeError, ex:
print "Company with name " + repr(co['path']) + " had odd characters, cannot add to text file"
continue
except KeyError, ex:
continue
else:
#increase the number of files
numOfFiles += 1
#rename the output file
outputFile = "%s\\CompanyList%s.txt" % (outputFolder, numOfFiles)
#Create the file if it doesn't exist already
if not os.path.isfile(outputFile):
with open(outputFile, "w") as file:
file.write("")
#reset the number of lines
numOfLines = 0
#write the company's name to a file
try:
with open(outputFile, 'a') as file:
file.write(co['path'] + "\n")
numOfLines += 1
except UnicodeEncodeError, ex:
print "Company with name " + repr(co['path']) + " had odd characters, cannot add to text file" |
default1406/PhyLab | refs/heads/master | PythonExperimentDataHandle/1010113.py | 1 | # -*- coding: utf-8 -*-
from math import sqrt
from math import pi
import phylab
from jinja2 import Environment
from handler import texdir
#texdir = "./tex/"
env = Environment()
def SteelWire(m, C_plus, C_sub, D, L, H, b, source):
    # m: arithmetic sequence of masses, typically 10 to 24 (unit: kg)
    # C: unit cm
    # D: unit mm
    # L: unit cm
    # H: unit cm
    # b: unit mm
C = []
for i in range(0,len(C_plus),1):
C.append((C_plus[i] + C_sub[i]) / 2)
delta_C = []
for i in range(4):
delta_C.append(C[i+4] - C[i])
ave_delta_C = sum(delta_C) / 4
delta_C.append(round(ave_delta_C,2))
ave_D = sum(D) / 5
D.append(ave_D)
delta_m = m[4] - m[0];
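    # Young's modulus via the optical-lever formula:
    #   E = 16 * g * L * delta_m * H / (pi * D**2 * b * delta_C)
    # the pow(10, 6) factor absorbs the mixed cm/mm units of the inputs above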
E = 16 * 9.8 * L * delta_m * H * pow(10,6) / (pi * pow(ave_D,2) * b * ave_delta_C)
ua_delta_C = phylab.Ua(delta_C,ave_delta_C,4)#cm
ub_delta_C = 0.05 / sqrt(3)#cm
u_delta_C = sqrt(ua_delta_C**2 + ub_delta_C**2)#cm
ua_D = phylab.Ua(D,ave_D,5)#mm
ub_D = 0.005 / sqrt(3)#mm
u_D = sqrt(ua_D**2 + ub_D**2)#mm
u_b = 0.02 / sqrt(3)#cm
u_L = 0.3 / sqrt(3)#cm
u_H = 0.5 / sqrt(3)#cm
u_E_E = sqrt(pow(u_L / L,2)+pow(u_H / H,2)+pow(2 * u_D / ave_D,2)+pow(u_b / b,2)+pow(u_delta_C / ave_delta_C,2))
u_E = u_E_E * E
final = phylab.BitAdapt(E,u_E)
return env.from_string(source).render(
L = L,
H = H,
b = b,
D = D,
ave_D = ave_D,
m = m,
delta_m = delta_m,
C_plus = C_plus,
C_sub = C_sub,
C = C,
ave_delta_C = ave_delta_C,
E = phylab.ToScience(E),
ua_D = ua_D,
u_D = u_D,
ua_C = ua_delta_C,
u_C = u_delta_C,
u_E_E = u_E_E,
u_E = u_E,
final = final
)
def handler(XML):
file_object = open(texdir + "1010113.tex","r")
    # read the LaTeX template file into a string
source = file_object.read().decode('utf-8', 'ignore')
file_object.close()
m = [10.000,12.000,14.000,16.000,18.000,20.000,22.000,24.000]
C_plus = [6.72, 7.21, 7.65, 8.11, 8.55, 8.99, 9.47, 9.91]
C_sub = [6.74, 7.22, 7.64, 8.12, 8.57, 9.02, 9.48, 9.90]
D = [0.796, 0.796, 0.796, 0.796, 0.796]
L = 39.61
H = 111.12
b = 8.50
res = SteelWire(m, C_plus, C_sub, D, L, H, b, source)
# result = env.from_string(source).render(
# L = 1,
# H = 2,
# b = 3,
# D = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
# ave_D = 0.35
# )
#print(res)
return res
if __name__ == '__main__':
handler('') |
devsim/devsim | refs/heads/main | testing/ssac_cap.py | 1 | # Copyright 2013 Devsim LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cap device to test displacement current at contact
import devsim
import test_common
device="MyDevice"
region="MyRegion"
test_common.CreateSimpleMesh(device, region)
###
### Set parameters on the region
###
devsim.set_parameter(device=device, region=region, name="Permittivity", value=3.9*8.85e-14)
###
### Create the Potential solution variable
###
devsim.node_solution(device=device, region=region, name="Potential")
###
### Creates the Potential@n0 and Potential@n1 edge model
###
devsim.edge_from_node_model(device=device, region=region, node_model="Potential")
###
### Electric field on each edge, as well as its derivatives with respect to
### the potential at each node
###
devsim.edge_model(device=device, region=region, name="ElectricField",
equation="(Potential@n0 - Potential@n1)*EdgeInverseLength")
devsim.edge_model(device=device, region=region, name="ElectricField:Potential@n0",
equation="EdgeInverseLength")
devsim.edge_model(device=device, region=region, name="ElectricField:Potential@n1",
equation="-EdgeInverseLength")
###
### Model the D Field
###
devsim.edge_model(device=device, region=region, name="DField",
equation="Permittivity*ElectricField")
devsim.edge_model(device=device, region=region, name="DField:Potential@n0",
equation="diff(Permittivity*ElectricField, Potential@n0)")
devsim.edge_model(device=device, region=region, name="DField:Potential@n1",
equation="-DField:Potential@n0")
###
### Create the bulk equation
###
devsim.equation(device=device, region=region, name="PotentialEquation", variable_name="Potential",
edge_model="DField", variable_update="default")
# the topbias is a circuit node, and we want to prevent it from being overridden by a parameter
devsim.set_parameter(device=device, region=region, name="botbias", value=0.0)
for name, equation in (
("topnode_model", "Potential - topbias"),
("topnode_model:Potential", "1"),
("topnode_model:topbias", "-1"),
("botnode_model", "Potential - botbias"),
("botnode_model:Potential", "1"),
):
devsim.node_model(device=device, region=region, name=name, equation=equation)
# attached to circuit node
devsim.contact_equation(device=device, contact="top", name="PotentialEquation", variable_name="Potential",
node_model="topnode_model", edge_charge_model="DField", circuit_node="topbias")
# attached to ground
devsim.contact_equation(device=device, contact="bot", name="PotentialEquation", variable_name="Potential",
node_model="botnode_model", edge_charge_model="DField")
#
# Voltage source
#
devsim.circuit_element(name="V1", n1=1, n2=0, value=1.0, acreal=1.0)
devsim.circuit_element(name="R1", n1="topbias", n2=1, value=1e3)
#
devsim.solve(type="dc", absolute_error=1.0, relative_error=1e-10, maximum_iterations=30)
#
print(devsim.get_contact_charge(device=device, contact="top", equation="PotentialEquation"))
print(devsim.get_contact_charge(device=device, contact="bot", equation="PotentialEquation"))
#
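# Small-signal AC solves at two frequencies. With R1 = 1 kOhm in series, the
# device forms an RC divider, so the measured response depends on where each
# frequency sits relative to the (mesh-geometry-dependent) corner frequency.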
devsim.solve(type="ac", frequency=1e10)
devsim.solve(type="ac", frequency=1e15)
|
Illemius/telegram_types | refs/heads/master | scripts/out_files.py | 1 | from util import scan_files
def main():
print('## Files')
for _, file_name in scan_files():
part_name = file_name.rpartition('.')[0]
print(f"### {part_name}")
print(f"File: `res/{file_name}`", end='\n\n')
print(f"Sender: <N/A>", end='\n\n')
print(f"Update object with {part_name} field.", end='\n\n')
if __name__ == '__main__':
main()
|
hiei23/servo | refs/heads/master | tests/wpt/css-tests/tools/pywebsocket/src/mod_pywebsocket/xhr_benchmark_handler.py | 415 | # Copyright 2014 Google Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the COPYING file or at
# https://developers.google.com/open-source/licenses/bsd
from mod_pywebsocket import util
class XHRBenchmarkHandler(object):
def __init__(self, headers, rfile, wfile):
self._logger = util.get_class_logger(self)
self.headers = headers
self.rfile = rfile
self.wfile = wfile
def do_send(self):
content_length = int(self.headers.getheader('Content-Length'))
self._logger.debug('Requested to receive %s bytes', content_length)
RECEIVE_BLOCK_SIZE = 1024 * 1024
bytes_to_receive = content_length
while bytes_to_receive > 0:
bytes_to_receive_in_this_loop = bytes_to_receive
if bytes_to_receive_in_this_loop > RECEIVE_BLOCK_SIZE:
bytes_to_receive_in_this_loop = RECEIVE_BLOCK_SIZE
received_data = self.rfile.read(bytes_to_receive_in_this_loop)
if received_data != ('a' * bytes_to_receive_in_this_loop):
self._logger.debug('Request body verification failed')
return
bytes_to_receive -= len(received_data)
if bytes_to_receive < 0:
self._logger.debug('Received %d more bytes than expected' %
(-bytes_to_receive))
return
# Return the number of received bytes back to the client.
response_body = '%d' % content_length
self.wfile.write(
'HTTP/1.1 200 OK\r\n'
'Content-Type: text/html\r\n'
'Content-Length: %d\r\n'
'\r\n%s' % (len(response_body), response_body))
self.wfile.flush()
def do_receive(self):
content_length = int(self.headers.getheader('Content-Length'))
request_body = self.rfile.read(content_length)
request_array = request_body.split(' ')
if len(request_array) < 2:
self._logger.debug('Malformed request body: %r', request_body)
return
# Parse the size parameter.
bytes_to_send = request_array[0]
try:
bytes_to_send = int(bytes_to_send)
except ValueError, e:
self._logger.debug('Malformed size parameter: %r', bytes_to_send)
return
self._logger.debug('Requested to send %s bytes', bytes_to_send)
# Parse the transfer encoding parameter.
chunked_mode = False
mode_parameter = request_array[1]
if mode_parameter == 'chunked':
self._logger.debug('Requested chunked transfer encoding')
chunked_mode = True
elif mode_parameter != 'none':
self._logger.debug('Invalid mode parameter: %r', mode_parameter)
return
# Write a header
response_header = (
'HTTP/1.1 200 OK\r\n'
'Content-Type: application/octet-stream\r\n')
if chunked_mode:
response_header += 'Transfer-Encoding: chunked\r\n\r\n'
else:
response_header += (
'Content-Length: %d\r\n\r\n' % bytes_to_send)
self.wfile.write(response_header)
self.wfile.flush()
# Write a body
SEND_BLOCK_SIZE = 1024 * 1024
while bytes_to_send > 0:
bytes_to_send_in_this_loop = bytes_to_send
if bytes_to_send_in_this_loop > SEND_BLOCK_SIZE:
bytes_to_send_in_this_loop = SEND_BLOCK_SIZE
if chunked_mode:
self.wfile.write('%x\r\n' % bytes_to_send_in_this_loop)
self.wfile.write('a' * bytes_to_send_in_this_loop)
if chunked_mode:
self.wfile.write('\r\n')
self.wfile.flush()
bytes_to_send -= bytes_to_send_in_this_loop
if chunked_mode:
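            # Terminate the chunked body with a zero-length chunk and the
            # trailing CRLF required by the HTTP/1.1 chunked transfer coding.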
self.wfile.write('0\r\n\r\n')
self.wfile.flush()
|
Nicop06/ansible | refs/heads/devel | lib/ansible/modules/network/aci/aci_tenant_span_dst_group.py | 22 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_tenant_span_dst_group
short_description: Manage SPAN destination groups on Cisco ACI fabrics (span:DestGrp)
description:
- Manage SPAN destination groups on Cisco ACI fabrics.
- More information from the internal APIC class
I(span:DestGrp) at U(https://developer.cisco.com/media/mim-ref/MO-spanDestGrp.html).
author:
- Swetha Chunduri (@schunduri)
- Dag Wieers (@dagwieers)
- Jacob McGill (@jmcgill298)
version_added: '2.4'
requirements:
- ACI Fabric 1.0(3f)+
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
options:
dst_group:
description:
- The name of the SPAN destination group.
required: yes
aliases: [ name ]
description:
description:
- The description of the SPAN destination group.
aliases: [ descr ]
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_tenant_span_dst_group:
hostname: '{{ inventory_hostname }}'
username: '{{ username }}'
password: '{{ password }}'
dst_group: '{{ dst_group }}'
description: '{{ descr }}'
tenant: '{{ tenant }}'
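
# A second, hedged example (values are illustrative placeholders): remove the
# same destination group by setting state to absent.
- aci_tenant_span_dst_group:
    hostname: '{{ inventory_hostname }}'
    username: '{{ username }}'
    password: '{{ password }}'
    dst_group: '{{ dst_group }}'
    tenant: '{{ tenant }}'
    state: absent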
'''
RETURN = r'''
#
'''
from ansible.module_utils.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec
argument_spec.update(
dst_group=dict(type='str', required=False, aliases=['name']), # Not required for querying all objects
tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['dst_group', 'tenant']],
['state', 'present', ['dst_group', 'tenant']],
],
)
dst_group = module.params['dst_group']
description = module.params['description']
state = module.params['state']
# Add tenant_span_dst_grp to module.params for URL building
module.params['tenant_span_dst_grp'] = dst_group
aci = ACIModule(module)
aci.construct_url(root_class='tenant', subclass_1='tenant_span_dst_grp')
aci.get_existing()
if state == 'present':
# Filter out module parameters with null values
aci.payload(
aci_class='spanDestGrp',
class_config=dict(
name=dst_group,
descr=description,
),
)
# Generate config diff which will be used as POST request body
aci.get_diff(aci_class='spanDestGrp')
# Submit changes if module not in check_mode and the proposed is different than existing
aci.post_config()
elif state == 'absent':
aci.delete_config()
# Remove tenant_span_dst_grp that was used to build URL from module.params
module.params.pop('tenant_span_dst_grp')
module.exit_json(**aci.result)
if __name__ == "__main__":
main()
|
erickt/hue | refs/heads/master | desktop/core/ext-py/South-1.0.2/south/tests/deps_a/models.py | 12133432 | |
ChenJunor/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/conf/locale/is/__init__.py | 12133432 | |
CodeforHawaii/froide | refs/heads/master | froide/redaction/models.py | 12133432 | |
caot/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/conf/locale/en_GB/__init__.py | 12133432 |