| repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀) |
|---|---|---|---|---|
BruceDai/crosswalk-test-suite
|
refs/heads/master
|
stability/stability-iterative-android-tests/iterative/Switch_Between_NativeAndWebApp.py
|
18
|
#!/usr/bin/env python
# coding=utf-8
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Li, Hao<haox.li@intel.com>
import unittest
import os
import sys
import commands
import shutil
import time
import subprocess
from TestApp import *
reload(sys)
sys.setdefaultencoding('utf-8')
SCRIPT_PATH = os.path.realpath(__file__)
ConstPath = os.path.dirname(SCRIPT_PATH)
def setUp():
global device
#device = 'E6OKCY411012'
device = os.environ.get('DEVICE_ID')
if not device:
print 'Failed to get DEVICE_ID from the environment\n'
sys.exit(1)
class TestSwitchBetweenNativeAndWebApp(unittest.TestCase):
global testapp
def test_switch_between_nativeandwebapp(self):
setUp()
testapp = TestApp(device, ConstPath + "/../iterative.apk",
"org.xwalk.iterative", "IterativeActivity")
try:
if not testapp.isInstalled():
testapp.install()
testapp.launch()
# Pause and Resume 50 times
for i in range(50):
time.sleep(2)
# switch to the native home app
self.switchToHomeapp()
self.assertFalse(testapp.isActivity())
time.sleep(2)
# switch back
self.assertTrue(testapp.switch())
testapp.stop()
except Exception as e:
print "Error: %s" % e
testapp.stop()
self.assertTrue(False)
def switchToHomeapp(self):
action_status = False
# Android Home App
homeappname = "android.intent.category.HOME"
cmd = "%s -s %s shell dumpsys activity|grep %s|awk -F \"cmp=\" '{print $2}'|awk '{print $1}'" % (ADB_CMD, device, homeappname)
(return_code, output) = doCMD(cmd)
if len(output) > 0:
cmd = "%s -s %s shell am start -n %s" % (ADB_CMD, device, output[0])
(return_code, output) = doCMD(cmd)
action_status = True
else:
print "-->> Fail to find %s." % homeappname
return action_status
if __name__ == '__main__':
unittest.main()
|
chrismeyersfsu/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/nxos/nxos_mtu.py
|
12
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_mtu
version_added: "2.2"
short_description: Manages MTU settings on Nexus switch.
description:
- Manages MTU settings on Nexus switch.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
notes:
- Either the C(sysmtu) param is required, or the C(interface) AND C(mtu) params are required.
- C(state=absent) unconfigures a given MTU if that value is currently present.
options:
interface:
description:
- Full name of interface, i.e. Ethernet1/1.
required: false
default: null
mtu:
description:
- MTU for a specific interface.
required: false
default: null
sysmtu:
description:
- System jumbo MTU.
required: false
default: null
state:
description:
- Specify desired state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Ensure system mtu is 9216
- nxos_mtu:
sysmtu: 9216
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Config mtu on Eth1/1 (routed interface)
- nxos_mtu:
interface: Ethernet1/1
mtu: 1600
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Config mtu on Eth1/3 (switched interface)
- nxos_mtu:
interface: Ethernet1/3
mtu: 9216
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Unconfigure mtu on a given interface
- nxos_mtu:
interface: Ethernet1/3
mtu: 9216
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
state: absent
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"mtu": "1700"}
existing:
description:
- k/v pairs of existing mtu/sysmtu on the interface/system
type: dict
sample: {"mtu": "1600", "sysmtu": "9216"}
end_state:
description: k/v pairs of mtu/sysmtu values after module execution
returned: always
type: dict
sample: {"mtu": "1700", sysmtu": "9216"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["interface vlan10", "mtu 1700"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import json
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
def execute_config_command(commands, module):
try:
module.configure(commands)
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
except AttributeError:
try:
commands.insert(0, 'configure')
module.cli.add_commands(commands, output='config')
module.cli.run_commands()
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
def get_cli_body_ssh(command, response, module):
"""Get response for when transport=cli. This is kind of a hack and mainly
needed because these modules were originally written for NX-API. And
not every command supports "| json" when using cli/ssh. As such, we assume
if | json returns an XML string, it is a valid command, but that the
resource doesn't exist yet. Instead, the output will be a raw string
when issuing commands containing 'show run'.
"""
if 'xml' in response[0] or response[0] == '\n':
body = []
elif 'show run' in command:
body = response
else:
try:
body = [json.loads(response[0])]
except ValueError:
module.fail_json(msg='Command does not support JSON output',
command=command)
return body
def execute_show(cmds, module, command_type=None):
command_type_map = {
'cli_show': 'json',
'cli_show_ascii': 'text'
}
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
else:
response = module.execute(cmds)
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
except AttributeError:
try:
if command_type:
command_type = command_type_map.get(command_type)
module.cli.add_commands(cmds, output=command_type)
response = module.cli.run_commands()
else:
module.cli.add_commands(cmds, raw=True)
response = module.cli.run_commands()
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
return response
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
response = execute_show(cmds, module)
body = get_cli_body_ssh(command, response, module)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = execute_show(cmds, module, command_type=command_type)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_mtu(interface, module):
command = 'show interface {0}'.format(interface)
mtu = {}
body = execute_show_command(command, module)
try:
mtu_table = body[0]['TABLE_interface']['ROW_interface']
mtu['mtu'] = str(
mtu_table.get('eth_mtu',
mtu_table.get('svi_mtu', 'unreadable_via_api')))
mtu['sysmtu'] = get_system_mtu(module)['sysmtu']
except KeyError:
mtu = {}
return mtu
def get_system_mtu(module):
command = 'show run all | inc jumbomtu'
sysmtu = ''
body = execute_show_command(command, module, command_type='cli_show_ascii')
if body:
sysmtu = str(body[0].split(' ')[-1])
try:
sysmtu = int(sysmtu)
except:
sysmtu = ""
return dict(sysmtu=str(sysmtu))
def get_commands_config_mtu(delta, interface):
CONFIG_ARGS = {
'mtu': 'mtu {mtu}',
'sysmtu': 'system jumbomtu {sysmtu}',
}
commands = []
for param, value in delta.items():
command = CONFIG_ARGS.get(param, 'DNE').format(**delta)
if command and command != 'DNE':
commands.append(command)
command = None
mtu_check = delta.get('mtu', None)
if mtu_check:
commands.insert(0, 'interface {0}'.format(interface))
return commands
def get_commands_remove_mtu(delta, interface):
CONFIG_ARGS = {
'mtu': 'no mtu {mtu}',
'sysmtu': 'no system jumbomtu {sysmtu}',
}
commands = []
for param, value in delta.items():
command = CONFIG_ARGS.get(param, 'DNE').format(**delta)
if command and command != 'DNE':
commands.append(command)
command = None
mtu_check = delta.get('mtu', None)
if mtu_check:
commands.insert(0, 'interface {0}'.format(interface))
return commands
def get_interface_type(interface):
if interface.upper().startswith('ET'):
return 'ethernet'
elif interface.upper().startswith('VL'):
return 'svi'
elif interface.upper().startswith('LO'):
return 'loopback'
elif interface.upper().startswith('MG'):
return 'management'
elif interface.upper().startswith('MA'):
return 'management'
elif interface.upper().startswith('PO'):
return 'portchannel'
else:
return 'unknown'
def is_default(interface, module):
command = 'show run interface {0}'.format(interface)
try:
body = execute_show_command(
command, module, command_type='cli_show_ascii')[0]
if body == 'DNE':
return 'DNE'
else:
raw_list = body.split('\n')
if raw_list[-1].startswith('interface'):
return True
else:
return False
except (KeyError):
return 'DNE'
def get_interface_mode(interface, intf_type, module):
command = 'show interface {0}'.format(interface)
mode = 'unknown'
interface_table = {}
body = execute_show_command(command, module)
try:
interface_table = body[0]['TABLE_interface']['ROW_interface']
except (KeyError, AttributeError, IndexError):
return mode
if intf_type in ['ethernet', 'portchannel']:
mode = str(interface_table.get('eth_mode', 'layer3'))
if mode in ['access', 'trunk']:
mode = 'layer2'
elif mode == 'routed':
mode = 'layer3'
elif intf_type in ['loopback', 'svi']:
mode = 'layer3'
return mode
def main():
argument_spec = dict(
mtu=dict(type='str'),
interface=dict(type='str'),
sysmtu=dict(type='str'),
state=dict(choices=['absent', 'present'], default='present'),
)
module = get_network_module(argument_spec=argument_spec,
required_together=[['mtu', 'interface']],
supports_check_mode=True)
interface = module.params['interface']
mtu = module.params['mtu']
sysmtu = module.params['sysmtu']
state = module.params['state']
if sysmtu and (interface or mtu):
module.fail_json(msg='Proper usage: either use just the sysmtu param '
'or use the interface AND mtu params')
if interface:
intf_type = get_interface_type(interface)
if intf_type != 'ethernet':
if is_default(interface, module) == 'DNE':
module.fail_json(msg='Invalid interface. It does not exist '
'on the switch.')
existing = get_mtu(interface, module)
else:
existing = get_system_mtu(module)
if interface and mtu:
if intf_type == 'loopback':
module.fail_json(msg='Cannot set MTU for loopback interface.')
mode = get_interface_mode(interface, intf_type, module)
if mode == 'layer2':
if intf_type in ['ethernet', 'portchannel']:
if mtu not in [existing['sysmtu'], '1500']:
module.fail_json(msg='MTU on L2 interfaces can only be set'
' to the system default (1500) or the '
'existing sysmtu value, which is '
'{0}'.format(existing['sysmtu']))
elif mode == 'layer3':
if intf_type in ['ethernet', 'portchannel', 'svi']:
if ((int(mtu) < 576 or int(mtu) > 9216) or
((int(mtu) % 2) != 0)):
module.fail_json(msg='Invalid MTU for Layer 3 interface: '
'needs to be an even number between '
'576 and 9216')
if sysmtu:
if ((int(sysmtu) < 576 or int(sysmtu) > 9216 or
((int(sysmtu) % 2) != 0))):
module.fail_json(msg='Invalid MTU: needs to be an even '
'number between 576 and 9216')
args = dict(mtu=mtu, sysmtu=sysmtu)
proposed = dict((k, v) for k, v in args.items() if v is not None)
delta = dict(set(proposed.items()).difference(existing.items()))
changed = False
end_state = existing
commands = []
if state == 'present':
if delta:
command = get_commands_config_mtu(delta, interface)
commands.append(command)
elif state == 'absent':
common = set(proposed.items()).intersection(existing.items())
if common:
command = get_commands_remove_mtu(dict(common), interface)
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
execute_config_command(cmds, module)
if interface:
end_state = get_mtu(interface, module)
else:
end_state = get_system_mtu(module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
module.exit_json(**results)
if __name__ == '__main__':
main()
|
ftrader-bitcoinunlimited/hardfork_prototype_1_mvf-bu
|
refs/heads/master
|
qa/rpc-tests/invalidateblock.py
|
3
|
#!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2015-2017 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test InvalidateBlock code
#
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def retryWhile(fn, exc, excStr=None):
while 1:
try:
fn()
break
except exc as e:
if excStr is not None and excStr not in str(e):
raise
time.sleep(.5)
class InvalidateTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
def run_test(self):
print("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
print("Mine 4 blocks on Node 0")
self.nodes[0].generate(4)
assert(self.nodes[0].getblockcount() == 4)
besthash = self.nodes[0].getbestblockhash()
print("Mine competing 6 blocks on Node 1")
self.nodes[1].generate(6)
assert(self.nodes[1].getblockcount() == 6)
print("Connect nodes to force a reorg")
connect_nodes_bi(self.nodes,0,1)
sync_blocks(self.nodes[0:2])
assert(self.nodes[0].getblockcount() == 6)
badhash = self.nodes[1].getblockhash(2)
print("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
self.nodes[0].invalidateblock(badhash)
newheight = self.nodes[0].getblockcount()
newhash = self.nodes[0].getbestblockhash()
if (newheight != 4 or newhash != besthash):
raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
print("\nMake sure we won't reorg to a lower work chain:")
connect_nodes_bi(self.nodes,1,2)
print("Sync node 2 to node 1 so both have 6 blocks")
sync_blocks(self.nodes[1:3])
assert(self.nodes[2].getblockcount() == 6)
print("Invalidate block 5 on node 1 so its tip is now at 4")
self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
assert(self.nodes[1].getblockcount() == 4)
print("Invalidate block 3 on node 2, so its tip is now 2")
self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
assert(self.nodes[2].getblockcount() == 2)
print("..and then mine a block")
self.nodes[2].generate(1)
print("Verify all nodes are at the right height")
time.sleep(5)
for i in range(3):
print(i,self.nodes[i].getblockcount())
assert(self.nodes[2].getblockcount() == 3)
assert(self.nodes[0].getblockcount() == 4)
node1height = self.nodes[1].getblockcount()
if node1height < 4:
raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
self.testChainSyncWithLongerInvalid()
def testChainSyncWithLongerInvalid(self):
print("verify that IBD continues on a separate chain after a block is invalidated")
ret = self.nodes[0].generate(50)
# after the headers propagate, invalidate the block
retryWhile(lambda: self.nodes[1].invalidateblock(ret[0]), JSONRPCException, "Block not found")
# now generate a competing chain
ret1 = self.nodes[1].generate(25)
# now start up a new node to sync with one of the chains
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug"]))
connect_nodes_bi(self.nodes,0,3)
connect_nodes_bi(self.nodes,1,3)
# invalidate the longest chain
self.nodes[3].invalidateblock(ret[0])
# give it time to sync with the shorter chain on node 1
print("allowing node 3 to sync")
time.sleep(5)
blocks1 = self.nodes[1].getblockcount()
nblocks = self.nodes[3].getblockcount()
# test if it is synced
if nblocks != blocks1:
print("ERROR: node 3 did not sync with longest valid chain")
print("chain tips on 0: %s" % str(self.nodes[0].getchaintips()))
print("chain tips on 1: %s" % str(self.nodes[1].getchaintips()))
print("chain tips on 3: %s" % str(self.nodes[3].getchaintips()))
print("longest chain on 3: %s" % str(self.nodes[3].getblockcount()))
# enable when fixed: assert(nblocks == blocks1); # since I invalidated a block on 0's chain, I should be caught up with 1
print("Now make the other chain (with no invalid blocks) longer")
ret1 = self.nodes[1].generate(50)
time.sleep(5)
blocks1 = self.nodes[1].getblockcount()
nblocks = self.nodes[3].getblockcount()
# test if it is synced
if nblocks != blocks1:
print("node 3 did not sync up")
print("chain tips on 0: %s" % str(self.nodes[0].getchaintips()))
print("chain tips on 1: %s" % str(self.nodes[1].getchaintips()))
print("chain tips on 3: %s" % str(self.nodes[3].getchaintips()))
print("longest chain on 3: %s" % str(self.nodes[3].getblockcount()))
else:
print("node 1 synced with longest chain")
if __name__ == '__main__':
InvalidateTest().main()
def Test():
t = InvalidateTest()
bitcoinConf = {
"debug":["net","blk","thin","mempool","req","bench","evict"], # "lck"
"blockprioritysize":2000000 # we don't want any transactions rejected due to insufficient fees...
}
t.main(["--nocleanup","--noshutdown", "--tmpdir=/ramdisk/test"],bitcoinConf,None)
|
shsingh/ansible
|
refs/heads/devel
|
test/units/modules/network/fortios/test_fortios_vpn_ssl_web_user_bookmark.py
|
21
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_vpn_ssl_web_user_bookmark
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_vpn_ssl_web_user_bookmark.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_vpn_ssl_web_user_bookmark_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_ssl_web_user_bookmark': {'custom_lang': 'test_value_3',
'name': 'default_name_4'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_user_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)
expected_data = {'custom-lang': 'test_value_3',
'name': 'default_name_4'
}
set_method_mock.assert_called_with('vpn.ssl.web', 'user-bookmark', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_vpn_ssl_web_user_bookmark_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_ssl_web_user_bookmark': {'custom_lang': 'test_value_3',
'name': 'default_name_4'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_user_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)
expected_data = {'custom-lang': 'test_value_3',
'name': 'default_name_4'
}
set_method_mock.assert_called_with('vpn.ssl.web', 'user-bookmark', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_vpn_ssl_web_user_bookmark_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'vpn_ssl_web_user_bookmark': {'custom_lang': 'test_value_3',
'name': 'default_name_4'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_user_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)
delete_method_mock.assert_called_with('vpn.ssl.web', 'user-bookmark', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_vpn_ssl_web_user_bookmark_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'vpn_ssl_web_user_bookmark': {'custom_lang': 'test_value_3',
'name': 'default_name_4'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_user_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)
delete_method_mock.assert_called_with('vpn.ssl.web', 'user-bookmark', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_vpn_ssl_web_user_bookmark_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_ssl_web_user_bookmark': {'custom_lang': 'test_value_3',
'name': 'default_name_4'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_user_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)
expected_data = {'custom-lang': 'test_value_3',
'name': 'default_name_4'
}
set_method_mock.assert_called_with('vpn.ssl.web', 'user-bookmark', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_vpn_ssl_web_user_bookmark_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_ssl_web_user_bookmark': {
'random_attribute_not_valid': 'tag', 'custom_lang': 'test_value_3',
'name': 'default_name_4'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_user_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)
expected_data = {'custom-lang': 'test_value_3',
'name': 'default_name_4'
}
set_method_mock.assert_called_with('vpn.ssl.web', 'user-bookmark', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
leafclick/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractmethod/File.after.py
|
79
|
def __init__(self):
for base in self.__class__.__bases__:
bar(base, self)
def bar(base_new, self_new):
try:
base_new.__init__(self_new)
except AttributeError:
pass
|
witalikkowal/Store
|
refs/heads/master
|
vendor/doctrine/orm/docs/en/conf.py
|
2448
|
# -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
if domain == 'dcorm':
return 'http://'
return None
|
fredsod/NIPAP
|
refs/heads/master
|
nipap/nipap/nipapconfig.py
|
7
|
import ConfigParser
class NipapConfig(ConfigParser.SafeConfigParser):
""" Makes configuration data available.
Implemented as a class with a shared state; once an instance has been
created, new instances with the same state can be obtained by calling
the constructor again.
"""
__shared_state = {}
_config = None
_cfg_path = None
def __init__(self, cfg_path=None, default=None):
""" Takes config file path and command line arguments.
"""
self.__dict__ = self.__shared_state
if default is None:
default = {}
if len(self.__shared_state) == 0:
# First time - create new instance!
self._cfg_path = cfg_path
ConfigParser.SafeConfigParser.__init__(self, default)
self.read_file()
def read_file(self):
""" Read the configuration file
"""
# don't try to parse config file if we don't have one set
if not self._cfg_path:
return
try:
cfg_fp = open(self._cfg_path, 'r')
self.readfp(cfg_fp)
except IOError as exc:
raise NipapConfigError(str(exc))
class NipapConfigError(Exception):
pass
|
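The NipapConfig class above relies on the shared-state ("Borg") idiom its docstring describes: every instance is given the same `__dict__`, so configuration read once is visible through every later instance. Below is a minimal, self-contained sketch of that idiom; the `SharedConfig` class and the `db_host` attribute are hypothetical illustrations, not part of NIPAP.

```python
class SharedConfig(object):
    """Hypothetical illustration of the shared-state (Borg) idiom."""

    __shared_state = {}

    def __init__(self):
        # Every instance points at the same dictionary, so attribute
        # assignments made through one instance appear on all of them.
        self.__dict__ = self.__shared_state


first = SharedConfig()
first.db_host = "localhost"   # hypothetical option

second = SharedConfig()       # a "new" instance with the same state
print(second.db_host)         # -> localhost
```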
abhitopia/tensorflow
|
refs/heads/master
|
tensorflow/contrib/learn/python/learn/trainable.py
|
68
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`Trainable` interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
class Trainable(object):
"""Interface for objects that are trainable by, e.g., `Experiment`.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
"""Trains a model given training data `x` predictions and `y` labels.
Args:
x: Matrix of shape [n_samples, n_features...] or the dictionary of Matrices.
Can be iterator that returns arrays of features or dictionary of arrays of features.
The training input samples for fitting the model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs] or the dictionary of same.
Can be iterator that returns array of labels or dictionary of array of labels.
The training label values (class labels in classification, real numbers in regression).
If set, `input_fn` must be `None`. Note: For classification, label values must
be integers representing the class index (i.e. values from 0 to
n_classes-1).
input_fn: Input function returning a tuple of:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
If input_fn is set, `x`, `y`, and `batch_size` must be `None`.
steps: Number of steps for which to train model. If `None`, train forever.
`steps` works incrementally: if you call fit(steps=10) twice, training
occurs for a total of 20 steps. If you don't want this incremental
behaviour, set `max_steps` instead. If set, `max_steps` must be
`None`.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
max_steps: Number of total steps for which to train model. If `None`,
train forever. If set, `steps` must be `None`.
Two calls to `fit(steps=100)` means 200 training
iterations. On the other hand, two calls to `fit(max_steps=100)` means
that the second call will not do any iteration since first call did
all 100 steps.
Returns:
`self`, for chaining.
"""
raise NotImplementedError
|
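The `fit` docstring above draws a distinction between the incremental `steps` argument and the absolute `max_steps` argument. The sketch below uses a hypothetical counting implementation (not part of TensorFlow) to make that contract concrete, assuming only that repeated `fit(steps=N)` calls accumulate while repeated `fit(max_steps=N)` calls do not.

```python
class CountingTrainer(object):
    """Hypothetical Trainable-style object that only counts steps."""

    def __init__(self):
        self.global_step = 0

    def fit(self, steps=None, max_steps=None):
        # Mirrors the documented contract: steps and max_steps are exclusive.
        if steps is not None and max_steps is not None:
            raise ValueError("set either steps or max_steps, not both")
        if steps is not None:
            self.global_step += steps                             # incremental
        elif max_steps is not None:
            self.global_step = max(self.global_step, max_steps)   # absolute cap
        return self


trainer = CountingTrainer()
trainer.fit(steps=10).fit(steps=10)
print(trainer.global_step)    # 20: two calls accumulate

trainer = CountingTrainer()
trainer.fit(max_steps=100).fit(max_steps=100)
print(trainer.global_step)    # 100: the second call does no extra iterations
```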
glennhickey/teHmm
|
refs/heads/master
|
scripts/multibench.py
|
1
|
#!/usr/bin/env python
#Copyright (C) 2014 by Glenn Hickey
#
#Released under the MIT license, see LICENSE.txt
import unittest
import sys
import os
import argparse
import logging
import random
import numpy as np
from teHmm.common import runShellCommand, setLogLevel, addLoggingFileHandler
setLogLevel("INFO")
addLoggingFileHandler("log.txt", False)
# input ############
tracksPath="tracks.xml"
tracksPath250="tracks_bin250.xml"
regionPath="region.bed"
truthPath="hollister_region.bed"
trainPath="modeler_region.bed"
superTrackName="repeat_modeler"
segOpts = "--cutMultinomial --thresh 2"
teStates = ["LINE", "SINE", "LTR", "DNA", "RC", "Unknown"]
trainPath2State = "modeler_2state_region.bed"
segLen = 20
numStates = 35
threads = 6
iter = 200
thresh = 0.08
emPrior = 1.0
mpFlags = "--maxProb --maxProbCut 5"
fitFlags = "--ignoreTgt 0 --qualThresh 0.25"
#####################
superTrackPath="tracks_super.xml"
segPath = "segments.bed"
sedExp = "\"s/" + "\\|".join(teStates) + "/TE/g\""
runShellCommand("rm2State.sh %s > %s" % (trainPath, trainPath2State))
# make a supervised training track
runShellCommand("grep -v %s %s > %s" % (superTrackName, tracksPath250, superTrackPath))
# make a segments
runShellCommand("segmentTracks.py %s %s %s %s --logInfo --logFile log.txt" % (tracksPath, regionPath, segPath, segOpts))
# do a supervised
runShellCommand("mkdir -p supervised")
runShellCommand("teHmmTrain.py %s %s supervised/out.mod --segment %s --supervised --segLen %d --logInfo" % (
superTrackPath, trainPath, segPath, segLen))
runShellCommand("teHmmEval.py %s %s %s --bed %s" % (
superTrackPath, "supervised/out.mod", segPath, "supervised/eval.bed"))
runShellCommand("rm2State.sh %s > %s" % ("supervised/eval.bed", "supervised/evalTE.bed"))
runShellCommand("compareBedStates.py %s %s > %s" % (truthPath, "supervised/evalTE.bed", "supervised/comp.txt"))
runShellCommand("fitStateNames.py %s %s %s %s" % (truthPath, "supervised/eval.bed", "supervised/fit.bed", fitFlags))
runShellCommand("compareBedStates.py %s %s > %s" % (truthPath, "supervised/fit.bed", "supervised/comp_cheat.txt"))
# do a semisupervised
runShellCommand("mkdir -p semi")
runShellCommand("createStartingModel.py %s %s %s %s %s --numTot %d --mode full --em %f --outName Unlabeled" % (
tracksPath, superTrackName, regionPath, "semi/tran.txt", "semi/em.txt", numStates, emPrior))
runShellCommand("grep -v Unlabeled semi/tran.txt > semi/tranf.txt")
runShellCommand("teHmmBenchmark.py %s %s %s --truth %s --iter %d %s --transMatEpsilons --segment --segLen %d --fit --reps %d --numThreads %d --logInfo --fixStart --initTransProbs %s --forceTransProbs %s --initEmProbs %s --forceEmProbs %s --fitOpts \"%s\" " % (
tracksPath250, "semi/bench", segPath, truthPath, iter, mpFlags, segLen, threads, threads, "semi/tran.txt", "semi/tranf.txt", "semi/em.txt", "semi/em.txt", fitFlags))
evalPath = "semi/bench/" + segPath[:-4] + "_eval.bed"
compPath = "semi/bench/" + segPath[:-4] + "_comp.txt"
runShellCommand("rm2State.sh %s > %s" % (evalPath, "semi/eval1.bed"))
runShellCommand("fitStateNames.py %s %s %s %s" % (trainPath2State, "semi/eval1.bed", "semi/fit1.bed", fitFlags))
runShellCommand("compareBedStates.py %s %s > %s" % (truthPath, "semi/fit1.bed", "semi/comp.txt"))
runShellCommand("cp %s %s" % (compPath, "semi/comp_cheat.txt"))
# do a unsupervised
runShellCommand("mkdir -p unsup")
runShellCommand("teHmmBenchmark.py %s %s %s --truth %s --iter %d %s --maxProb --maxProbCut 5 --segment --segLen %s --fit --reps %d --numThreads %d --logInfo --fixStart --emStates %s --fitOpts \"%s\"" % (
tracksPath250, "unsup/bench", segPath, truthPath, iter, mpFlags, segLen, threads, threads, numStates, fitFlags))
evalPath = "unsup/bench/" + segPath[:-4] + "_eval.bed"
compPath = "unsup/bench/" + segPath[:-4] + "_comp.txt"
runShellCommand("rm2State.sh %s > %s" % (evalPath, "unsup/eval1.bed"))
runShellCommand("fitStateNames.py %s %s %s %s" % (trainPath2State, "unsup/eval1.bed", "unsup/fit1.bed", fitFlags))
runShellCommand("compareBedStates.py %s %s > %s" % (truthPath, "unsup/fit1.bed", "unsup/comp.txt"))
runShellCommand("cp %s %s" % (compPath, "unsup/comp_cheat.txt"))
|
avinassh/rtiman
|
refs/heads/master
|
sample_settings.py
|
1
|
# all the settings, auth etc.
import os
MONGO_URL = "mongodb://avinassh:avinassh@localhost:10031/rti"
application_handler_settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
cookie_secret= 'aV1..No5h',
login_url='/login',
debug=True,
)
|
donnydevito/py-academicstoday
|
refs/heads/master
|
academicstoday_project/teacher/views/overview.py
|
3
|
from django.shortcuts import render
from django.core import serializers
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.conf import settings
import json
import datetime
from registrar.models import Course
from registrar.models import Announcement
from registrar.models import Syllabus
from registrar.models import Policy
from registrar.models import Lecture
from registrar.models import Assignment
from registrar.models import Quiz
from registrar.models import Exam
from registrar.models import CourseSubmission
# Public Functions
#--------------------
@login_required(login_url='/landpage')
def overview_page(request, course_id):
course = Course.objects.get(id=course_id)
try:
review = CourseSubmission.objects.get(course=course)
except CourseSubmission.DoesNotExist:
review = None
try:
announcements = Announcement.objects.filter(course=course).order_by('-post_date')
except Announcement.DoesNotExist:
announcements = None
try:
syllabus = Syllabus.objects.get(course=course)
except Syllabus.DoesNotExist:
syllabus = None
try:
policy = Policy.objects.get(course=course)
except Policy.DoesNotExist:
policy = None
try:
lectures = Lecture.objects.filter(course=course).order_by('-lecture_num')
except Lecture.DoesNotExist:
lectures = None
try:
assignments = Assignment.objects.filter(course=course).order_by('-assignment_num')
except Assignment.DoesNotExist:
assignments = None
try:
quizzes = Quiz.objects.filter(course=course).order_by('-quiz_num')
except Quiz.DoesNotExist:
quizzes = None
try:
exams = Exam.objects.filter(course=course).order_by('-exam_num')
except Exam.DoesNotExist:
exams = None
return render(request, 'teacher/overview/view.html',{
'course': course,
'total_final_mark_worth': total_final_mark_worth(course),
'has_final_exam': has_final_exam(exams),
'review': review,
'announcements' : announcements,
'syllabus': syllabus,
'lectures': lectures,
'assignments': assignments,
'quizzes': quizzes,
'exams': exams,
'policy': policy,
'COURSE_SUBMITTED_FOR_REVIEW_STATUS': settings.COURSE_SUBMITTED_FOR_REVIEW_STATUS,
'COURSE_IN_REVIEW_STATUS': settings.COURSE_IN_REVIEW_STATUS,
'COURSE_UNAVAILABLE_STATUS': settings.COURSE_UNAVAILABLE_STATUS,
'COURSE_AVAILABLE_STATUS': settings.COURSE_AVAILABLE_STATUS,
'COURSE_REJECTED_STATUS': settings.COURSE_REJECTED_STATUS,
'user': request.user,
'tab': 'overview',
'HAS_ADVERTISMENT': settings.APPLICATION_HAS_ADVERTISMENT,
'local_css_urls': settings.SB_ADMIN_2_CSS_LIBRARY_URLS,
'local_js_urls': settings.SB_ADMIN_2_JS_LIBRARY_URLS,
})
@login_required(login_url='/landpage')
def submit_course_for_review(request, course_id):
course = Course.objects.get(id=course_id)
response_data = {'status' : 'failed', 'message' : ''}
# Validate announcements
try:
announcements = Announcement.objects.filter(course=course).order_by('-post_date')
if announcements.count() < 1:
response_data['message'] = 'zero announcements'
return HttpResponse(json.dumps(response_data), content_type="application/json")
except Announcement.DoesNotExist:
response_data['message'] = 'no announcements detected'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate syllabus
try:
Syllabus.objects.get(course=course)
except Syllabus.DoesNotExist:
response_data['message'] = 'no syllabus set'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate policy
try:
Policy.objects.get(course=course)
except Policy.DoesNotExist:
response_data['message'] = 'no policy set'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate lectures
try:
lectures = Lecture.objects.filter(course=course).order_by('-lecture_num')
if lectures.count() < 2:
response_data['message'] = 'minimum 2 lectures required'
return HttpResponse(json.dumps(response_data), content_type="application/json")
except Lecture.DoesNotExist:
response_data['message'] = 'no lecture(s) found'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate assignments
try:
assignments = Assignment.objects.filter(course=course).order_by('-assignment_num')
if assignments.count() < 1:
response_data['message'] = 'minimum 1 assignment required'
return HttpResponse(json.dumps(response_data), content_type="application/json")
except Assignment.DoesNotExist:
response_data['message'] = 'no assignment(s)'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate quizzes
try:
quizzes = Quiz.objects.filter(course=course).order_by('-quiz_num')
if quizzes.count() < 1:
response_data['message'] = 'minimum 1 quiz required'
return HttpResponse(json.dumps(response_data), content_type="application/json")
except Quiz.DoesNotExist:
response_data['message'] = 'no quiz(zes) found'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate exams
try:
exams = Exam.objects.filter(course=course).order_by('-exam_num')
if exams.count() < 1:
response_data['message'] = 'minimum 1 exam required'
return HttpResponse(json.dumps(response_data), content_type="application/json")
except Exam.DoesNotExist:
response_data['message'] = 'no exam(s) found'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate final mark calculator
total_worth = total_final_mark_worth(course)
if total_worth != 100:
response_data['message'] = 'total final mark must add up to 100%'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Make sure we have a final exam
is_final = has_final_exam(exams)
if is_final == False:
response_data['message'] = 'course requires exactly 1 final exam'
return HttpResponse(json.dumps(response_data), content_type="application/json")
review = CourseSubmission.objects.create(
course=course,
)
review.save()
# Make course available.
course.status = settings.COURSE_AVAILABLE_STATUS
course.save()
response_data = {'status' : 'success', 'message' : 'submitted course review'}
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Private Functions
#--------------------
# Function looks through the course assignments/exams/quizzes and returns
# the accumulated worth total.
def total_final_mark_worth(course):
total_worth = 0 # Variable used to track total worth of the coursework.
# Fetch from database
try:
assignments = Assignment.objects.filter(course=course).order_by('-assignment_num')
except Assignment.DoesNotExist:
assignments = None
try:
quizzes = Quiz.objects.filter(course=course).order_by('-quiz_num')
except Quiz.DoesNotExist:
quizzes = None
try:
exams = Exam.objects.filter(course=course).order_by('-exam_num')
except Exam.DoesNotExist:
exams = None
# Iterate through all coursework and calculate the total.
for assignment in assignments:
total_worth += assignment.worth
for quiz in quizzes:
total_worth += quiz.worth
for exam in exams:
total_worth += exam.worth
return total_worth
# Function will iterate through all the exams and return either True or False
# depending if a 'final exam' was found in the list.
def has_final_exam(exams):
count = 0
for exam in exams:
if exam.is_final == True:
count += 1
return count == 1
|
jiangzhuo/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/site-packages/pip/basecommand.py
|
392
|
"""Base Command class, and related routines"""
import os
import sys
import tempfile
import traceback
import time
import optparse
from pip import cmdoptions
from pip.locations import running_under_virtualenv
from pip.log import logger
from pip.download import PipSession
from pip.exceptions import (BadCommand, InstallationError, UninstallationError,
CommandError, PreviousBuildDirError)
from pip.backwardcompat import StringIO
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.status_codes import (SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,
PREVIOUS_BUILD_DIR_ERROR)
from pip.util import get_prog
__all__ = ['Command']
class Command(object):
name = None
usage = None
hidden = False
def __init__(self):
parser_kw = {
'usage': self.usage,
'prog': '%s %s' % (get_prog(), self.name),
'formatter': UpdatingDefaultsHelpFormatter(),
'add_help_option': False,
'name': self.name,
'description': self.__doc__,
}
self.parser = ConfigOptionParser(**parser_kw)
# Commands should add options to this option group
optgroup_name = '%s Options' % self.name.capitalize()
self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
# Add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, self.parser)
self.parser.add_option_group(gen_opts)
def _build_session(self, options):
session = PipSession()
# Handle custom ca-bundles from the user
if options.cert:
session.verify = options.cert
# Handle timeouts
if options.timeout:
session.timeout = options.timeout
# Handle configured proxies
if options.proxy:
session.proxies = {
"http": options.proxy,
"https": options.proxy,
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.no_input
return session
def setup_logging(self):
pass
def parse_args(self, args):
# factored out for testability
return self.parser.parse_args(args)
def main(self, args):
options, args = self.parse_args(args)
level = 1 # Notify
level += options.verbose
level -= options.quiet
level = logger.level_for_integer(4 - level)
complete_log = []
logger.add_consumers(
(level, sys.stdout),
(logger.DEBUG, complete_log.append),
)
if options.log_explicit_levels:
logger.explicit_levels = True
self.setup_logging()
#TODO: try to get these passing down from the command?
# without resorting to os.environ to hold these.
if options.no_input:
os.environ['PIP_NO_INPUT'] = '1'
if options.exists_action:
os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
if options.require_venv:
# If a venv is required check if it can really be found
if not running_under_virtualenv():
logger.fatal('Could not find an activated virtualenv (required).')
sys.exit(VIRTUALENV_NOT_FOUND)
if options.log:
log_fp = open_logfile(options.log, 'a')
logger.add_consumers((logger.DEBUG, log_fp))
else:
log_fp = None
exit = SUCCESS
store_log = False
try:
status = self.run(options, args)
# FIXME: all commands should return an exit status
# and when it is done, isinstance is not needed anymore
if isinstance(status, int):
exit = status
except PreviousBuildDirError:
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = PREVIOUS_BUILD_DIR_ERROR
except (InstallationError, UninstallationError):
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except BadCommand:
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except CommandError:
e = sys.exc_info()[1]
logger.fatal('ERROR: %s' % e)
logger.info('Exception information:\n%s' % format_exc())
exit = ERROR
except KeyboardInterrupt:
logger.fatal('Operation cancelled by user')
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except:
logger.fatal('Exception:\n%s' % format_exc())
store_log = True
exit = UNKNOWN_ERROR
if store_log:
log_file_fn = options.log_file
text = '\n'.join(complete_log)
try:
log_file_fp = open_logfile(log_file_fn, 'w')
except IOError:
temp = tempfile.NamedTemporaryFile(delete=False)
log_file_fn = temp.name
log_file_fp = open_logfile(log_file_fn, 'w')
logger.fatal('Storing debug log for failure in %s' % log_file_fn)
log_file_fp.write(text)
log_file_fp.close()
if log_fp is not None:
log_fp.close()
return exit
def format_exc(exc_info=None):
if exc_info is None:
exc_info = sys.exc_info()
out = StringIO()
traceback.print_exception(*exc_info, **dict(file=out))
return out.getvalue()
def open_logfile(filename, mode='a'):
"""Open the named log file in append mode.
If the file already exists, a separator will also be printed to
the file to separate past activity from current activity.
"""
filename = os.path.expanduser(filename)
filename = os.path.abspath(filename)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
exists = os.path.exists(filename)
log_fp = open(filename, mode)
if exists:
log_fp.write('%s\n' % ('-' * 60))
log_fp.write('%s run on %s\n' % (sys.argv[0], time.strftime('%c')))
return log_fp
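# ---------------------------------------------------------------------------
# Illustrative sketch (not part of pip itself): a minimal Command subclass.
# The command name, usage string and run() body below are hypothetical; only
# the hook points they rely on (name, usage, run, SUCCESS) come from the
# Command class defined above.
class ExampleCommand(Command):
    """Do nothing and report success (illustration only)."""
    name = 'example'
    usage = '%prog'

    def run(self, options, args):
        # A real command would do its work here and return a status code.
        return SUCCESS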
|
Bartzi/LabShare
|
refs/heads/master
|
labshare/wsgi.py
|
1
|
import os
from django.core.wsgi import get_wsgi_application
os.environ['DJANGO_SETTINGS_MODULE'] = 'labshare.settings'
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
# Apply WSGI middleware if necessary
application = get_wsgi_application()
|
miguelalba89/hfooad-python
|
refs/heads/master
|
ch4/sam_dogdoor.py
|
1
|
"""
Sam's dogdoor
Author: m1ge7
Date: 2014/04/12
"""
import time
from threading import Timer
class Bark:
def __init__(self, sound):
self._sound = sound
def get_sound(self):
return self._sound
def __eq__(self, bark):
return self._sound.lower() == bark.get_sound().lower()
class BarkRecognizer:
def __init__(self, door):
self._door = door
def recognize(self, bark):
print(" BarkRecognizer: Heard a '" + bark.get_sound() + "'")
if self._door.get_allowed_bark() == bark:
self._door.open()
else:
print("This dog is not allowed.")
class DogDoor:
def __init__(self):
self._open = False
self._allowed_bark = None
def set_allowed_bark(self, bark):
self._allowed_bark = bark
def get_allowed_bark(self):
return self._allowed_bark
def open(self):
print("The dog door opens.")
self._open = True
timer = Timer(5, lambda door: door.close(), [self])
timer.start()
def close(self):
print("The dog door closes.")
self._open = False
def is_open(self):
return self._open
class Remote:
def __init__(self, door):
self._door = door
def press_button(self):
print("Pressing the remote control button...")
if self._door.is_open():
self._door.close()
else:
self._door.open()
if __name__ == '__main__':
door = DogDoor()
door.set_allowed_bark(Bark("Rowlf"))
recognizer = BarkRecognizer(door)
remote = Remote(door)
# Simulate the hardware hearing a bark
print("Bruce starts barking.")
recognizer.recognize(Bark("Rowlf"))
print("\nBruce has gone outside...")
time.sleep(10)
print("\nBruce's all done...")
print("...but he's stuck outside!")
# Simulate the hardware hearing a bark (not Bruce!)
print("A small dog starts barking.")
recognizer.recognize(Bark("Yip"))
time.sleep(5)
# Simulate the hardware hearing a bark again
print("\nBruce starts barking.")
recognizer.recognize(Bark("Rowlf"))
print("\nBruce's back inside...")
|
4eek/configuration
|
refs/heads/master
|
tests/jinja_check.py
|
49
|
#!/usr/bin/env python
import os
import sys
from jinja2 import FileSystemLoader
from jinja2 import Environment as j
from jinja2.exceptions import UndefinedError
from ansible.utils.template import _get_filters, _get_extensions
from yaml.representer import RepresenterError
input_file = sys.argv[1]
if not os.path.exists(input_file):
print('{0}: deleted in diff'.format(input_file))
sys.exit(0)
# Setup jinja to include ansible filters
j_e = j(trim_blocks=True, extensions=_get_extensions())
j_e.loader = FileSystemLoader(['.', os.path.dirname(input_file)])
j_e.filters.update(_get_filters())
# Go ahead and catch errors for undefined variables and bad yaml
# from `to_nice_yaml` ansible filter
try:
j_e.from_string(file(input_file).read()).render(func=lambda: None)
except (UndefinedError, RepresenterError), ex:
pass
except TypeError, ex:
if ex.message != 'Undefined is not JSON serializable':
raise Exception(ex.message)
pass
print('{}: ok'.format(input_file))
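# Illustrative invocation (hypothetical template path, not part of the test suite):
#   python jinja_check.py playbooks/roles/example/templates/example.yml.j2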
|
microdee/IronHydra
|
refs/heads/master
|
src/IronHydra/Lib/distutils/command/register.py
|
75
|
"""distutils.command.register
Implements the Distutils 'register' command (register with the repository).
"""
# created 2002/10/21, Richard Jones
__revision__ = "$Id$"
import urllib2
import getpass
import urlparse
import StringIO
from warnings import warn
from distutils.core import PyPIRCCommand
from distutils import log
class register(PyPIRCCommand):
description = ("register the distribution with the Python package index")
user_options = PyPIRCCommand.user_options + [
('list-classifiers', None,
'list the valid Trove classifiers'),
('strict', None,
'Will stop the registration if the meta-data is not fully compliant')
]
boolean_options = PyPIRCCommand.boolean_options + [
'verify', 'list-classifiers', 'strict']
sub_commands = [('check', lambda self: True)]
def initialize_options(self):
PyPIRCCommand.initialize_options(self)
self.list_classifiers = 0
self.strict = 0
def finalize_options(self):
PyPIRCCommand.finalize_options(self)
# setting options for the `check` subcommand
check_options = {'strict': ('register', self.strict),
'restructuredtext': ('register', 1)}
self.distribution.command_options['check'] = check_options
def run(self):
self.finalize_options()
self._set_config()
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
if self.dry_run:
self.verify_metadata()
elif self.list_classifiers:
self.classifiers()
else:
self.send_metadata()
def check_metadata(self):
"""Deprecated API."""
warn("distutils.command.register.check_metadata is deprecated, \
use the check command instead", PendingDeprecationWarning)
check = self.distribution.get_command_obj('check')
check.ensure_finalized()
check.strict = self.strict
check.restructuredtext = 1
check.run()
def _set_config(self):
''' Reads the configuration file and sets attributes.
'''
config = self._read_pypirc()
if config != {}:
self.username = config['username']
self.password = config['password']
self.repository = config['repository']
self.realm = config['realm']
self.has_config = True
else:
if self.repository not in ('pypi', self.DEFAULT_REPOSITORY):
raise ValueError('%s not found in .pypirc' % self.repository)
if self.repository == 'pypi':
self.repository = self.DEFAULT_REPOSITORY
self.has_config = False
def classifiers(self):
''' Fetch the list of classifiers from the server.
'''
response = urllib2.urlopen(self.repository+'?:action=list_classifiers')
log.info(response.read())
def verify_metadata(self):
''' Send the metadata to the package index server to be checked.
'''
# send the info to the server and report the result
(code, result) = self.post_to_server(self.build_post_data('verify'))
log.info('Server response (%s): %s' % (code, result))
def send_metadata(self):
''' Send the metadata to the package index server.
Well, do the following:
1. figure who the user is, and then
2. send the data as a Basic auth'ed POST.
First we try to read the username/password from $HOME/.pypirc,
which is a ConfigParser-formatted file with a section
[distutils] containing username and password entries (both
in clear text). Eg:
[distutils]
index-servers =
pypi
[pypi]
username: fred
password: sekrit
Otherwise, to figure who the user is, we offer the user three
choices:
1. use existing login,
2. register as a new user, or
3. set the password to a random string and email the user.
'''
# see if we can short-cut and get the username/password from the
# config
if self.has_config:
choice = '1'
username = self.username
password = self.password
else:
choice = 'x'
username = password = ''
# get the user's login info
choices = '1 2 3 4'.split()
while choice not in choices:
self.announce('''\
We need to know who you are, so please choose either:
1. use your existing login,
2. register as a new user,
3. have the server generate a new password for you (and email it to you), or
4. quit
Your selection [default 1]: ''', log.INFO)
choice = raw_input()
if not choice:
choice = '1'
elif choice not in choices:
print 'Please choose one of the four options!'
if choice == '1':
# get the username and password
while not username:
username = raw_input('Username: ')
while not password:
password = getpass.getpass('Password: ')
# set up the authentication
auth = urllib2.HTTPPasswordMgr()
host = urlparse.urlparse(self.repository)[1]
auth.add_password(self.realm, host, username, password)
# send the info to the server and report the result
code, result = self.post_to_server(self.build_post_data('submit'),
auth)
self.announce('Server response (%s): %s' % (code, result),
log.INFO)
# possibly save the login
if code == 200:
if self.has_config:
# sharing the password in the distribution instance
# so the upload command can reuse it
self.distribution.password = password
else:
self.announce(('I can store your PyPI login so future '
'submissions will be faster.'), log.INFO)
self.announce('(the login will be stored in %s)' % \
self._get_rc_file(), log.INFO)
choice = 'X'
while choice.lower() not in 'yn':
choice = raw_input('Save your login (y/N)?')
if not choice:
choice = 'n'
if choice.lower() == 'y':
self._store_pypirc(username, password)
elif choice == '2':
data = {':action': 'user'}
data['name'] = data['password'] = data['email'] = ''
data['confirm'] = None
while not data['name']:
data['name'] = raw_input('Username: ')
while data['password'] != data['confirm']:
while not data['password']:
data['password'] = getpass.getpass('Password: ')
while not data['confirm']:
data['confirm'] = getpass.getpass(' Confirm: ')
if data['password'] != data['confirm']:
data['password'] = ''
data['confirm'] = None
print "Password and confirm don't match!"
while not data['email']:
data['email'] = raw_input(' EMail: ')
code, result = self.post_to_server(data)
if code != 200:
log.info('Server response (%s): %s' % (code, result))
else:
log.info('You will receive an email shortly.')
log.info(('Follow the instructions in it to '
'complete registration.'))
elif choice == '3':
data = {':action': 'password_reset'}
data['email'] = ''
while not data['email']:
data['email'] = raw_input('Your email address: ')
code, result = self.post_to_server(data)
log.info('Server response (%s): %s' % (code, result))
def build_post_data(self, action):
# figure the data to send - the metadata plus some additional
# information used by the package server
meta = self.distribution.metadata
data = {
':action': action,
'metadata_version' : '1.0',
'name': meta.get_name(),
'version': meta.get_version(),
'summary': meta.get_description(),
'home_page': meta.get_url(),
'author': meta.get_contact(),
'author_email': meta.get_contact_email(),
'license': meta.get_licence(),
'description': meta.get_long_description(),
'keywords': meta.get_keywords(),
'platform': meta.get_platforms(),
'classifiers': meta.get_classifiers(),
'download_url': meta.get_download_url(),
# PEP 314
'provides': meta.get_provides(),
'requires': meta.get_requires(),
'obsoletes': meta.get_obsoletes(),
}
if data['provides'] or data['requires'] or data['obsoletes']:
data['metadata_version'] = '1.1'
return data
def post_to_server(self, data, auth=None):
''' Post a query to the server, and return a string response.
'''
if 'name' in data:
self.announce('Registering %s to %s' % (data['name'],
self.repository),
log.INFO)
# Build up the MIME payload for the urllib2 POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = '\n--' + boundary
end_boundary = sep_boundary + '--'
body = StringIO.StringIO()
for key, value in data.items():
# handle multiple entries for the same name
if type(value) not in (type([]), type( () )):
value = [value]
for value in value:
body.write(sep_boundary)
body.write('\nContent-Disposition: form-data; name="%s"'%key)
body.write("\n\n")
body.write(value)
if value and value[-1] == '\r':
body.write('\n') # write an extra newline (lurve Macs)
body.write(end_boundary)
body.write("\n")
body = body.getvalue()
# build the Request
headers = {
'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary,
'Content-length': str(len(body))
}
req = urllib2.Request(self.repository, body, headers)
# handle HTTP and include the Basic Auth handler
opener = urllib2.build_opener(
urllib2.HTTPBasicAuthHandler(password_mgr=auth)
)
data = ''
try:
result = opener.open(req)
except urllib2.HTTPError, e:
if self.show_response:
data = e.fp.read()
result = e.code, e.msg
except urllib2.URLError, e:
result = 500, str(e)
else:
if self.show_response:
data = result.read()
result = 200, 'OK'
if self.show_response:
dashes = '-' * 75
self.announce('%s%s%s' % (dashes, data, dashes))
return result
|
CyanogenMod/android_kernel_asus_fugu
|
refs/heads/cm-12.0
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
|
4653
|
# EventClass.py
#
# This is a library defining some event type classes, which can
# be used by other scripts to analyze perf samples.
#
# Currently there are just a few classes defined as examples:
# PerfEvent is the base class for all perf event samples, PebsEvent
# is a HW-based Intel x86 PEBS event, and users can add more SW/HW
# event classes based on their requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have a good way to tell the event type other than by
# the size of the raw buffer: a PEBS event with load latency data is
# 176 bytes, while a plain PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
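# Illustrative sketch (not part of the original library): building a basic
# PEBS event from a synthetic 144-byte raw buffer. The field values are made
# up; a real perf script receives raw_buf from the perf sample itself.
#
#   fake_buf = struct.pack('QQQQQQQQQQ', *range(10)) + '\0' * 64  # 144 bytes
#   ev = create_event("cycles", "bash", "libc.so", "main", fake_buf)
#   ev.show()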
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info, which lies
# in the four 64-bit words written right after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
|
MemeticParadigm/TensorFlow
|
refs/heads/master
|
tensorflow/python/training/training_ops.py
|
5
|
"""Python wrappers for training ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.training import gen_training_ops
# pylint: disable=wildcard-import
from tensorflow.python.training.gen_training_ops import *
# pylint: enable=wildcard-import
# Shape functions for fused training ops
# --------------------------------------
#
# The fused training ops all have the same basic structure: they take
# one or more variables with the same shape, and emit a reference to
# the original variable (which has the same shape as the first
# input). In addition, they take one or more scalar tensors containing
# hyperparameters.
#
# The sparse ops take the gradients as a Python IndexedSlices, which
# means that the indices are a vector of length N, and the gradient
# values are a tensor whose size is the same as the original variable,
# except for the 0th dimension, which has size N.
def _AssertInputIsScalar(op, index):
"""Raises ValueError if `op.inputs[index]` is not scalar."""
op.inputs[index].get_shape().assert_is_compatible_with(tensor_shape.scalar())
@ops.RegisterShape("ApplyAdagrad")
def _ApplyAdagradShape(op):
"""Shape function for the ApplyAdagrad op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 2) # lr
grad_shape = op.inputs[3].get_shape().merge_with(accum_shape)
return [grad_shape]
@ops.RegisterShape("ApplyAdam")
def _ApplyAdamShape(op):
"""Shape function for the ApplyAdam op."""
var_shape = op.inputs[0].get_shape()
m_shape = op.inputs[1].get_shape().merge_with(var_shape)
v_shape = op.inputs[2].get_shape().merge_with(m_shape)
_AssertInputIsScalar(op, 3) # beta1_power
_AssertInputIsScalar(op, 4) # beta2_power
_AssertInputIsScalar(op, 5) # lr
_AssertInputIsScalar(op, 6) # beta1
_AssertInputIsScalar(op, 7) # beta2
_AssertInputIsScalar(op, 8) # epsilon
grad_shape = op.inputs[9].get_shape().merge_with(v_shape)
return [grad_shape]
@ops.RegisterShape("ApplyMomentum")
def _ApplyMomentumShape(op):
"""Shape function for the ApplyMomentum op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 2) # lr
grad_shape = op.inputs[3].get_shape().merge_with(accum_shape)
_AssertInputIsScalar(op, 4) # momentum
return [grad_shape]
@ops.RegisterShape("ApplyRMSProp")
def _ApplyRMSPropShape(op):
"""Shape function for the ApplyRMSProp op."""
var_shape = op.inputs[0].get_shape()
ms_shape = op.inputs[1].get_shape().merge_with(var_shape)
mom_shape = op.inputs[2].get_shape().merge_with(ms_shape)
_AssertInputIsScalar(op, 3) # lr
_AssertInputIsScalar(op, 4) # rho
_AssertInputIsScalar(op, 5) # momentum
_AssertInputIsScalar(op, 6) # epsilon
grad_shape = op.inputs[7].get_shape().merge_with(mom_shape)
return [grad_shape]
@ops.RegisterShape("ApplyGradientDescent")
def _ApplyGradientDescentShape(op):
"""Shape function for the ApplyGradientDescent op."""
var_shape = op.inputs[0].get_shape()
_AssertInputIsScalar(op, 1) # alpha
delta_shape = op.inputs[2].get_shape().merge_with(var_shape)
return [delta_shape]
@ops.RegisterShape("SparseApplyAdagrad")
def _SparseApplyAdagradShape(op):
"""Shape function for the SparseApplyAdagrad op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 2) # lr
grad_shape = op.inputs[3].get_shape().merge_with(
tensor_shape.TensorShape([None]).concatenate(accum_shape[1:]))
unused_indices_shape = op.inputs[4].get_shape().merge_with(
tensor_shape.vector(grad_shape[0]))
return [accum_shape]
@ops.RegisterShape("SparseApplyMomentum")
def _SparseApplyMomentumShape(op):
"""Shape function for the SparseApplyMomentum op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 2) # lr
grad_shape = op.inputs[3].get_shape().merge_with(
tensor_shape.TensorShape([None]).concatenate(accum_shape[1:]))
unused_indices_shape = op.inputs[4].get_shape().merge_with(
tensor_shape.vector(grad_shape[0]))
_AssertInputIsScalar(op, 5) # momentum
return [accum_shape]
|
sunlianqiang/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/test/test_pep247.py
|
84
|
"""
Test suite to check compliance with PEP 247, the standard API
for hashing algorithms
"""
import hmac
import unittest
from hashlib import md5, sha1, sha224, sha256, sha384, sha512
from test import support
class Pep247Test(unittest.TestCase):
def check_module(self, module, key=None):
self.assertTrue(hasattr(module, 'digest_size'))
self.assertTrue(module.digest_size is None or module.digest_size > 0)
self.check_object(module.new, module.digest_size, key)
def check_object(self, cls, digest_size, key, digestmod=None):
if key is not None:
if digestmod is None:
digestmod = md5
obj1 = cls(key, digestmod=digestmod)
obj2 = cls(key, b'string', digestmod=digestmod)
h1 = cls(key, b'string', digestmod=digestmod).digest()
obj3 = cls(key, digestmod=digestmod)
obj3.update(b'string')
h2 = obj3.digest()
else:
obj1 = cls()
obj2 = cls(b'string')
h1 = cls(b'string').digest()
obj3 = cls()
obj3.update(b'string')
h2 = obj3.digest()
self.assertEqual(h1, h2)
self.assertTrue(hasattr(obj1, 'digest_size'))
if digest_size is not None:
self.assertEqual(obj1.digest_size, digest_size)
self.assertEqual(obj1.digest_size, len(h1))
obj1.update(b'string')
obj_copy = obj1.copy()
self.assertEqual(obj1.digest(), obj_copy.digest())
self.assertEqual(obj1.hexdigest(), obj_copy.hexdigest())
digest, hexdigest = obj1.digest(), obj1.hexdigest()
hd2 = ""
for byte in digest:
hd2 += '%02x' % byte
self.assertEqual(hd2, hexdigest)
def test_md5(self):
self.check_object(md5, None, None)
def test_sha(self):
self.check_object(sha1, None, None)
self.check_object(sha224, None, None)
self.check_object(sha256, None, None)
self.check_object(sha384, None, None)
self.check_object(sha512, None, None)
def test_hmac(self):
self.check_module(hmac, key=b'abc')
def test_main():
support.run_unittest(Pep247Test)
if __name__ == '__main__':
test_main()
|
sharma1nitish/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/QueueStatusServer/model/workitems.py
|
140
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import db
from model.queuepropertymixin import QueuePropertyMixin
class WorkItems(db.Model, QueuePropertyMixin):
queue_name = db.StringProperty()
item_ids = db.ListProperty(int)
date = db.DateTimeProperty(auto_now_add=True)
@classmethod
def key_for_queue(cls, queue_name):
return "work-items-%s" % (queue_name)
@classmethod
def lookup_by_queue(cls, queue_name):
return cls.get_or_insert(key_name=cls.key_for_queue(queue_name), queue_name=queue_name)
def display_position_for_attachment(self, attachment_id):
"""Returns a 1-based index corresponding to the position
of the attachment_id in the queue. If the attachment is
not in this queue, this returns None"""
if attachment_id in self.item_ids:
return self.item_ids.index(attachment_id) + 1
return None
@staticmethod
def _unguarded_add(key, attachment_id):
work_items = db.get(key)
if attachment_id in work_items.item_ids:
return
work_items.item_ids.append(attachment_id)
work_items.put()
# Because this uses .key() self.is_saved() must be True or this will throw NotSavedError.
def add_work_item(self, attachment_id):
db.run_in_transaction(self._unguarded_add, self.key(), attachment_id)
@staticmethod
def _unguarded_remove(key, attachment_id):
work_items = db.get(key)
if attachment_id in work_items.item_ids:
# We should never have more than one entry for a work item, so we only need to remove the first.
work_items.item_ids.remove(attachment_id)
work_items.put()
# Because this uses .key() self.is_saved() must be True or this will throw NotSavedError.
def remove_work_item(self, attachment_id):
db.run_in_transaction(self._unguarded_remove, self.key(), attachment_id)
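# Illustrative sketch (hypothetical queue name and attachment id; needs a
# live datastore, so shown as a comment only):
#
#   items = WorkItems.lookup_by_queue('commit-queue')
#   items.add_work_item(12345)
#   position = items.display_position_for_attachment(12345)  # 1-based, or None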
|
opena11y/fae2
|
refs/heads/master
|
fae2/rules/tests.py
|
1
|
"""
Copyright 2014-2016 University of Illinois
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
file: rules/tests.py
Author: Jon Gunderson
"""
from __future__ import absolute_import
from django.test import TestCase
# Create your tests here.
|
manipopopo/tensorflow
|
refs/heads/master
|
tensorflow/contrib/data/python/ops/threadpool.py
|
4
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for controlling threading in `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.contrib.data.python.ops import contrib_op_loader # pylint: disable=unused-import
from tensorflow.contrib.data.python.ops import gen_dataset_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.ops import resource_variable_ops
_uid_counter = 0
_uid_lock = threading.Lock()
def _generate_shared_name(prefix):
with _uid_lock:
global _uid_counter
uid = _uid_counter
_uid_counter += 1
return "{}{}".format(prefix, uid)
# TODO(b/73383364): Properly export in the `tf.contrib.data` API when stable
# or make private / remove.
class PrivateThreadPool(object):
"""A stateful resource that represents a private thread pool."""
def __init__(self, num_threads, display_name=None,
max_intra_op_parallelism=1):
"""Creates a `PrivateThreadPool` with the given number of threads."""
if context.executing_eagerly():
shared_name = _generate_shared_name("privatethreadpool")
self._resource = gen_dataset_ops.thread_pool_handle(
num_threads=num_threads,
max_intra_op_parallelism=max_intra_op_parallelism,
display_name=display_name,
shared_name=shared_name)
self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
handle=self._resource, handle_device=context.context().device_name)
else:
self._resource = gen_dataset_ops.thread_pool_handle(
num_threads=num_threads,
max_intra_op_parallelism=max_intra_op_parallelism,
display_name=display_name)
class _ThreadPoolDataset(dataset_ops.Dataset):
"""A `Dataset` that acts as an identity, and sets a custom threadpool."""
def __init__(self, input_dataset, thread_pool):
super(_ThreadPoolDataset, self).__init__()
self._input_dataset = input_dataset
self._thread_pool = thread_pool
def _as_variant_tensor(self):
return gen_dataset_ops.thread_pool_dataset(
self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access
self._thread_pool._resource, # pylint: disable=protected-access
**dataset_ops.flat_structure(self))
@property
def output_shapes(self):
return self._input_dataset.output_shapes
@property
def output_types(self):
return self._input_dataset.output_types
@property
def output_classes(self):
return self._input_dataset.output_classes
# TODO(b/73383364): Properly export in the `tf.contrib.data` API when stable
# or make private / remove.
def override_threadpool(dataset, thread_pool):
"""Returns a new dataset that uses the given thread pool for its operations.
Args:
dataset: A `tf.data.Dataset` object.
thread_pool: A `PrivateThreadPool` object.
Returns:
A dataset containing the same values as `dataset`, but which uses
`thread_pool` to compute any of its parallel operations (such as
`tf.data.Dataset.map`).
"""
return _ThreadPoolDataset(dataset, thread_pool)
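# Illustrative usage sketch (the pipeline and thread count are hypothetical,
# not part of this module):
#
#   pool = PrivateThreadPool(4, display_name="private_pool")
#   ds = dataset_ops.Dataset.range(100).map(lambda x: x + 1)
#   ds = override_threadpool(ds, pool)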
|
ramezquitao/pyoptools
|
refs/heads/master
|
pyoptools/misc/cmisc/__init__.py
|
2
|
from .cmisc import (dot_test,
test_1,
test_2,
unwrap)
__all__ = ["dot_test",
"test_1",
"test_2",
"unwrap"]
|
zstyblik/infernal-twin
|
refs/heads/master
|
build/pip/pip/_vendor/html5lib/treewalkers/genshistream.py
|
1730
|
from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from . import _base
from ..constants import voidElements, namespaces
class TreeWalker(_base.TreeWalker):
def __iter__(self):
# Buffer the events so we can pass in the following one
previous = None
for event in self.tree:
if previous is not None:
for token in self.tokens(previous, event):
yield token
previous = event
# Don't forget the final event!
if previous is not None:
for token in self.tokens(previous, None):
yield token
def tokens(self, event, next):
kind, data, pos = event
if kind == START:
tag, attribs = data
name = tag.localname
namespace = tag.namespace
converted_attribs = {}
for k, v in attribs:
if isinstance(k, QName):
converted_attribs[(k.namespace, k.localname)] = v
else:
converted_attribs[(None, k)] = v
if namespace == namespaces["html"] and name in voidElements:
for token in self.emptyTag(namespace, name, converted_attribs,
not next or next[0] != END
or next[1] != tag):
yield token
else:
yield self.startTag(namespace, name, converted_attribs)
elif kind == END:
name = data.localname
namespace = data.namespace
if name not in voidElements:
yield self.endTag(namespace, name)
elif kind == COMMENT:
yield self.comment(data)
elif kind == TEXT:
for token in self.text(data):
yield token
elif kind == DOCTYPE:
yield self.doctype(*data)
elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
START_CDATA, END_CDATA, PI):
pass
else:
yield self.unknown(kind)
|
KJin99/zulip
|
refs/heads/master
|
docs/conf.py
|
121
|
# -*- coding: utf-8 -*-
#
# zulip-contributor-docs documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 17 16:24:04 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zulip'
copyright = u'2015, The Zulip Team'
author = u'The Zulip Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Read The Docs can't import sphinx_rtd_theme, so don't import it there.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'zulip-contributor-docsdoc'
def setup(app):
# overrides for wide tables in RTD theme
app.add_stylesheet('theme_overrides.css') # path relative to _static
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'zulip-contributor-docs.tex', u'Zulip Documentation',
u'The Zulip Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'zulip-contributor-docs', u'Zulip Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'zulip-contributor-docs', u'Zulip Documentation',
author, 'zulip-contributor-docs', 'Documentation for contributing to Zulip.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
z1gm4/desarrollo_web_udp
|
refs/heads/dev
|
env/lib/python2.7/site-packages/faker/utils/__init__.py
|
20
|
# coding=utf-8
def is_string(var):
try:
return isinstance(var, basestring)
except NameError:
return isinstance(var, str)
def quote(var):
return ('"{0}"' if '"' not in var else "'{0}'").format(var)
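# Illustrative examples (not part of the original module):
#   is_string(u'abc')   # True on both Python 2 and 3
#   quote('simple')     # returns '"simple"'
#   quote('say "hi"')   # wraps in single quotes because the value contains a double quote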
|
imitrichev/cantera
|
refs/heads/master
|
interfaces/cython/cantera/examples/transport/dusty_gas.py
|
4
|
"""
Dusty Gas transport model.
The Dusty Gas model is a multicomponent transport model for gas transport
through the pores of a stationary porous medium. This example shows how to
create a transport manager that implements the Dusty Gas model and use it to
compute the multicomponent diffusion coefficients.
"""
import cantera as ct
# create a gas-phase object to represent the gas in the pores, with a
# dusty gas transport manager
g = ct.DustyGas('h2o2.cti')
# set the gas state
g.TPX = 500.0, ct.one_atm, "OH:1, H:2, O2:3, O:1.0E-8, H2:1.0E-8, H2O:1.0E-8, H2O2:1.0E-8, HO2:1.0E-8, AR:1.0E-8"
# set its parameters
g.porosity = 0.2
g.tortuosity = 4.0
g.mean_pore_radius = 1.5e-7
g.mean_particle_diameter = 1.5e-6 # lengths in meters
# print the multicomponent diffusion coefficients
print(g.multi_diff_coeffs)
# compute molar species fluxes
T1, rho1, Y1 = g.TDY
g.TP = g.T, 1.2 * ct.one_atm
T2, rho2, Y2 = g.TDY
delta = 0.001
print(g.molar_fluxes(T1, T1, rho1, rho1, Y1, Y1, delta))
print(g.molar_fluxes(T1, T2, rho1, rho2, Y1, Y2, delta))
|
Justin-Yuan/Image2Music-Generator
|
refs/heads/master
|
library/jython2.5.3/Lib/test/test_joverload.py
|
22
|
# test overloaded java methods dispatch logic in PyReflectedFunction
# needs to grow more tests. Uses javatests.JOverload as a bag of overloaded methods.
# (can be adapted to test alternative re-implementations even while they are developed:
# write a *Envl class and change/add to to_test for that)
import sys
import unittest
import java
from java.util import ArrayList
from javatests import JOverload, Reflection
from org.python.core import PyReflectedFunction
class PyReflFuncEnvl:
def __init__(self,name,meths):
self.reflfunc = PyReflectedFunction(meths)
def __call__(self,inst,args):
return self.reflfunc(inst,*args)
def extract_ov_meths(jcl,envl_class):
meths = java.lang.Class.getDeclaredMethods(jcl)
names = [ m.name for m in meths]
meth_dict = {}
for name in names:
if name.startswith('ov_') and not meth_dict.has_key(name):
meth_dict[name] = envl_class(name,[ m for m in meths if m.name == name ])
return meth_dict
jo = JOverload()
to_test = [extract_ov_meths(JOverload,PyReflFuncEnvl)]
class OverloadedDispatchTests(unittest.TestCase):
def check(self,lbl,rng,args,expected):
expected = expected.split()
for meth_dict in to_test:
for i,expect in zip(rng,expected):
self.assertEqual(meth_dict['ov_%s%s' % (lbl,i)](jo,args),expect)
def test_posprec(self):
self.check('posprec',[1,2],[0,0],
"(int,long) (long,int)")
def test_scal_int_zero(self):
self.check('scal',xrange(1,15),[0],
"""
(long)
(int)
(short)
(byte)
(byte)
(double)
(float)
(boolean)
(java.io.Serializable)
(java.io.Serializable)
(java.io.Serializable)
(java.io.Serializable)
(java.io.Serializable)
(java.lang.Object)
""")
def test_scal_string(self):
self.check('scal',xrange(1,15),['str'],
"""
(java.lang.String)
(java.lang.String)
(java.lang.String)
(java.lang.String)
(java.lang.String)
(java.lang.String)
(java.lang.String)
(java.lang.String)
(java.lang.String)
(java.io.Serializable)
(java.io.Serializable)
(java.io.Serializable)
(java.io.Serializable)
(java.lang.Object)
""")
def test_scal_char(self):
self.check('scal',xrange(1,15),['c'],
"""
(char)
(char)
(char)
(char)
(java.lang.String)
(java.lang.String)
(java.lang.String)
(java.lang.String)
(java.lang.String)
(java.io.Serializable)
(java.io.Serializable)
(java.io.Serializable)
(java.io.Serializable)
(java.lang.Object)
""")
def test_scal_float_one(self):
self.check('scal',xrange(1,15),[1.0],
"""
(double)
(double)
(double)
(double)
(double)
(double)
(float)
(java.io.Serializable)
(java.io.Serializable)
(java.io.Serializable)
(java.io.Serializable)
(java.io.Serializable)
(java.io.Serializable)
(java.lang.Object)
""")
class VarargsDispatchTests(unittest.TestCase):
def test_strings(self):
t = Reflection.StringVarargs()
self.assertEqual(t.test("abc", "xyz"),
"String...:[abc, xyz]")
self.assertEqual(t.test("abc"),
"String...:[abc]")
self.assertEqual(t.test(),
"String...:[]")
self.assertEqual(t.test(["abc", "xyz"]),
"String...:[abc, xyz]")
self.assertEqual(t.test(["abc"]),
"String...:[abc]")
self.assertEqual(t.test([]),
"String...:[]")
def test_lists(self):
t = Reflection.ListVarargs()
self.assertEqual(t.test(ArrayList([1,2,3]), ArrayList([4,5,6])),
"List...:[[1, 2, 3], [4, 5, 6]]")
self.assertEqual(t.test(ArrayList([1,2,3])),
"List...:[[1, 2, 3]]")
self.assertEqual(t.test(),
"List...:[]")
self.assertEqual(t.test([ArrayList([1,2,3]), ArrayList([4,5,6])]),
"List...:[[1, 2, 3], [4, 5, 6]]")
self.assertEqual(t.test([ArrayList([1,2,3])]),
"List...:[[1, 2, 3]]")
self.assertEqual(t.test([]),
"List...:[]")
class ComplexOverloadingTests(unittest.TestCase):
def test_complex(self):
o = Reflection.Overloaded()
self.assertEqual(o(2.), "class java.lang.Double=2.0")
self.assertEqual(o(1+2j), "class org.python.core.PyComplex=(1+2j)")
def printout(meth_dict,lbl,rng,args):
for i in rng:
print meth_dict['ov_%s%s' % (lbl,i)](jo,args)
if __name__ == '__main__' and not sys.argv[1:] == ['break-out']:
try:
import test_support
except ImportError:
unittest.main()
else:
test_support.run_unittest(OverloadedDispatchTests, VarargsDispatchTests, ComplexOverloadingTests)
|
jk1/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/referenceToClassWithNewInMovedSymbol/after/src/classFile.py
|
62
|
from collections import namedtuple
class Pipeline(namedtuple('_Pipeline', 'name')):
def __new__(cls, name):
return super(Pipeline, cls).__new__(cls, name)
def __init__(self, name):
pass
|
jeanlinux/calibre
|
refs/heads/master
|
src/regex/_regex_core.py
|
13
|
#
# Secret Labs' Regular Expression Engine core module
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB (info@pythonware.com).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
# 2010-01-16 mrab Python front-end re-written and extended
import string
import unicodedata
from collections import defaultdict
from calibre.constants import plugins
_regex = plugins['_regex'][0]
if _regex is None:
raise RuntimeError('Failed to load regex module with error: ' + plugins['_regex'][1])
__all__ = ["A", "ASCII", "B", "BESTMATCH", "D", "DEBUG", "E", "ENHANCEMATCH",
"F", "FULLCASE", "I", "IGNORECASE", "L", "LOCALE", "M", "MULTILINE", "R",
"REVERSE", "S", "DOTALL", "T", "TEMPLATE", "U", "UNICODE", "V0", "VERSION0",
"V1", "VERSION1", "W", "WORD", "X", "VERBOSE", "error",
"Scanner"]
# The regex exception.
class error(Exception):
def __init__(self, message, pattern=None, pos=None):
newline = u'\n' if isinstance(pattern, unicode) else '\n'
self.msg = message
self.pattern = pattern
self.pos = pos
if pattern is not None and pos is not None:
self.lineno = pattern.count(newline, 0, pos) + 1
self.colno = pos - pattern.rfind(newline, 0, pos)
message = "%s at position %d" % (message, pos)
if newline in pattern:
message += " (line %d, column %d)" % (self.lineno, self.colno)
Exception.__init__(self, message)
# The exception for when a positional flag has been turned on in the old
# behaviour.
class _UnscopedFlagSet(Exception):
pass
# The exception for when parsing fails and we want to try something else.
class ParseError(Exception):
pass
# The exception for when there isn't a valid first set.
class _FirstSetError(Exception):
pass
# Flags.
A = ASCII = 0x80 # Assume ASCII locale.
B = BESTMATCH = 0x1000 # Best fuzzy match.
D = DEBUG = 0x200 # Print parsed pattern.
E = ENHANCEMATCH = 0x8000 # Attempt to improve the fit after finding the first
# fuzzy match.
F = FULLCASE = 0x4000 # Unicode full case-folding.
I = IGNORECASE = 0x2 # Ignore case.
L = LOCALE = 0x4 # Assume current 8-bit locale.
M = MULTILINE = 0x8 # Make anchors look for newline.
R = REVERSE = 0x400 # Search backwards.
S = DOTALL = 0x10 # Make dot match newline.
U = UNICODE = 0x20 # Assume Unicode locale.
V0 = VERSION0 = 0x2000 # Old legacy behaviour.
V1 = VERSION1 = 0x100 # New enhanced behaviour.
W = WORD = 0x800 # Default Unicode word breaks.
X = VERBOSE = 0x40 # Ignore whitespace and comments.
T = TEMPLATE = 0x1 # Template (present because re module has it).
DEFAULT_VERSION = VERSION1
_ALL_VERSIONS = VERSION0 | VERSION1
_ALL_ENCODINGS = ASCII | LOCALE | UNICODE
# The default flags for the various versions.
DEFAULT_FLAGS = {VERSION0: 0, VERSION1: FULLCASE}
# The mask for the flags.
GLOBAL_FLAGS = (_ALL_ENCODINGS | _ALL_VERSIONS | BESTMATCH | DEBUG |
ENHANCEMATCH | REVERSE)
SCOPED_FLAGS = FULLCASE | IGNORECASE | MULTILINE | DOTALL | WORD | VERBOSE
ALPHA = frozenset(string.ascii_letters)
DIGITS = frozenset(string.digits)
ALNUM = ALPHA | DIGITS
OCT_DIGITS = frozenset(string.octdigits)
HEX_DIGITS = frozenset(string.hexdigits)
SPECIAL_CHARS = frozenset("()|?*+{^$.[\\#") | frozenset([""])
NAMED_CHAR_PART = ALNUM | frozenset(" -")
PROPERTY_NAME_PART = ALNUM | frozenset(" &_-.")
SET_OPS = ("||", "~~", "&&", "--")
# The width of the code words inside the regex engine.
BYTES_PER_CODE = _regex.get_code_size()
BITS_PER_CODE = BYTES_PER_CODE * 8
# The repeat count which represents infinity.
UNLIMITED = (1 << BITS_PER_CODE) - 1
# The regular expression flags.
REGEX_FLAGS = {"a": ASCII, "b": BESTMATCH, "e": ENHANCEMATCH, "f": FULLCASE,
"i": IGNORECASE, "L": LOCALE, "m": MULTILINE, "r": REVERSE, "s": DOTALL, "u":
UNICODE, "V0": VERSION0, "V1": VERSION1, "w": WORD, "x": VERBOSE}
# The case flags.
CASE_FLAGS = FULLCASE | IGNORECASE
NOCASE = 0
FULLIGNORECASE = FULLCASE | IGNORECASE
FULL_CASE_FOLDING = UNICODE | FULLIGNORECASE
# The number of digits in hexadecimal escapes.
HEX_ESCAPES = {"x": 2, "u": 4, "U": 8}
# A singleton which indicates a comment within a pattern.
COMMENT = object()
FLAGS = object()
# The names of the opcodes.
OPCODES = """
FAILURE
SUCCESS
ANY
ANY_ALL
ANY_ALL_REV
ANY_REV
ANY_U
ANY_U_REV
ATOMIC
BOUNDARY
BRANCH
CALL_REF
CHARACTER
CHARACTER_IGN
CHARACTER_IGN_REV
CHARACTER_REV
DEFAULT_BOUNDARY
DEFAULT_END_OF_WORD
DEFAULT_START_OF_WORD
END
END_OF_LINE
END_OF_LINE_U
END_OF_STRING
END_OF_STRING_LINE
END_OF_STRING_LINE_U
END_OF_WORD
FUZZY
GRAPHEME_BOUNDARY
GREEDY_REPEAT
GROUP
GROUP_CALL
GROUP_EXISTS
LAZY_REPEAT
LOOKAROUND
NEXT
PROPERTY
PROPERTY_IGN
PROPERTY_IGN_REV
PROPERTY_REV
RANGE
RANGE_IGN
RANGE_IGN_REV
RANGE_REV
REF_GROUP
REF_GROUP_FLD
REF_GROUP_FLD_REV
REF_GROUP_IGN
REF_GROUP_IGN_REV
REF_GROUP_REV
SEARCH_ANCHOR
SET_DIFF
SET_DIFF_IGN
SET_DIFF_IGN_REV
SET_DIFF_REV
SET_INTER
SET_INTER_IGN
SET_INTER_IGN_REV
SET_INTER_REV
SET_SYM_DIFF
SET_SYM_DIFF_IGN
SET_SYM_DIFF_IGN_REV
SET_SYM_DIFF_REV
SET_UNION
SET_UNION_IGN
SET_UNION_IGN_REV
SET_UNION_REV
START_OF_LINE
START_OF_LINE_U
START_OF_STRING
START_OF_WORD
STRING
STRING_FLD
STRING_FLD_REV
STRING_IGN
STRING_IGN_REV
STRING_REV
STRING_SET
STRING_SET_FLD
STRING_SET_FLD_REV
STRING_SET_IGN
STRING_SET_IGN_REV
STRING_SET_REV
"""
# Define the opcodes in a namespace.
class Namespace(object):
pass
OP = Namespace()
for i, op in enumerate(OPCODES.split()):
setattr(OP, op, i)
def _shrink_cache(cache_dict, args_dict, locale_sensitive, max_length, divisor=5):
"""Make room in the given cache.
Args:
cache_dict: The cache dictionary to modify.
args_dict: The dictionary of named list args used by patterns.
max_length: Maximum # of entries in cache_dict before it is shrunk.
divisor: Cache will shrink to max_length - 1/divisor*max_length items.
"""
# Toss out a fraction of the entries at random to make room for new ones.
# A random algorithm was chosen as opposed to simply cache_dict.popitem()
# as popitem could penalize the same regular expression repeatedly based
# on its internal hash value. Being random should spread the cache miss
# love around.
cache_keys = tuple(cache_dict.keys())
overage = len(cache_keys) - max_length
if overage < 0:
# Cache is already within limits. Normally this should not happen
# but it could due to multithreading.
return
number_to_toss = max_length // divisor + overage
# The import is done here to avoid a circular dependency.
import random
if not hasattr(random, 'sample'):
# Do nothing while resolving the circular dependency:
# re->random->warnings->tokenize->string->re
return
for doomed_key in random.sample(cache_keys, number_to_toss):
try:
del cache_dict[doomed_key]
except KeyError:
# Ignore problems if the cache changed from another thread.
pass
# Rebuild the arguments and locale-sensitivity dictionaries.
args_dict.clear()
sensitivity_dict = {}
for pattern, pattern_type, flags, args, default_version, locale in tuple(cache_dict):
args_dict[pattern, pattern_type, flags, default_version, locale] = args
try:
sensitivity_dict[pattern_type, pattern] = locale_sensitive[pattern_type, pattern]
except KeyError:
pass
locale_sensitive.clear()
locale_sensitive.update(sensitivity_dict)
def _fold_case(info, string):
"Folds the case of a string."
flags = info.flags
if (flags & _ALL_ENCODINGS) == 0:
flags |= info.guess_encoding
return _regex.fold_case(flags, string)
def is_cased(info, char):
"Checks whether a character is cased."
return len(_regex.get_all_cases(info.flags, char)) > 1
def _compile_firstset(info, fs):
"Compiles the firstset for the pattern."
if not fs or None in fs:
return []
# If we ignore the case, for simplicity we won't build a firstset.
members = set()
for i in fs:
if isinstance(i, Character) and not i.positive:
return []
if i.case_flags:
if isinstance(i, Character):
if is_cased(info, i.value):
return []
elif isinstance(i, SetBase):
return []
members.add(i.with_flags(case_flags=NOCASE))
# Build the firstset.
fs = SetUnion(info, list(members), zerowidth=True)
fs = fs.optimise(info, in_set=True)
# Compile the firstset.
return fs.compile(bool(info.flags & REVERSE))
def _flatten_code(code):
"Flattens the code from a list of tuples."
flat_code = []
for c in code:
flat_code.extend(c)
return flat_code
def make_character(info, value, in_set=False):
"Makes a character literal."
if in_set:
# A character set is built case-sensitively.
return Character(value)
return Character(value, case_flags=info.flags & CASE_FLAGS)
def make_ref_group(info, name, position):
"Makes a group reference."
return RefGroup(info, name, position, case_flags=info.flags & CASE_FLAGS)
def make_string_set(info, name):
"Makes a string set."
return StringSet(info, name, case_flags=info.flags & CASE_FLAGS)
def make_property(info, prop, in_set):
"Makes a property."
if in_set:
return prop
return prop.with_flags(case_flags=info.flags & CASE_FLAGS)
def _parse_pattern(source, info):
"Parses a pattern, eg. 'a|b|c'."
branches = [parse_sequence(source, info)]
while source.match("|"):
branches.append(parse_sequence(source, info))
if len(branches) == 1:
return branches[0]
return Branch(branches)
def parse_sequence(source, info):
"Parses a sequence, eg. 'abc'."
sequence = []
applied = False
while True:
# Get literal characters followed by an element.
characters, case_flags, element = parse_literal_and_element(source,
info)
if not element:
# No element, just a literal. We've also reached the end of the
# sequence.
append_literal(characters, case_flags, sequence)
break
if element is COMMENT or element is FLAGS:
append_literal(characters, case_flags, sequence)
elif type(element) is tuple:
# It looks like we've found a quantifier.
ch, saved_pos = element
counts = parse_quantifier(source, info, ch)
if counts:
# It _is_ a quantifier.
apply_quantifier(source, info, counts, characters, case_flags,
ch, saved_pos, applied, sequence)
applied = True
else:
# It's not a quantifier. Maybe it's a fuzzy constraint.
constraints = parse_fuzzy(source, ch)
if constraints:
# It _is_ a fuzzy constraint.
apply_constraint(source, info, constraints, characters,
case_flags, saved_pos, applied, sequence)
applied = True
else:
# The element was just a literal.
characters.append(ord(ch))
append_literal(characters, case_flags, sequence)
applied = False
else:
# We have a literal followed by something else.
append_literal(characters, case_flags, sequence)
sequence.append(element)
applied = False
return make_sequence(sequence)
def apply_quantifier(source, info, counts, characters, case_flags, ch,
saved_pos, applied, sequence):
if characters:
# The quantifier applies to the last character.
append_literal(characters[ : -1], case_flags, sequence)
element = Character(characters[-1], case_flags=case_flags)
else:
# The quantifier applies to the last item in the sequence.
if applied:
raise error("multiple repeat", source.string, saved_pos)
if not sequence:
raise error("nothing to repeat", source.string, saved_pos)
element = sequence.pop()
min_count, max_count = counts
saved_pos = source.pos
ch = source.get()
if ch == "?":
# The "?" suffix that means it's a lazy repeat.
repeated = LazyRepeat
elif ch == "+":
# The "+" suffix that means it's a possessive repeat.
repeated = PossessiveRepeat
else:
# No suffix means that it's a greedy repeat.
source.pos = saved_pos
repeated = GreedyRepeat
# Ignore the quantifier if it applies to a zero-width item or the number of
# repeats is fixed at 1.
if not element.is_empty() and (min_count != 1 or max_count != 1):
element = repeated(element, min_count, max_count)
sequence.append(element)
def apply_constraint(source, info, constraints, characters, case_flags,
saved_pos, applied, sequence):
if characters:
# The constraint applies to the last character.
append_literal(characters[ : -1], case_flags, sequence)
element = Character(characters[-1], case_flags=case_flags)
sequence.append(Fuzzy(element, constraints))
else:
# The constraint applies to the last item in the sequence.
if applied or not sequence:
raise error("nothing for fuzzy constraint", source.string,
saved_pos)
element = sequence.pop()
# If a group is marked as fuzzy then put all of the fuzzy part in the
# group.
if isinstance(element, Group):
element.subpattern = Fuzzy(element.subpattern, constraints)
sequence.append(element)
else:
sequence.append(Fuzzy(element, constraints))
def append_literal(characters, case_flags, sequence):
if characters:
sequence.append(Literal(characters, case_flags=case_flags))
def PossessiveRepeat(element, min_count, max_count):
"Builds a possessive repeat."
return Atomic(GreedyRepeat(element, min_count, max_count))
_QUANTIFIERS = {"?": (0, 1), "*": (0, None), "+": (1, None)}
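# Examples of the (min, max) counts produced by the quantifier parsing below:
# '?' -> (0, 1), '*' -> (0, None), '+' -> (1, None), '{2,5}' -> (2, 5),
# '{3,}' -> (3, None), '{,4}' -> (0, 4), '{3}' -> (3, 3). None means
# "unlimited".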
def parse_quantifier(source, info, ch):
"Parses a quantifier."
q = _QUANTIFIERS.get(ch)
if q:
# It's a quantifier.
return q
if ch == "{":
# Looks like a limited repeated element, eg. 'a{2,3}'.
counts = parse_limited_quantifier(source)
if counts:
return counts
return None
def is_above_limit(count):
"Checks whether a count is above the maximum."
return count is not None and count >= UNLIMITED
def parse_limited_quantifier(source):
"Parses a limited quantifier."
saved_pos = source.pos
min_count = parse_count(source)
if source.match(","):
max_count = parse_count(source)
# No minimum means 0 and no maximum means unlimited.
min_count = int(min_count or 0)
max_count = int(max_count) if max_count else None
if max_count is not None and min_count > max_count:
raise error("min repeat greater than max repeat", source.string,
saved_pos)
else:
if not min_count:
source.pos = saved_pos
return None
min_count = max_count = int(min_count)
if is_above_limit(min_count) or is_above_limit(max_count):
raise error("repeat count too big", source.string, saved_pos)
    if not source.match("}"):
source.pos = saved_pos
return None
return min_count, max_count
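# Fuzzy-matching constraints are written in braces after an item, e.g.
# "{e<=2}" (at most 2 errors of any kind), "{i<=1,d<=1,s<=1}", or a cost
# equation such as "{2i+2d+1s<=4}". Here 'i', 'd' and 's' appear to stand for
# insertions, deletions and substitutions, and 'e' for errors of any kind.
# parse_fuzzy returns None when the text after "{" is not a fuzzy constraint,
# in which case the "{" is treated as a literal.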
def parse_fuzzy(source, ch):
"Parses a fuzzy setting, if present."
if ch != "{":
return None
saved_pos = source.pos
constraints = {}
try:
parse_fuzzy_item(source, constraints)
while source.match(","):
parse_fuzzy_item(source, constraints)
except ParseError:
source.pos = saved_pos
return None
if not source.match("}"):
raise error("expected }", source.string, source.pos)
return constraints
def parse_fuzzy_item(source, constraints):
"Parses a fuzzy setting item."
saved_pos = source.pos
try:
parse_cost_constraint(source, constraints)
except ParseError:
source.pos = saved_pos
parse_cost_equation(source, constraints)
def parse_cost_constraint(source, constraints):
"Parses a cost constraint."
saved_pos = source.pos
ch = source.get()
if ch in ALPHA:
# Syntax: constraint [("<=" | "<") cost]
constraint = parse_constraint(source, constraints, ch)
max_inc = parse_fuzzy_compare(source)
if max_inc is None:
# No maximum cost.
constraints[constraint] = 0, None
else:
# There's a maximum cost.
cost_pos = source.pos
max_cost = int(parse_count(source))
# Inclusive or exclusive limit?
if not max_inc:
max_cost -= 1
if max_cost < 0:
raise error("bad fuzzy cost limit", source.string, cost_pos)
constraints[constraint] = 0, max_cost
elif ch in DIGITS:
# Syntax: cost ("<=" | "<") constraint ("<=" | "<") cost
source.pos = saved_pos
try:
# Minimum cost.
min_cost = int(parse_count(source))
min_inc = parse_fuzzy_compare(source)
if min_inc is None:
raise ParseError()
constraint = parse_constraint(source, constraints, source.get())
max_inc = parse_fuzzy_compare(source)
if max_inc is None:
raise ParseError()
# Maximum cost.
cost_pos = source.pos
max_cost = int(parse_count(source))
# Inclusive or exclusive limits?
if not min_inc:
min_cost += 1
if not max_inc:
max_cost -= 1
if not 0 <= min_cost <= max_cost:
raise error("bad fuzzy cost limit", source.string, cost_pos)
constraints[constraint] = min_cost, max_cost
except ValueError:
raise ParseError()
else:
raise ParseError()
def parse_constraint(source, constraints, ch):
"Parses a constraint."
if ch not in "deis":
raise error("bad fuzzy constraint", source.string, source.pos)
if ch in constraints:
raise error("repeated fuzzy constraint", source.string, source.pos)
return ch
def parse_fuzzy_compare(source):
"Parses a cost comparator."
if source.match("<="):
return True
elif source.match("<"):
return False
else:
return None
def parse_cost_equation(source, constraints):
"Parses a cost equation."
if "cost" in constraints:
raise error("more than one cost equation", source.string, source.pos)
cost = {}
parse_cost_term(source, cost)
while source.match("+"):
parse_cost_term(source, cost)
max_inc = parse_fuzzy_compare(source)
if max_inc is None:
raise error("missing fuzzy cost limit", source.string, source.pos)
max_cost = int(parse_count(source))
if not max_inc:
max_cost -= 1
if max_cost < 0:
raise error("bad fuzzy cost limit", source.string, source.pos)
cost["max"] = max_cost
constraints["cost"] = cost
def parse_cost_term(source, cost):
"Parses a cost equation term."
coeff = parse_count(source)
ch = source.get()
if ch not in "dis":
raise ParseError()
if ch in cost:
raise error("repeated fuzzy cost", source.string, source.pos)
cost[ch] = int(coeff or 1)
def parse_count(source):
"Parses a quantifier's count, which can be empty."
return source.get_while(DIGITS)
def parse_literal_and_element(source, info):
"""Parses a literal followed by an element. The element is FLAGS if it's an
inline flag or None if it has reached the end of a sequence.
"""
characters = []
case_flags = info.flags & CASE_FLAGS
while True:
saved_pos = source.pos
ch = source.get()
if ch in SPECIAL_CHARS:
if ch in ")|":
# The end of a sequence. At the end of the pattern ch is "".
source.pos = saved_pos
return characters, case_flags, None
elif ch == "\\":
# An escape sequence outside a set.
element = parse_escape(source, info, False)
return characters, case_flags, element
elif ch == "(":
# A parenthesised subpattern or a flag.
element = parse_paren(source, info)
if element and element is not COMMENT:
return characters, case_flags, element
elif ch == ".":
# Any character.
if info.flags & DOTALL:
element = AnyAll()
elif info.flags & WORD:
element = AnyU()
else:
element = Any()
return characters, case_flags, element
elif ch == "[":
# A character set.
element = parse_set(source, info)
return characters, case_flags, element
elif ch == "^":
# The start of a line or the string.
if info.flags & MULTILINE:
if info.flags & WORD:
element = StartOfLineU()
else:
element = StartOfLine()
else:
element = StartOfString()
return characters, case_flags, element
elif ch == "$":
# The end of a line or the string.
if info.flags & MULTILINE:
if info.flags & WORD:
element = EndOfLineU()
else:
element = EndOfLine()
else:
if info.flags & WORD:
element = EndOfStringLineU()
else:
element = EndOfStringLine()
return characters, case_flags, element
elif ch in "?*+{":
# Looks like a quantifier.
return characters, case_flags, (ch, saved_pos)
else:
# A literal.
characters.append(ord(ch))
else:
# A literal.
characters.append(ord(ch))
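# parse_paren recognises the various "(?..." forms: (?<name>...) named group,
# (?<=...)/(?<!...) lookbehind, (?=...)/(?!...) lookahead,
# (?P<name>...)/(?P=name)/(?P>name) Python extensions, (?#...) comment,
# (?(group)yes|no) conditional, (?>...) atomic group, (?|...) branch reset
# (common groups), (?R)/(?1) group calls, (?&name) named group call, and
# (?flags) or (?flags:...) inline/scoped flags. A plain "(" opens an unnamed
# capture group.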
def parse_paren(source, info):
"""Parses a parenthesised subpattern or a flag. Returns FLAGS if it's an
inline flag.
"""
saved_pos = source.pos
ch = source.get()
if ch == "?":
# (?...
saved_pos_2 = source.pos
ch = source.get()
if ch == "<":
# (?<...
saved_pos_3 = source.pos
ch = source.get()
if ch in ("=", "!"):
# (?<=... or (?<!...: lookbehind.
return parse_lookaround(source, info, True, ch == "=")
# (?<...: a named capture group.
source.pos = saved_pos_3
name = parse_name(source)
group = info.open_group(name)
source.expect(">")
saved_flags = info.flags
try:
subpattern = _parse_pattern(source, info)
source.expect(")")
finally:
info.flags = saved_flags
source.ignore_space = bool(info.flags & VERBOSE)
info.close_group()
return Group(info, group, subpattern)
if ch in ("=", "!"):
# (?=... or (?!...: lookahead.
return parse_lookaround(source, info, False, ch == "=")
if ch == "P":
# (?P...: a Python extension.
return parse_extension(source, info)
if ch == "#":
# (?#...: a comment.
return parse_comment(source)
if ch == "(":
# (?(...: a conditional subpattern.
return parse_conditional(source, info)
if ch == ">":
# (?>...: an atomic subpattern.
return parse_atomic(source, info)
if ch == "|":
# (?|...: a common/reset groups branch.
return parse_common(source, info)
if ch == "R" or "0" <= ch <= "9":
# (?R...: probably a call to a group.
return parse_call_group(source, info, ch, saved_pos_2)
if ch == "&":
# (?&...: a call to a named group.
return parse_call_named_group(source, info, saved_pos_2)
# (?...: probably a flags subpattern.
source.pos = saved_pos_2
return parse_flags_subpattern(source, info)
# (...: an unnamed capture group.
source.pos = saved_pos
group = info.open_group()
saved_flags = info.flags
try:
subpattern = _parse_pattern(source, info)
source.expect(")")
finally:
info.flags = saved_flags
source.ignore_space = bool(info.flags & VERBOSE)
info.close_group()
return Group(info, group, subpattern)
def parse_extension(source, info):
"Parses a Python extension."
saved_pos = source.pos
ch = source.get()
if ch == "<":
# (?P<...: a named capture group.
name = parse_name(source)
group = info.open_group(name)
source.expect(">")
saved_flags = info.flags
try:
subpattern = _parse_pattern(source, info)
source.expect(")")
finally:
info.flags = saved_flags
source.ignore_space = bool(info.flags & VERBOSE)
info.close_group()
return Group(info, group, subpattern)
if ch == "=":
# (?P=...: a named group reference.
name = parse_name(source, allow_numeric=True)
source.expect(")")
if info.is_open_group(name):
raise error("cannot refer to an open group", source.string,
saved_pos)
return make_ref_group(info, name, saved_pos)
if ch == ">" or ch == "&":
# (?P>...: a call to a group.
return parse_call_named_group(source, info, saved_pos)
source.pos = saved_pos
raise error("unknown extension", source.string, saved_pos)
def parse_comment(source):
"Parses a comment."
source.skip_while(set(")"), include=False)
source.expect(")")
return COMMENT
def parse_lookaround(source, info, behind, positive):
"Parses a lookaround."
saved_flags = info.flags
try:
subpattern = _parse_pattern(source, info)
source.expect(")")
finally:
info.flags = saved_flags
source.ignore_space = bool(info.flags & VERBOSE)
return LookAround(behind, positive, subpattern)
def parse_conditional(source, info):
"Parses a conditional subpattern."
saved_flags = info.flags
saved_pos = source.pos
try:
group = parse_name(source, True)
source.expect(")")
yes_branch = parse_sequence(source, info)
if source.match("|"):
no_branch = parse_sequence(source, info)
else:
no_branch = Sequence()
source.expect(")")
finally:
info.flags = saved_flags
source.ignore_space = bool(info.flags & VERBOSE)
if yes_branch.is_empty() and no_branch.is_empty():
return Sequence()
return Conditional(info, group, yes_branch, no_branch, saved_pos)
def parse_atomic(source, info):
"Parses an atomic subpattern."
saved_flags = info.flags
try:
subpattern = _parse_pattern(source, info)
source.expect(")")
finally:
info.flags = saved_flags
source.ignore_space = bool(info.flags & VERBOSE)
return Atomic(subpattern)
def parse_common(source, info):
"Parses a common groups branch."
    # Capture groups in different branches can reuse the same group numbers.
initial_group_count = info.group_count
branches = [parse_sequence(source, info)]
final_group_count = info.group_count
while source.match("|"):
info.group_count = initial_group_count
branches.append(parse_sequence(source, info))
final_group_count = max(final_group_count, info.group_count)
info.group_count = final_group_count
source.expect(")")
if len(branches) == 1:
return branches[0]
return Branch(branches)
def parse_call_group(source, info, ch, pos):
"Parses a call to a group."
if ch == "R":
group = "0"
else:
group = ch + source.get_while(DIGITS)
source.expect(")")
return CallGroup(info, group, pos)
def parse_call_named_group(source, info, pos):
"Parses a call to a named group."
group = parse_name(source)
source.expect(")")
return CallGroup(info, group, pos)
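# Inline flags are single letters (e.g. "(?i)" for case-insensitivity),
# except for the version flags "V0"/"V1", which is why a "V" pulls in the
# following character before the REGEX_FLAGS lookup.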
def parse_flag_set(source):
"Parses a set of inline flags."
flags = 0
try:
while True:
saved_pos = source.pos
ch = source.get()
if ch == "V":
ch += source.get()
flags |= REGEX_FLAGS[ch]
except KeyError:
source.pos = saved_pos
return flags
def parse_flags(source, info):
"Parses flags being turned on/off."
flags_on = parse_flag_set(source)
if source.match("-"):
flags_off = parse_flag_set(source)
if not flags_off:
raise error("bad inline flags: no flags after '-'", source.string,
source.pos)
else:
flags_off = 0
if flags_on & LOCALE:
        # Remember that this pattern has an inline locale flag.
info.inline_locale = True
return flags_on, flags_off
def parse_subpattern(source, info, flags_on, flags_off):
"Parses a subpattern with scoped flags."
saved_flags = info.flags
info.flags = (info.flags | flags_on) & ~flags_off
source.ignore_space = bool(info.flags & VERBOSE)
try:
subpattern = _parse_pattern(source, info)
source.expect(")")
finally:
info.flags = saved_flags
source.ignore_space = bool(info.flags & VERBOSE)
return subpattern
def parse_flags_subpattern(source, info):
"""Parses a flags subpattern. It could be inline flags or a subpattern
possibly with local flags. If it's a subpattern, then that's returned;
    if it's inline flags, then FLAGS is returned.
"""
flags_on, flags_off = parse_flags(source, info)
if flags_off & GLOBAL_FLAGS:
raise error("bad inline flags: cannot turn off global flag",
source.string, source.pos)
if flags_on & flags_off:
raise error("bad inline flags: flag turned on and off", source.string,
source.pos)
# Handle flags which are global in all regex behaviours.
new_global_flags = (flags_on & ~info.global_flags) & GLOBAL_FLAGS
if new_global_flags:
info.global_flags |= new_global_flags
# A global has been turned on, so reparse the pattern.
raise _UnscopedFlagSet(info.global_flags)
# Ensure that from now on we have only scoped flags.
flags_on &= ~GLOBAL_FLAGS
if source.match(":"):
return parse_subpattern(source, info, flags_on, flags_off)
if source.match(")"):
parse_positional_flags(source, info, flags_on, flags_off)
return FLAGS
raise error("unknown extension", source.string, source.pos)
def parse_positional_flags(source, info, flags_on, flags_off):
"Parses positional flags."
version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
if version == VERSION0:
# Positional flags are global and can only be turned on.
if flags_off:
raise error("bad inline flags: cannot turn flags off",
source.string, source.pos)
new_global_flags = flags_on & ~info.global_flags
if new_global_flags:
info.global_flags |= new_global_flags
# A global has been turned on, so reparse the pattern.
raise _UnscopedFlagSet(info.global_flags)
else:
info.flags = (info.flags | flags_on) & ~flags_off
source.ignore_space = bool(info.flags & VERBOSE)
def parse_name(source, allow_numeric=False, allow_group_0=False):
"Parses a name."
name = source.get_while(set(")>"), include=False)
if not name:
raise error("missing group name", source.string, source.pos)
if name.isdigit():
min_group = 0 if allow_group_0 else 1
if not allow_numeric or int(name) < min_group:
raise error("bad character in group name", source.string,
source.pos)
else:
if not is_identifier(name):
raise error("bad character in group name", source.string,
source.pos)
return name
def is_identifier(name):
if not name:
return False
if name[0] not in ALPHA and name[0] != "_":
return False
name = name.replace("_", "")
return not name or all(c in ALNUM for c in name)
def is_octal(string):
"Checks whether a string is octal."
return all(ch in OCT_DIGITS for ch in string)
def is_decimal(string):
"Checks whether a string is decimal."
return all(ch in DIGITS for ch in string)
def is_hexadecimal(string):
"Checks whether a string is hexadecimal."
return all(ch in HEX_DIGITS for ch in string)
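# parse_escape handles everything that can follow a backslash: hex escapes
# (\x.. and the longer forms listed in HEX_ESCAPES), group references
# (\g<name>), the search anchor \G, named lists (\L<name>), named codepoints
# (\N{...}), Unicode properties (\p{...} / \P{...}), grapheme clusters (\X),
# the alphabetic escapes (positions, character classes, control characters),
# numeric escapes (octal or group references), and plain literals.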
def parse_escape(source, info, in_set):
"Parses an escape sequence."
saved_ignore = source.ignore_space
source.ignore_space = False
ch = source.get()
source.ignore_space = saved_ignore
if not ch:
# A backslash at the end of the pattern.
raise error("bad escape (end of pattern)", source.string, source.pos)
if ch in HEX_ESCAPES:
# A hexadecimal escape sequence.
return parse_hex_escape(source, info, HEX_ESCAPES[ch], in_set, ch)
elif ch == "g" and not in_set:
# A group reference.
saved_pos = source.pos
try:
return parse_group_ref(source, info)
except error:
# Invalid as a group reference, so assume it's a literal.
source.pos = saved_pos
return make_character(info, ord(ch), in_set)
elif ch == "G" and not in_set:
# A search anchor.
return SearchAnchor()
elif ch == "L" and not in_set:
# A string set.
return parse_string_set(source, info)
elif ch == "N":
# A named codepoint.
return parse_named_char(source, info, in_set)
elif ch in "pP":
# A Unicode property, positive or negative.
return parse_property(source, info, ch == "p", in_set)
elif ch == "X" and not in_set:
# A grapheme cluster.
return Grapheme()
elif ch in ALPHA:
# An alphabetic escape sequence.
# Positional escapes aren't allowed inside a character set.
if not in_set:
if info.flags & WORD:
value = WORD_POSITION_ESCAPES.get(ch)
else:
value = POSITION_ESCAPES.get(ch)
if value:
return value
value = CHARSET_ESCAPES.get(ch)
if value:
return value
value = CHARACTER_ESCAPES.get(ch)
if value:
return Character(ord(value))
return make_character(info, ord(ch), in_set)
elif ch in DIGITS:
# A numeric escape sequence.
return parse_numeric_escape(source, info, ch, in_set)
else:
# A literal.
return make_character(info, ord(ch), in_set)
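# Numeric escapes are ambiguous: inside a set, or when starting with "0",
# they are always octal (e.g. "\012"); outside a set, three octal digits also
# form an octal escape (e.g. "\123"), while anything else (e.g. "\12"
# followed by a non-octal character) is a reference to that group number.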
def parse_numeric_escape(source, info, ch, in_set):
"Parses a numeric escape sequence."
if in_set or ch == "0":
# Octal escape sequence, max 3 digits.
return parse_octal_escape(source, info, [ch], in_set)
# At least 1 digit, so either octal escape or group.
digits = ch
saved_pos = source.pos
ch = source.get()
if ch in DIGITS:
# At least 2 digits, so either octal escape or group.
digits += ch
saved_pos = source.pos
ch = source.get()
if is_octal(digits) and ch in OCT_DIGITS:
# 3 octal digits, so octal escape sequence.
encoding = info.flags & _ALL_ENCODINGS
if encoding == ASCII or encoding == LOCALE:
octal_mask = 0xFF
else:
octal_mask = 0x1FF
value = int(digits + ch, 8) & octal_mask
return make_character(info, value)
# Group reference.
source.pos = saved_pos
if info.is_open_group(digits):
raise error("cannot refer to an open group", source.string, source.pos)
return make_ref_group(info, digits, source.pos)
def parse_octal_escape(source, info, digits, in_set):
"Parses an octal escape sequence."
saved_pos = source.pos
ch = source.get()
while len(digits) < 3 and ch in OCT_DIGITS:
digits.append(ch)
saved_pos = source.pos
ch = source.get()
source.pos = saved_pos
try:
value = int("".join(digits), 8)
return make_character(info, value, in_set)
except ValueError:
if digits[0] in OCT_DIGITS:
raise error("incomplete escape \\%s" % ''.join(digits),
source.string, source.pos)
else:
raise error("bad escape \\%s" % digits[0], source.string,
source.pos)
def parse_hex_escape(source, info, expected_len, in_set, type):
"Parses a hex escape sequence."
digits = []
for i in range(expected_len):
ch = source.get()
if ch not in HEX_DIGITS:
raise error("incomplete escape \\%s%s" % (type, ''.join(digits)),
source.string, source.pos)
digits.append(ch)
value = int("".join(digits), 16)
return make_character(info, value, in_set)
def parse_group_ref(source, info):
"Parses a group reference."
source.expect("<")
saved_pos = source.pos
name = parse_name(source, True)
source.expect(">")
if info.is_open_group(name):
raise error("cannot refer to an open group", source.string, source.pos)
return make_ref_group(info, name, saved_pos)
def parse_string_set(source, info):
"Parses a string set reference."
source.expect("<")
name = parse_name(source, True)
source.expect(">")
if name is None or name not in info.kwargs:
raise error("undefined named list", source.string, source.pos)
return make_string_set(info, name)
def parse_named_char(source, info, in_set):
"Parses a named character."
saved_pos = source.pos
if source.match("{"):
name = source.get_while(NAMED_CHAR_PART)
if source.match("}"):
try:
value = unicodedata.lookup(name)
return make_character(info, ord(value), in_set)
except KeyError:
raise error("undefined character name", source.string,
source.pos)
source.pos = saved_pos
return make_character(info, ord("N"), in_set)
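# Property escapes can be abbreviated ("\pL") or braced ("\p{Lu}",
# "\p{Script=Latin}"); a "^" just inside the braces negates the property, as
# does using "\P" instead of "\p". If nothing parses as a property, the "p"
# or "P" is taken as a literal.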
def parse_property(source, info, positive, in_set):
"Parses a Unicode property."
saved_pos = source.pos
ch = source.get()
if ch == "{":
negate = source.match("^")
prop_name, name = parse_property_name(source)
if source.match("}"):
# It's correctly delimited.
prop = lookup_property(prop_name, name, positive != negate, source)
return make_property(info, prop, in_set)
elif ch and ch in "CLMNPSZ":
# An abbreviated property, eg \pL.
prop = lookup_property(None, ch, positive, source)
return make_property(info, prop, in_set)
# Not a property, so treat as a literal "p" or "P".
source.pos = saved_pos
ch = "p" if positive else "P"
return make_character(info, ord(ch), in_set)
def parse_property_name(source):
"Parses a property name, which may be qualified."
name = source.get_while(PROPERTY_NAME_PART)
saved_pos = source.pos
ch = source.get()
if ch and ch in ":=":
prop_name = name
name = source.get_while(ALNUM | set(" &_-./")).strip()
if name:
# Name after the ":" or "=", so it's a qualified name.
saved_pos = source.pos
else:
# No name after the ":" or "=", so assume it's an unqualified name.
prop_name, name = None, prop_name
else:
prop_name = None
source.pos = saved_pos
return prop_name, name
def parse_set(source, info):
"Parses a character set."
version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
saved_ignore = source.ignore_space
source.ignore_space = False
# Negative set?
negate = source.match("^")
try:
if version == VERSION0:
item = parse_set_imp_union(source, info)
else:
item = parse_set_union(source, info)
if not source.match("]"):
raise error("missing ]", source.string, source.pos)
finally:
source.ignore_space = saved_ignore
if negate:
item = item.with_flags(positive=not item.positive)
item = item.with_flags(case_flags=info.flags & CASE_FLAGS)
return item
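# In VERSION1 patterns, sets support operators with the following precedence
# (loosest first): "||" union, "~~" symmetric difference, "&&" intersection,
# "--" difference, then implicit union of adjacent items; sets can also be
# nested, e.g. "[[a-z]--[aeiou]]". VERSION0 sets support only the implicit
# union, as in the standard re module.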
def parse_set_union(source, info):
"Parses a set union ([x||y])."
items = [parse_set_symm_diff(source, info)]
while source.match("||"):
items.append(parse_set_symm_diff(source, info))
if len(items) == 1:
return items[0]
return SetUnion(info, items)
def parse_set_symm_diff(source, info):
"Parses a set symmetric difference ([x~~y])."
items = [parse_set_inter(source, info)]
while source.match("~~"):
items.append(parse_set_inter(source, info))
if len(items) == 1:
return items[0]
return SetSymDiff(info, items)
def parse_set_inter(source, info):
"Parses a set intersection ([x&&y])."
items = [parse_set_diff(source, info)]
while source.match("&&"):
items.append(parse_set_diff(source, info))
if len(items) == 1:
return items[0]
return SetInter(info, items)
def parse_set_diff(source, info):
"Parses a set difference ([x--y])."
items = [parse_set_imp_union(source, info)]
while source.match("--"):
items.append(parse_set_imp_union(source, info))
if len(items) == 1:
return items[0]
return SetDiff(info, items)
def parse_set_imp_union(source, info):
"Parses a set implicit union ([xy])."
version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
items = [parse_set_member(source, info)]
while True:
saved_pos = source.pos
if source.match("]"):
# End of the set.
source.pos = saved_pos
break
if version == VERSION1 and any(source.match(op) for op in SET_OPS):
# The new behaviour has set operators.
source.pos = saved_pos
break
items.append(parse_set_member(source, info))
if len(items) == 1:
return items[0]
return SetUnion(info, items)
def parse_set_member(source, info):
"Parses a member in a character set."
# Parse a set item.
start = parse_set_item(source, info)
saved_pos1 = source.pos
if (not isinstance(start, Character) or not start.positive or not
source.match("-")):
# It's not the start of a range.
return start
version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
# It looks like the start of a range of characters.
saved_pos2 = source.pos
if version == VERSION1 and source.match("-"):
# It's actually the set difference operator '--', so return the
# character.
source.pos = saved_pos1
return start
if source.match("]"):
# We've reached the end of the set, so return both the character and
# hyphen.
source.pos = saved_pos2
return SetUnion(info, [start, Character(ord("-"))])
# Parse a set item.
end = parse_set_item(source, info)
if not isinstance(end, Character) or not end.positive:
# It's not a range, so return the character, hyphen and property.
return SetUnion(info, [start, Character(ord("-")), end])
# It _is_ a range.
if start.value > end.value:
raise error("bad character range", source.string, source.pos)
if start.value == end.value:
return start
return Range(start.value, end.value)
def parse_set_item(source, info):
"Parses an item in a character set."
version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
if source.match("\\"):
# An escape sequence in a set.
return parse_escape(source, info, True)
saved_pos = source.pos
if source.match("[:"):
# Looks like a POSIX character class.
try:
return parse_posix_class(source, info)
except ParseError:
# Not a POSIX character class.
source.pos = saved_pos
if version == VERSION1 and source.match("["):
# It's the start of a nested set.
# Negative set?
negate = source.match("^")
item = parse_set_union(source, info)
if not source.match("]"):
raise error("missing ]", source.string, source.pos)
if negate:
item = item.with_flags(positive=not item.positive)
return item
ch = source.get()
if not ch:
raise error("unterminated character set", source.string, source.pos)
return Character(ord(ch))
def parse_posix_class(source, info):
"Parses a POSIX character class."
negate = source.match("^")
prop_name, name = parse_property_name(source)
if not source.match(":]"):
raise ParseError()
return lookup_property(prop_name, name, not negate, source, posix=True)
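# The two helpers below appear to normalise numeric property values (such as
# Unicode numeric values) to a canonical rational form, so that "0.5", ".5"
# and "1/2" all standardise to "1/2". For example, float_to_rational(0.5)
# returns (1, 2) and numeric_to_rational("3") returns "3".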
def float_to_rational(flt):
"Converts a float to a rational pair."
int_part = int(flt)
error = flt - int_part
if abs(error) < 0.0001:
return int_part, 1
den, num = float_to_rational(1.0 / error)
return int_part * den + num, den
def numeric_to_rational(numeric):
"Converts a numeric string to a rational string, if possible."
if numeric[ : 1] == "-":
sign, numeric = numeric[0], numeric[1 : ]
else:
sign = ""
parts = numeric.split("/")
if len(parts) == 2:
num, den = float_to_rational(float(parts[0]) / float(parts[1]))
elif len(parts) == 1:
num, den = float_to_rational(float(parts[0]))
else:
raise ValueError()
result = "%s%s/%s" % (sign, num, den)
if result.endswith("/1"):
return result[ : -2]
return result
def standardise_name(name):
"Standardises a property or value name."
try:
return numeric_to_rational("".join(name))
except (ValueError, ZeroDivisionError):
return "".join(ch for ch in name if ch not in "_- ").upper()
_posix_classes = set('ALNUM DIGIT PUNCT XDIGIT'.split())
def lookup_property(property, value, positive, source=None, posix=False):
"Looks up a property."
# Normalise the names (which may still be lists).
property = standardise_name(property) if property else None
value = standardise_name(value)
if (property, value) == ("GENERALCATEGORY", "ASSIGNED"):
property, value, positive = "GENERALCATEGORY", "UNASSIGNED", not positive
if posix and not property and value.upper() in _posix_classes:
value = 'POSIX' + value
if property:
# Both the property and the value are provided.
prop = PROPERTIES.get(property)
if not prop:
if not source:
raise error("unknown property")
raise error("unknown property", source.string, source.pos)
prop_id, value_dict = prop
val_id = value_dict.get(value)
if val_id is None:
if not source:
raise error("unknown property value")
raise error("unknown property value", source.string, source.pos)
if "YES" in value_dict and val_id == 0:
positive, val_id = not positive, 1
return Property((prop_id << 16) | val_id, positive)
# Only the value is provided.
# It might be the name of a GC, script or block value.
for property in ("GC", "SCRIPT", "BLOCK"):
prop_id, value_dict = PROPERTIES.get(property)
val_id = value_dict.get(value)
if val_id is not None:
return Property((prop_id << 16) | val_id, positive)
# It might be the name of a binary property.
prop = PROPERTIES.get(value)
if prop:
prop_id, value_dict = prop
if "YES" in value_dict:
return Property((prop_id << 16) | 1, positive)
# It might be the name of a binary property starting with a prefix.
if value.startswith("IS"):
prop = PROPERTIES.get(value[2 : ])
if prop:
prop_id, value_dict = prop
if "YES" in value_dict:
return Property((prop_id << 16) | 1, positive)
# It might be the name of a script or block starting with a prefix.
for prefix, property in (("IS", "SCRIPT"), ("IN", "BLOCK")):
if value.startswith(prefix):
prop_id, value_dict = PROPERTIES.get(property)
val_id = value_dict.get(value[2 : ])
if val_id is not None:
return Property((prop_id << 16) | val_id, positive)
# Unknown property.
if not source:
raise error("unknown property")
raise error("unknown property", source.string, source.pos)
def _compile_replacement(source, pattern, is_unicode):
"Compiles a replacement template escape sequence."
ch = source.get()
if ch in ALPHA:
# An alphabetic escape sequence.
value = CHARACTER_ESCAPES.get(ch)
if value:
return False, [ord(value)]
if ch in HEX_ESCAPES and (ch == "x" or is_unicode):
# A hexadecimal escape sequence.
return False, [parse_repl_hex_escape(source, HEX_ESCAPES[ch], ch)]
if ch == "g":
            # A group reference.
return True, [compile_repl_group(source, pattern)]
if ch == "N" and is_unicode:
# A named character.
value = parse_repl_named_char(source)
if value is not None:
return False, [value]
return False, [ord("\\"), ord(ch)]
if isinstance(source.sep, str):
octal_mask = 0xFF
else:
octal_mask = 0x1FF
if ch == "0":
# An octal escape sequence.
digits = ch
while len(digits) < 3:
saved_pos = source.pos
ch = source.get()
if ch not in OCT_DIGITS:
source.pos = saved_pos
break
digits += ch
return False, [int(digits, 8) & octal_mask]
if ch in DIGITS:
# Either an octal escape sequence (3 digits) or a group reference (max
# 2 digits).
digits = ch
saved_pos = source.pos
ch = source.get()
if ch in DIGITS:
digits += ch
saved_pos = source.pos
ch = source.get()
if ch and is_octal(digits + ch):
# An octal escape sequence.
return False, [int(digits + ch, 8) & octal_mask]
# A group reference.
source.pos = saved_pos
return True, [int(digits)]
if ch == "\\":
# An escaped backslash is a backslash.
return False, [ord("\\")]
if not ch:
# A trailing backslash.
raise error("bad escape (end of pattern)", source.string, source.pos)
# An escaped non-backslash is a backslash followed by the literal.
return False, [ord("\\"), ord(ch)]
def parse_repl_hex_escape(source, expected_len, type):
"Parses a hex escape sequence in a replacement string."
digits = []
for i in range(expected_len):
ch = source.get()
if ch not in HEX_DIGITS:
raise error("incomplete escape \\%s%s" % (type, ''.join(digits)),
source.string, source.pos)
digits.append(ch)
return int("".join(digits), 16)
def parse_repl_named_char(source):
"Parses a named character in a replacement string."
saved_pos = source.pos
if source.match("{"):
name = source.get_while(ALPHA | set(" "))
if source.match("}"):
try:
value = unicodedata.lookup(name)
return ord(value)
except KeyError:
raise error("undefined character name", source.string,
source.pos)
source.pos = saved_pos
return None
def compile_repl_group(source, pattern):
"Compiles a replacement template group reference."
source.expect("<")
name = parse_name(source, True, True)
source.expect(">")
if name.isdigit():
index = int(name)
if not 0 <= index <= pattern.groups:
raise error("invalid group reference", source.string, source.pos)
return index
try:
return pattern.groupindex[name]
except KeyError:
raise IndexError("unknown group")
# The regular expression is parsed into a syntax tree. The different types of
# node are defined below.
INDENT = " "
POSITIVE_OP = 0x1
ZEROWIDTH_OP = 0x2
FUZZY_OP = 0x4
REVERSE_OP = 0x8
REQUIRED_OP = 0x10
POS_TEXT = {False: "NON-MATCH", True: "MATCH"}
CASE_TEXT = {NOCASE: "", IGNORECASE: " SIMPLE_IGNORE_CASE", FULLCASE: "",
FULLIGNORECASE: " FULL_IGNORE_CASE"}
def make_sequence(items):
if len(items) == 1:
return items[0]
return Sequence(items)
# Common base class for all nodes.
class RegexBase(object):
def __init__(self):
self._key = self.__class__
def with_flags(self, positive=None, case_flags=None, zerowidth=None):
if positive is None:
positive = self.positive
else:
positive = bool(positive)
if case_flags is None:
case_flags = self.case_flags
else:
case_flags = case_flags & CASE_FLAGS
if zerowidth is None:
zerowidth = self.zerowidth
else:
zerowidth = bool(zerowidth)
if (positive == self.positive and case_flags == self.case_flags and
zerowidth == self.zerowidth):
return self
return self.rebuild(positive, case_flags, zerowidth)
def fix_groups(self, pattern, reverse, fuzzy):
pass
def optimise(self, info):
return self
def pack_characters(self, info):
return self
def remove_captures(self):
return self
def is_atomic(self):
return True
def can_be_affix(self):
return True
def contains_group(self):
return False
def get_firstset(self, reverse):
raise _FirstSetError()
def has_simple_start(self):
return False
def compile(self, reverse=False, fuzzy=False):
return self._compile(reverse, fuzzy)
def dump(self, indent, reverse):
self._dump(indent, reverse)
def is_empty(self):
return False
def __hash__(self):
return hash(self._key)
def __eq__(self, other):
return type(self) is type(other) and self._key == other._key
def __ne__(self, other):
return not self.__eq__(other)
def get_required_string(self, reverse):
return self.max_width(), None
# Base class for zero-width nodes.
class ZeroWidthBase(RegexBase):
def __init__(self, positive=True):
RegexBase.__init__(self)
self.positive = bool(positive)
self._key = self.__class__, self.positive
def get_firstset(self, reverse):
return set([None])
def _compile(self, reverse, fuzzy):
flags = 0
if self.positive:
flags |= POSITIVE_OP
if fuzzy:
flags |= FUZZY_OP
if reverse:
flags |= REVERSE_OP
return [(self._opcode, flags)]
def _dump(self, indent, reverse):
print "%s%s %s" % (INDENT * indent, self._op_name,
POS_TEXT[self.positive])
def max_width(self):
return 0
class Any(RegexBase):
_opcode = {False: OP.ANY, True: OP.ANY_REV}
_op_name = "ANY"
def has_simple_start(self):
return True
def _compile(self, reverse, fuzzy):
flags = 0
if fuzzy:
flags |= FUZZY_OP
return [(self._opcode[reverse], flags)]
def _dump(self, indent, reverse):
print "%s%s" % (INDENT * indent, self._op_name)
def max_width(self):
return 1
class AnyAll(Any):
_opcode = {False: OP.ANY_ALL, True: OP.ANY_ALL_REV}
_op_name = "ANY_ALL"
class AnyU(Any):
_opcode = {False: OP.ANY_U, True: OP.ANY_U_REV}
_op_name = "ANY_U"
class Atomic(RegexBase):
def __init__(self, subpattern):
RegexBase.__init__(self)
self.subpattern = subpattern
def fix_groups(self, pattern, reverse, fuzzy):
self.subpattern.fix_groups(pattern, reverse, fuzzy)
def optimise(self, info):
self.subpattern = self.subpattern.optimise(info)
if self.subpattern.is_empty():
return self.subpattern
return self
def pack_characters(self, info):
self.subpattern = self.subpattern.pack_characters(info)
return self
def remove_captures(self):
self.subpattern = self.subpattern.remove_captures()
return self
def can_be_affix(self):
return self.subpattern.can_be_affix()
def contains_group(self):
return self.subpattern.contains_group()
def get_firstset(self, reverse):
return self.subpattern.get_firstset(reverse)
def has_simple_start(self):
return self.subpattern.has_simple_start()
def _compile(self, reverse, fuzzy):
return ([(OP.ATOMIC, )] + self.subpattern.compile(reverse, fuzzy) +
[(OP.END, )])
def _dump(self, indent, reverse):
print "%sATOMIC" % (INDENT * indent)
self.subpattern.dump(indent + 1, reverse)
def is_empty(self):
return self.subpattern.is_empty()
def __eq__(self, other):
return (type(self) is type(other) and self.subpattern ==
other.subpattern)
def max_width(self):
return self.subpattern.max_width()
def get_required_string(self, reverse):
return self.subpattern.get_required_string(reverse)
class Boundary(ZeroWidthBase):
_opcode = OP.BOUNDARY
_op_name = "BOUNDARY"
class Branch(RegexBase):
def __init__(self, branches):
RegexBase.__init__(self)
self.branches = branches
def fix_groups(self, pattern, reverse, fuzzy):
for b in self.branches:
b.fix_groups(pattern, reverse, fuzzy)
def optimise(self, info):
# Flatten branches within branches.
branches = Branch._flatten_branches(info, self.branches)
# Move any common prefix or suffix out of the branches.
prefix, branches = Branch._split_common_prefix(info, branches)
suffix, branches = Branch._split_common_suffix(info, branches)
# Merge branches starting with the same character. (If a character
# prefix doesn't match in one branch, it won't match in any of the
# others starting with that same character.)
branches = Branch._merge_common_prefixes(info, branches)
# Try to reduce adjacent single-character branches to sets.
branches = Branch._reduce_to_set(info, branches)
if len(branches) > 1:
sequence = prefix + [Branch(branches)] + suffix
else:
sequence = prefix + branches + suffix
return make_sequence(sequence)
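    # Note: this second definition of optimise replaces the one above when
    # the class body is executed, so the prefix/suffix splitting and
    # common-prefix merging defined above are effectively unused here.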
def optimise(self, info):
# Flatten branches within branches.
branches = Branch._flatten_branches(info, self.branches)
# Try to reduce adjacent single-character branches to sets.
branches = Branch._reduce_to_set(info, branches)
if len(branches) > 1:
sequence = [Branch(branches)]
else:
sequence = branches
return make_sequence(sequence)
def pack_characters(self, info):
self.branches = [b.pack_characters(info) for b in self.branches]
return self
def remove_captures(self):
self.branches = [b.remove_captures() for b in self.branches]
return self
def is_atomic(self):
return all(b.is_atomic() for b in self.branches)
def can_be_affix(self):
return all(b.can_be_affix() for b in self.branches)
def contains_group(self):
return any(b.contains_group() for b in self.branches)
def get_firstset(self, reverse):
fs = set()
for b in self.branches:
fs |= b.get_firstset(reverse)
return fs or set([None])
def _compile(self, reverse, fuzzy):
code = [(OP.BRANCH, )]
for b in self.branches:
code.extend(b.compile(reverse, fuzzy))
code.append((OP.NEXT, ))
code[-1] = (OP.END, )
return code
def _dump(self, indent, reverse):
print "%sBRANCH" % (INDENT * indent)
self.branches[0].dump(indent + 1, reverse)
for b in self.branches[1 : ]:
print "%sOR" % (INDENT * indent)
b.dump(indent + 1, reverse)
@staticmethod
def _flatten_branches(info, branches):
# Flatten the branches so that there aren't branches of branches.
new_branches = []
for b in branches:
b = b.optimise(info)
if isinstance(b, Branch):
new_branches.extend(b.branches)
else:
new_branches.append(b)
return new_branches
@staticmethod
def _split_common_prefix(info, branches):
# Common leading items can be moved out of the branches.
# Get the items in the branches.
alternatives = []
for b in branches:
if isinstance(b, Sequence):
alternatives.append(b.items)
else:
alternatives.append([b])
# What is the maximum possible length of the prefix?
max_count = min(len(a) for a in alternatives)
# What is the longest common prefix?
prefix = alternatives[0]
pos = 0
end_pos = max_count
while pos < end_pos and prefix[pos].can_be_affix() and all(a[pos] ==
prefix[pos] for a in alternatives):
pos += 1
count = pos
if info.flags & UNICODE:
# We need to check that we're not splitting a sequence of
# characters which could form part of full case-folding.
count = pos
while count > 0 and not all(Branch._can_split(a, count) for a in
alternatives):
count -= 1
# No common prefix is possible.
if count == 0:
return [], branches
# Rebuild the branches.
new_branches = []
for a in alternatives:
new_branches.append(make_sequence(a[count : ]))
return prefix[ : count], new_branches
@staticmethod
def _split_common_suffix(info, branches):
# Common trailing items can be moved out of the branches.
# Get the items in the branches.
alternatives = []
for b in branches:
if isinstance(b, Sequence):
alternatives.append(b.items)
else:
alternatives.append([b])
# What is the maximum possible length of the suffix?
max_count = min(len(a) for a in alternatives)
# What is the longest common suffix?
suffix = alternatives[0]
pos = -1
end_pos = -1 - max_count
while pos > end_pos and suffix[pos].can_be_affix() and all(a[pos] ==
suffix[pos] for a in alternatives):
pos -= 1
count = -1 - pos
if info.flags & UNICODE:
# We need to check that we're not splitting a sequence of
# characters which could form part of full case-folding.
while count > 0 and not all(Branch._can_split_rev(a, count) for a
in alternatives):
count -= 1
# No common suffix is possible.
if count == 0:
return [], branches
# Rebuild the branches.
new_branches = []
for a in alternatives:
new_branches.append(make_sequence(a[ : -count]))
return suffix[-count : ], new_branches
@staticmethod
def _can_split(items, count):
# Check the characters either side of the proposed split.
if not Branch._is_full_case(items, count - 1):
return True
if not Branch._is_full_case(items, count):
return True
# Check whether a 1-1 split would be OK.
if Branch._is_folded(items[count - 1 : count + 1]):
return False
# Check whether a 1-2 split would be OK.
if (Branch._is_full_case(items, count + 2) and
Branch._is_folded(items[count - 1 : count + 2])):
return False
# Check whether a 2-1 split would be OK.
if (Branch._is_full_case(items, count - 2) and
Branch._is_folded(items[count - 2 : count + 1])):
return False
return True
@staticmethod
def _can_split_rev(items, count):
end = len(items)
# Check the characters either side of the proposed split.
if not Branch._is_full_case(items, end - count):
return True
if not Branch._is_full_case(items, end - count - 1):
return True
# Check whether a 1-1 split would be OK.
if Branch._is_folded(items[end - count - 1 : end - count + 1]):
return False
# Check whether a 1-2 split would be OK.
if (Branch._is_full_case(items, end - count + 2) and
Branch._is_folded(items[end - count - 1 : end - count + 2])):
return False
# Check whether a 2-1 split would be OK.
if (Branch._is_full_case(items, end - count - 2) and
Branch._is_folded(items[end - count - 2 : end - count + 1])):
return False
return True
@staticmethod
def _merge_common_prefixes(info, branches):
# Branches with the same case-sensitive character prefix can be grouped
# together if they are separated only by other branches with a
# character prefix.
prefixed = defaultdict(list)
order = {}
new_branches = []
for b in branches:
if Branch._is_simple_character(b):
# Branch starts with a simple character.
prefixed[b.value].append([b])
order.setdefault(b.value, len(order))
elif (isinstance(b, Sequence) and b.items and
Branch._is_simple_character(b.items[0])):
# Branch starts with a simple character.
prefixed[b.items[0].value].append(b.items)
order.setdefault(b.items[0].value, len(order))
else:
Branch._flush_char_prefix(info, prefixed, order, new_branches)
new_branches.append(b)
Branch._flush_char_prefix(info, prefixed, order, new_branches)
return new_branches
@staticmethod
def _is_simple_character(c):
return isinstance(c, Character) and c.positive and not c.case_flags
@staticmethod
def _reduce_to_set(info, branches):
# Can the branches be reduced to a set?
new_branches = []
items = set()
case_flags = NOCASE
for b in branches:
if isinstance(b, (Character, Property, SetBase)):
# Branch starts with a single character.
if b.case_flags != case_flags:
# Different case sensitivity, so flush.
Branch._flush_set_members(info, items, case_flags,
new_branches)
case_flags = b.case_flags
items.add(b.with_flags(case_flags=NOCASE))
else:
Branch._flush_set_members(info, items, case_flags,
new_branches)
new_branches.append(b)
Branch._flush_set_members(info, items, case_flags, new_branches)
return new_branches
@staticmethod
def _flush_char_prefix(info, prefixed, order, new_branches):
# Flush the prefixed branches.
if not prefixed:
return
for value, branches in sorted(prefixed.items(), key=lambda pair:
order[pair[0]]):
if len(branches) == 1:
new_branches.append(make_sequence(branches[0]))
else:
subbranches = []
optional = False
for b in branches:
if len(b) > 1:
subbranches.append(make_sequence(b[1 : ]))
elif not optional:
subbranches.append(Sequence())
optional = True
sequence = Sequence([Character(value), Branch(subbranches)])
new_branches.append(sequence.optimise(info))
prefixed.clear()
order.clear()
@staticmethod
def _flush_set_members(info, items, case_flags, new_branches):
# Flush the set members.
if not items:
return
if len(items) == 1:
item = list(items)[0]
else:
item = SetUnion(info, list(items)).optimise(info)
new_branches.append(item.with_flags(case_flags=case_flags))
items.clear()
@staticmethod
def _is_full_case(items, i):
if not 0 <= i < len(items):
return False
item = items[i]
return (isinstance(item, Character) and item.positive and
(item.case_flags & FULLIGNORECASE) == FULLIGNORECASE)
@staticmethod
def _is_folded(items):
if len(items) < 2:
return False
for i in items:
if (not isinstance(i, Character) or not i.positive or not
i.case_flags):
return False
folded = u"".join(unichr(i.value) for i in items)
folded = _regex.fold_case(FULL_CASE_FOLDING, folded)
# Get the characters which expand to multiple codepoints on folding.
expanding_chars = _regex.get_expand_on_folding()
for c in expanding_chars:
if folded == _regex.fold_case(FULL_CASE_FOLDING, c):
return True
return False
def is_empty(self):
return all(b.is_empty() for b in self.branches)
def __eq__(self, other):
return type(self) is type(other) and self.branches == other.branches
def max_width(self):
return max(b.max_width() for b in self.branches)
class CallGroup(RegexBase):
def __init__(self, info, group, position):
RegexBase.__init__(self)
self.info = info
self.group = group
self.position = position
self._key = self.__class__, self.group
def fix_groups(self, pattern, reverse, fuzzy):
try:
self.group = int(self.group)
except ValueError:
try:
self.group = self.info.group_index[self.group]
except KeyError:
raise error("invalid group reference", pattern, self.position)
if not 0 <= self.group <= self.info.group_count:
raise error("unknown group", pattern, self.position)
if self.group > 0 and self.info.open_group_count[self.group] > 1:
raise error("ambiguous group reference", pattern, self.position)
self.info.group_calls.append((self, reverse, fuzzy))
self._key = self.__class__, self.group
def remove_captures(self):
raise error("group reference not allowed", pattern, self.position)
def _compile(self, reverse, fuzzy):
return [(OP.GROUP_CALL, self.call_ref)]
def _dump(self, indent, reverse):
print "%sGROUP_CALL %s" % (INDENT * indent, self.group)
def __eq__(self, other):
return type(self) is type(other) and self.group == other.group
def max_width(self):
return UNLIMITED
class Character(RegexBase):
_opcode = {(NOCASE, False): OP.CHARACTER, (IGNORECASE, False):
OP.CHARACTER_IGN, (FULLCASE, False): OP.CHARACTER, (FULLIGNORECASE,
False): OP.CHARACTER_IGN, (NOCASE, True): OP.CHARACTER_REV, (IGNORECASE,
True): OP.CHARACTER_IGN_REV, (FULLCASE, True): OP.CHARACTER_REV,
(FULLIGNORECASE, True): OP.CHARACTER_IGN_REV}
def __init__(self, value, positive=True, case_flags=NOCASE,
zerowidth=False):
RegexBase.__init__(self)
self.value = value
self.positive = bool(positive)
self.case_flags = case_flags
self.zerowidth = bool(zerowidth)
if (self.positive and (self.case_flags & FULLIGNORECASE) ==
FULLIGNORECASE):
self.folded = _regex.fold_case(FULL_CASE_FOLDING, unichr(self.value))
else:
self.folded = unichr(self.value)
self._key = (self.__class__, self.value, self.positive,
self.case_flags, self.zerowidth)
def rebuild(self, positive, case_flags, zerowidth):
return Character(self.value, positive, case_flags, zerowidth)
def optimise(self, info, in_set=False):
return self
def get_firstset(self, reverse):
return set([self])
def has_simple_start(self):
return True
def _compile(self, reverse, fuzzy):
flags = 0
if self.positive:
flags |= POSITIVE_OP
if self.zerowidth:
flags |= ZEROWIDTH_OP
if fuzzy:
flags |= FUZZY_OP
code = PrecompiledCode([self._opcode[self.case_flags, reverse], flags,
self.value])
if len(self.folded) > 1:
# The character expands on full case-folding.
code = Branch([code, String([ord(c) for c in self.folded],
case_flags=self.case_flags)])
return code.compile(reverse, fuzzy)
def _dump(self, indent, reverse):
display = repr(unichr(self.value)).lstrip("bu")
print "%sCHARACTER %s %s%s" % (INDENT * indent,
POS_TEXT[self.positive], display, CASE_TEXT[self.case_flags])
def matches(self, ch):
return (ch == self.value) == self.positive
def max_width(self):
return len(self.folded)
def get_required_string(self, reverse):
if not self.positive:
return 1, None
self.folded_characters = tuple(ord(c) for c in self.folded)
return 0, self
class Conditional(RegexBase):
def __init__(self, info, group, yes_item, no_item, position):
RegexBase.__init__(self)
self.info = info
self.group = group
self.yes_item = yes_item
self.no_item = no_item
self.position = position
def fix_groups(self, pattern, reverse, fuzzy):
try:
self.group = int(self.group)
except ValueError:
try:
self.group = self.info.group_index[self.group]
except KeyError:
raise error("unknown group", pattern, self.position)
if not 1 <= self.group <= self.info.group_count:
raise error("invalid group reference", pattern, self.position)
self.yes_item.fix_groups(pattern, reverse, fuzzy)
self.no_item.fix_groups(pattern, reverse, fuzzy)
def optimise(self, info):
yes_item = self.yes_item.optimise(info)
no_item = self.no_item.optimise(info)
return Conditional(info, self.group, yes_item, no_item, self.position)
def pack_characters(self, info):
self.yes_item = self.yes_item.pack_characters(info)
self.no_item = self.no_item.pack_characters(info)
return self
    def remove_captures(self):
        self.yes_item = self.yes_item.remove_captures()
        self.no_item = self.no_item.remove_captures()
        return self
def is_atomic(self):
return self.yes_item.is_atomic() and self.no_item.is_atomic()
def can_be_affix(self):
return self.yes_item.can_be_affix() and self.no_item.can_be_affix()
def contains_group(self):
return self.yes_item.contains_group() or self.no_item.contains_group()
def get_firstset(self, reverse):
return (self.yes_item.get_firstset(reverse) |
self.no_item.get_firstset(reverse))
def _compile(self, reverse, fuzzy):
code = [(OP.GROUP_EXISTS, self.group)]
code.extend(self.yes_item.compile(reverse, fuzzy))
add_code = self.no_item.compile(reverse, fuzzy)
if add_code:
code.append((OP.NEXT, ))
code.extend(add_code)
code.append((OP.END, ))
return code
def _dump(self, indent, reverse):
print "%sGROUP_EXISTS %s" % (INDENT * indent, self.group)
self.yes_item.dump(indent + 1, reverse)
if self.no_item:
print "%sOR" % (INDENT * indent)
self.no_item.dump(indent + 1, reverse)
def is_empty(self):
return self.yes_item.is_empty() and self.no_item.is_empty()
def __eq__(self, other):
return type(self) is type(other) and (self.group, self.yes_item,
self.no_item) == (other.group, other.yes_item, other.no_item)
def max_width(self):
return max(self.yes_item.max_width(), self.no_item.max_width())
class DefaultBoundary(ZeroWidthBase):
_opcode = OP.DEFAULT_BOUNDARY
_op_name = "DEFAULT_BOUNDARY"
class DefaultEndOfWord(ZeroWidthBase):
_opcode = OP.DEFAULT_END_OF_WORD
_op_name = "DEFAULT_END_OF_WORD"
class DefaultStartOfWord(ZeroWidthBase):
_opcode = OP.DEFAULT_START_OF_WORD
_op_name = "DEFAULT_START_OF_WORD"
class EndOfLine(ZeroWidthBase):
_opcode = OP.END_OF_LINE
_op_name = "END_OF_LINE"
class EndOfLineU(EndOfLine):
_opcode = OP.END_OF_LINE_U
_op_name = "END_OF_LINE_U"
class EndOfString(ZeroWidthBase):
_opcode = OP.END_OF_STRING
_op_name = "END_OF_STRING"
class EndOfStringLine(ZeroWidthBase):
_opcode = OP.END_OF_STRING_LINE
_op_name = "END_OF_STRING_LINE"
class EndOfStringLineU(EndOfStringLine):
_opcode = OP.END_OF_STRING_LINE_U
_op_name = "END_OF_STRING_LINE_U"
class EndOfWord(ZeroWidthBase):
_opcode = OP.END_OF_WORD
_op_name = "END_OF_WORD"
class Fuzzy(RegexBase):
def __init__(self, subpattern, constraints=None):
RegexBase.__init__(self)
if constraints is None:
constraints = {}
self.subpattern = subpattern
self.constraints = constraints
# If an error type is mentioned in the cost equation, then its maximum
# defaults to unlimited.
if "cost" in constraints:
for e in "dis":
if e in constraints["cost"]:
constraints.setdefault(e, (0, None))
# If any error type is mentioned, then all the error maxima default to
# 0, otherwise they default to unlimited.
if set(constraints) & set("dis"):
for e in "dis":
constraints.setdefault(e, (0, 0))
else:
for e in "dis":
constraints.setdefault(e, (0, None))
# The maximum of the generic error type defaults to unlimited.
constraints.setdefault("e", (0, None))
# The cost equation defaults to equal costs. Also, the cost of any
# error type not mentioned in the cost equation defaults to 0.
if "cost" in constraints:
for e in "dis":
constraints["cost"].setdefault(e, 0)
else:
constraints["cost"] = {"d": 1, "i": 1, "s": 1, "max":
constraints["e"][1]}
def fix_groups(self, pattern, reverse, fuzzy):
self.subpattern.fix_groups(pattern, reverse, True)
def pack_characters(self, info):
self.subpattern = self.subpattern.pack_characters(info)
return self
def remove_captures(self):
self.subpattern = self.subpattern.remove_captures()
return self
def is_atomic(self):
return self.subpattern.is_atomic()
def contains_group(self):
return self.subpattern.contains_group()
def _compile(self, reverse, fuzzy):
# The individual limits.
arguments = []
for e in "dise":
v = self.constraints[e]
arguments.append(v[0])
arguments.append(UNLIMITED if v[1] is None else v[1])
# The coeffs of the cost equation.
for e in "dis":
arguments.append(self.constraints["cost"][e])
# The maximum of the cost equation.
v = self.constraints["cost"]["max"]
arguments.append(UNLIMITED if v is None else v)
flags = 0
if reverse:
flags |= REVERSE_OP
return ([(OP.FUZZY, flags) + tuple(arguments)] +
self.subpattern.compile(reverse, True) + [(OP.END,)])
def _dump(self, indent, reverse):
constraints = self._constraints_to_string()
if constraints:
constraints = " " + constraints
print "%sFUZZY%s" % (INDENT * indent, constraints)
self.subpattern.dump(indent + 1, reverse)
def is_empty(self):
return self.subpattern.is_empty()
def __eq__(self, other):
return (type(self) is type(other) and self.subpattern ==
other.subpattern)
def max_width(self):
return UNLIMITED
def _constraints_to_string(self):
constraints = []
for name in "ids":
min, max = self.constraints[name]
if max == 0:
continue
con = ""
if min > 0:
con = "%s<=" % min
con += name
if max is not None:
con += "<=%s" % max
constraints.append(con)
cost = []
for name in "ids":
coeff = self.constraints["cost"][name]
if coeff > 0:
cost.append("%s%s" % (coeff, name))
limit = self.constraints["cost"]["max"]
if limit is not None and limit > 0:
cost = "%s<=%s" % ("+".join(cost), limit)
constraints.append(cost)
return ",".join(constraints)
class Grapheme(RegexBase):
def _compile(self, reverse, fuzzy):
# Match at least 1 character until a grapheme boundary is reached. Note
# that this is the same whether matching forwards or backwards.
grapheme_matcher = Atomic(Sequence([LazyRepeat(AnyAll(), 1, None),
GraphemeBoundary()]))
return grapheme_matcher.compile(reverse, fuzzy)
def _dump(self, indent, reverse):
print "%sGRAPHEME" % (INDENT * indent)
def max_width(self):
return UNLIMITED
class GraphemeBoundary:
def compile(self, reverse, fuzzy):
return [(OP.GRAPHEME_BOUNDARY, 1)]
class GreedyRepeat(RegexBase):
_opcode = OP.GREEDY_REPEAT
_op_name = "GREEDY_REPEAT"
def __init__(self, subpattern, min_count, max_count):
RegexBase.__init__(self)
self.subpattern = subpattern
self.min_count = min_count
self.max_count = max_count
def fix_groups(self, pattern, reverse, fuzzy):
self.subpattern.fix_groups(pattern, reverse, fuzzy)
def optimise(self, info):
subpattern = self.subpattern.optimise(info)
return type(self)(subpattern, self.min_count, self.max_count)
def pack_characters(self, info):
self.subpattern = self.subpattern.pack_characters(info)
return self
def remove_captures(self):
self.subpattern = self.subpattern.remove_captures()
return self
def is_atomic(self):
return self.min_count == self.max_count and self.subpattern.is_atomic()
def contains_group(self):
return self.subpattern.contains_group()
def get_firstset(self, reverse):
fs = self.subpattern.get_firstset(reverse)
if self.min_count == 0:
fs.add(None)
return fs
def _compile(self, reverse, fuzzy):
repeat = [self._opcode, self.min_count]
if self.max_count is None:
repeat.append(UNLIMITED)
else:
repeat.append(self.max_count)
subpattern = self.subpattern.compile(reverse, fuzzy)
if not subpattern:
return []
return ([tuple(repeat)] + subpattern + [(OP.END, )])
def _dump(self, indent, reverse):
if self.max_count is None:
limit = "INF"
else:
limit = self.max_count
print "%s%s %s %s" % (INDENT * indent, self._op_name, self.min_count,
limit)
self.subpattern.dump(indent + 1, reverse)
def is_empty(self):
return self.subpattern.is_empty()
def __eq__(self, other):
return type(self) is type(other) and (self.subpattern, self.min_count,
self.max_count) == (other.subpattern, other.min_count,
other.max_count)
def max_width(self):
if self.max_count is None:
return UNLIMITED
return self.subpattern.max_width() * self.max_count
def get_required_string(self, reverse):
max_count = UNLIMITED if self.max_count is None else self.max_count
if self.min_count == 0:
w = self.subpattern.max_width() * max_count
return min(w, UNLIMITED), None
ofs, req = self.subpattern.get_required_string(reverse)
if req:
return ofs, req
w = self.subpattern.max_width() * max_count
return min(w, UNLIMITED), None
class Group(RegexBase):
def __init__(self, info, group, subpattern):
RegexBase.__init__(self)
self.info = info
self.group = group
self.subpattern = subpattern
self.call_ref = None
def fix_groups(self, pattern, reverse, fuzzy):
self.info.defined_groups[self.group] = (self, reverse, fuzzy)
self.subpattern.fix_groups(pattern, reverse, fuzzy)
def optimise(self, info):
subpattern = self.subpattern.optimise(info)
return Group(self.info, self.group, subpattern)
def pack_characters(self, info):
self.subpattern = self.subpattern.pack_characters(info)
return self
def remove_captures(self):
return self.subpattern.remove_captures()
def is_atomic(self):
return self.subpattern.is_atomic()
def can_be_affix(self):
return False
def contains_group(self):
return True
def get_firstset(self, reverse):
return self.subpattern.get_firstset(reverse)
def has_simple_start(self):
return self.subpattern.has_simple_start()
def _compile(self, reverse, fuzzy):
code = []
key = self.group, reverse, fuzzy
ref = self.info.call_refs.get(key)
if ref is not None:
code += [(OP.CALL_REF, ref)]
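        # A negative group number is a private alias created for a nested
        # duplicate of a named group; recover its public number and give the
        # alias a capture slot beyond the ordinary groups.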
public_group = private_group = self.group
if private_group < 0:
public_group = self.info.private_groups[private_group]
private_group = self.info.group_count - private_group
code += ([(OP.GROUP, private_group, public_group)] +
self.subpattern.compile(reverse, fuzzy) + [(OP.END, )])
if ref is not None:
code += [(OP.END, )]
return code
def _dump(self, indent, reverse):
group = self.group
if group < 0:
            group = self.info.private_groups[group]
print "%sGROUP %s" % (INDENT * indent, group)
self.subpattern.dump(indent + 1, reverse)
def __eq__(self, other):
return (type(self) is type(other) and (self.group, self.subpattern) ==
(other.group, other.subpattern))
def max_width(self):
return self.subpattern.max_width()
def get_required_string(self, reverse):
return self.subpattern.get_required_string(reverse)
class LazyRepeat(GreedyRepeat):
_opcode = OP.LAZY_REPEAT
_op_name = "LAZY_REPEAT"
class LookAround(RegexBase):
_dir_text = {False: "AHEAD", True: "BEHIND"}
def __new__(cls, behind, positive, subpattern):
if positive and subpattern.is_empty():
return subpattern
return RegexBase.__new__(cls)
def __init__(self, behind, positive, subpattern):
RegexBase.__init__(self)
self.behind = bool(behind)
self.positive = bool(positive)
self.subpattern = subpattern
def fix_groups(self, pattern, reverse, fuzzy):
self.subpattern.fix_groups(pattern, self.behind, fuzzy)
def optimise(self, info):
subpattern = self.subpattern.optimise(info)
return LookAround(self.behind, self.positive, subpattern)
def pack_characters(self, info):
self.subpattern = self.subpattern.pack_characters(info)
return self
def remove_captures(self):
return self.subpattern.remove_captures()
def is_atomic(self):
return self.subpattern.is_atomic()
def can_be_affix(self):
return self.subpattern.can_be_affix()
def contains_group(self):
return self.subpattern.contains_group()
def _compile(self, reverse, fuzzy):
return ([(OP.LOOKAROUND, int(self.positive), int(not self.behind))] +
self.subpattern.compile(self.behind) + [(OP.END, )])
def _dump(self, indent, reverse):
print "%sLOOK%s %s" % (INDENT * indent, self._dir_text[self.behind],
POS_TEXT[self.positive])
self.subpattern.dump(indent + 1, self.behind)
def is_empty(self):
return self.subpattern.is_empty()
def __eq__(self, other):
return type(self) is type(other) and (self.behind, self.positive,
self.subpattern) == (other.behind, other.positive, other.subpattern)
def max_width(self):
return 0
class PrecompiledCode(RegexBase):
def __init__(self, code):
self.code = code
def _compile(self, reverse, fuzzy):
return [tuple(self.code)]
class Property(RegexBase):
_opcode = {(NOCASE, False): OP.PROPERTY, (IGNORECASE, False):
OP.PROPERTY_IGN, (FULLCASE, False): OP.PROPERTY, (FULLIGNORECASE, False):
OP.PROPERTY_IGN, (NOCASE, True): OP.PROPERTY_REV, (IGNORECASE, True):
OP.PROPERTY_IGN_REV, (FULLCASE, True): OP.PROPERTY_REV, (FULLIGNORECASE,
True): OP.PROPERTY_IGN_REV}
def __init__(self, value, positive=True, case_flags=NOCASE,
zerowidth=False):
RegexBase.__init__(self)
self.value = value
self.positive = bool(positive)
self.case_flags = case_flags
self.zerowidth = bool(zerowidth)
self._key = (self.__class__, self.value, self.positive,
self.case_flags, self.zerowidth)
def rebuild(self, positive, case_flags, zerowidth):
return Property(self.value, positive, case_flags, zerowidth)
def optimise(self, info, in_set=False):
return self
def get_firstset(self, reverse):
return set([self])
def has_simple_start(self):
return True
def _compile(self, reverse, fuzzy):
flags = 0
if self.positive:
flags |= POSITIVE_OP
if self.zerowidth:
flags |= ZEROWIDTH_OP
if fuzzy:
flags |= FUZZY_OP
return [(self._opcode[self.case_flags, reverse], flags, self.value)]
def _dump(self, indent, reverse):
prop = PROPERTY_NAMES[self.value >> 16]
name, value = prop[0], prop[1][self.value & 0xFFFF]
print "%sPROPERTY %s %s:%s%s" % (INDENT * indent,
POS_TEXT[self.positive], name, value, CASE_TEXT[self.case_flags])
def matches(self, ch):
return _regex.has_property_value(self.value, ch) == self.positive
def max_width(self):
return 1
class Range(RegexBase):
_opcode = {(NOCASE, False): OP.RANGE, (IGNORECASE, False): OP.RANGE_IGN,
(FULLCASE, False): OP.RANGE, (FULLIGNORECASE, False): OP.RANGE_IGN,
(NOCASE, True): OP.RANGE_REV, (IGNORECASE, True): OP.RANGE_IGN_REV,
(FULLCASE, True): OP.RANGE_REV, (FULLIGNORECASE, True): OP.RANGE_IGN_REV}
_op_name = "RANGE"
def __init__(self, lower, upper, positive=True, case_flags=NOCASE,
zerowidth=False):
RegexBase.__init__(self)
self.lower = lower
self.upper = upper
self.positive = bool(positive)
self.case_flags = case_flags
self.zerowidth = bool(zerowidth)
self._key = (self.__class__, self.lower, self.upper, self.positive,
self.case_flags, self.zerowidth)
def rebuild(self, positive, case_flags, zerowidth):
return Range(self.lower, self.upper, positive, case_flags, zerowidth)
def optimise(self, info, in_set=False):
# Is the range case-sensitive?
if not self.positive or not (self.case_flags & IGNORECASE) or in_set:
return self
# Is full case-folding possible?
if (not (info.flags & UNICODE) or (self.case_flags & FULLIGNORECASE) !=
FULLIGNORECASE):
return self
# Get the characters which expand to multiple codepoints on folding.
expanding_chars = _regex.get_expand_on_folding()
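        # For example, U+00DF (sharp s) folds to "ss" under full case-folding,
        # which is why such characters need their own String branches.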
# Get the folded characters in the range.
items = []
for ch in expanding_chars:
if self.lower <= ord(ch) <= self.upper:
folded = _regex.fold_case(FULL_CASE_FOLDING, ch)
items.append(String([ord(c) for c in folded],
case_flags=self.case_flags))
if not items:
# We can fall back to simple case-folding.
return self
if len(items) < self.upper - self.lower + 1:
# Not all the characters are covered by the full case-folding.
items.insert(0, self)
return Branch(items)
def _compile(self, reverse, fuzzy):
flags = 0
if self.positive:
flags |= POSITIVE_OP
if self.zerowidth:
flags |= ZEROWIDTH_OP
if fuzzy:
flags |= FUZZY_OP
return [(self._opcode[self.case_flags, reverse], flags, self.lower,
self.upper)]
def _dump(self, indent, reverse):
display_lower = repr(unichr(self.lower)).lstrip("bu")
display_upper = repr(unichr(self.upper)).lstrip("bu")
print "%sRANGE %s %s %s%s" % (INDENT * indent, POS_TEXT[self.positive],
display_lower, display_upper, CASE_TEXT[self.case_flags])
def matches(self, ch):
return (self.lower <= ch <= self.upper) == self.positive
def max_width(self):
return 1
class RefGroup(RegexBase):
_opcode = {(NOCASE, False): OP.REF_GROUP, (IGNORECASE, False):
OP.REF_GROUP_IGN, (FULLCASE, False): OP.REF_GROUP, (FULLIGNORECASE,
False): OP.REF_GROUP_FLD, (NOCASE, True): OP.REF_GROUP_REV, (IGNORECASE,
True): OP.REF_GROUP_IGN_REV, (FULLCASE, True): OP.REF_GROUP_REV,
(FULLIGNORECASE, True): OP.REF_GROUP_FLD_REV}
def __init__(self, info, group, position, case_flags=NOCASE):
RegexBase.__init__(self)
self.info = info
self.group = group
self.position = position
self.case_flags = case_flags
self._key = self.__class__, self.group, self.case_flags
def fix_groups(self, pattern, reverse, fuzzy):
try:
self.group = int(self.group)
except ValueError:
try:
self.group = self.info.group_index[self.group]
except KeyError:
raise error("unknown group", pattern, self.position)
if not 1 <= self.group <= self.info.group_count:
raise error("invalid group reference", pattern, self.position)
self._key = self.__class__, self.group, self.case_flags
def remove_captures(self):
raise error("group reference not allowed", pattern, self.position)
def _compile(self, reverse, fuzzy):
flags = 0
if fuzzy:
flags |= FUZZY_OP
return [(self._opcode[self.case_flags, reverse], flags, self.group)]
def _dump(self, indent, reverse):
print "%sREF_GROUP %s%s" % (INDENT * indent, self.group,
CASE_TEXT[self.case_flags])
def max_width(self):
return UNLIMITED
class SearchAnchor(ZeroWidthBase):
_opcode = OP.SEARCH_ANCHOR
_op_name = "SEARCH_ANCHOR"
class Sequence(RegexBase):
def __init__(self, items=None):
RegexBase.__init__(self)
if items is None:
items = []
self.items = items
def fix_groups(self, pattern, reverse, fuzzy):
for s in self.items:
s.fix_groups(pattern, reverse, fuzzy)
def optimise(self, info):
# Flatten the sequences.
items = []
for s in self.items:
s = s.optimise(info)
if isinstance(s, Sequence):
items.extend(s.items)
else:
items.append(s)
return make_sequence(items)
def pack_characters(self, info):
"Packs sequences of characters into strings."
items = []
characters = []
case_flags = NOCASE
for s in self.items:
if type(s) is Character and s.positive:
if s.case_flags != case_flags:
                    # Different case sensitivity, so flush, unless neither the
                    # previous nor the new character is cased.
if s.case_flags or is_cased(info, s.value):
Sequence._flush_characters(info, characters,
case_flags, items)
case_flags = s.case_flags
characters.append(s.value)
elif type(s) is String or type(s) is Literal:
if s.case_flags != case_flags:
                    # Different case sensitivity, so flush, unless neither the
                    # previous nor the new string is cased.
if s.case_flags or any(is_cased(info, c) for c in
characters):
Sequence._flush_characters(info, characters,
case_flags, items)
case_flags = s.case_flags
characters.extend(s.characters)
else:
Sequence._flush_characters(info, characters, case_flags, items)
items.append(s.pack_characters(info))
Sequence._flush_characters(info, characters, case_flags, items)
return make_sequence(items)
def remove_captures(self):
self.items = [s.remove_captures() for s in self.items]
return self
def is_atomic(self):
return all(s.is_atomic() for s in self.items)
def can_be_affix(self):
return False
def contains_group(self):
return any(s.contains_group() for s in self.items)
def get_firstset(self, reverse):
fs = set()
items = self.items
if reverse:
items.reverse()
for s in items:
fs |= s.get_firstset(reverse)
if None not in fs:
return fs
fs.discard(None)
return fs | set([None])
def has_simple_start(self):
return self.items and self.items[0].has_simple_start()
def _compile(self, reverse, fuzzy):
seq = self.items
if reverse:
seq = seq[::-1]
code = []
for s in seq:
code.extend(s.compile(reverse, fuzzy))
return code
def _dump(self, indent, reverse):
for s in self.items:
s.dump(indent, reverse)
@staticmethod
def _flush_characters(info, characters, case_flags, items):
if not characters:
return
# Disregard case_flags if all of the characters are case-less.
if case_flags & IGNORECASE:
if not any(is_cased(info, c) for c in characters):
case_flags = NOCASE
if len(characters) == 1:
items.append(Character(characters[0], case_flags=case_flags))
else:
items.append(String(characters, case_flags=case_flags))
characters[:] = []
def is_empty(self):
return all(i.is_empty() for i in self.items)
def __eq__(self, other):
return type(self) is type(other) and self.items == other.items
def max_width(self):
return sum(s.max_width() for s in self.items)
def get_required_string(self, reverse):
seq = self.items
if reverse:
seq = seq[::-1]
offset = 0
for s in seq:
ofs, req = s.get_required_string(reverse)
offset += ofs
if req:
return offset, req
return offset, None
class SetBase(RegexBase):
def __init__(self, info, items, positive=True, case_flags=NOCASE,
zerowidth=False):
RegexBase.__init__(self)
self.info = info
self.items = tuple(items)
self.positive = bool(positive)
self.case_flags = case_flags
self.zerowidth = bool(zerowidth)
self.char_width = 1
self._key = (self.__class__, self.items, self.positive,
self.case_flags, self.zerowidth)
def rebuild(self, positive, case_flags, zerowidth):
return type(self)(self.info, self.items, positive, case_flags,
zerowidth).optimise(self.info)
def get_firstset(self, reverse):
return set([self])
def has_simple_start(self):
return True
def _compile(self, reverse, fuzzy):
flags = 0
if self.positive:
flags |= POSITIVE_OP
if self.zerowidth:
flags |= ZEROWIDTH_OP
if fuzzy:
flags |= FUZZY_OP
code = [(self._opcode[self.case_flags, reverse], flags)]
for m in self.items:
code.extend(m.compile())
code.append((OP.END, ))
return code
def _dump(self, indent, reverse):
print "%s%s %s%s" % (INDENT * indent, self._op_name,
POS_TEXT[self.positive], CASE_TEXT[self.case_flags])
for i in self.items:
i.dump(indent + 1, reverse)
def _handle_case_folding(self, info, in_set):
# Is the set case-sensitive?
if not self.positive or not (self.case_flags & IGNORECASE) or in_set:
return self
# Is full case-folding possible?
if (not (self.info.flags & UNICODE) or (self.case_flags &
FULLIGNORECASE) !=
FULLIGNORECASE):
return self
# Get the characters which expand to multiple codepoints on folding.
expanding_chars = _regex.get_expand_on_folding()
# Get the folded characters in the set.
items = []
seen = set()
for ch in expanding_chars:
if self.matches(ord(ch)):
folded = _regex.fold_case(FULL_CASE_FOLDING, ch)
if folded not in seen:
items.append(String([ord(c) for c in folded],
case_flags=self.case_flags))
seen.add(folded)
if not items:
# We can fall back to simple case-folding.
return self
return Branch([self] + items)
def max_width(self):
# Is the set case-sensitive?
if not self.positive or not (self.case_flags & IGNORECASE):
return 1
# Is full case-folding possible?
if (not (self.info.flags & UNICODE) or (self.case_flags &
FULLIGNORECASE) != FULLIGNORECASE):
return 1
# Get the characters which expand to multiple codepoints on folding.
expanding_chars = _regex.get_expand_on_folding()
# Get the folded characters in the set.
seen = set()
for ch in expanding_chars:
if self.matches(ord(ch)):
folded = _regex.fold_case(FULL_CASE_FOLDING, ch)
seen.add(folded)
if not seen:
return 1
return max(len(folded) for folded in seen)
class SetDiff(SetBase):
_opcode = {(NOCASE, False): OP.SET_DIFF, (IGNORECASE, False):
OP.SET_DIFF_IGN, (FULLCASE, False): OP.SET_DIFF, (FULLIGNORECASE, False):
OP.SET_DIFF_IGN, (NOCASE, True): OP.SET_DIFF_REV, (IGNORECASE, True):
OP.SET_DIFF_IGN_REV, (FULLCASE, True): OP.SET_DIFF_REV, (FULLIGNORECASE,
True): OP.SET_DIFF_IGN_REV}
_op_name = "SET_DIFF"
def optimise(self, info, in_set=False):
items = self.items
if len(items) > 2:
items = [items[0], SetUnion(info, items[1 : ])]
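        # A difference of more than two operands is rewritten as the first
        # operand minus the union of the rest.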
if len(items) == 1:
return items[0].with_flags(case_flags=self.case_flags,
zerowidth=self.zerowidth).optimise(info, in_set)
self.items = tuple(m.optimise(info, in_set=True) for m in items)
return self._handle_case_folding(info, in_set)
def matches(self, ch):
m = self.items[0].matches(ch) and not self.items[1].matches(ch)
return m == self.positive
class SetInter(SetBase):
_opcode = {(NOCASE, False): OP.SET_INTER, (IGNORECASE, False):
OP.SET_INTER_IGN, (FULLCASE, False): OP.SET_INTER, (FULLIGNORECASE,
False): OP.SET_INTER_IGN, (NOCASE, True): OP.SET_INTER_REV, (IGNORECASE,
True): OP.SET_INTER_IGN_REV, (FULLCASE, True): OP.SET_INTER_REV,
(FULLIGNORECASE, True): OP.SET_INTER_IGN_REV}
_op_name = "SET_INTER"
def optimise(self, info, in_set=False):
items = []
for m in self.items:
m = m.optimise(info, in_set=True)
if isinstance(m, SetInter) and m.positive:
# Intersection in intersection.
items.extend(m.items)
else:
items.append(m)
if len(items) == 1:
return items[0].with_flags(case_flags=self.case_flags,
zerowidth=self.zerowidth).optimise(info, in_set)
self.items = tuple(items)
return self._handle_case_folding(info, in_set)
def matches(self, ch):
m = all(i.matches(ch) for i in self.items)
return m == self.positive
class SetSymDiff(SetBase):
_opcode = {(NOCASE, False): OP.SET_SYM_DIFF, (IGNORECASE, False):
OP.SET_SYM_DIFF_IGN, (FULLCASE, False): OP.SET_SYM_DIFF, (FULLIGNORECASE,
False): OP.SET_SYM_DIFF_IGN, (NOCASE, True): OP.SET_SYM_DIFF_REV,
(IGNORECASE, True): OP.SET_SYM_DIFF_IGN_REV, (FULLCASE, True):
OP.SET_SYM_DIFF_REV, (FULLIGNORECASE, True): OP.SET_SYM_DIFF_IGN_REV}
_op_name = "SET_SYM_DIFF"
def optimise(self, info, in_set=False):
items = []
for m in self.items:
m = m.optimise(info, in_set=True)
if isinstance(m, SetSymDiff) and m.positive:
# Symmetric difference in symmetric difference.
items.extend(m.items)
else:
items.append(m)
if len(items) == 1:
return items[0].with_flags(case_flags=self.case_flags,
zerowidth=self.zerowidth).optimise(info, in_set)
self.items = tuple(items)
return self._handle_case_folding(info, in_set)
def matches(self, ch):
m = False
for i in self.items:
m = m != i.matches(ch)
return m == self.positive
class SetUnion(SetBase):
_opcode = {(NOCASE, False): OP.SET_UNION, (IGNORECASE, False):
OP.SET_UNION_IGN, (FULLCASE, False): OP.SET_UNION, (FULLIGNORECASE,
False): OP.SET_UNION_IGN, (NOCASE, True): OP.SET_UNION_REV, (IGNORECASE,
True): OP.SET_UNION_IGN_REV, (FULLCASE, True): OP.SET_UNION_REV,
(FULLIGNORECASE, True): OP.SET_UNION_IGN_REV}
_op_name = "SET_UNION"
def optimise(self, info, in_set=False):
items = []
for m in self.items:
m = m.optimise(info, in_set=True)
if isinstance(m, SetUnion) and m.positive:
# Union in union.
items.extend(m.items)
else:
items.append(m)
if len(items) == 1:
i = items[0]
return i.with_flags(positive=i.positive == self.positive,
case_flags=self.case_flags,
zerowidth=self.zerowidth).optimise(info, in_set)
self.items = tuple(items)
return self._handle_case_folding(info, in_set)
def _compile(self, reverse, fuzzy):
flags = 0
if self.positive:
flags |= POSITIVE_OP
if self.zerowidth:
flags |= ZEROWIDTH_OP
if fuzzy:
flags |= FUZZY_OP
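        # Separate plain Character members (grouped by polarity) from other
        # members so that a run of characters can be emitted as one STRING
        # member instead of many individual CHARACTER ops.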
characters, others = defaultdict(list), []
for m in self.items:
if isinstance(m, Character):
characters[m.positive].append(m.value)
else:
others.append(m)
code = [(self._opcode[self.case_flags, reverse], flags)]
for positive, values in characters.items():
flags = 0
if positive:
flags |= POSITIVE_OP
if len(values) == 1:
code.append((OP.CHARACTER, flags, values[0]))
else:
code.append((OP.STRING, flags, len(values)) + tuple(values))
for m in others:
code.extend(m.compile())
code.append((OP.END, ))
return code
def matches(self, ch):
m = any(i.matches(ch) for i in self.items)
return m == self.positive
class StartOfLine(ZeroWidthBase):
_opcode = OP.START_OF_LINE
_op_name = "START_OF_LINE"
class StartOfLineU(StartOfLine):
_opcode = OP.START_OF_LINE_U
_op_name = "START_OF_LINE_U"
class StartOfString(ZeroWidthBase):
_opcode = OP.START_OF_STRING
_op_name = "START_OF_STRING"
class StartOfWord(ZeroWidthBase):
_opcode = OP.START_OF_WORD
_op_name = "START_OF_WORD"
class String(RegexBase):
_opcode = {(NOCASE, False): OP.STRING, (IGNORECASE, False): OP.STRING_IGN,
(FULLCASE, False): OP.STRING, (FULLIGNORECASE, False): OP.STRING_FLD,
(NOCASE, True): OP.STRING_REV, (IGNORECASE, True): OP.STRING_IGN_REV,
(FULLCASE, True): OP.STRING_REV, (FULLIGNORECASE, True):
OP.STRING_FLD_REV}
def __init__(self, characters, case_flags=NOCASE):
self.characters = tuple(characters)
self.case_flags = case_flags
if (self.case_flags & FULLIGNORECASE) == FULLIGNORECASE:
folded_characters = []
for char in self.characters:
folded = _regex.fold_case(FULL_CASE_FOLDING, unichr(char))
folded_characters.extend(ord(c) for c in folded)
else:
folded_characters = self.characters
self.folded_characters = tuple(folded_characters)
self.required = False
self._key = self.__class__, self.characters, self.case_flags
def get_firstset(self, reverse):
if reverse:
pos = -1
else:
pos = 0
return set([Character(self.characters[pos],
case_flags=self.case_flags)])
def has_simple_start(self):
return True
def _compile(self, reverse, fuzzy):
flags = 0
if fuzzy:
flags |= FUZZY_OP
if self.required:
flags |= REQUIRED_OP
return [(self._opcode[self.case_flags, reverse], flags,
len(self.folded_characters)) + self.folded_characters]
def _dump(self, indent, reverse):
display = repr("".join(unichr(c) for c in self.characters)).lstrip("bu")
print "%sSTRING %s%s" % (INDENT * indent, display,
CASE_TEXT[self.case_flags])
def max_width(self):
return len(self.folded_characters)
def get_required_string(self, reverse):
return 0, self
class Literal(String):
def _dump(self, indent, reverse):
for c in self.characters:
display = repr(unichr(c)).lstrip("bu")
print "%sCHARACTER MATCH %s%s" % (INDENT * indent, display,
CASE_TEXT[self.case_flags])
class StringSet(RegexBase):
_opcode = {(NOCASE, False): OP.STRING_SET, (IGNORECASE, False):
OP.STRING_SET_IGN, (FULLCASE, False): OP.STRING_SET, (FULLIGNORECASE,
False): OP.STRING_SET_FLD, (NOCASE, True): OP.STRING_SET_REV,
(IGNORECASE, True): OP.STRING_SET_IGN_REV, (FULLCASE, True):
OP.STRING_SET_REV, (FULLIGNORECASE, True): OP.STRING_SET_FLD_REV}
def __init__(self, info, name, case_flags=NOCASE):
self.info = info
self.name = name
self.case_flags = case_flags
self._key = self.__class__, self.name, self.case_flags
self.set_key = (name, self.case_flags)
if self.set_key not in info.named_lists_used:
info.named_lists_used[self.set_key] = len(info.named_lists_used)
def _compile(self, reverse, fuzzy):
index = self.info.named_lists_used[self.set_key]
items = self.info.kwargs[self.name]
case_flags = self.case_flags
if not items:
return []
encoding = self.info.flags & _ALL_ENCODINGS
fold_flags = encoding | case_flags
if fuzzy:
choices = [self._folded(fold_flags, i) for i in items]
# Sort from longest to shortest.
choices.sort(key=lambda s: (-len(s), s))
branches = []
for string in choices:
branches.append(Sequence([Character(c, case_flags=case_flags)
for c in string]))
if len(branches) > 1:
branch = Branch(branches)
else:
branch = branches[0]
branch = branch.optimise(self.info).pack_characters(self.info)
return branch.compile(reverse, fuzzy)
else:
min_len = min(len(i) for i in items)
max_len = max(len(self._folded(fold_flags, i)) for i in items)
return [(self._opcode[case_flags, reverse], index, min_len,
max_len)]
def _dump(self, indent, reverse):
print "%sSTRING_SET %s%s" % (INDENT * indent, self.name,
CASE_TEXT[self.case_flags])
def _folded(self, fold_flags, item):
if isinstance(item, unicode):
return [ord(c) for c in _regex.fold_case(fold_flags, item)]
else:
return [ord(c) for c in item]
def _flatten(self, s):
# Flattens the branches.
if isinstance(s, Branch):
for b in s.branches:
self._flatten(b)
elif isinstance(s, Sequence) and s.items:
seq = s.items
while isinstance(seq[-1], Sequence):
seq[-1 : ] = seq[-1].items
n = 0
while n < len(seq) and isinstance(seq[n], Character):
n += 1
if n > 1:
seq[ : n] = [String([c.value for c in seq[ : n]],
case_flags=self.case_flags)]
self._flatten(seq[-1])
def max_width(self):
if not self.info.kwargs[self.name]:
return 0
if self.case_flags & IGNORECASE:
fold_flags = (self.info.flags & _ALL_ENCODINGS) | self.case_flags
return max(len(_regex.fold_case(fold_flags, i)) for i in
self.info.kwargs[self.name])
else:
return max(len(i) for i in self.info.kwargs[self.name])
class Source(object):
"Scanner for the regular expression source string."
def __init__(self, string):
if isinstance(string, unicode):
self.string = string
self.char_type = unichr
else:
self.string = string
self.char_type = chr
self.pos = 0
self.ignore_space = False
self.sep = string[ : 0]
def get(self):
string = self.string
pos = self.pos
try:
if self.ignore_space:
while True:
if string[pos].isspace():
# Skip over the whitespace.
pos += 1
elif string[pos] == "#":
# Skip over the comment to the end of the line.
pos = string.index("\n", pos)
else:
break
ch = string[pos]
self.pos = pos + 1
return ch
except IndexError:
# We've reached the end of the string.
self.pos = pos
return string[ : 0]
except ValueError:
# The comment extended to the end of the string.
self.pos = len(string)
return string[ : 0]
def get_many(self, count=1):
string = self.string
pos = self.pos
try:
if self.ignore_space:
substring = []
while len(substring) < count:
while True:
if string[pos].isspace():
# Skip over the whitespace.
pos += 1
elif string[pos] == "#":
# Skip over the comment to the end of the line.
pos = string.index("\n", pos)
else:
break
substring.append(string[pos])
pos += 1
substring = "".join(substring)
else:
substring = string[pos : pos + count]
pos += len(substring)
self.pos = pos
return substring
except IndexError:
# We've reached the end of the string.
self.pos = len(string)
return "".join(substring)
except ValueError:
# The comment extended to the end of the string.
self.pos = len(string)
return "".join(substring)
def get_while(self, test_set, include=True):
string = self.string
pos = self.pos
if self.ignore_space:
try:
substring = []
while True:
if string[pos].isspace():
# Skip over the whitespace.
pos += 1
elif string[pos] == "#":
# Skip over the comment to the end of the line.
pos = string.index("\n", pos)
elif (string[pos] in test_set) == include:
substring.append(string[pos])
pos += 1
else:
break
self.pos = pos
except IndexError:
# We've reached the end of the string.
self.pos = len(string)
except ValueError:
# The comment extended to the end of the string.
self.pos = len(string)
return "".join(substring)
else:
try:
while (string[pos] in test_set) == include:
pos += 1
substring = string[self.pos : pos]
self.pos = pos
return substring
except IndexError:
# We've reached the end of the string.
substring = string[self.pos : pos]
self.pos = pos
return substring
def skip_while(self, test_set, include=True):
string = self.string
pos = self.pos
try:
if self.ignore_space:
while True:
if string[pos].isspace():
# Skip over the whitespace.
pos += 1
elif string[pos] == "#":
# Skip over the comment to the end of the line.
pos = string.index("\n", pos)
elif (string[pos] in test_set) == include:
pos += 1
else:
break
else:
while (string[pos] in test_set) == include:
pos += 1
self.pos = pos
except IndexError:
# We've reached the end of the string.
self.pos = len(string)
except ValueError:
# The comment extended to the end of the string.
self.pos = len(string)
def match(self, substring):
string = self.string
pos = self.pos
if self.ignore_space:
try:
for c in substring:
while True:
if string[pos].isspace():
# Skip over the whitespace.
pos += 1
elif string[pos] == "#":
# Skip over the comment to the end of the line.
pos = string.index("\n", pos)
else:
break
if string[pos] != c:
return False
pos += 1
self.pos = pos
return True
except IndexError:
# We've reached the end of the string.
return False
except ValueError:
# The comment extended to the end of the string.
return False
else:
if not string.startswith(substring, pos):
return False
self.pos = pos + len(substring)
return True
def expect(self, substring):
if not self.match(substring):
raise error("missing %s" % substring, self.string, self.pos)
def at_end(self):
string = self.string
pos = self.pos
try:
if self.ignore_space:
while True:
if string[pos].isspace():
pos += 1
elif string[pos] == "#":
pos = string.index("\n", pos)
else:
break
return pos >= len(string)
except IndexError:
# We've reached the end of the string.
return True
except ValueError:
# The comment extended to the end of the string.
return True
class Info(object):
"Info about the regular expression."
def __init__(self, flags=0, char_type=None, kwargs={}):
flags |= DEFAULT_FLAGS[(flags & _ALL_VERSIONS) or DEFAULT_VERSION]
self.flags = flags
self.global_flags = flags
self.inline_locale = False
self.kwargs = kwargs
self.group_count = 0
self.group_index = {}
self.group_name = {}
self.char_type = char_type
self.named_lists_used = {}
self.open_groups = []
self.open_group_count = {}
self.defined_groups = {}
self.group_calls = []
self.private_groups = {}
def open_group(self, name=None):
group = self.group_index.get(name)
if group is None:
while True:
self.group_count += 1
if name is None or self.group_count not in self.group_name:
break
group = self.group_count
if name:
self.group_index[name] = group
self.group_name[group] = name
if group in self.open_groups:
# We have a nested named group. We'll assign it a private group
# number, initially negative until we can assign a proper
# (positive) number.
group_alias = -(len(self.private_groups) + 1)
self.private_groups[group_alias] = group
group = group_alias
self.open_groups.append(group)
self.open_group_count[group] = self.open_group_count.get(group, 0) + 1
return group
def close_group(self):
self.open_groups.pop()
def is_open_group(self, name):
# In version 1, a group reference can refer to an open group. We'll
# just pretend the group isn't open.
version = (self.flags & _ALL_VERSIONS) or DEFAULT_VERSION
if version == VERSION1:
return False
if name.isdigit():
group = int(name)
else:
group = self.group_index.get(name)
return group in self.open_groups
def _check_group_features(info, parsed):
"""Checks whether the reverse and fuzzy features of the group calls match
the groups which they call.
"""
call_refs = {}
additional_groups = []
for call, reverse, fuzzy in info.group_calls:
# Look up the reference of this group call.
key = (call.group, reverse, fuzzy)
ref = call_refs.get(key)
if ref is None:
# This group doesn't have a reference yet, so look up its features.
if call.group == 0:
# Calling the pattern as a whole.
rev = bool(info.flags & REVERSE)
fuz = isinstance(parsed, Fuzzy)
if (rev, fuz) != (reverse, fuzzy):
# The pattern as a whole doesn't have the features we want,
# so we'll need to make a copy of it with the desired
# features.
additional_groups.append((parsed, reverse, fuzzy))
else:
# Calling a capture group.
def_info = info.defined_groups[call.group]
group = def_info[0]
if def_info[1 : ] != (reverse, fuzzy):
# The group doesn't have the features we want, so we'll
# need to make a copy of it with the desired features.
additional_groups.append((group, reverse, fuzzy))
ref = len(call_refs)
call_refs[key] = ref
call.call_ref = ref
info.call_refs = call_refs
info.additional_groups = additional_groups
def _get_required_string(parsed, flags):
"Gets the required string and related info of a parsed pattern."
req_offset, required = parsed.get_required_string(bool(flags & REVERSE))
if required:
required.required = True
if req_offset >= UNLIMITED:
req_offset = -1
req_flags = required.case_flags
if not (flags & UNICODE):
req_flags &= ~UNICODE
req_chars = required.folded_characters
else:
req_offset = 0
req_chars = ()
req_flags = 0
return req_offset, req_chars, req_flags
class Scanner:
def __init__(self, lexicon, flags=0):
self.lexicon = lexicon
# Combine phrases into a compound pattern.
patterns = []
for phrase, action in lexicon:
# Parse the regular expression.
source = Source(phrase)
info = Info(flags, source.char_type)
source.ignore_space = bool(info.flags & VERBOSE)
parsed = _parse_pattern(source, info)
if not source.at_end():
raise error("unbalanced parenthesis", source.string, source.pos)
# We want to forbid capture groups within each phrase.
patterns.append(parsed.remove_captures())
# Combine all the subpatterns into one pattern.
info = Info(flags)
patterns = [Group(info, g + 1, p) for g, p in enumerate(patterns)]
parsed = Branch(patterns)
# Optimise the compound pattern.
parsed = parsed.optimise(info)
parsed = parsed.pack_characters(info)
# Get the required string.
req_offset, req_chars, req_flags = _get_required_string(parsed,
info.flags)
# Check the features of the groups.
_check_group_features(info, parsed)
# Complain if there are any group calls. They are not supported by the
# Scanner class.
if info.call_refs:
raise error("recursive regex not supported by Scanner",
source.string, source.pos)
reverse = bool(info.flags & REVERSE)
# Compile the compound pattern. The result is a list of tuples.
code = parsed.compile(reverse) + [(OP.SUCCESS, )]
# Flatten the code into a list of ints.
code = _flatten_code(code)
if not parsed.has_simple_start():
# Get the first set, if possible.
try:
fs_code = _compile_firstset(info, parsed.get_firstset(reverse))
fs_code = _flatten_code(fs_code)
code = fs_code + code
except _FirstSetError:
pass
# Check the global flags for conflicts.
version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
if version not in (0, VERSION0, VERSION1):
raise ValueError("VERSION0 and VERSION1 flags are mutually incompatible")
# Create the PatternObject.
#
# Local flags like IGNORECASE affect the code generation, but aren't
# needed by the PatternObject itself. Conversely, global flags like
# LOCALE _don't_ affect the code generation but _are_ needed by the
# PatternObject.
self.scanner = _regex.compile(None, (flags & GLOBAL_FLAGS) | version,
code, {}, {}, {}, [], req_offset, req_chars, req_flags,
len(patterns))
def scan(self, string):
result = []
append = result.append
match = self.scanner.scanner(string).match
i = 0
while True:
m = match()
if not m:
break
j = m.end()
if i == j:
break
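            # m.lastindex identifies the capture group, and hence the lexicon
            # phrase, that matched; use it to look up that phrase's action.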
action = self.lexicon[m.lastindex - 1][1]
if hasattr(action, '__call__'):
self.match = m
action = action(self, m.group())
if action is not None:
append(action)
i = j
return result, string[i : ]
# Get the known properties dict.
PROPERTIES = _regex.get_properties()
# Build the inverse of the properties dict.
PROPERTY_NAMES = {}
for prop_name, (prop_id, values) in PROPERTIES.items():
name, prop_values = PROPERTY_NAMES.get(prop_id, ("", {}))
name = max(name, prop_name, key=len)
PROPERTY_NAMES[prop_id] = name, prop_values
for val_name, val_id in values.items():
prop_values[val_id] = max(prop_values.get(val_id, ""), val_name,
key=len)
# Character escape sequences.
CHARACTER_ESCAPES = {
"a": "\a",
"b": "\b",
"f": "\f",
"n": "\n",
"r": "\r",
"t": "\t",
"v": "\v",
}
# Predefined character set escape sequences.
CHARSET_ESCAPES = {
"d": lookup_property(None, "Digit", True),
"D": lookup_property(None, "Digit", False),
"s": lookup_property(None, "Space", True),
"S": lookup_property(None, "Space", False),
"w": lookup_property(None, "Word", True),
"W": lookup_property(None, "Word", False),
}
# Positional escape sequences.
POSITION_ESCAPES = {
"A": StartOfString(),
"b": Boundary(),
"B": Boundary(False),
"m": StartOfWord(),
"M": EndOfWord(),
"Z": EndOfString(),
}
# Positional escape sequences when WORD flag set.
WORD_POSITION_ESCAPES = dict(POSITION_ESCAPES)
WORD_POSITION_ESCAPES.update({
"b": DefaultBoundary(),
"B": DefaultBoundary(False),
"m": DefaultStartOfWord(),
"M": DefaultEndOfWord(),
})
|
jlegendary/Dato-Core
|
refs/heads/master
|
src/unity/python/graphlab_util/metric_tracker.py
|
13
|
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
from config import DEFAULT_CONFIG as CONFIG
from metric_mock import MetricMock
# metrics libraries
import mixpanel
import librato
import Queue
import logging
import os
import platform
import pprint
import threading
import uuid
import copy as _copy
import requests as _requests
import sys
import urllib as _urllib
# product key
from graphlab import product_key
__ALL__ = [ 'MetricTracker' ]
try:
from graphlab_psutil import TOTAL_PHYMEM, NUM_CPUS
except ImportError:
TOTAL_PHYMEM = 0
NUM_CPUS = 0
# global objects for producer/consumer for background metrics publishing
METRICS_QUEUE = Queue.Queue(maxsize=100)
METRICS_THREAD = None
SHUTDOWN_MESSAGE = 'SHUTTING_DOWN'
class _MetricsWorkerThread(threading.Thread):
"""Worker Thread for publishing metrics in the background."""
def __init__(self, mode, source):
threading.Thread.__init__(self, name='metrics-worker')
if CONFIG.version.endswith('.gpu'):
self._version = CONFIG.version.split('.gpu')[0]
self._isgpu = True
else:
self._version = CONFIG.version
self._isgpu = False
self._mode = mode
self._source = source
try:
self._product_key = product_key.get_product_key()
except Exception, e:
self._product_key = None
self.queue = METRICS_QUEUE
self.logger = logging.getLogger('graphlab.metrics')
self._tracker = None # librato metrics tracker
self._mixpanel = None # Mixpanel metrics tracker
buffer_size = 5
offline_buffer_size = 25
self._sys_info_set = False
self._usable = False
try:
self._metrics_url = CONFIG.metrics_url
self._requests = _requests # support mocking out requests library in unit-tests
if self._mode != 'PROD':
self.logger.info("Using MetricMock instead of real metrics, mode is: %s" % self._mode)
self._tracker = MetricMock()
self._mixpanel = MetricMock()
else:
self._tracker = librato.connect(CONFIG.librato_user, CONFIG.librato_token)
self._mixpanel = mixpanel.Mixpanel(CONFIG.mixpanel_user)
except Exception, e:
self.logger.warning("Unexpected exception connecting to Metrics service, disabling metrics, exception %s" % e)
else:
self._usable = True
self._distinct_id = 'unknown'
self._distinct_id = self._get_distinct_id()
def run(self):
while True:
try:
metric = self.queue.get() # block until something received
if (metric['event_name'] == SHUTDOWN_MESSAGE):
# shutting down
self.queue.task_done()
break
self._track(metric['event_name'], metric['value'], metric['type'], metric['properties'], metric['meta'], metric['send_sys_info'])
self.queue.task_done()
except Exception as e:
pass
def _get_distinct_id(self):
if self._distinct_id == 'unknown':
poss_id = 'unknown'
id_file_path = "/".join([os.path.abspath(os.path.dirname(__file__)), '..','graphlab',"id"])
if os.path.isfile(id_file_path):
try:
with open(id_file_path, 'r') as f:
poss_id = f.readline()
except:
return "session-" + str(uuid.uuid4())
else:
# no distinct id found from installation,
# try to create one and write it to the appropriate location
# if not able to write to appropriate location, then create temp one
new_id = str(uuid.uuid4())
try:
with open(id_file_path, "w") as id_file:
id_file.write(new_id)
except:
return "session-" + str(uuid.uuid4())
return new_id
return poss_id.strip()
else:
return self._distinct_id
@staticmethod
def _get_bucket_name_suffix(buckets, value):
"""
Given a list of buckets and a value, generate a suffix for the bucket
name, corresponding to either one of the buckets given, or the largest
bucket with "+" appended.
"""
suffix = None
for bucket in buckets:
if value <= bucket:
suffix = str(bucket)
break
# if we get here and suffix is None, value must be > the largest bucket
if suffix is None:
suffix = '%d+' % buckets[-1]
return suffix
@staticmethod
def _bucketize_mixpanel(event_name, value):
"""
Take events that we would like to bucketize and bucketize them before sending to mixpanel
@param event_name current event name, used to assess if bucketization is required
@param value value used to decide which bucket for event
        @return event_name with the bucket appended as a suffix if bucketized, otherwise the original event_name
"""
if value == 1:
return event_name
bucket_events = {
'col.size': [ 5, 10, 20 ],
'row.size': [ 100000, 1000000, 10000000, 100000000 ],
'duration.secs': [ 300, 1800, 3600, 7200 ],
'duration.ms': [ 10, 100, 1000, 10000, 100000 ]
}
for (event_suffix, buckets) in bucket_events.iteritems():
if event_name.endswith(event_suffix):
# if the suffix matches one we expect, bucketize using the buckets defined above
return '%s.%s' % (event_name, _MetricsWorkerThread._get_bucket_name_suffix(buckets, value))
# if there was no suffix match, just use the original event name
return event_name
def _set_sys_info(self):
# Don't do this if system info has been set
if self._sys_info_set:
return
self._sys_info = {}
# Get OS-specific info
self._sys_info['system'] = platform.system()
if self._sys_info['system'] == 'Linux':
self._sys_info['os_version'] = self._tup_to_flat_str(platform.linux_distribution())
self._sys_info['libc_version'] = self._tup_to_flat_str(platform.libc_ver())
elif self._sys_info['system'] == 'Darwin':
self._sys_info['os_version'] = self._tup_to_flat_str(platform.mac_ver())
elif self._sys_info['system'] == 'Windows':
self._sys_info['os_version'] = self._tup_to_flat_str(platform.win32_ver())
elif self._sys_info['system'] == 'Java':
self._sys_info['os_version'] = self._tup_to_flat_str(platform.java_ver())
# Python specific stuff
self._sys_info['python_implementation'] = platform.python_implementation()
self._sys_info['python_version'] = platform.python_version()
self._sys_info['python_build'] = self._tup_to_flat_str(platform.python_build())
self._sys_info['python_executable'] = sys.executable
# Get architecture info
self._sys_info['architecture'] = self._tup_to_flat_str(platform.architecture())
self._sys_info['platform'] = platform.platform()
self._sys_info['num_cpus'] = NUM_CPUS
# Get RAM size
self._sys_info['total_mem'] = TOTAL_PHYMEM
self._sys_info_set = True
def _print_sys_info(self):
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(self._sys_info)
def _tup_to_flat_str(self, tup):
tmp_list = []
for t in tup:
if isinstance(t, tuple):
                tmp_str = self._tup_to_flat_str(t)
tmp_list.append(tmp_str)
elif isinstance(t, str):
tmp_list.append(t)
else:
# UNEXPECTED! Just don't crash
try:
tmp_list.append(str(t))
except:
pass
return " ".join(tmp_list)
def _track(self, event_name, value=1, type="gauge", properties={}, meta={}, send_sys_info=False):
"""
Internal method to actually send metrics, expected to be called from background thread only.
"""
if not self._usable:
return
the_properties = {}
if send_sys_info:
if not self._sys_info_set:
self._set_sys_info()
the_properties.update(self._sys_info)
the_properties.update(properties)
try:
# librato
self._tracker.submit(name=event_name, value=value, type="gauge", source=self._source, attributes=the_properties)
except Exception as e:
pass
try:
            # since mixpanel cannot send sizes or numbers and only tracks events, bucketize these here
if value != 1:
event_name = self._bucketize_mixpanel(event_name, value)
the_properties['value'] = value
# mixpanel
the_properties['source'] = self._source
self._mixpanel.track(self._distinct_id, event_name, properties=the_properties, meta=meta)
except Exception as e:
pass
try:
# homebrew metrics - cloudfront
if self._metrics_url != '':
cloudfront_props = {}
props = _copy.deepcopy(the_properties)
props.update(meta)
cloudfront_props['event_name'] = event_name
cloudfront_props['value'] = value
cloudfront_props['distinct_id'] = self._distinct_id
cloudfront_props['version'] = self._version
cloudfront_props['isgpu'] = self._isgpu
cloudfront_props['properties'] = _urllib.quote_plus(str(props))
# if product key is not set, then try to get it now when submitting
if not self._product_key:
try:
self._product_key = product_key.get_product_key()
except Exception, e:
self._product_key = 'Unknown'
pass
cloudfront_props['product_key'] = self._product_key
# self.logger.debug("SENDING '%s' to %s" % (cloudfront_props, self._metrics_url))
self._requests.get(self._metrics_url, params=cloudfront_props)
except Exception as e:
pass
class MetricTracker:
def __init__(self, mode='UNIT', background_thread=True):
# setup logging
self.logger = logging.getLogger('graphlab.metrics')
self._mode = mode
self._queue = METRICS_QUEUE
self._source = ("%s-%s" % (self._mode, CONFIG.version))
self.logger.debug("Running with metric source: %s" % self._source)
# background thread for metrics
self._thread = None
if background_thread:
self._start_queue_thread()
def __del__(self):
try:
self._stop_queue_thread()
except:
            # A lot of strange exceptions can happen when destructing; not really anything we can do...
pass
def _stop_queue_thread(self):
# send the shutting down message, wait for thread to exit
if self._thread is not None:
self.track(SHUTDOWN_MESSAGE)
self._thread.join(2.0)
def track(self, event_name, value=1, type="gauge", properties={}, meta={}, send_sys_info=False):
"""
Publishes event / metric to metrics providers.
This method is a facade / proxy, queuing up this metric for a background thread to process.
"""
if self._mode != 'PROD' and (not (isinstance(value, int) or isinstance(value, float))):
raise Exception("Metrics attempted with value being not a number, unsupported.")
try:
item = dict(event_name=event_name, value=value, type=type, properties=properties, meta=meta, send_sys_info=send_sys_info)
self._queue.put_nowait(item) # don't wait if Queue is full, just silently ignore
except Queue.Full:
if not self._thread or not self._thread.is_alive():
self.logger.debug("Queue is full and background thread is no longer alive, trying to restart")
self._restart_queue_thread()
else:
self.logger.debug("Queue is full, doing nothing.")
except Exception as e:
self.logger.debug("Unexpected exception in queueing metrics, %s" % e)
def _start_queue_thread(self):
global METRICS_THREAD
if (self._thread is None):
self.logger.debug("Starting background thread")
self._thread = _MetricsWorkerThread(self._mode, self._source)
METRICS_THREAD = self._thread
self._thread.daemon = True
self._thread.start()
def _restart_queue_thread(self):
global METRICS_THREAD
if (self._thread is not None and self._thread.is_alive()):
self._stop_queue_thread()
METRICS_THREAD = None
del self._thread
self._thread = None
self._start_queue_thread()
|
KnoxMakers/KM-Laser
|
refs/heads/master
|
extensions/km_deps/axidrawinternal/axidraw_svg_reorder.py
|
2
|
# coding=utf-8
#
# SVG Path Ordering Extension
# This extension uses a simple TSP algorithm to order the paths so as
# to reduce plotting time by plotting nearby paths consecutively.
#
#
# While written from scratch, this is a derivative in spirit of the work by
# Matthew Beckler and Daniel C. Newman for the EggBot project.
#
# The MIT License (MIT)
#
# Copyright (c) 2020 Windell H. Oskay, Evil Mad Science LLC
# www.evilmadscientist.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import sys
from lxml import etree
from axidrawinternal.plot_utils_import import from_dependency_import # plotink
inkex = from_dependency_import('ink_extensions.inkex')
simpletransform = from_dependency_import('ink_extensions.simpletransform')
simplestyle = from_dependency_import('ink_extensions.simplestyle')
exit_status = from_dependency_import('ink_extensions_utils.exit_status')
plot_utils = from_dependency_import('plotink.plot_utils') # https://github.com/evil-mad/plotink Requires version 0.15
"""
TODOs:
* Apparent difference in execution time for portrait vs landscape document orientation.
Seems to be related to the _change_
* Implement path functions
<param name="path_handling" _gui-text="Compound Paths" type="optiongroup">
<_option value=0>Leave as is</_option>
<_option value=1>Reorder subpaths</_option>
<_option value=2>Break apart</_option>
</param>
self.OptionParser.add_option( "--path_handling",\
action="store", type="int", dest="path_handling",\
default=1,help="How compound paths are handled")
* Consider re-introducing GUI method for rendering:
<param indent="1" name="rendering" type="boolean" _gui-text="Preview pen-up travel">false</param>
"""
class ReorderEffect(inkex.Effect):
"""
Inkscape effect extension.
Re-order the objects in the SVG document for faster plotting.
Respect layers: Initialize a new dictionary of objects for each layer, and sort
objects within that layer only
Objects in root of document are treated as being on a _single_ layer, and will all
be sorted.
"""
def __init__( self ):
inkex.Effect.__init__( self )
self.OptionParser.add_option( "--reordering",\
action="store", type="int", dest="reordering",\
default=1,help="How groups are handled")
self.auto_rotate = True
def effect(self):
# Main entry point of the program
self.svg_width = 0
self.svg_height = 0
self.air_total_default = 0
self.air_total_sorted = 0
self.printPortrait = False
# Rendering is available for debug purposes. It only previews
# pen-up movements that are reordered and typically does not
# include all possible movement.
self.preview_rendering = False
self.layer_index = 0 # index for coloring layers
self.svg = self.document.getroot()
self.DocUnits = "in" # Default
self.DocUnits = self.getDocumentUnit()
self.unit_scaling = 1
self.getDocProps()
"""
Set up the document-wide transforms to handle SVG viewbox
"""
matCurrent = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
        vb = self.svg.get('viewBox')
if vb:
p_a_r = self.svg.get('preserveAspectRatio')
sx,sy,ox,oy = plot_utils.vb_scale(vb, p_a_r, self.svg_width, self.svg_height)
else:
sx = 1.0 / float(plot_utils.PX_PER_INCH) # Handle case of no viewbox
sy = sx
ox = 0.0
oy = 0.0
# Initial transform of document is based on viewbox, if present:
matCurrent = simpletransform.parseTransform('scale({0:.6E},{1:.6E}) translate({2:.6E},{3:.6E})'.format(sx, sy, ox, oy))
# Set up x_last, y_last, which keep track of last known pen position
# The initial position is given by the expected initial pen position
self.y_last = 0
if (self.printPortrait):
self.x_last = self.svg_width
else:
self.x_last = 0
parent_vis='visible'
self.root_nodes = []
if self.preview_rendering:
# Remove old preview layers, if rendering is enabled
for node in self.svg:
if node.tag == inkex.addNS( 'g', 'svg' ) or node.tag == 'g':
if ( node.get( inkex.addNS( 'groupmode', 'inkscape' ) ) == 'layer' ):
LayerName = node.get( inkex.addNS( 'label', 'inkscape' ) )
if LayerName == '% Preview':
self.svg.remove( node )
preview_transform = simpletransform.parseTransform(
'translate({2:.6E},{3:.6E}) scale({0:.6E},{1:.6E})'.format(
1.0/sx, 1.0/sy, -ox, -oy))
path_attrs = { 'transform': simpletransform.formatTransform(preview_transform)}
self.preview_layer = etree.Element(inkex.addNS('g', 'svg'),
path_attrs, nsmap=inkex.NSS)
self.preview_layer.set( inkex.addNS('groupmode', 'inkscape' ), 'layer' )
self.preview_layer.set( inkex.addNS( 'label', 'inkscape' ), '% Preview' )
self.svg.append( self.preview_layer )
# Preview stroke width: 1/1000 of page width or height, whichever is smaller
if self.svg_width < self.svg_height:
width_du = self.svg_width / 1000.0
else:
width_du = self.svg_height / 1000.0
"""
Stroke-width is a css style element, and cannot accept scientific notation.
Thus, in cases with large scaling (i.e., high values of 1/sx, 1/sy)
resulting from the viewbox attribute of the SVG document, it may be necessary to use
a _very small_ stroke width, so that the stroke width displayed on the screen
has a reasonable width after being displayed greatly magnified thanks to the viewbox.
Use log10(the number) to determine the scale, and thus the precision needed.
"""
log_ten = math.log10(width_du)
if log_ten > 0: # For width_du > 1
width_string = "{0:.3f}".format(width_du)
else:
prec = int(math.ceil(-log_ten) + 3)
width_string = "{0:.{1}f}".format(width_du, prec)
self.p_style = {'stroke-width': width_string, 'fill': 'none',
'stroke-linejoin': 'round', 'stroke-linecap': 'round'}
self.svg = self.parse_svg(self.svg, matCurrent)
def parse_svg(self, input_node, mat_current=None, parent_vis='visible'):
"""
Input: An SVG node (usually) containing other nodes:
The SVG root, a layer, sublayer, or other group.
Output: The re-ordered node. The contents are reordered with the greedy
algorithm, except:
- Layers and sublayers are preserved. The contents of each are
re-ordered for faster plotting.
- Groups are either preserved, broken apart, or re-ordered within
the group, depending on the value of group_mode.
"""
coord_dict = {}
# coord_dict maps a node ID to the following data:
# Is the node plottable, first coordinate pair, last coordinate pair.
# i.e., Node_id -> (Boolean: plottable, Xi, Yi, Xf, Yf)
group_dict = {}
# group_dict maps a node ID for a group to the contents of that group.
# The contents may be a preserved nested group or a flat list, depending
# on the selected group handling mode. Example:
# group_dict = {'id_1': <Element {http://www.w3.org/2000/svg}g at memory_location_1>,
# 'id_2': <Element {http://www.w3.org/2000/svg}g at memory_location_2>
nodes_to_delete = []
counter = 0 # TODO: Replace this with better unique ID system
# Account for input_node's transform and any transforms above it:
if mat_current is None:
mat_current = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
try:
matNew = simpletransform.composeTransform( mat_current,
simpletransform.parseTransform( input_node.get( "transform" )))
except AttributeError:
matNew = mat_current
for node in input_node:
# Step through each object within the top-level input node
if node.tag is etree.Comment:
continue
try:
id = node.get( 'id' )
except AttributeError:
id = self.uniqueId("1",True)
node.set( 'id', id)
if id == None:
id = self.uniqueId("1",True)
node.set( 'id', id)
# First check for object visibility:
skip_object = False
# Check for "display:none" in the node's style attribute:
style = simplestyle.parseStyle(node.get('style'))
if 'display' in style.keys() and style['display'] == 'none':
skip_object = True # Plot neither this object nor its children
# The node may have a display="none" attribute as well:
if node.get( 'display' ) == 'none':
skip_object = True # Plot neither this object nor its children
# Visibility attributes control whether a given object will plot.
# Children of hidden (not visible) parents may be plotted if
# they assert visibility.
visibility = node.get( 'visibility', parent_vis )
if 'visibility' in style.keys():
visibility = style['visibility'] # Style may override attribute.
if visibility == 'inherit':
visibility = parent_vis
if visibility != 'visible':
skip_object = True # Skip this object and its children
# Next, check to see if this inner node is itself a group or layer:
if node.tag == inkex.addNS( 'g', 'svg' ) or node.tag == 'g':
# Use the user-given option to decide what to do with subgroups:
subgroup_mode = self.options.reordering
# Values of the parameter:
# subgroup_mode=="1": Preserve groups
# subgroup_mode=="2": Reorder within groups
# subgroup_mode=="3": Break apart groups
if node.get(inkex.addNS('groupmode', 'inkscape')) == 'layer':
# The node is a layer or sub-layer, not a regular group.
# Parse it separately, and re-order its contents.
subgroup_mode = 2 # Always sort within each layer.
self.layer_index += 1
layer_name = node.get( inkex.addNS( 'label', 'inkscape' ) )
if sys.version_info < (3,): # Yes this is ugly. More elegant suggestions welcome. :)
layer_name = layer_name.encode( 'ascii', 'ignore' ) #Drop non-ascii characters
else:
layer_name = str(layer_name)
layer_name = layer_name.lstrip() # Remove leading whitespace
if layer_name:
if layer_name[0] == '%': # First character is '%'; This
skip_object = True # is a documentation layer; skip plotting.
self.layer_index -= 1 # Set this back to previous value.
if skip_object:
# Do not re-order hidden groups or layers.
subgroup_mode = 1 # Preserve this group
if subgroup_mode == 3:
# Break apart this non-layer subgroup and add it to
# the set of things to be re-ordered.
nodes_to_delete.append(node)
nodes_inside_group = self.group2NodeDict(node)
for a_node in nodes_inside_group:
try:
id = a_node.get( 'id' )
except AttributeError:
id = self.uniqueId("1",True)
a_node.set( 'id', id)
if id is None:
id = self.uniqueId("1",True)
a_node.set( 'id', id)
# Use getFirstPoint and getLastPoint on each object:
start_plottable, first_point = self.getFirstPoint(a_node, matNew)
end_plottable, last_point = self.getLastPoint(a_node, matNew)
coord_dict[id] = (start_plottable and end_plottable,
first_point[0], first_point[1], last_point[0], last_point[1] )
# Entry in group_dict is this node
group_dict[id] = a_node
elif subgroup_mode == 2:
# Reorder a layer or subgroup with a recursive call.
node = self.parse_svg(node, matNew, visibility)
# Capture the first and last x,y coordinates of the optimized node
start_plottable, first_point = self.group_first_pt(node, matNew)
end_plottable, last_point = self.group_last_pt(node, matNew)
# Then add this optimized node to the coord_dict
coord_dict[id] = (start_plottable and end_plottable,
first_point[0], first_point[1], last_point[0], last_point[1] )
# Entry in group_dict is this node
group_dict[id] = node
else: # (subgroup_mode == 1)
# Preserve the group, but find its first and last point so
# that it can be re-ordered with respect to other items
if skip_object:
start_plottable = False
end_plottable = False
first_point = [(-1.), (-1.)]
last_point = [(-1.), (-1.)]
else:
start_plottable, first_point = self.group_first_pt(node, matNew)
end_plottable, last_point = self.group_last_pt(node, matNew)
coord_dict[id] = (start_plottable and end_plottable,
first_point[0], first_point[1], last_point[0], last_point[1] )
# Entry in group_dict is this node
group_dict[id] = node
else: # Handle objects that are not groups
if skip_object:
start_plottable = False
end_plottable = False
first_point = [(-1.), (-1.)]
last_point = [(-1.), (-1.)]
else:
start_plottable, first_point = self.getFirstPoint(node, matNew)
end_plottable, last_point = self.getLastPoint(node, matNew)
coord_dict[id] = (start_plottable and end_plottable,
first_point[0], first_point[1], last_point[0], last_point[1] )
group_dict[id] = node # Entry in group_dict is this node
# Perform the re-ordering:
ordered_element_list = self.ReorderNodeList(coord_dict, group_dict)
# Once a better order for the svg elements has been determined,
# all that remains is to reintroduce the nodes to the parent in the correct order
for elt in ordered_element_list:
# Creates identical node at the correct location according to ordered_element_list
input_node.append(elt)
# Once the re-ordering is complete, remove any nodes that were broken apart from the parent:
for element_to_remove in nodes_to_delete:
try:
input_node.remove(element_to_remove)
except ValueError:
inkex.errormsg(str(element_to_remove.get('id'))+" is not a member of " + str(input_node.get('id')))
return input_node
def break_apart_path(self, path):
"""
An SVG path may contain multiple distinct portions, that are normally separated
by pen-up movements.
This function takes the path data string from an SVG path, parses it, and returns
a dictionary of independent path data strings, each of which represents a single
pen-down movement. It is equivalent to the Inkscape function Path > Break Apart
Input: path data string, representing a single SVG path
Output: Dictionary of (separated) path data strings
"""
MaxLength = len(path)
ix = 0
move_to_location = []
path_dictionary = {}
path_list = []
path_number = 1
# Search for M or m location
while ix < MaxLength:
if(path[ix] == 'm' or path[ix] == 'M'):
move_to_location.append(ix)
ix = ix + 1
# Iterate through every M or m location in our list of move to instructions
# Slice the path string according to path beginning and ends as indicated by the
# location of these instructions
for counter, m in enumerate(move_to_location):
if (m == move_to_location[-1]):
# last entry
path_list.append(path[m:MaxLength].rstrip())
else:
path_list.append(path[m:move_to_location[counter + 1]].rstrip())
for counter, current_path in enumerate(path_list):
# Enumerate over every entry in the path looking for relative m commands
if current_path[0] == 'm' and counter > 0:
# If path contains relative m command, the best case is when the last command
# was a Z or z. In this case, all relative operations are performed relative to
# initial x, y coordinates of the previous path
if path_list[counter -1][-1].upper() == 'Z':
current_path_x, current_path_y,index = self.getFirstPoint(current_path, matNew)
prev_path_x, prev_path_y,ignore = self.getFirstPoint(path_list[counter-1])
adapted_x = current_path_x + prev_path_x
adapted_y = current_path_y + prev_path_y
# Now we can replace the path data with an Absolute Move to instruction
# HOWEVER, we need to adapt all the data until we reach a different command, in the case of a repeated (implicit) command
path_list[counter] = "m "+str(adapted_x)+","+str(adapted_y) + ' ' +current_path[index:]
# If there is no z or absolute commands, we need to parse the entire path
else:
# scan path for absolute coordinates. If present, begin parsing from their index
# instead of the beginning
prev_path = path_list[counter-1]
prev_path_length = len(prev_path)
jx = 0
x_val, y_val = 0,0
# Check one char at a time
# until we have the moveTo Command
last_command = ''
is_absolute_command = False
repeated_command = False
# name of command
# how many parameters we need to skip
accepted_commands = {
'M':0,
'L':0,
'H':0,
'V':0,
'C':4,
'S':2,
'Q':2,
'T':0,
'A':5
}
# If there is an absolute command which specifies a new initial point
# then we can save time by setting our index directly to its location in the path data
# See if an accepted_command is present in the path data. If it is present further in the
# string than any command found before, then set the pointer to that location
# if a command is not found, find() will return a -1. jx is initialized to 0, so if no matches
# are found, the program will parse from the beginning to the end of the path
for keys in 'MLCSQTA': # TODO: Compare to last_point; see if we can clean up this part
if(prev_path.find(keys) > jx):
jx = prev_path.find(keys)
while jx < prev_path_length:
temp_x_val = ''
temp_y_val = ''
num_of_params_to_skip = 0
# SVG Path commands can be repeated
if (prev_path[jx].isdigit() and last_command):
repeated_command = True
else:
repeated_command = False
if (prev_path[jx].isalpha() and prev_path[jx].upper() in accepted_commands) or repeated_command:
if repeated_command:
# is_absolute_command and last_command are saved from the last iteration of the loop
current_command = last_command
else:
# If the character is accepted, we must parse until reach the x y coordinates
is_absolute_command = prev_path[jx].isupper()
current_command = prev_path[jx].upper()
# Each command has a certain number of parameters we must pass before we reach the
# information we care about. We will parse until we know that we have reached them
# Get to start of next number
# We will know we have reached a number if the current character is a +/- sign
# or current character is a digit
while jx < prev_path_length:
if(prev_path[jx] in '+-' or prev_path[jx].isdigit()):
break
jx = jx + 1
# We need to parse past the unused parameters in our command
# The number of parameters to parse past is dependent on the command and stored
# as the value of accepted_command
# Spaces and commas are used to delineate parameters
while jx < prev_path_length and num_of_params_to_skip < accepted_commands[current_command]:
if(prev_path[jx].isspace() or prev_path[jx] == ','):
num_of_params_to_skip = num_of_params_to_skip + 1
jx = jx + 1
# Now, we are in front of the x character
if current_command.upper() == 'V':
temp_x_val = 0
if current_command.upper() == 'H':
temp_y_val = 0
# Parse until next character is a digit or +/- character
while jx < prev_path_length and current_command.upper() != 'V':
if(prev_path[jx] in '+-' or prev_path[jx].isdigit()):
break
jx = jx + 1
# Save each next character until we reach a space
while jx < prev_path_length and current_command.upper() != 'V' and not (prev_path[jx].isspace() or prev_path[jx] == ','):
temp_x_val = temp_x_val + prev_path[jx]
jx = jx + 1
# Then we know we have completely parsed the x character
# Now we are in front of the y character
# Parse until next character is a digit or +/- character
while jx < prev_path_length and current_command.upper() != 'H':
if(prev_path[jx] in '+-' or prev_path[jx].isdigit()):
break
jx = jx + 1
## Save each next character until we reach a space
while jx < prev_path_length and current_command.upper() != 'H' and not (prev_path[jx].isspace() or prev_path[jx] == ','):
temp_y_val = temp_y_val + prev_path[jx]
jx = jx + 1
# Then we know we have completely parsed the y character
if is_absolute_command:
if current_command == 'H':
# Absolute commands create new x,y position
try:
x_val = float(temp_x_val)
except ValueError:
pass
elif current_command == 'V':
# Absolute commands create new x,y position
try:
y_val = float(temp_y_val)
except ValueError:
pass
else:
# Absolute commands create new x,y position
try:
x_val = float(temp_x_val)
y_val = float(temp_y_val)
except ValueError:
pass
else:
if current_command == 'H':
# Relative h commands offset only the current x position
try:
x_val = x_val + float(temp_x_val)
except ValueError:
pass
elif current_command == 'V':
# Relative v commands offset only the current y position
try:
y_val = y_val + float(temp_y_val)
except ValueError:
pass
else:
# Other relative commands offset the current x,y position
try:
x_val = x_val + float(temp_x_val)
y_val = y_val + float(temp_y_val)
except ValueError:
pass
last_command = current_command
jx = jx + 1
x,y,index = self.getFirstPoint(current_path,None)
path_list[counter] = "m "+str(x_val+x)+","+str(y_val+y) + ' ' + current_path[index:]
for counter, path in enumerate(path_list):
path_dictionary['ad_path'+ str(counter)] = path
return path_dictionary
def getFirstPoint(self, node, matCurrent):
"""
Input: (non-group) node and parent transformation matrix
Output: Boolean value to indicate if the svg element is plottable and
two floats stored in a list representing the x and y coordinates we plot first
"""
# first apply the current matrix transform to this node's transform
matNew = simpletransform.composeTransform( matCurrent, simpletransform.parseTransform( node.get( "transform" ) ) )
point = [float(-1), float(-1)]
try:
if node.tag == inkex.addNS( 'path', 'svg' ):
pathdata = node.get('d')
point = plot_utils.pathdata_first_point(pathdata)
simpletransform.applyTransformToPoint(matNew, point)
return True, point
if node.tag == inkex.addNS( 'rect', 'svg' ) or node.tag == 'rect':
"""
The x,y coordinates for a rect are included in their specific attributes
If there is a transform, we need translate the x & y coordinates to their
correct location via applyTransformToPoint.
"""
point[0] = float( node.get( 'x' ) )
point[1] = float( node.get( 'y' ) )
simpletransform.applyTransformToPoint(matNew, point)
return True, point
if node.tag == inkex.addNS( 'line', 'svg' ) or node.tag == 'line':
"""
The x1 and y1 attributes are where we will start to draw
So, get them, apply the transform matrix, and return the point
"""
point[0] = float( node.get( 'x1' ) )
point[1] = float( node.get( 'y1' ) )
simpletransform.applyTransformToPoint(matNew, point)
return True, point
if node.tag == inkex.addNS( 'polyline', 'svg' ) or node.tag == 'polyline':
pl = node.get( 'points', '' ).strip()
if pl == '':
return False, point
pa = pl.replace(',',' ').split() # replace comma with space before splitting
if not pa:
return False, point
pathLength = len( pa )
if (pathLength < 4): # Minimum of x1,y1 x2,y2 required.
return False, point
d = "M " + pa[0] + " " + pa[1]
i = 2
while (i < (pathLength - 1 )):
d += " L " + pa[i] + " " + pa[i + 1]
i += 2
point = plot_utils.pathdata_first_point(d)
simpletransform.applyTransformToPoint(matNew, point)
return True, point
if (node.tag == inkex.addNS( 'polygon', 'svg' ) or
node.tag == 'polygon'):
"""
We need to extract x1 and y1 from these:
<polygon points="x1,y1 x2,y2 x3,y3 [...]"/>
We accomplish this with Python string strip
and split methods. Then apply transforms
"""
# Strip() removes all whitespace from the start and end of p1
pl = node.get( 'points', '' ).strip()
if (pl == ''):
# If pl is blank there has been an error; return False and -1,-1 to indicate a problem has occurred
return False, point
# Split string by whitespace
pa = pl.split()
if not len( pa ):
# If pa is empty there has been an error; return False and -1,-1 to indicate a problem has occurred
return False, point
# pa[0] = "x1,y1
# split string via comma to get x1 and y1 individually
# then point = [x1,x2]
point = pa[0].split(",")
point = [float(point[0]),float(point[1])]
simpletransform.applyTransformToPoint(matNew, point)
return True, point
if node.tag == inkex.addNS( 'ellipse', 'svg' ) or \
node.tag == 'ellipse':
cx = float( node.get( 'cx', '0' ) )
cy = float( node.get( 'cy', '0' ) )
rx = float( node.get( 'rx', '0' ) )
point[0] = cx - rx
point[1] = cy
simpletransform.applyTransformToPoint(matNew, point)
return True, point
if node.tag == inkex.addNS( 'circle', 'svg' ) or \
node.tag == 'circle':
cx = float( node.get( 'cx', '0' ) )
cy = float( node.get( 'cy', '0' ) )
r = float( node.get( 'r', '0' ) )
point[0] = cx - r
point[1] = cy
simpletransform.applyTransformToPoint(matNew, point)
return True, point
if node.tag == inkex.addNS('symbol', 'svg') or node.tag == 'symbol':
# A symbol is much like a group, except that
# it's an invisible object.
return False, point # Skip this element.
if node.tag == inkex.addNS('use', 'svg') or node.tag == 'use':
"""
A <use> element refers to another SVG element via an xlink:href="#blah"
attribute. We will handle the element by doing an XPath search through
the document, looking for the element with the matching id="blah"
attribute. We then recursively process that element after applying
any necessary (x,y) translation.
Notes:
1. We ignore the height and width attributes as they do not apply to
path-like elements, and
2. Even if the use element has visibility="hidden", SVG still calls
for processing the referenced element. The referenced element is
hidden only if its visibility is "inherit" or "hidden".
3. We may be able to unlink clones using the code in pathmodifier.py
"""
refid = node.get(inkex.addNS('href', 'xlink'))
if refid is not None:
# [1:] to ignore leading '#' in reference
path = '//*[@id="{0}"]'.format(refid[1:])
refnode = node.xpath(path)
if refnode is not None:
x = float(node.get('x', '0'))
y = float(node.get('y', '0'))
# Note: the transform has already been applied
if x != 0 or y != 0:
mat_new2 = simpletransform.composeTransform(matNew, simpletransform.parseTransform('translate({0:f},{1:f})'.format(x, y)))
else:
mat_new2 = matNew
# Note that the referenced object may be a 'symbol`,
# which acts like a group, or it may be a simple
# object.
if len(refnode) > 0:
plottable, the_point = self.group_first_pt(refnode[0], mat_new2)
else:
plottable, the_point = self.group_first_pt(refnode, mat_new2)
return plottable, the_point
except:
pass
# Svg Object is not a plottable element
# In this case, return False to indicate a non-plottable element
# and a default point
return False, point
def getLastPoint(self, node, matCurrent):
"""
Input: XML tree node and transformation matrix
Output: Boolean value to indicate if the svg element is plottable or not and
two floats stored in a list representing the x and y coordinates we plot last
"""
# first apply the current matrix transform to this node's transform
matNew = simpletransform.composeTransform( matCurrent, simpletransform.parseTransform( node.get( "transform" ) ) )
# If we return a negative value, we know that this function did not work
point = [float(-1), float(-1)]
try:
if node.tag == inkex.addNS( 'path', 'svg' ):
path = node.get('d')
point = plot_utils.pathdata_last_point(path)
simpletransform.applyTransformToPoint(matNew, point)
return True, point
if node.tag == inkex.addNS( 'rect', 'svg' ) or node.tag == 'rect':
"""
The x,y coordinates for a rect are included in their specific attributes
If there is a transform, we need translate the x & y coordinates to their
correct location via applyTransformToPoint.
"""
point[0] = float( node.get( 'x' ) )
point[1] = float( node.get( 'y' ) )
simpletransform.applyTransformToPoint(matNew, point)
return True, point # Same start and end points
if node.tag == inkex.addNS( 'line', 'svg' ) or node.tag == 'line':
"""
The x2 and y2 attributes are where we will end our drawing
So, get them, apply the transform matrix, and return the point
"""
point[0] = float( node.get( 'x2' ) )
point[1] = float( node.get( 'y2' ) )
simpletransform.applyTransformToPoint(matNew, point)
return True, point
if node.tag == inkex.addNS( 'polyline', 'svg' ) or node.tag == 'polyline':
pl = node.get( 'points', '' ).strip()
if pl == '':
return False, point
pa = pl.replace(',',' ').split()
if not pa:
return False, point
pathLength = len( pa )
if (pathLength < 4): # Minimum of x1,y1 x2,y2 required.
return False, point
d = "M " + pa[0] + " " + pa[1]
i = 2
while (i < (pathLength - 1 )):
d += " L " + pa[i] + " " + pa[i + 1]
i += 2
endpoint = plot_utils.pathdata_last_point(d)
simpletransform.applyTransformToPoint(matNew, endpoint)
return True, endpoint
if node.tag == inkex.addNS( 'polygon', 'svg' ) or node.tag == 'polygon':
"""
We need to extract x1 and y1 from these:
<polygon points="x1,y1 x2,y2 x3,y3 [...]"/>
We accomplish this with Python string strip
and split methods. Then apply transforms
"""
# Strip() removes all whitespace from the start and end of p1
pl = node.get( 'points', '' ).strip()
if (pl == ''):
# If pl is blank there has been an error; return -1,-1 to indicate a problem has occurred
return False, point
# Split string by whitespace
pa = pl.split()
if not len( pa ):
# If pa is empty there has been an error; return -1,-1 to indicate a problem has occurred
return False, point
# pa[0] = "x1,y1
# split string via comma to get x1 and y1 individually
# then point = [x1,x2]
point = pa[0].split(",")
point = [float(point[0]),float(point[1])]
simpletransform.applyTransformToPoint(matNew, point)
return True, point
if node.tag == inkex.addNS( 'ellipse', 'svg' ) or node.tag == 'ellipse':
cx = float( node.get( 'cx', '0' ) )
cy = float( node.get( 'cy', '0' ) )
rx = float( node.get( 'rx', '0' ) )
point[0] = cx - rx
point[1] = cy
simpletransform.applyTransformToPoint(matNew, point)
return True, point
if node.tag == inkex.addNS( 'circle', 'svg' ) or node.tag == 'circle':
cx = float( node.get( 'cx', '0' ) )
cy = float( node.get( 'cy', '0' ) )
r = float( node.get( 'r', '0' ) )
point[0] = cx - r
point[1] = cy
simpletransform.applyTransformToPoint(matNew, point)
return True, point
if node.tag == inkex.addNS('symbol', 'svg') or node.tag == 'symbol':
# A symbol is much like a group, except that it should only be
# rendered when called within a "use" tag.
return False, point # Skip this element.
if node.tag == inkex.addNS('use', 'svg') or node.tag == 'use':
"""
A <use> element refers to another SVG element via an xlink:href="#blah"
attribute. We will handle the element by doing an XPath search through
the document, looking for the element with the matching id="blah"
attribute. We then recursively process that element after applying
any necessary (x,y) translation.
Notes:
1. We ignore the height and width attributes as they do not apply to
path-like elements, and
2. Even if the use element has visibility="hidden", SVG still calls
for processing the referenced element. The referenced element is
hidden only if its visibility is "inherit" or "hidden".
3. We may be able to unlink clones using the code in pathmodifier.py
"""
refid = node.get(inkex.addNS('href', 'xlink'))
if refid is not None:
# [1:] to ignore leading '#' in reference
path = '//*[@id="{0}"]'.format(refid[1:])
refnode = node.xpath(path)
if refnode is not None:
x = float(node.get('x', '0'))
y = float(node.get('y', '0'))
# Note: the transform has already been applied
if x != 0 or y != 0:
mat_new2 = simpletransform.composeTransform(matNew, simpletransform.parseTransform('translate({0:f},{1:f})'.format(x, y)))
else:
mat_new2 = matNew
if len(refnode) > 0:
plottable, the_point = self.group_last_pt(refnode[0], mat_new2)
else:
plottable, the_point = self.group_last_pt(refnode, mat_new2)
return plottable, the_point
except:
pass
# Svg Object is not a plottable element;
# Return False and a default point
return False, point
def group_first_pt(self, group, matCurrent = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]):
"""
Input: A Node which we have found to be a group
Output: Boolean value to indicate if a point is plottable
float values for first x,y coordinates of svg element
"""
if len(group) == 0: # Empty group -- The object may not be a group.
return self.getFirstPoint(group, matCurrent)
success = False
point = [float(-1), float(-1)]
# first apply the current matrix transform to this node's transform
matNew = simpletransform.composeTransform( matCurrent, simpletransform.parseTransform( group.get( "transform" ) ) )
# Step through the group, we examine each element until we find a plottable object
for subnode in group:
# Check to see if the subnode we are looking at in this iteration of our for loop is a group
# If it is a group, we must recursively call this function to search for a plottable object
if subnode.tag == inkex.addNS( 'g', 'svg' ) or subnode.tag == 'g':
# Verify that the nested group has objects within it
# otherwise we will not parse it
if subnode is not None:
# Check if group contains plottable elements by recursively calling group_first_pt
# If group contains plottable subnode, then it will return that value and escape the loop
# Else function continues search for first plottable object
success, point = self.group_first_pt(subnode, matNew)
if success:
# Subnode inside nested group is plottable!
# Break from our loop so we can return the first point of this plottable subnode
break
else:
continue
else:
# Node is not a group
# Get its first (x,y) coordinates
# Also get a Boolean value to indicate if the subnode is plottable or not
# If subnode is not plottable, continue to next subnode in the group
success, point = self.getFirstPoint(subnode, matNew)
if success:
# Subnode inside group is plottable!
# Break from our loop so we can return the first point of this plottable subnode
break
else:
continue
return success, point
def group_last_pt(self, group, matCurrent=[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]):
"""
Input: A Node which we have found to be a group
Output: Boolean value to indicate if a point is plottable, and the last plottable x,y coordinates within the group
"""
if len(group) == 0: # Empty group -- Did someone send an object that isn't a group?
return self.getLastPoint(group, matCurrent)
success = False
point = [float(-1),float(-1)]
# first apply the current matrix transform to this node's transform
matNew = simpletransform.composeTransform( matCurrent, simpletransform.parseTransform( group.get( "transform" ) ) )
# Step through the group, we examine each element until we find a plottable object
for subnode in reversed(group):
# Check to see if the subnode we are looking at in this iteration of our for loop is a group
# If it is a group, we must recursively call this function to search for a plottable object
if subnode.tag == inkex.addNS( 'g', 'svg' ) or subnode.tag == 'g':
# Verify that the nested group has objects within it
# otherwise we will not parse it
if subnode is not None:
# Check if group contains plottable elements by recursively calling group_last_pt
# If group contains plottable subnode, then it will return that value and escape the loop
# Else function continues search for last plottable object
success, point = self.group_last_pt(subnode, matNew)
if success:
# Subnode inside nested group is plottable!
# Break from our loop so we can return the last point of this plottable subnode
break
else:
continue
else:
# Node is not a group
# Get its last (x,y) coordinates
# Also get a Boolean value to indicate if the subnode is plottable or not
# If subnode is not plottable, continue to next subnode in the group
success, point = self.getLastPoint(subnode, matNew)
if success:
# Subnode inside group is plottable!
# Break from our loop so we can return the last point of this plottable subnode
break
else:
continue
return success, point
def group2NodeDict(self, group, mat_current=None):
if mat_current is None:
mat_current = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
# first apply the current matrix transform to this node's transform
matNew = simpletransform.composeTransform( mat_current, simpletransform.parseTransform( group.get( "transform" ) ) )
nodes_in_group = []
# Step through the group, we examine each element until we find a plottable object
for subnode in group:
# Check to see if the subnode we are looking at in this iteration of our for loop is a group
# If it is a group, we must recursively call this function to search for a plottable object
if subnode.tag == inkex.addNS( 'g', 'svg' ) or subnode.tag == 'g':
# Verify that the nested group has objects within it
# otherwise we will not parse it
if subnode is not None:
# Check if group contains plottable elements by recursively calling group_first_pt
# If group contains plottable subnode, then it will return that value and escape the loop
# Else function continues search for first plottable object
nodes_in_group.extend(self.group2NodeDict(subnode, matNew))
else:
simpletransform.applyTransformToNode(matNew, subnode)
nodes_in_group.append(subnode)
return nodes_in_group
def ReorderNodeList(self, coord_dict, group_dict):
# Re-order the given set of SVG elements, using a simple "greedy" algorithm.
# The first object will be the element closest to the origin
# After this choice, the algorithm loops through all remaining elements looking for the element whose first x,y
# coordinates are closest to the previous choice's last x,y coordinates
# This process continues until all elements have been sorted into ordered_element_list and removed from group_dict
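# Sketch of the expected inputs (ids and coordinates below are hypothetical):
# coord_dict = {'path1': (True, 0.0, 0.0, 5.0, 5.0), 'path2': (True, 4.0, 5.0, 9.0, 2.0)}
# group_dict = {'path1': <node>, 'path2': <node>}
# With (self.x_last, self.y_last) at (0, 0), 'path1' is chosen first (its entry point is nearest),
# x_last/y_last become its exit (5.0, 5.0), and 'path2' (entry 4.0, 5.0) is appended next.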
ordered_layer_element_list = []
# Continue until all elements have been re-ordered
while group_dict:
nearest_dist = float('inf')
for key,node in group_dict.items():
# Is this node non-plottable?
# If so, exit loop and append element to ordered_layer_element_list
if not coord_dict[key][0]:
# Object is not Plottable
nearest = node
nearest_id = key
continue
# If we reach this point, node is plottable and needs to be considered in our algo
entry_x = coord_dict[key][1] # x-coordinate of first point of the path
entry_y = coord_dict[key][2] # y-coordinate of first point of the path
exit_x = coord_dict[key][3] # x-coordinate of last point of the path
exit_y = coord_dict[key][4] # y-coordinate of last point of the path
object_dist = (entry_x-self.x_last)*(entry_x-self.x_last) + (entry_y-self.y_last) * (entry_y-self.y_last)
# This is actually the distance squared; calculating it rather than the pythagorean distance
# saves a square root calculation. Right now, we only care about _which distance is less_
# not the exact value of it, so this is a harmless shortcut.
# If this distance is smaller than the previous element's distance, then replace the previous
# element's entry with our current element's distance
if nearest_dist >= object_dist:
# We have found an element closer than the previous closest element
nearest = node
nearest_id = key
nearest_dist = object_dist
nearest_start_x = entry_x
nearest_start_y = entry_y
# Now that the closest object has been determined, it is time to add it to the
# optimized list of closest objects
ordered_layer_element_list.append(nearest)
# To determine the closest object in the next iteration of the loop,
# we must save the last x,y coor of this element
# If this element is plottable, then save the x,y coordinates
# If this element is non-plottable, then do not save the x,y coordinates
if coord_dict[nearest_id][0]:
# Also, draw line indicating that we've found a new point.
if self.preview_rendering:
preview_path = [] # pen-up path data for preview
preview_path.append("M{0:.3f} {1:.3f}".format(
self.x_last, self.y_last))
preview_path.append("{0:.3f} {1:.3f}".format(
nearest_start_x, nearest_start_y))
self.p_style.update({'stroke': self.color_index(self.layer_index)})
path_attrs = {
'style': simplestyle.formatStyle( self.p_style ),
'd': " ".join(preview_path)}
etree.SubElement( self.preview_layer,
inkex.addNS( 'path', 'svg' ), path_attrs, nsmap=inkex.NSS )
self.x_last = coord_dict[nearest_id][3]
self.y_last = coord_dict[nearest_id][4]
# Remove this element from group_dict to indicate it has been optimized
del group_dict[nearest_id]
# Once all elements have been removed from the group_dictionary
# Return the optimized list of svg elements in the layer
return ordered_layer_element_list
def color_index(self, index):
    # Cycle through a fixed palette of nine preview stroke colors, keyed by layer index
    palette = ("rgb(255, 0, 0)", "rgb(170, 85, 0)", "rgb(85, 170, 0)",
               "rgb(0, 255, 0)", "rgb(0, 170, 85)", "rgb(0, 85, 170)",
               "rgb(0, 0, 255)", "rgb(85, 0, 170)", "rgb(170, 0, 85)")
    return palette[index % 9]
def getDocProps(self):
"""
Get the document's height and width attributes from the <svg> tag.
Use a default value in case the property is not present or is
expressed in units of percentages.
"""
self.svg_height = plot_utils.getLengthInches(self, 'height')
self.svg_width = plot_utils.getLengthInches(self, 'width')
width_string = self.svg.get('width')
if width_string:
value, units = plot_utils.parseLengthWithUnits(width_string)
self.doc_units = units
if self.auto_rotate and (self.svg_height > self.svg_width):
self.printPortrait = True
if self.svg_height is None or self.svg_width is None:
return False
else:
return True
def get_output(self):
# Return serialized copy of svg document output
result = etree.tostring(self.document)
return result.decode("utf-8")
# Create effect instance and apply it.
if __name__ == '__main__':
effect = ReorderEffect()
exit_status.run(effect.affect)
|
bev-a-tron/pledge_service
|
refs/heads/master
|
build/stripe/api_requestor.py
|
10
|
import calendar
import datetime
import platform
import time
import urllib
import urlparse
import warnings
import stripe
from stripe import error, http_client, version, util
def _encode_datetime(dttime):
if dttime.tzinfo and dttime.tzinfo.utcoffset(dttime) is not None:
utc_timestamp = calendar.timegm(dttime.utctimetuple())
else:
utc_timestamp = time.mktime(dttime.timetuple())
return int(utc_timestamp)
def _api_encode(data):
for key, value in data.iteritems():
key = util.utf8(key)
if value is None:
continue
elif hasattr(value, 'stripe_id'):
yield (key, value.stripe_id)
elif isinstance(value, list) or isinstance(value, tuple):
for subvalue in value:
yield ("%s[]" % (key,), util.utf8(subvalue))
elif isinstance(value, dict):
subdict = dict(('%s[%s]' % (key, subkey), subvalue) for
subkey, subvalue in value.iteritems())
for subkey, subvalue in _api_encode(subdict):
yield (subkey, subvalue)
elif isinstance(value, datetime.datetime):
yield (key, _encode_datetime(value))
else:
yield (key, util.utf8(value))
def _build_api_url(url, query):
scheme, netloc, path, base_query, fragment = urlparse.urlsplit(url)
if base_query:
query = '%s&%s' % (base_query, query)
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
class APIRequestor(object):
def __init__(self, key=None, client=None):
self.api_key = key
from stripe import verify_ssl_certs
self._client = client or http_client.new_default_http_client(
verify_ssl_certs=verify_ssl_certs)
@classmethod
def api_url(cls, url=''):
warnings.warn(
'The `api_url` class method of APIRequestor is '
'deprecated and will be removed in version 2.0. '
'If you need public access to this function, please email us '
'at support@stripe.com.',
DeprecationWarning)
return '%s%s' % (stripe.api_base, url)
@classmethod
def _deprecated_encode(cls, stk, key, value):
warnings.warn(
'The encode_* class methods of APIRequestor are deprecated and '
'will be removed in version 2.0. '
'If you need public access to this function, please email us '
'at support@stripe.com.',
DeprecationWarning, stacklevel=2)
stk.extend(_api_encode({key: value}))
@classmethod
def encode_dict(cls, stk, key, value):
cls._deprecated_encode(stk, key, value)
@classmethod
def encode_list(cls, stk, key, value):
cls._deprecated_encode(stk, key, value)
@classmethod
def encode_datetime(cls, stk, key, value):
cls._deprecated_encode(stk, key, value)
@classmethod
def encode_none(cls, stk, key, value):
cls._deprecated_encode(stk, key, value)
@classmethod
def encode(cls, d):
"""
Internal: encode a string for url representation
"""
warnings.warn(
'The `encode` class method of APIRequestor is deprecated and '
'will be removed in version 2.0. '
'If you need public access to this function, please email us '
'at support@stripe.com.',
DeprecationWarning)
return urllib.urlencode(list(_api_encode(d)))
@classmethod
def build_url(cls, url, params):
warnings.warn(
'The `build_url` class method of APIRequestor is deprecated and '
'will be removed in version 2.0. '
'If you need public access to this function, please email us '
'at support@stripe.com.',
DeprecationWarning)
return _build_api_url(url, cls.encode(params))
def request(self, method, url, params=None):
rbody, rcode, my_api_key = self.request_raw(
method.lower(), url, params)
resp = self.interpret_response(rbody, rcode)
return resp, my_api_key
def handle_api_error(self, rbody, rcode, resp):
try:
err = resp['error']
except (KeyError, TypeError):
raise error.APIError(
"Invalid response object from API: %r (HTTP response code "
"was %d)" % (rbody, rcode),
rbody, rcode, resp)
if rcode in [400, 404]:
raise error.InvalidRequestError(
err.get('message'), err.get('param'), rbody, rcode, resp)
elif rcode == 401:
raise error.AuthenticationError(
err.get('message'), rbody, rcode, resp)
elif rcode == 402:
raise error.CardError(err.get('message'), err.get('param'),
err.get('code'), rbody, rcode, resp)
else:
raise error.APIError(err.get('message'), rbody, rcode, resp)
def request_raw(self, method, url, params=None):
"""
Mechanism for issuing an API call
"""
from stripe import api_version
if self.api_key:
my_api_key = self.api_key
else:
from stripe import api_key
my_api_key = api_key
if my_api_key is None:
raise error.AuthenticationError(
'No API key provided. (HINT: set your API key using '
'"stripe.api_key = <API-KEY>"). You can generate API keys '
'from the Stripe web interface. See https://stripe.com/api '
'for details, or email support@stripe.com if you have any '
'questions.')
abs_url = '%s%s' % (stripe.api_base, url)
encoded_params = urllib.urlencode(list(_api_encode(params or {})))
if method == 'get' or method == 'delete':
if params:
abs_url = _build_api_url(abs_url, encoded_params)
post_data = None
elif method == 'post':
post_data = encoded_params
else:
raise error.APIConnectionError(
'Unrecognized HTTP method %r. This may indicate a bug in the '
'Stripe bindings. Please contact support@stripe.com for '
'assistance.' % (method,))
ua = {
'bindings_version': version.VERSION,
'lang': 'python',
'publisher': 'stripe',
'httplib': self._client.name,
}
for attr, func in [['lang_version', platform.python_version],
['platform', platform.platform],
['uname', lambda: ' '.join(platform.uname())]]:
try:
val = func()
except Exception, e:
val = "!! %s" % (e,)
ua[attr] = val
headers = {
'X-Stripe-Client-User-Agent': util.json.dumps(ua),
'User-Agent': 'Stripe/v1 PythonBindings/%s' % (version.VERSION,),
'Authorization': 'Bearer %s' % (my_api_key,)
}
if method == 'post':
headers['Content-Type'] = 'application/x-www-form-urlencoded'
if api_version is not None:
headers['Stripe-Version'] = api_version
rbody, rcode = self._client.request(
method, abs_url, headers, post_data)
util.logger.info(
'API request to %s returned (response code, response body) of '
'(%d, %r)',
abs_url, rcode, rbody)
return rbody, rcode, my_api_key
def interpret_response(self, rbody, rcode):
try:
if hasattr(rbody, 'decode'):
rbody = rbody.decode('utf-8')
resp = util.json.loads(rbody)
except Exception:
raise error.APIError(
"Invalid response body from API: %s "
"(HTTP response code was %d)" % (rbody, rcode),
rbody, rcode)
if not (200 <= rcode < 300):
self.handle_api_error(rbody, rcode, resp)
return resp
# Deprecated request handling. Will all be removed in 2.0
def _deprecated_request(self, impl, method, url, headers, params):
warnings.warn(
'The *_request functions of APIRequestor are deprecated and '
'will be removed in version 2.0. Please use the client classes '
' in `stripe.http_client` instead',
DeprecationWarning, stacklevel=2)
method = method.lower()
if method == 'get' or method == 'delete':
if params:
url = self.build_url(url, params)
post_data = None
elif method == 'post':
post_data = self.encode(params)
else:
raise error.APIConnectionError(
'Unrecognized HTTP method %r. This may indicate a bug in the '
'Stripe bindings. Please contact support@stripe.com for '
'assistance.' % (method,))
client = impl(verify_ssl_certs=self._client._verify_ssl_certs)
return client.request(method, url, headers, post_data)
def _deprecated_handle_error(self, impl, *args):
warnings.warn(
'The handle_*_error functions of APIRequestor are deprecated and '
'will be removed in version 2.0. Please use the client classes '
' in `stripe.http_client` instead',
DeprecationWarning, stacklevel=2)
client = impl(verify_ssl_certs=self._client._verify_ssl_certs)
return client._handle_request_error(*args)
def requests_request(self, meth, abs_url, headers, params):
from stripe.http_client import RequestsClient
return self._deprecated_request(RequestsClient, meth, abs_url,
headers, params)
def handle_requests_error(self, err):
from stripe.http_client import RequestsClient
return self._deprecated_handle_error(RequestsClient, err)
def pycurl_request(self, meth, abs_url, headers, params):
from stripe.http_client import PycurlClient
return self._deprecated_request(PycurlClient, meth, abs_url,
headers, params)
def handle_pycurl_error(self, err):
from stripe.http_client import PycurlClient
return self._deprecated_handle_error(PycurlClient, err)
def urlfetch_request(self, meth, abs_url, headers, params):
from stripe.http_client import UrlFetchClient
return self._deprecated_request(UrlFetchClient, meth, abs_url,
headers, params)
def handle_urlfetch_error(self, err, abs_url):
from stripe.http_client import UrlFetchClient
return self._deprecated_handle_error(UrlFetchClient, err, abs_url)
def urllib2_request(self, meth, abs_url, headers, params):
from stripe.http_client import Urllib2Client
return self._deprecated_request(Urllib2Client, meth, abs_url,
headers, params)
def handle_urllib2_error(self, err, abs_url):
from stripe.http_client import Urllib2Client
return self._deprecated_handle_error(Urllib2Client, err)
|
etalab/weckan
|
refs/heads/master
|
weckan/controllers/dataset.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import logging
import math
import requests
from biryani1 import strings
from datetime import datetime
from urllib import urlencode
from pkg_resources import resource_stream
from sqlalchemy.sql import func, or_
from ckanext.etalab.plugins import year_or_month_or_day_re
from ckanext.youckan.models import DatasetAlert, AlertType
from weckan import templates, urls, wsgihelpers, conf, contexts, auth, queries, territories, forms
from weckan.model import Activity, meta, Package, Group, UserFollowingDataset, UserFollowingGroup, Member, repo
from weckan.model import PACKAGE_NAME_MAX_LENGTH
from weckan.tools import ckan_api, parse_page
_ = lambda s: s
DB = meta.Session
log = logging.getLogger(__name__)
SEARCH_PAGE_SIZE = 20
SEARCH_TIMEOUT = 2
NB_DATASETS = 12
QA_CEILS = {
'warning': 10,
'error': 10,
'criticals': 1,
}
EXCLUDED_PATTERNS = (
'activity',
'delete',
'edit',
'follow',
'new',
'new_metadata',
'new_resource',
)
SPECIAL_EXTRAS = (
'temporal_coverage_from',
'temporal_coverage_to',
'territorial_coverage',
'territorial_coverage_granularity',
'frequency',
)
LICENSES = json.load(resource_stream('ckanext.etalab', 'public/licenses.json'))
ALERT_TYPE_NAMES = {
AlertType.ILLEGAL: _('Illegal content'),
AlertType.TENDENCIOUS: _('Tendencious content'),
AlertType.OTHER: _('Other'),
}
class LicenseField(forms.SelectField):
@property
def choices(self):
return [(license['id'], license['title']) for license in LICENSES if license['status'] == 'active']
@choices.setter
def choices(self, value):
pass
class GroupsField(forms.SelectMultipleField):
@property
def choices(self):
groups = DB.query(Group).filter(Group.state == 'active', ~Group.is_organization)
return [(group.id, group.display_name) for group in groups]
@choices.setter
def choices(self, value):
pass
class YMDField(forms.StringField):
'''
A field accepting a date as a day, a month or a year.
'''
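# Illustrative round trip (hypothetical value): process_formdata(['25/12/2013']) stores
# '2013-12-25', and _value() renders it back as '25/12/2013'.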
def _value(self):
if self.data:
return '/'.join(reversed(self.data.split('-')))
else:
return ''
def process_formdata(self, valuelist):
if valuelist:
self.data = '-'.join(reversed(valuelist[0].split('/')))
else:
self.data = None
def year_or_month_or_day(form, field):
if not year_or_month_or_day_re.match(field.data):
raise forms.validators.ValidationError(field._('Should be either a year, a month or a day'))
class PrivateField(forms.BooleanField):
def is_visible(self, user):
return len(user.organizations) > 0
class DatasetForm(forms.Form):
title = forms.StringField(_('Title'), [forms.validators.required()])
notes = forms.MarkdownField(_('Description'), [forms.validators.required()])
owner_org = forms.PublishAsField(_('Publish as'))
tags = forms.TagField(_('Tags'),
description=_('Tags only contain alphanumeric characters or symbols: -_.'))
groups = GroupsField(_('Topics'))
temporal_coverage_from = YMDField(_('Temporal coverage start'),
validators=[forms.validators.Optional(), year_or_month_or_day],
description=_('A year (YYYY), a month (MM/YYYY) or a day (DD/MM/YYYY)'))
temporal_coverage_to = YMDField(_('Temporal coverage end'),
validators=[forms.validators.Optional(), year_or_month_or_day],
description=_('A year (YYYY), a month (MM/YYYY) or a day (DD/MM/YYYY)'))
territorial_coverage = forms.TerritoryField(_('Territorial coverage'))
territorial_coverage_granularity = forms.SelectField(_('Territorial coverage granularity'),
# description=_('Dataset update periodicity'),
default=None,
choices=(
(None, _('None')),
('poi', _('POI')),
('iris', _('Iris (Insee districts)')),
('commune', _('Town')),
('canton', _('Canton')),
('epci', _('Intermunicipal (EPCI)')),
('department', _('County')),
('region', _('Region')),
('pays', _('Country')),
('other', _('Other')),
)
)
frequency = forms.SelectField(_('Frequency'),
description=_('Dataset update periodicity'),
default=None,
choices=(
(None, _('None')),
('ponctuelle', _('Punctual')),
('temps réel', _('Real time')),
('quotidienne', _('Daily')),
('hebdomadaire', _('Weekly')),
('bimensuelle', _('Fortnightly')),
('mensuelle', _('Monthly')),
('bimestrielle', _('Bimonthly')),
('trimestrielle', _('Quarterly')),
('semestrielle', _('Biannual')),
('annuelle', _('Annual')),
('triennale', _('Triennial')),
('quinquennale', _('Quinquennial')),
)
)
license_id = LicenseField(_('License'), default='notspecified')
private = PrivateField(_('Private'), default=False, validators=[forms.Requires('owner_org')])
class DatasetExtrasForm(forms.Form):
key = forms.StringField(_('Key'), [forms.validators.required()])
value = forms.StringField(_('Value'), [forms.validators.required()])
old_key = forms.StringField(_('Old key'))
def build_territorial_coverage(dataset):
return {
'name': ', '.join(
name.strip().rsplit('/', 1)[-1].title()
for name in dataset.extras.get('territorial_coverage', '').split(',')
),
'granularity': dataset.extras.get('territorial_coverage_granularity', '').title() or None,
}
def build_temporal_coverage(dataset):
temporal_coverage = {
'from': dataset.extras.get('temporal_coverage_from', None),
'to': dataset.extras.get('temporal_coverage_to', None),
}
try:
temporal_coverage['from'] = datetime.strptime(temporal_coverage['from'], '%Y-%m-%d')
except:
pass
try:
temporal_coverage['to'] = datetime.strptime(temporal_coverage['to'], '%Y-%m-%d')
except:
pass
return temporal_coverage
def build_slug(title, previous=None):
base_slug = strings.slugify(title)[:PACKAGE_NAME_MAX_LENGTH]
exists_query = DB.query(Package.name)
slug_exists = lambda s: exists_query.filter(Package.name == s).count() > 0
if base_slug == previous or not slug_exists(base_slug):
return base_slug
idx = 0
while True:
suffix = '-{0}'.format(idx)
slug = ''.join([base_slug[:-len(suffix)], suffix])
if slug == previous or not slug_exists(slug):
return slug
idx += 1
def serialize(query):
'''Build datasets for display from a queryset'''
datasets = []
for dataset, organization in query:
datasets.append({
'name': dataset.name,
'title': dataset.title,
'display_name': dataset.display_name,
'notes': dataset.notes,
'organization': organization,
'temporal_coverage': build_temporal_coverage(dataset),
'territorial_coverage': build_territorial_coverage(dataset),
'periodicity': dataset.extras.get('frequency', None),
'original': queries.forked_from(dataset).first(),
'nb_reuses': len(dataset.related),
})
return datasets
def search(query, request, page=1, page_size=SEARCH_PAGE_SIZE, group=None, organization=None):
'''Perform a Dataset search given a ``query``'''
from ckan.lib import search
if request.cookies.get('territory-infos', '').count('|') == 1:
territory_key, _ = request.cookies.get('territory-infos').split('|')
territory = territories.fetch(*territory_key.split('/')) if territory_key else {}
else:
territory = {}
page = max(page, 1)
page_zero = page - 1
params = {
'bf': u'{}^2'.format(
dict(
ArrondissementOfCommuneOfFrance='weight_commune',
CommuneOfFrance='weight_commune',
Country='weight',
DepartmentOfFrance='weight_department',
OverseasCollectivityOfFrance='weight_department',
RegionOfFrance='weight_region',
).get(territory.get('kind'), 'weight'),
),
'defType': u'edismax',
'fq': '+dataset_type:dataset',
'q': query or '',
'qf': u'name title groups^0.5 notes^0.5 tags^0.5 text^0.25',
'rows': page_size,
'sort': 'score desc, metadata_modified desc',
'start': page_zero * page_size,
}
if group:
group_name = group.name if isinstance(group, Group) else group
params['fq'] = ' '.join([params['fq'], '+groups:{0}'.format(group_name)])
if organization:
org_name = organization.name if isinstance(organization, Group) else organization
params['fq'] = ' '.join([params['fq'], '+organization:{0}'.format(org_name)])
# Territory search if specified
ancestors_kind_code = territory.get('ancestors_kind_code')
if ancestors_kind_code:
kind_codes = [
'{}/{}'.format(ancestor_kind_code['kind'], ancestor_kind_code['code'])
for ancestor_kind_code in ancestors_kind_code
]
params['fq'] = '{} +covered_territories:({})'.format(params['fq'], ' OR '.join(kind_codes))
query = search.query_for(Package)
query.run(params)
if not query.results:
return 'datasets', {'results': [], 'total': 0}
datasets_query = queries.datasets()
datasets_query = datasets_query.filter(Package.name.in_(query.results))
datasets = serialize(datasets_query.all())
return 'datasets', {
'results': sorted(datasets, key=lambda d: query.results.index(d['name'])),
'total': query.count,
'page': page,
'page_size': page_size,
'total_pages': int(math.ceil(query.count / float(page_size))),
}
def get_page_url_pattern(request):
'''Get a formattable page url pattern from incoming request URL'''
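# Illustrative example (hypothetical request): for '/datasets?q=health&page=2' this returns
# '/datasets?q=health&page={page}'; with no other parameters it returns '/datasets?page={page}'.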
url_pattern_params = {}
for key, value in request.params.iteritems():
if key != 'page':
url_pattern_params[key] = unicode(value).encode('utf-8')
if url_pattern_params:
return '?'.join([request.path, urlencode(url_pattern_params)]) + '&page={page}'
else:
return '?'.join([request.path, 'page={page}'])
def get_quality(dataset_name):
'''Fetch the dataset quality scores from COW'''
url = '{0}/api/1/datasets/{1}/ranking'.format(conf['cow_url'], dataset_name)
try:
response = requests.get(url, timeout=SEARCH_TIMEOUT)
response.raise_for_status()
except requests.RequestException as request_exception:
log.warning('Unable to fetch quality scores for %s: %s', dataset_name, request_exception)
return None
data = response.json().get('value', {})
return data
@wsgihelpers.wsgify
def display(request):
user = auth.get_user_from_request(request)
dataset_name = request.urlvars.get('name')
query = DB.query(Package, Group, func.min(Activity.timestamp))
query = query.outerjoin(Group, Group.id == Package.owner_org)
query = query.outerjoin(Activity, Activity.object_id == Package.id)
query = query.filter(or_(
Package.name == dataset_name,
Package.id == dataset_name
))
query = query.group_by(Package, Group)
if not query.count():
return wsgihelpers.not_found(contexts.Ctx(request))
dataset, organization, timestamp = query.first()
periodicity = dataset.extras.get('frequency', None)
supplier_id = dataset.extras.get('supplier_id', None)
supplier = DB.query(Group).filter(Group.id == supplier_id).first() if supplier_id else None
return templates.render_site('dataset.html', request,
dataset=dataset,
publication_date=timestamp,
organization=organization,
is_following_org=UserFollowingGroup.is_following(user.id, organization.id) if organization and user else False,
supplier=supplier,
owner=queries.owner(dataset).first(),
nb_followers=UserFollowingDataset.follower_count(dataset.id),
is_following=UserFollowingDataset.is_following(user.id, dataset.id) if user else False,
territorial_coverage=build_territorial_coverage(dataset),
temporal_coverage=build_temporal_coverage(dataset),
periodicity=periodicity,
groups=dataset.get_groups('group'),
can_edit=auth.can_edit_dataset(user, dataset),
is_fork=queries.is_fork(dataset),
quality=get_quality(dataset.name),
ceils=QA_CEILS,
territory=territories.get_cookie(request),
bot_name=conf['bot_name'],
alerts=DatasetAlert.get_open_for(dataset),
alert_types=ALERT_TYPE_NAMES,
)
@wsgihelpers.wsgify
def search_more(request):
query = request.params.get('q', '')
page = parse_page(request)
_, results = search(query, request, page, SEARCH_PAGE_SIZE)
return templates.render_site('search-datasets.html', request,
search_query=query,
url_pattern=get_page_url_pattern(request),
datasets=results
)
@wsgihelpers.wsgify
def recent_datasets(request):
ctx = contexts.Ctx(request)
page = parse_page(request)
last_datasets = queries.last_datasets(False)
count = last_datasets.count()
end = (page * NB_DATASETS) + 1
start = end - NB_DATASETS
return templates.render_site('search-datasets.html', request,
title = ctx._('Recent datasets'),
url_pattern=get_page_url_pattern(request),
datasets={
'total': count,
'page': page,
'page_size': NB_DATASETS,
'total_pages': int(math.ceil(count / float(NB_DATASETS))),
'results': serialize(last_datasets[start:end])
}
)
@wsgihelpers.wsgify
def popular_datasets(request):
ctx = contexts.Ctx(request)
page = parse_page(request)
ident, results = search(None, request, page, SEARCH_PAGE_SIZE)
return templates.render_site('search-datasets.html', request,
title=ctx._('Popular datasets'),
url_pattern=get_page_url_pattern(request),
datasets=results
)
@wsgihelpers.wsgify
def autocomplete(request):
query = request.params.get('q', '')
num = int(request.params.get('num', NB_DATASETS))
_, results = search(query, request, 1, num)
context = contexts.Ctx(request)
headers = wsgihelpers.handle_cross_origin_resource_sharing(context)
data = [{
'name': dataset['name'],
'title': dataset['display_name'],
'image_url': (
(dataset['organization'].image_url if dataset['organization'] else None)
or templates.static('/img/placeholder_producer.png')
),
} for dataset in results['results']]
return wsgihelpers.respond_json(context, data, headers=headers)
def extras_from_form(form):
extras = {
'temporal_coverage_from': form.temporal_coverage_from.data,
'temporal_coverage_to': form.temporal_coverage_to.data,
'territorial_coverage': ','.join(form.territorial_coverage.data),
'territorial_coverage_granularity': form.territorial_coverage_granularity.data,
'frequency': form.frequency.data,
}
return [{'key': key, 'value': value} for key, value in extras.items() if value]
def tags_from_form(form):
return [{'name': tag} for tag in form.tags.data if tag]
def fix_groups(dataset, group_ids):
repo.new_revision()
groups = dataset.get_groups('group')
for group_id in group_ids:
group = Group.get(group_id)
if group not in groups:
member = Member(group=group, table_id=dataset.id, table_name='package')
DB.add(member)
for group in groups:
if group.id in group_ids:
continue
member = DB.query(Member).filter(
Member.group == group,
Member.table_name == 'package',
Member.table_id == dataset.id,
Member.state == 'active'
).first()
if member:
member.state = 'deleted'
DB.add(member)
DB.commit()
@wsgihelpers.wsgify
def create(request):
context = contexts.Ctx(request)
lang = request.urlvars.get('lang', templates.DEFAULT_LANG)
user = auth.get_user_from_request(request)
if not user:
return wsgihelpers.unauthorized(context) # redirect to login/register ?
form = DatasetForm(request.POST, i18n=context.translator)
if request.method == 'POST' and form.validate():
name = build_slug(form.title.data)
ckan_api('package_create', user, {
'name': name,
'title': form.title.data,
'notes': form.notes.data,
'owner_org': form.owner_org.data,
'private': form.private.data,
'license_id': form.license_id.data,
'extras': extras_from_form(form),
'tags': tags_from_form(form),
})
dataset = Package.by_name(name)
fix_groups(dataset, form.groups.data)
redirect_url = urls.get_url(lang, 'dataset/new_resource', name)
return wsgihelpers.redirect(context, location=redirect_url)
back_url = urls.get_url(lang)
return templates.render_site('forms/dataset-create-form.html', request, form=form, back_url=back_url)
@wsgihelpers.wsgify
def edit(request):
context = contexts.Ctx(request)
lang = request.urlvars.get('lang', templates.DEFAULT_LANG)
user = auth.get_user_from_request(request)
if not user:
return wsgihelpers.unauthorized(context) # redirect to login/register ?
dataset_name = request.urlvars.get('name')
dataset = Package.get(dataset_name)
if not dataset:
return wsgihelpers.not_found(context)
form = DatasetForm(request.POST, dataset,
frequency=dataset.extras.get('frequency'),
territorial_coverage=dataset.extras.get('territorial_coverage', '').split(','),
territorial_coverage_granularity=dataset.extras.get('territorial_coverage_granularity'),
temporal_coverage_from=dataset.extras.get('temporal_coverage_from'),
temporal_coverage_to=dataset.extras.get('temporal_coverage_to'),
tags=[tag.name for tag in dataset.get_tags()],
groups=[group.id for group in dataset.get_groups('group')],
i18n=context.translator
)
if request.method == 'POST' and form.validate():
name = build_slug(form.title.data, dataset.name)
extras = [{'key': key, 'value': value} for key, value in dataset.extras.items() if key not in SPECIAL_EXTRAS]
extras.extend(extras_from_form(form))
ckan_api('package_update', user, {
'id': dataset.id,
'name': name,
'title': form.title.data,
'notes': form.notes.data,
'owner_org': form.owner_org.data,
'private': form.private.data,
'license_id': form.license_id.data,
'extras': extras,
'tags': tags_from_form(form),
'resources': [{
'id': resource.id,
'url': resource.url,
'description': resource.description,
'format': resource.format,
'name': resource.name,
'resource_type': resource.resource_type,
} for resource in dataset.active_resources
],
})
dataset = Package.by_name(name)
fix_groups(dataset, form.groups.data)
redirect_url = urls.get_url(lang, 'dataset', name)
return wsgihelpers.redirect(context, location=redirect_url)
delete_url = urls.get_url(lang, 'dataset/delete', dataset.name)
back_url = urls.get_url(lang, 'dataset', dataset.name)
return templates.render_site('forms/dataset-edit-form.html', request,
dataset=dataset, form=form, back_url=back_url, delete_url=delete_url)
@wsgihelpers.wsgify
def extras(request):
context = contexts.Ctx(request)
lang = request.urlvars.get('lang', templates.DEFAULT_LANG)
user = auth.get_user_from_request(request)
if not user:
return wsgihelpers.unauthorized(context) # redirect to login/register ?
dataset_name = request.urlvars.get('name')
dataset = Package.get(dataset_name)
if not dataset:
return wsgihelpers.not_found(context)
if request.method == 'POST':
headers = wsgihelpers.handle_cross_origin_resource_sharing(context)
form = DatasetExtrasForm(request.POST)
if form.validate():
extras = [
{'key': key, 'value': value}
for key, value in dataset.extras.items()
if not key == (form.old_key.data or form.key.data)
]
extras.append({'key': form.key.data, 'value': form.value.data})
data = ckan_api('package_update', user, {
'id': dataset.id,
'name': dataset.name,
'title': dataset.title,
'notes': dataset.notes,
'owner_org': dataset.owner_org,
'private': dataset.private,
'license_id': dataset.license_id,
'extras': extras,
'tags': [{'name': package_tag.tag.name} for package_tag in dataset.package_tag_all],
'resources': [{
'id': resource.id,
'url': resource.url,
'description': resource.description,
'format': resource.format,
'name': resource.name,
'resource_type': resource.resource_type,
} for resource in dataset.active_resources
],
})
if data['success']:
return wsgihelpers.respond_json(context, {'key': form.key.data, 'value': form.value.data}, headers=headers, code=200)
return wsgihelpers.respond_json(context, {}, headers=headers, code=400)
redirect_url = urls.get_url(lang, 'dataset', dataset.name)
return wsgihelpers.redirect(context, location=redirect_url)
extras = [(key, value) for key, value in dataset.extras.items() if key not in SPECIAL_EXTRAS]
back_url = urls.get_url(lang, 'dataset', dataset.name)
return templates.render_site('forms/dataset-extras-form.html', request, dataset=dataset, extras=extras, back_url=back_url)
@wsgihelpers.wsgify
def delete_extra(request):
context = contexts.Ctx(request)
headers = wsgihelpers.handle_cross_origin_resource_sharing(context)
user = auth.get_user_from_request(request)
if not user:
return wsgihelpers.unauthorized(context) # redirect to login/register ?
dataset_name = request.urlvars.get('name')
dataset = Package.get(dataset_name)
if not dataset:
return wsgihelpers.not_found(context)
extra_key = request.urlvars.get('key', '').strip().decode('utf8')
    if extra_key not in dataset.extras:
return wsgihelpers.not_found(context)
    extras = [{'key': key, 'value': value} for key, value in dataset.extras.items() if key != extra_key]
extras.append({'key': extra_key, 'value': dataset.extras.get(extra_key), 'deleted': True})
data = ckan_api('package_update', user, {
'id': dataset.id,
'name': dataset.name,
'title': dataset.title,
'notes': dataset.notes,
'owner_org': dataset.owner_org,
'private': dataset.private,
'license_id': dataset.license_id,
'extras': extras,
'tags': [{'name': package_tag.tag.name} for package_tag in dataset.package_tag_all],
'resources': [{
'id': resource.id,
'url': resource.url,
'description': resource.description,
'format': resource.format,
'name': resource.name,
'resource_type': resource.resource_type,
} for resource in dataset.active_resources
],
})
if data['success']:
return wsgihelpers.respond_json(context, {}, headers=headers, code=200)
return wsgihelpers.respond_json(context, {}, headers=headers, code=400)
routes = (
('GET', r'^(/(?P<lang>\w{2}))?/dataset/?$', search_more),
('GET', r'^(/(?P<lang>\w{2}))?/datasets?/autocomplete/?$', autocomplete),
('GET', r'^(/(?P<lang>\w{2}))?/datasets?/popular/?$', popular_datasets),
('GET', r'^(/(?P<lang>\w{2}))?/datasets?/recent/?$', recent_datasets),
(('GET','POST'), r'^(/(?P<lang>\w{2}))?/dataset/new/?$', create),
(('GET','POST'), r'^(/(?P<lang>\w{2}))?/dataset/edit/(?P<name>[\w_-]+)/?$', edit),
(('GET','POST'), r'^(/(?P<lang>\w{2}))?/dataset/extras/(?P<name>[\w_-]+)/?$', extras),
('DELETE', r'^(/(?P<lang>\w{2}))?/dataset/extras/(?P<name>[\w_-]+)/(?P<key>.+)/?$', delete_extra),
('GET', r'^(/(?P<lang>\w{{2}}))?/dataset/(?!{0}(/|$))(?P<name>[\w_-]+)/?$'.format('|'.join(EXCLUDED_PATTERNS)), display),
)
|
fungos/gemuo
|
refs/heads/master
|
src/lumber.py
|
1
|
#!/usr/bin/python
#
# GemUO
#
# (c) 2005-2012 Max Kellermann <max@duempel.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
from twisted.python import log
from twisted.internet import reactor
from uo.skills import *
from uo.entity import *
import gemuo.config
from gemuo.simple import simple_run, simple_later
from gemuo.data import TileCache
from gemuo.map import BridgeMap, WorldMap, CacheMap
from gemuo.entity import Position
from gemuo.locations import nearest_bank, is_rel_por
from gemuo.exhaust import ExhaustDatabase
from gemuo.resource import find_statics_resource_block, reachable_resource
from gemuo.defer import deferred_find_player_item
from gemuo.target import Target
from gemuo.engine import Engine
from gemuo.engine.messages import PrintMessages
from gemuo.engine.guards import Guards
from gemuo.engine.equip import Equip
from gemuo.engine.lumber import Lumber
from gemuo.engine.walk import PathFindWalkRectangle, PathFindWalkAny
from gemuo.engine.watch import Watch
from gemuo.engine.items import OpenBank
from gemuo.engine.restock import Restock
from gemuo.engine.death import AutoResurrect
from gemuo.engine.gm import DetectGameMaster
from gemuo.engine.relpor import RelPorCaptcha
from gemuo.engine.training import SkillTraining
from gemuo.engine.boards_relpor import MakeBoardsRelpor
BANK = None
def find_tree(map, exhaust_db, position):
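    # Search for trees around a point weighted 7:1 between the player's
    # position and the bank coordinates (BANK[4], BANK[5]), so harvesting
    # stays biased towards the bank.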
center = Position((position.x * 7 + BANK[4]) / 8,
(position.y * 7 + BANK[5]) / 8)
return find_statics_resource_block(map, center, TREES, exhaust_db)
def passable_positions_around(map, x, y, z, distance):
positions = []
for ix in range(x - distance, x + distance + 1):
for iy in range(y - distance, y + distance + 1):
if map.is_passable(ix, iy, z):
positions.append((ix, iy))
return positions
class AutoLumber(Engine):
def __init__(self, client, map, exhaust_db):
Engine.__init__(self, client)
self.world = client.world
self.player = client.world.player
self.map = map
self.exhaust_db = exhaust_db
if self.player.mass_remaining() < 50:
self._success()
return
self._walk()
def _lumbered(self, result):
if self.player.is_dead() or self.player.mass_remaining() < 50 or self.world.combatant is not None:
            # dead, too heavy or under attack: finish this engine
self._success()
return
reactor.callLater(0.5, self._make_boards)
def _make_boards(self):
if is_rel_por(self.world):
MakeBoardsRelpor(self._client)
reactor.callLater(0.5, self._walk)
def _equipped(self, result):
tree = reachable_resource(self.player.position, self.trees, 2)
if tree is None:
print "No tree??"
reactor.callLater(0.5, self._walk)
return
tree = Target(x=tree.x, y=tree.y, z=tree.z, graphic=tree.item_id)
d = Lumber(self._client, self.map, tree, self.exhaust_db).deferred
d.addCallbacks(self._lumbered, self._success)
def _walked(self, result):
# make sure an axe is equipped
d = Equip(self._client, lambda x: x.item_id in ITEMS_AXE).deferred
d.addCallbacks(self._equipped, self._failure)
def _walk_failed(self, fail):
# walking to this tree failed for some reason; mark this 8x8
# as "exhausted", so we won't try it again for a while
tree = self.trees[0]
self.exhaust_db.set_exhausted(tree.x/8, tree.y/8)
self._walk()
def _walk(self):
position = self.player.position
if position is None:
self._failure()
return
self.trees = find_tree(self.map, self.exhaust_db, position)
if self.trees is None:
self._failure()
return
positions = set()
for resource in self.trees:
for p in passable_positions_around(self.map, resource.x, resource.y, resource.z, 2):
positions.add(Position(p[0], p[1]))
self.map.flush_cache()
d = PathFindWalkAny(self._client, self.map, positions).deferred
d.addCallbacks(self._walked, self._walk_failed)
class Bank(Engine):
def __init__(self, client, map):
Engine.__init__(self, client)
self._map = map
self.tries = 5
print "Bank"
self._walk()
def _walk(self):
self._map.flush_cache()
d = PathFindWalkRectangle(self._client, self._map, BANK).deferred
d.addCallbacks(self._walked, self._walk_failed)
def _walk_failed(self, fail):
if self._client.is_dead():
self._success()
return
self.tries -= 1
if self.tries > 0:
self._walk()
else:
self._failure(fail)
def _walked(self, result):
d = OpenBank(self._client).deferred
d.addCallbacks(self._opened, self._walk_failed)
def _out_filter(self, x):
return x.item_id not in ITEMS_AXE
def _opened(self, bank):
d = Restock(self._client, bank, func=self._out_filter,
counts=((ITEMS_AXE, 1),)).deferred
d.addCallbacks(self._restocked, self._failure)
def _restocked(self, result):
self._success()
class AutoHarvest(Engine):
def __init__(self, client, map, exhaust_db):
Engine.__init__(self, client)
self.world = client.world
self.player = client.world.player
self.map = map
self.exhaust_db = exhaust_db
self._check()
def _restocked(self, result):
if self.player.is_dead():
log.msg("Waiting for resurrection")
reactor.callLater(10, self._check)
return
if self.world.combatant is not None:
log.msg("Waiting until combat is over")
reactor.callLater(5, self._restocked, result)
return
self._begin_lumber()
def _restock(self):
d = Bank(self._client, self.map).deferred
d.addCallbacks(self._restocked, self._failure)
def _found_axe(self, axe):
self._begin_lumber()
def _no_axe(self, fail):
self._restock()
def _check(self):
if self.player.is_dead():
log.msg("Waiting for resurrection")
reactor.callLater(10, self._check)
elif self.player.mass_remaining() < 50 or self.world.combatant is not None:
log.msg("Flee to safe place until combat is over")
self._restock()
else:
d = deferred_find_player_item(self._client, lambda x: x.item_id in ITEMS_AXE)
d.addCallbacks(self._found_axe, self._no_axe)
def _lumbered(self, result):
self._check()
def _begin_lumber(self):
d = AutoLumber(self._client, self.map, self.exhaust_db).deferred
d.addCallbacks(self._lumbered, self._failure)
def begin(client):
tc = TileCache(gemuo.config.require_data_path())
m = CacheMap(WorldMap(BridgeMap(tc.get_map(0)), client.world))
exhaust_db = ExhaustDatabase('/tmp/trees.db')
global BANK
BANK = nearest_bank(client.world, client.world.player.position)
AutoResurrect(client, m)
#return Bank(client, m)
return AutoHarvest(client, m, exhaust_db)
def run(client):
Watch(client)
Guards(client)
DetectGameMaster(client)
RelPorCaptcha(client)
PrintMessages(client)
SkillTraining(client, (SKILL_HIDING,), round_robin=False)
return simple_later(1, begin, client)
simple_run(run)
|
zooba/PTVS
|
refs/heads/master
|
Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/cryptography/hazmat/primitives/kdf/scrypt.py
|
13
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import sys
from cryptography import utils
from cryptography.exceptions import (
AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import ScryptBackend
from cryptography.hazmat.primitives import constant_time
from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
# This is used by the scrypt tests to skip tests that require more memory
# than the MEM_LIMIT
_MEM_LIMIT = sys.maxsize // 2
@utils.register_interface(KeyDerivationFunction)
class Scrypt(object):
def __init__(self, salt, length, n, r, p, backend):
if not isinstance(backend, ScryptBackend):
raise UnsupportedAlgorithm(
"Backend object does not implement ScryptBackend.",
_Reasons.BACKEND_MISSING_INTERFACE
)
self._length = length
utils._check_bytes("salt", salt)
if n < 2 or (n & (n - 1)) != 0:
raise ValueError("n must be greater than 1 and be a power of 2.")
if r < 1:
raise ValueError("r must be greater than or equal to 1.")
if p < 1:
raise ValueError("p must be greater than or equal to 1.")
self._used = False
self._salt = salt
self._n = n
self._r = r
self._p = p
self._backend = backend
def derive(self, key_material):
if self._used:
raise AlreadyFinalized("Scrypt instances can only be used once.")
self._used = True
utils._check_byteslike("key_material", key_material)
return self._backend.derive_scrypt(
key_material, self._salt, self._length, self._n, self._r, self._p
)
def verify(self, key_material, expected_key):
derived_key = self.derive(key_material)
if not constant_time.bytes_eq(derived_key, expected_key):
raise InvalidKey("Keys do not match.")
|
pdellaert/ansible
|
refs/heads/devel
|
lib/ansible/modules/database/mongodb/mongodb_parameter.py
|
45
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Loic Blot <loic.blot@unix-experience.fr>
# Sponsored by Infopro Digital. http://www.infopro-digital.com/
# Sponsored by E.T.A.I. http://www.etai.fr/
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: mongodb_parameter
short_description: Change an administrative parameter on a MongoDB server.
description:
- Change an administrative parameter on a MongoDB server.
version_added: "2.1"
options:
login_user:
description:
- The username used to authenticate with
login_password:
description:
- The password used to authenticate with
login_host:
description:
- The host running the database
default: localhost
login_port:
description:
- The port to connect to
default: 27017
login_database:
description:
- The database where login credentials are stored
replica_set:
description:
- Replica set to connect to (automatically connects to primary for writes)
database:
description:
- The name of the database to add/remove the user from
required: true
ssl:
description:
- Whether to use an SSL connection when connecting to the database
type: bool
default: 'no'
param:
description:
- MongoDB administrative parameter to modify
required: true
value:
description:
- MongoDB administrative parameter value to set
required: true
param_type:
description:
- Define the parameter value (str, int)
default: str
notes:
- Requires the pymongo Python package on the remote host, version 2.4.2+. This
can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html
requirements: [ "pymongo" ]
author: "Loic Blot (@nerzhul)"
'''
EXAMPLES = '''
# Set MongoDB syncdelay to 60 (this is an int)
- mongodb_parameter:
param: syncdelay
value: 60
param_type: int
'''
RETURN = '''
before:
description: value before modification
returned: success
type: str
after:
description: value after modification
returned: success
type: str
'''
import os
import traceback
try:
from pymongo.errors import ConnectionFailure
from pymongo.errors import OperationFailure
from pymongo import version as PyMongoVersion
from pymongo import MongoClient
except ImportError:
try: # for older PyMongo 2.2
from pymongo import Connection as MongoClient
except ImportError:
pymongo_found = False
else:
pymongo_found = True
else:
pymongo_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six.moves import configparser
from ansible.module_utils._text import to_native
# =========================================
# MongoDB module specific support methods.
#
def load_mongocnf():
config = configparser.RawConfigParser()
mongocnf = os.path.expanduser('~/.mongodb.cnf')
try:
config.readfp(open(mongocnf))
creds = dict(
user=config.get('client', 'user'),
password=config.get('client', 'pass')
)
except (configparser.NoOptionError, IOError):
return False
return creds
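# Illustrative sketch of the credentials file load_mongocnf() expects at
# ~/.mongodb.cnf (values are placeholders):
#
#   [client]
#   user = admin
#   pass = secret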
# =========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None),
login_password=dict(default=None, no_log=True),
login_host=dict(default='localhost'),
login_port=dict(default=27017, type='int'),
login_database=dict(default=None),
replica_set=dict(default=None),
param=dict(default=None, required=True),
value=dict(default=None, required=True),
param_type=dict(default="str", choices=['str', 'int']),
ssl=dict(default=False, type='bool'),
)
)
if not pymongo_found:
module.fail_json(msg=missing_required_lib('pymongo'))
login_user = module.params['login_user']
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
login_database = module.params['login_database']
replica_set = module.params['replica_set']
ssl = module.params['ssl']
param = module.params['param']
param_type = module.params['param_type']
value = module.params['value']
# Verify parameter is coherent with specified type
try:
if param_type == 'int':
value = int(value)
except ValueError:
module.fail_json(msg="value '%s' is not %s" % (value, param_type))
try:
if replica_set:
client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl)
else:
client = MongoClient(login_host, int(login_port), ssl=ssl)
if login_user is None and login_password is None:
mongocnf_creds = load_mongocnf()
if mongocnf_creds is not False:
login_user = mongocnf_creds['user']
login_password = mongocnf_creds['password']
elif login_password is None or login_user is None:
module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
if login_user is not None and login_password is not None:
client.admin.authenticate(login_user, login_password, source=login_database)
except ConnectionFailure as e:
module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc())
db = client.admin
try:
after_value = db.command("setParameter", **{param: value})
except OperationFailure as e:
module.fail_json(msg="unable to change parameter: %s" % to_native(e), exception=traceback.format_exc())
if "was" not in after_value:
module.exit_json(changed=True, msg="Unable to determine old value, assume it changed.")
else:
module.exit_json(changed=(value != after_value["was"]), before=after_value["was"],
after=value)
if __name__ == '__main__':
main()
|
mjg59/python-broadlink
|
refs/heads/master
|
broadlink/protocol.py
|
1
|
import datetime as dt
import time
class Datetime:
"""Helps to pack and unpack datetime objects for the Broadlink protocol."""
@staticmethod
def pack(datetime: dt.datetime) -> bytes:
"""Pack the timestamp to be sent over the Broadlink protocol."""
data = bytearray(12)
utcoffset = int(datetime.utcoffset().total_seconds() / 3600)
data[:0x04] = utcoffset.to_bytes(4, "little", signed=True)
data[0x04:0x06] = datetime.year.to_bytes(2, "little")
data[0x06] = datetime.minute
data[0x07] = datetime.hour
data[0x08] = int(datetime.strftime('%y'))
data[0x09] = datetime.isoweekday()
data[0x0A] = datetime.day
data[0x0B] = datetime.month
return data
@staticmethod
def unpack(data: bytes) -> dt.datetime:
"""Unpack a timestamp received over the Broadlink protocol."""
utcoffset = int.from_bytes(data[0x00:0x04], "little", signed=True)
year = int.from_bytes(data[0x04:0x06], "little")
minute = data[0x06]
hour = data[0x07]
subyear = data[0x08]
isoweekday = data[0x09]
day = data[0x0A]
month = data[0x0B]
tz_info = dt.timezone(dt.timedelta(hours=utcoffset))
datetime = dt.datetime(year, month, day, hour, minute, 0, 0, tz_info)
if datetime.isoweekday() != isoweekday:
raise ValueError("isoweekday does not match")
if int(datetime.strftime('%y')) != subyear:
raise ValueError("subyear does not match")
return datetime
@staticmethod
def now() -> dt.datetime:
"""Return the current date and time with timezone info."""
tz_info = dt.timezone(dt.timedelta(seconds=-time.timezone))
return dt.datetime.now(tz_info)
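# Illustrative round-trip sketch (not part of the library): pack the current
# timestamp and unpack it again; the result is the same moment truncated to
# whole minutes.
if __name__ == "__main__":
    packed = Datetime.pack(Datetime.now())
    print(Datetime.unpack(packed))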
|
gmacon/mongoengine
|
refs/heads/master
|
benchmark.py
|
28
|
#!/usr/bin/env python
import timeit
def cprofile_main():
from pymongo import Connection
connection = Connection()
connection.drop_database('timeit_test')
connection.disconnect()
from mongoengine import Document, DictField, connect
connect("timeit_test")
class Noddy(Document):
fields = DictField()
for i in range(1):
noddy = Noddy()
for j in range(20):
noddy.fields["key" + str(j)] = "value " + str(j)
noddy.save()
def main():
"""
0.4 Performance Figures ...
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - Pymongo
3.86744189262
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine
6.23374891281
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine, safe=False, validate=False
5.33027005196
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine, safe=False, validate=False, cascade=False
pass - No Cascade
0.5.X
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - Pymongo
3.89597702026
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine
21.7735359669
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine, safe=False, validate=False
19.8670389652
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine, safe=False, validate=False, cascade=False
pass - No Cascade
0.6.X
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - Pymongo
3.81559205055
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine
10.0446798801
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine, safe=False, validate=False
9.51354718208
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine, safe=False, validate=False, cascade=False
9.02567505836
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine, force=True
8.44933390617
0.7.X
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - Pymongo
3.78801012039
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine
9.73050498962
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine, safe=False, validate=False
8.33456707001
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine, safe=False, validate=False, cascade=False
8.37778115273
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine, force=True
8.36906409264
0.8.X
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - Pymongo
3.69964408875
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - Pymongo write_concern={"w": 0}
3.5526599884
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine
7.00959801674
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries without continual assign - MongoEngine
5.60943293571
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine - write_concern={"w": 0}, cascade=True
6.715102911
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine, write_concern={"w": 0}, validate=False, cascade=True
5.50644683838
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine, write_concern={"w": 0}, validate=False
4.69851183891
----------------------------------------------------------------------------------------------------
Creating 10000 dictionaries - MongoEngine, force_insert=True, write_concern={"w": 0}, validate=False
4.68946313858
----------------------------------------------------------------------------------------------------
"""
print("Benchmarking...")
setup = """
from pymongo import MongoClient
connection = MongoClient()
connection.drop_database('timeit_test')
"""
stmt = """
from pymongo import MongoClient
connection = MongoClient()
db = connection.timeit_test
noddy = db.noddy
for i in range(10000):
example = {'fields': {}}
for j in range(20):
example['fields']["key"+str(j)] = "value "+str(j)
noddy.save(example)
myNoddys = noddy.find()
[n for n in myNoddys] # iterate
"""
print("-" * 100)
print("""Creating 10000 dictionaries - Pymongo""")
t = timeit.Timer(stmt=stmt, setup=setup)
print(t.timeit(1))
stmt = """
from pymongo import MongoClient
connection = MongoClient()
db = connection.timeit_test
noddy = db.noddy
for i in range(10000):
example = {'fields': {}}
for j in range(20):
example['fields']["key"+str(j)] = "value "+str(j)
noddy.save(example, write_concern={"w": 0})
myNoddys = noddy.find()
[n for n in myNoddys] # iterate
"""
print("-" * 100)
print("""Creating 10000 dictionaries - Pymongo write_concern={"w": 0}""")
t = timeit.Timer(stmt=stmt, setup=setup)
print(t.timeit(1))
setup = """
from pymongo import MongoClient
connection = MongoClient()
connection.drop_database('timeit_test')
connection.disconnect()
from mongoengine import Document, DictField, connect
connect("timeit_test")
class Noddy(Document):
fields = DictField()
"""
stmt = """
for i in range(10000):
noddy = Noddy()
for j in range(20):
noddy.fields["key"+str(j)] = "value "+str(j)
noddy.save()
myNoddys = Noddy.objects()
[n for n in myNoddys] # iterate
"""
print("-" * 100)
print("""Creating 10000 dictionaries - MongoEngine""")
t = timeit.Timer(stmt=stmt, setup=setup)
print(t.timeit(1))
stmt = """
for i in range(10000):
noddy = Noddy()
fields = {}
for j in range(20):
fields["key"+str(j)] = "value "+str(j)
noddy.fields = fields
noddy.save()
myNoddys = Noddy.objects()
[n for n in myNoddys] # iterate
"""
print("-" * 100)
print("""Creating 10000 dictionaries without continual assign - MongoEngine""")
t = timeit.Timer(stmt=stmt, setup=setup)
print(t.timeit(1))
stmt = """
for i in range(10000):
noddy = Noddy()
for j in range(20):
noddy.fields["key"+str(j)] = "value "+str(j)
noddy.save(write_concern={"w": 0}, cascade=True)
myNoddys = Noddy.objects()
[n for n in myNoddys] # iterate
"""
print("-" * 100)
print("""Creating 10000 dictionaries - MongoEngine - write_concern={"w": 0}, cascade = True""")
t = timeit.Timer(stmt=stmt, setup=setup)
print(t.timeit(1))
stmt = """
for i in range(10000):
noddy = Noddy()
for j in range(20):
noddy.fields["key"+str(j)] = "value "+str(j)
noddy.save(write_concern={"w": 0}, validate=False, cascade=True)
myNoddys = Noddy.objects()
[n for n in myNoddys] # iterate
"""
print("-" * 100)
print("""Creating 10000 dictionaries - MongoEngine, write_concern={"w": 0}, validate=False, cascade=True""")
t = timeit.Timer(stmt=stmt, setup=setup)
print(t.timeit(1))
stmt = """
for i in range(10000):
noddy = Noddy()
for j in range(20):
noddy.fields["key"+str(j)] = "value "+str(j)
noddy.save(validate=False, write_concern={"w": 0})
myNoddys = Noddy.objects()
[n for n in myNoddys] # iterate
"""
print("-" * 100)
print("""Creating 10000 dictionaries - MongoEngine, write_concern={"w": 0}, validate=False""")
t = timeit.Timer(stmt=stmt, setup=setup)
print(t.timeit(1))
stmt = """
for i in range(10000):
noddy = Noddy()
for j in range(20):
noddy.fields["key"+str(j)] = "value "+str(j)
noddy.save(force_insert=True, write_concern={"w": 0}, validate=False)
myNoddys = Noddy.objects()
[n for n in myNoddys] # iterate
"""
print("-" * 100)
print("""Creating 10000 dictionaries - MongoEngine, force_insert=True, write_concern={"w": 0}, validate=False""")
t = timeit.Timer(stmt=stmt, setup=setup)
print(t.timeit(1))
if __name__ == "__main__":
main()
|
dlopes-samba/dlopes-maps-sambatech
|
refs/heads/master
|
django/test/__init__.py
|
247
|
"""
Django Unit Test and Doctest framework.
"""
from django.test.client import Client, RequestFactory
from django.test.testcases import TestCase, TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
|
bonitadecker77/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/quopri.py
|
57
|
#! /usr/bin/env python3
"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
# (Dec 1991 version).
__all__ = ["encode", "decode", "encodestring", "decodestring"]
ESCAPE = b'='
MAXLINESIZE = 76
HEX = b'0123456789ABCDEF'
EMPTYSTRING = b''
try:
from binascii import a2b_qp, b2a_qp
except ImportError:
a2b_qp = None
b2a_qp = None
def needsquoting(c, quotetabs, header):
"""Decide whether a particular byte ordinal needs to be quoted.
The 'quotetabs' flag indicates whether embedded tabs and spaces should be
quoted. Note that line-ending tabs and spaces are always encoded, as per
RFC 1521.
"""
assert isinstance(c, bytes)
if c in b' \t':
return quotetabs
# if header, we have to escape _ because _ is used to escape space
if c == b'_':
return header
return c == ESCAPE or not (b' ' <= c <= b'~')
def quote(c):
"""Quote a single character."""
assert isinstance(c, bytes) and len(c)==1
c = ord(c)
return ESCAPE + bytes((HEX[c//16], HEX[c%16]))
def encode(input, output, quotetabs, header=False):
"""Read 'input', apply quoted-printable encoding, and write to 'output'.
'input' and 'output' are files with readline() and write() methods.
The 'quotetabs' flag indicates whether embedded tabs and spaces should be
quoted. Note that line-ending tabs and spaces are always encoded, as per
RFC 1521.
The 'header' flag indicates whether we are encoding spaces as _ as per
RFC 1522.
"""
if b2a_qp is not None:
data = input.read()
odata = b2a_qp(data, quotetabs=quotetabs, header=header)
output.write(odata)
return
def write(s, output=output, lineEnd=b'\n'):
# RFC 1521 requires that the line ending in a space or tab must have
# that trailing character encoded.
if s and s[-1:] in b' \t':
output.write(s[:-1] + quote(s[-1:]) + lineEnd)
elif s == b'.':
output.write(quote(s) + lineEnd)
else:
output.write(s + lineEnd)
prevline = None
while 1:
line = input.readline()
if not line:
break
outline = []
# Strip off any readline induced trailing newline
stripped = b''
if line[-1:] == b'\n':
line = line[:-1]
stripped = b'\n'
# Calculate the un-length-limited encoded line
for c in line:
c = bytes((c,))
if needsquoting(c, quotetabs, header):
c = quote(c)
if header and c == b' ':
outline.append(b'_')
else:
outline.append(c)
# First, write out the previous line
if prevline is not None:
write(prevline)
# Now see if we need any soft line breaks because of RFC-imposed
# length limitations. Then do the thisline->prevline dance.
thisline = EMPTYSTRING.join(outline)
while len(thisline) > MAXLINESIZE:
# Don't forget to include the soft line break `=' sign in the
# length calculation!
write(thisline[:MAXLINESIZE-1], lineEnd=b'=\n')
thisline = thisline[MAXLINESIZE-1:]
# Write out the current line
prevline = thisline
# Write out the last line, without a trailing newline
if prevline is not None:
write(prevline, lineEnd=stripped)
def encodestring(s, quotetabs=False, header=False):
if b2a_qp is not None:
return b2a_qp(s, quotetabs=quotetabs, header=header)
from io import BytesIO
infp = BytesIO(s)
outfp = BytesIO()
encode(infp, outfp, quotetabs, header)
return outfp.getvalue()
def decode(input, output, header=False):
"""Read 'input', apply quoted-printable decoding, and write to 'output'.
'input' and 'output' are files with readline() and write() methods.
If 'header' is true, decode underscore as space (per RFC 1522)."""
if a2b_qp is not None:
data = input.read()
odata = a2b_qp(data, header=header)
output.write(odata)
return
new = b''
while 1:
line = input.readline()
if not line: break
i, n = 0, len(line)
if n > 0 and line[n-1:n] == b'\n':
partial = 0; n = n-1
# Strip trailing whitespace
while n > 0 and line[n-1:n] in b" \t\r":
n = n-1
else:
partial = 1
while i < n:
c = line[i:i+1]
if c == b'_' and header:
new = new + b' '; i = i+1
elif c != ESCAPE:
new = new + c; i = i+1
elif i+1 == n and not partial:
partial = 1; break
            elif i+1 < n and line[i+1:i+2] == ESCAPE:
new = new + ESCAPE; i = i+2
elif i+2 < n and ishex(line[i+1:i+2]) and ishex(line[i+2:i+3]):
new = new + bytes((unhex(line[i+1:i+3]),)); i = i+3
else: # Bad escape sequence -- leave it in
new = new + c; i = i+1
if not partial:
output.write(new + b'\n')
new = b''
if new:
output.write(new)
def decodestring(s, header=False):
if a2b_qp is not None:
return a2b_qp(s, header=header)
from io import BytesIO
infp = BytesIO(s)
outfp = BytesIO()
decode(infp, outfp, header=header)
return outfp.getvalue()
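# Illustrative round-trip sketch (not part of the module):
#
#   >>> sample = b'caf\xc3\xa9 = good'
#   >>> decodestring(encodestring(sample)) == sample
#   True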
# Other helper functions
def ishex(c):
"""Return true if the byte ordinal 'c' is a hexadecimal digit in ASCII."""
assert isinstance(c, bytes)
return b'0' <= c <= b'9' or b'a' <= c <= b'f' or b'A' <= c <= b'F'
def unhex(s):
"""Get the integer value of a hexadecimal number."""
bits = 0
for c in s:
c = bytes((c,))
if b'0' <= c <= b'9':
i = ord('0')
elif b'a' <= c <= b'f':
i = ord('a')-10
elif b'A' <= c <= b'F':
i = ord(b'A')-10
else:
assert False, "non-hex digit "+repr(c)
bits = bits*16 + (ord(c) - i)
return bits
def main():
import sys
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'td')
except getopt.error as msg:
sys.stdout = sys.stderr
print(msg)
print("usage: quopri [-t | -d] [file] ...")
print("-t: quote tabs")
print("-d: decode; default encode")
sys.exit(2)
deco = 0
tabs = 0
for o, a in opts:
if o == '-t': tabs = 1
if o == '-d': deco = 1
if tabs and deco:
sys.stdout = sys.stderr
print("-t and -d are mutually exclusive")
sys.exit(2)
if not args: args = ['-']
sts = 0
for file in args:
if file == '-':
fp = sys.stdin.buffer
else:
try:
fp = open(file, "rb")
except IOError as msg:
sys.stderr.write("%s: can't open (%s)\n" % (file, msg))
sts = 1
continue
try:
if deco:
decode(fp, sys.stdout.buffer)
else:
encode(fp, sys.stdout.buffer, tabs)
finally:
if file != '-':
fp.close()
if sts:
sys.exit(sts)
if __name__ == '__main__':
main()
|
joequery/django
|
refs/heads/master
|
tests/admin_changelist/urls.py
|
810
|
from django.conf.urls import url
from . import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
molejar/pyIMX
|
refs/heads/master
|
imx/img/images.py
|
1
|
# Copyright (c) 2017-2018 Martin Olejar
#
# SPDX-License-Identifier: BSD-3-Clause
# The BSD-3-Clause license for this file can be found in the LICENSE file included with this distribution
# or at https://spdx.org/licenses/BSD-3-Clause.html#licenseText
from io import BytesIO, BufferedReader, SEEK_END, SEEK_CUR
from .misc import read_raw_data, read_raw_segment
from .header import Header, Header2
from .segments import SegTag, SegIVT2, SegBDT, SegAPP, SegDCD, SegCSF, SegIVT3a, SegIVT3b, SegBDS3a, SegBDS3b, \
SegBIC1
########################################################################################################################
# i.MX Image Public Methods
########################################################################################################################
def parse(stream, step=0x100, size=None):
""" Common parser for all versions of i.MX boot images
:param stream: stream buffer to image
:param step: Image searching step
:param size: parsing size
:return: the object of boot image
"""
if isinstance(stream, (bytes, bytearray)):
stream = BytesIO(stream)
if not isinstance(stream, (BufferedReader, BytesIO)):
raise TypeError(" Not correct value type: \"{}\" !".format(type(stream)))
# calculate stream size
start_index = stream.tell()
last_index = stream.seek(0, SEEK_END)
stream.seek(start_index)
if size:
last_index = min(start_index + size, last_index)
while start_index < (last_index - Header.SIZE):
raw = read_raw_data(stream, Header.SIZE, no_seek=True)
if raw[0] == SegTag.IVT2 and ((raw[1] << 8) | raw[2]) == SegIVT2.SIZE and raw[3] in (0x40, 0x41, 0x42):
return BootImg2.parse(stream)
elif raw[0] == SegTag.IVT2 and ((raw[1] << 8) | raw[2]) == SegIVT3b.SIZE and raw[3] in (0x43,):
return BootImg3b.parse(stream)
elif raw[0] == SegTag.IVT3 and ((raw[1] << 8) | raw[2]) == SegIVT3a.SIZE and raw[3] in (0x43,):
return BootImg3a.parse(stream)
elif raw[3] == SegTag.BIC1:
return BootImg4.parse(stream)
else:
start_index = stream.seek(step, SEEK_CUR)
raise Exception(' Not an i.MX Boot Image !')
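# Illustrative usage sketch (the file name is an assumption):
#
#   with open('u-boot.imx', 'rb') as f:
#       boot_image = parse(f)
#       print(boot_image.info())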
########################################################################################################################
# i.MX Boot Image Classes
########################################################################################################################
class EnumAppType:
SCFW = 1
M4_0 = 2
M4_1 = 3
APP = 4
A35 = 4
A53 = 4
A72 = 5
SCD = 6
class BootImgBase(object):
""" IMX Boot Image Base """
@property
def dcd(self):
return self._dcd
@dcd.setter
def dcd(self, value):
assert isinstance(value, SegDCD)
self._dcd = value
def __init__(self, address, offset):
""" Initialize boot image object
:param address: The start address of img in target memory
:param offset: The IVT offset
:return: BootImage object
"""
self.offset = offset
self.address = address
self._dcd = None
def info(self):
raise NotImplementedError()
def add_image(self, data, img_type, address):
raise NotImplementedError()
def export(self):
raise NotImplementedError()
@classmethod
def parse(cls, stream, step=0x100, size=None):
raise NotImplementedError()
########################################################################################################################
# Boot Image V1 Segments (i.MX5)
########################################################################################################################
# Obsolete, will not be implemented
########################################################################################################################
# Boot Image V2 (i.MX6, i.MX7)
########################################################################################################################
class BootImg2(BootImgBase):
""" IMX Boot Image v2 """
# The value of CSF segment size
CSF_SIZE = 0x2000
# The align value of APP segment
APP_ALIGN = 0x1000
# The value of img head size
# offset | size
HEAD_SIZE = {0x400: 0xC00,
0x100: 0x300}
@property
def version(self):
return self._ivt.version
@version.setter
def version(self, value):
self._ivt.version = value
@property
def plugin(self):
return self._plg
@plugin.setter
def plugin(self, value):
assert isinstance(value, bool)
self._plg = value
@property
def ivt(self):
return self._ivt
@ivt.setter
def ivt(self, value):
assert isinstance(value, SegIVT2)
self._ivt = value
@property
def bdt(self):
return self._bdt
@bdt.setter
def bdt(self, value):
assert isinstance(value, SegBDT)
self._bdt = value
@property
def app(self):
return self._app
@app.setter
def app(self, value):
assert isinstance(value, SegAPP)
self._app = value
@property
def csf(self):
return self._csf
@csf.setter
def csf(self, value):
assert isinstance(value, SegCSF)
self._csf = value
@property
def size(self):
sum = self.ivt.space
sum += self.bdt.space
sum += self.dcd.space
sum += self.app.space
sum += self.csf.space
return sum
def __init__(self, address=0, offset=0x400, version=0x41, plugin=False):
""" Initialize boot image object
:param address: The start address of img in target memory
:param offset: The IVT offset
:param version: The version of boot img format
:return: BootImage object
"""
super().__init__(address, offset)
self._ivt = SegIVT2(version)
self._bdt = SegBDT()
self._app = SegAPP()
self._dcd = SegDCD()
self._csf = SegCSF()
self._plg = plugin
def _update(self):
""" Update Image Object """
# Set zero padding for IVT and BDT sections
self.ivt.padding = 0
self.bdt.padding = 0
# Calculate padding for DCD, APP and CSF sections
tmp_val = self.ivt.space + self.bdt.space + self.dcd.size
head_size = 0xC00 if self.offset not in self.HEAD_SIZE else self.HEAD_SIZE[self.offset]
self.dcd.padding = head_size - tmp_val
tmp_val = self.app.size % self.APP_ALIGN
self.app.padding = self.APP_ALIGN - tmp_val if tmp_val > 0 else 0
# Set IVT section
self.ivt.ivt_address = self.address + self.offset
self.ivt.bdt_address = self.ivt.ivt_address + self.ivt.space
if self.dcd.enabled:
self.ivt.dcd_address = self.ivt.bdt_address + self.bdt.space
self.ivt.app_address = self.ivt.dcd_address + self.dcd.space
else:
self.ivt.dcd_address = 0
self.ivt.app_address = self.ivt.bdt_address + self.bdt.space
if self.csf.enabled:
self.ivt.csf_address = self.ivt.app_address + self.app.space
self.csf.padding = self.CSF_SIZE - self.csf.size
else:
self.ivt.csf_address = 0
# Set BDT section
self.bdt.start = self.ivt.ivt_address - self.offset
self.bdt.length = self.size + self.offset
self.bdt.plugin = 1 if self.plugin else 0
def info(self):
self._update()
# Print IVT
msg = "#" * 60 + "\n"
msg += "# IVT (Image Vector Table)\n"
msg += "#" * 60 + "\n\n"
msg += self.ivt.info()
        # Print BDI
msg += "#" * 60 + "\n"
msg += "# BDI (Boot Data Info)\n"
msg += "#" * 60 + "\n\n"
msg += self.bdt.info()
# Print DCD
if self.dcd.enabled:
msg += "#" * 60 + "\n"
msg += "# DCD (Device Config Data)\n"
msg += "#" * 60 + "\n\n"
msg += self.dcd.info()
# Print CSF
if self.csf.enabled:
msg += "#" * 60 + "\n"
msg += "# CSF (Code Signing Data)\n"
msg += "#" * 60 + "\n\n"
msg += self.csf.info()
return msg
def add_image(self, data, img_type=EnumAppType.APP, address=0):
""" Add specific image into the main boot image
:param data: Raw data of img
:param img_type: Type of img
:param address: address in RAM
"""
if img_type == EnumAppType.APP:
self.app.data = data
if address != 0:
self.address = address
else:
raise Exception('Unknown data type !')
def export(self):
""" Export image as bytes array
:return: bytes
"""
self._update()
data = self.ivt.export(True)
data += self.bdt.export(True)
data += self.dcd.export(True)
data += self.app.export(True)
data += self.csf.export(True)
return data
@classmethod
def parse(cls, stream, step=0x100, size=None):
""" Parse image from stream buffer or bytes array
:param stream: The stream buffer or bytes array
:param step: Image searching step
:param size: parsing size
:return: BootImg2 object
"""
if isinstance(stream, (bytes, bytearray)):
stream = BytesIO(stream)
if not isinstance(stream, (BufferedReader, BytesIO)):
raise TypeError(" Not correct value type: \"{}\" !".format(type(stream)))
header = None
start_index = stream.tell()
last_index = stream.seek(0, SEEK_END)
stream.seek(start_index)
if size:
last_index = min(start_index + size, last_index)
imx_image = False
while start_index < (last_index - Header.SIZE):
header = Header.parse(read_raw_data(stream, Header.SIZE, no_seek=True))
if header.tag == SegTag.IVT2 or \
header.length == SegIVT2.SIZE or \
header.param in (0x40, 0x41, 0x42, 0x43):
imx_image = True
break
else:
start_index = stream.seek(step, SEEK_CUR)
if not imx_image:
raise Exception(' Not an i.MX Boot Image !')
obj = cls(version=header.param)
img_size = last_index - start_index
if start_index > 0:
obj.offset = start_index
# Parse IVT
obj.ivt = SegIVT2.parse(read_raw_segment(stream, SegTag.IVT2))
# Parse BDT
obj.bdt = SegBDT.parse(read_raw_data(stream, SegBDT.SIZE))
obj.offset = obj.ivt.ivt_address - obj.bdt.start
obj.address = obj.bdt.start
obj.plugin = True if obj.bdt.plugin else False
# Parse DCD
if obj.ivt.dcd_address:
obj.dcd = SegDCD.parse(read_raw_segment(stream, SegTag.DCD))
obj.dcd.padding = (obj.ivt.app_address - obj.ivt.dcd_address) - obj.dcd.size
# Parse APP
app_start = start_index + (obj.ivt.app_address - obj.ivt.ivt_address)
app_size = obj.ivt.csf_address - obj.ivt.app_address if obj.ivt.csf_address else \
obj.bdt.length - (obj.bdt.start - obj.ivt.app_address)
app_size = img_size - app_start if app_size > (img_size - app_start) else app_size
obj.app.data = read_raw_data(stream, app_size, app_start)
obj.app.padding = 0
# Parse CSF
if obj.ivt.csf_address:
csf_start = start_index + (obj.ivt.csf_address - obj.ivt.ivt_address)
obj.csf = SegCSF.parse(read_raw_segment(stream, SegTag.CSF, csf_start))
# obj.csf.padding = csf_start + obj.csf.size
return obj
########################################################################################################################
# Boot Image V2b (i.MX8M)
########################################################################################################################
class BootImg8m(BootImgBase):
""" IMX Boot Image """
# The value of CSF segment size
CSF_SIZE = 0x2000
# The align value of APP segment
APP_ALIGN = 0x1000
# The value of img head size
# offset | size
HEAD_SIZE = {0x400: 0xC00,
0x100: 0x300}
@property
def version(self):
return self._ivt.version
@version.setter
def version(self, value):
self._ivt.version = value
@property
def plugin(self):
return self._plg
@plugin.setter
def plugin(self, value):
assert isinstance(value, bool)
self._plg = value
@property
def ivt(self):
return self._ivt
@ivt.setter
def ivt(self, value):
assert isinstance(value, SegIVT2)
self._ivt = value
@property
def bdt(self):
return self._bdt
@bdt.setter
def bdt(self, value):
assert isinstance(value, SegBDT)
self._bdt = value
@property
def app(self):
return self._app
@app.setter
def app(self, value):
assert isinstance(value, SegAPP)
self._app = value
@property
def csf(self):
return self._csf
@csf.setter
def csf(self, value):
assert isinstance(value, SegCSF)
self._csf = value
@property
def size(self):
sum = self.ivt.space
sum += self.bdt.space
sum += self.dcd.space
sum += self.app.space
sum += self.csf.space
return sum
def __init__(self, address=0, offset=0x400, version=0x41, plugin=False):
""" Initialize boot image object
:param address: The start address of img in target memory
:param offset: The IVT offset
:param version: The version of boot img format
:return: BootImage object
"""
super().__init__(address, offset)
self._ivt = SegIVT2(version)
self._bdt = SegBDT()
self._app = SegAPP()
self._dcd = SegDCD()
self._csf = SegCSF()
self._plg = plugin
def _update(self):
# Set zero padding for IVT and BDT sections
self.ivt.padding = 0
self.bdt.padding = 0
# Calculate padding for DCD, APP and CSF sections
tmp_val = self.ivt.space + self.bdt.space + self.dcd.size
head_size = 0xC00 if self.offset not in self.HEAD_SIZE else self.HEAD_SIZE[self.offset]
self.dcd.padding = head_size - tmp_val
tmp_val = self.app.size % self.APP_ALIGN
self.app.padding = self.APP_ALIGN - tmp_val if tmp_val > 0 else 0
# Set IVT section
self.ivt.ivt_address = self.address + self.offset
self.ivt.bdt_address = self.ivt.ivt_address + self.ivt.space
if self.dcd.enabled:
self.ivt.dcd_address = self.ivt.bdt_address + self.bdt.space
self.ivt.app_address = self.ivt.dcd_address + self.dcd.space
else:
self.ivt.dcd_address = 0
self.ivt.app_address = self.ivt.bdt_address + self.bdt.space
if self.csf.enabled:
self.ivt.csf_address = self.ivt.app_address + self.app.space
self.csf.padding = self.CSF_SIZE - self.csf.size
else:
self.ivt.csf_address = 0
# Set BDT section
self.bdt.start = self.ivt.ivt_address - self.offset
self.bdt.length = self.size + self.offset
self.bdt.plugin = 1 if self.plugin else 0
def info(self):
self._update()
# Print IVT
msg = "#" * 60 + "\n"
msg += "# IVT (Image Vector Table)\n"
msg += "#" * 60 + "\n\n"
msg += self.ivt.info()
        # Print BDI
msg += "#" * 60 + "\n"
msg += "# BDI (Boot Data Info)\n"
msg += "#" * 60 + "\n\n"
msg += self.bdt.info()
# Print DCD
if self.dcd.enabled:
msg += "#" * 60 + "\n"
msg += "# DCD (Device Config Data)\n"
msg += "#" * 60 + "\n\n"
msg += self.dcd.info()
# Print CSF
if self.csf.enabled:
msg += "#" * 60 + "\n"
msg += "# CSF (Code Signing Data)\n"
msg += "#" * 60 + "\n\n"
msg += self.csf.info()
return msg
def add_image(self, data, img_type=EnumAppType.APP, address=0):
""" Add specific image into the main boot image
:param data: Raw data of img
:param img_type: Type of img
:param address: address in RAM
:return:
"""
if img_type == EnumAppType.APP:
self.app.data = data
if address != 0:
self.address = address
else:
raise Exception('Unknown data type !')
def export(self):
""" Export Image as bytes array
:return: bytes
"""
self._update()
data = self.ivt.export(True)
data += self.bdt.export(True)
data += self.dcd.export(True)
data += self.app.export(True)
data += self.csf.export(True)
return data
@classmethod
def parse(cls, stream, step=0x100, size=None):
""" Parse image from stream buffer or bytes array
:param stream: The stream buffer or bytes array
:param step: Image searching step
:param size: parsing size
:return: BootImg2 object
"""
if isinstance(stream, (bytes, bytearray)):
stream = BytesIO(stream)
if not isinstance(stream, (BufferedReader, BytesIO)):
raise TypeError(" Not correct value type: \"{}\" !".format(type(stream)))
header = None
start_index = stream.tell()
last_index = stream.seek(0, SEEK_END)
stream.seek(start_index)
if size:
last_index = min(start_index + size, last_index)
imx_image = False
while start_index < (last_index - Header.SIZE):
header = Header.parse(read_raw_data(stream, Header.SIZE, no_seek=True))
if header.tag == SegTag.IVT2 or \
header.length == SegIVT2.SIZE or \
header.param in (0x40, 0x41, 0x42, 0x43):
imx_image = True
break
else:
start_index = stream.seek(step, SEEK_CUR)
if not imx_image:
raise Exception(' Not an i.MX Boot Image !')
obj = cls(version=header.param)
img_size = last_index - start_index
if start_index > 0:
obj.offset = start_index
# Parse IVT
obj.ivt = SegIVT2.parse(read_raw_segment(stream, SegTag.IVT2))
# Parse BDT
obj.bdt = SegBDT.parse(read_raw_data(stream, SegBDT.SIZE))
obj.offset = obj.ivt.ivt_address - obj.bdt.start
obj.address = obj.bdt.start
obj.plugin = True if obj.bdt.plugin else False
# Parse DCD
if obj.ivt.dcd_address:
obj.dcd = SegDCD.parse(read_raw_segment(stream, SegTag.DCD))
obj.dcd.padding = (obj.ivt.app_address - obj.ivt.dcd_address) - obj.dcd.size
# Parse APP
app_start = start_index + (obj.ivt.app_address - obj.ivt.ivt_address)
app_size = obj.ivt.csf_address - obj.ivt.app_address if obj.ivt.csf_address else \
obj.bdt.length - (obj.bdt.start - obj.ivt.app_address)
app_size = img_size - app_start if app_size > (img_size - app_start) else app_size
obj.app.data = read_raw_data(stream, app_size, app_start)
obj.app.padding = 0
# Parse CSF
#if obj.ivt.csf_address:
# obj.csf = SegCSF.parse(buffer)
# obj.csf.padding = obj.bdt.length - ((obj.ivt.csf_address - obj.ivt.ivt_address) + obj.csf.size)
return obj
########################################################################################################################
# Boot Image V3a: i.MX8QXP-A0
########################################################################################################################
class BootImg3a(BootImgBase):
""" i.MX Boot Image v3a """
IMG_TYPE_CSF = 0x01
IMG_TYPE_SCD = 0x02
IMG_TYPE_EXEC = 0x03
IMG_TYPE_DATA = 0x04
SCFW_FLAGS_APP = 0x01355FC4
SCFW_FLAGS_M4_0 = 0x4a5162
SCFW_FLAGS_M4_1 = 0x4f52a3
SCFW_FLAGS_SCFW = 0x1
INITIAL_LOAD_ADDR_SCU_ROM = 0x2000e000
INITIAL_LOAD_ADDR_AP_ROM = 0x00110000
INITIAL_LOAD_ADDR_FLEXSPI = 0x08000000
# The value of CSF segment size
CSF_SIZE = 0x2000
# The align value of APP segment
IMG_AUTO_ALIGN = 0x10
SECTOR_SIZE = 0x200
APP_ALIGN = 0x1200
# The value of img head size
# offset | size
HEAD_SIZE = {0x400: 0xC400,
0x1000: 0x1400}
PADDING_VAL = 0x00
COUNT_OF_CONTAINERS = 2
@property
def plg(self):
return self._plg
@plg.setter
def plg(self, value):
assert isinstance(value, bool)
self._plg = value
@property
def ivt(self):
return self._ivt
@ivt.setter
def ivt(self, value):
assert isinstance(value, list) and isinstance(value[0], SegIVT3a)
self._ivt = value
@property
def bdt(self):
return self._bdt
@bdt.setter
def bdt(self, value):
assert isinstance(value, list) and isinstance(value[0], SegBDS3a)
self._bdt = value
@property
def app(self):
return self._app
@app.setter
def app(self, value):
self._app = value
@property
def csf(self):
return self._csf
@csf.setter
def csf(self, value):
assert isinstance(value, SegCSF)
self._csf = value
def __init__(self, address=0, offset=0x400, version=0x43):
""" Initialize boot image object
:param address: The start address of img in target memory
:param offset: The IVT offset
:param version: The version of boot img format
:return: BootImage object
"""
super().__init__(address, offset)
self._ivt = [SegIVT3a(version), SegIVT3a(version)]
self._ivt[0].next = self._ivt[0].size
self._ivt[0].version = 0x01
self._ivt[1].version = 0x01
self._bdt = [SegBDS3a(), SegBDS3a()]
self._app = [[SegAPP() for i in range(SegBDS3a.IMAGES_MAX_COUNT)],
[SegAPP() for i in range(SegBDS3a.IMAGES_MAX_COUNT)]]
self._dcd = SegDCD()
self._csf = SegCSF()
self._plg = False
if not isinstance(self.address, list):
self.address = [self.INITIAL_LOAD_ADDR_SCU_ROM, self.INITIAL_LOAD_ADDR_AP_ROM]
self._sdc_address = 0
@staticmethod
def _compute_padding(size, sector_size):
return ((size // sector_size + (size % sector_size > 0)) * sector_size) - size
def _update(self):
# Set zero padding for IVT and BDT sections
for container in range(self.COUNT_OF_CONTAINERS):
self.ivt[container].padding = 0
self.bdt[container].padding = 0
# Set IVT section
self.ivt[container].ivt_address = self.address[container] + self.offset + \
container * self.ivt[container].size
self.ivt[container].bdt_address = self.ivt[container].ivt_address + \
self.ivt[container].space * (self.COUNT_OF_CONTAINERS - container) + \
container * self.bdt[container].size
if container == 0:
if self.dcd.enabled:
self.ivt[container].dcd_address = self.ivt[container].bdt_address + self.bdt[container].space * 2
if self.csf.enabled:
self.ivt[container].csf_address = self.ivt[container].dcd_address + self.dcd.space
else:
self.ivt[container].csf_address = 0
else:
self.ivt[container].dcd_address = 0
if self.csf.enabled:
self.ivt[container].csf_address = self.ivt[container].bdt_address + \
self.bdt[container].space * 2
else:
self.ivt[container].csf_address = 0
else:
self.ivt[container].dcd_address = 0
self.ivt[container].csf_address = 0
self.app[container][0].padding = self._compute_padding(self.bdt[container].images[0].image_size,
self.SECTOR_SIZE)
if self.bdt[container].images_count != 0:
self.bdt[container].boot_data_size = self.bdt[container].size
if container == 0:
self.bdt[container].images[0].image_source = self.APP_ALIGN
else:
last_image_index = self.bdt[container - 1].images_count - 1
last_image_address = self.bdt[container - 1].images[last_image_index].image_source
self.bdt[container].images[0].image_source = last_image_address + \
self.app[container - 1][last_image_index].space
for i in range(self.bdt[container].images_count - 1):
self.bdt[container].images[i + 1].image_source = self.bdt[container].images[i].image_source + \
self.app[container][i].space
self.app[container][i + 1].padding = self._compute_padding(self.bdt[container].images[i + 1].image_size,
self.SECTOR_SIZE)
if container == self.COUNT_OF_CONTAINERS - 1:
self.app[container][self.bdt[container].images_count - 1].padding = 0
# Set BDT section
def info(self):
self._update()
# Print IVT
msg = "#" * 60 + "\n"
msg += "# IVT (Image Vector Table)\n"
msg += "#" * 60 + "\n\n"
for index, ivt in enumerate(self.ivt):
msg += "-" * 60 + "\n"
msg += "- IVT[{}]\n".format(index)
msg += "-" * 60 + "\n\n"
msg += ivt.info()
# Print BDI
msg += "#" * 60 + "\n"
msg += "# BDI (Boot Data Info)\n"
msg += "#" * 60 + "\n\n"
for index, bdi in enumerate(self.bdt):
msg += "-" * 60 + "\n"
msg += "- BDI[{}]\n".format(index)
msg += "-" * 60 + "\n\n"
msg += bdi.info()
# Print DCD
if self.dcd.enabled:
msg += "#" * 60 + "\n"
msg += "# DCD (Device Config Data)\n"
msg += "#" * 60 + "\n\n"
msg += self.dcd.info()
# Print CSF
if self.csf.enabled:
msg += "#" * 60 + "\n"
msg += "# CSF (Code Signing Data)\n"
msg += "#" * 60 + "\n\n"
msg += self.csf.info()
return msg
def add_image(self, data, img_type=EnumAppType.APP, address=0):
""" Add specific image into the main boot image
:param data: Raw data of image
:param img_type: Type of image
:param address: address in RAM
:return:
"""
if img_type == EnumAppType.A35:
image_index = self.bdt[1].images_count
self.bdt[1].images[image_index].image_destination = address
self.bdt[1].images[image_index].image_entry = address
self.bdt[1].images[image_index].image_size = len(data)
self.bdt[1].images[image_index].rom_flags = 0
self.bdt[1].images[image_index].hab_flags = self.IMG_TYPE_EXEC
self.bdt[1].images[image_index].scfw_flags = self.SCFW_FLAGS_APP
self.bdt[1].images_count += 1
self.app[1][image_index].data = data
self.app[1][image_index].padding = self._compute_padding(len(data), self.SECTOR_SIZE)
elif img_type == EnumAppType.M4_0 or img_type == EnumAppType.M4_1:
image_index = self.bdt[0].images_count
self.bdt[0].images[image_index].image_destination = address
self.bdt[0].images[image_index].image_entry = address
self.bdt[0].images[image_index].image_size = len(data)
self.bdt[0].images[image_index].rom_flags = 0
self.bdt[0].images[image_index].hab_flags = self.IMG_TYPE_EXEC
self.bdt[0].images[image_index].scfw_flags = self.SCFW_FLAGS_M4_0 if img_type == EnumAppType.M4_0 else \
self.SCFW_FLAGS_M4_1
self.bdt[0].images_count += 1
self.app[0][image_index].data = data
self.app[0][image_index].padding = self._compute_padding(len(data), self.SECTOR_SIZE)
elif img_type == EnumAppType.SCFW:
image_index = self.bdt[0].images_count
self.bdt[0].images[image_index].image_destination = 0x1ffe0000
self.bdt[0].images[image_index].image_entry = 0x1ffe0000
self.bdt[0].images[image_index].image_size = len(data)
self.bdt[0].images[image_index].rom_flags = 0
self.bdt[0].images[image_index].hab_flags = self.IMG_TYPE_EXEC
self.bdt[0].images[image_index].scfw_flags = self.SCFW_FLAGS_SCFW
self.bdt[0].images_count += 1
self.app[0][image_index].data = data
self.app[0][image_index].padding = self._compute_padding(len(data), self.SECTOR_SIZE)
self._sdc_address = self.bdt[0].images[image_index].image_destination + len(data) + \
self._compute_padding(len(data), self.IMG_AUTO_ALIGN)
elif img_type == EnumAppType.SCD:
if self._sdc_address == 0:
raise Exception('SCFW has to be defined before SCD!')
image_index = self.bdt[0].images_count
self.bdt[0].images[image_index].image_destination = self._sdc_address
self.bdt[0].images[image_index].image_entry = 0
self.bdt[0].images[image_index].image_size = len(data)
self.bdt[0].images[image_index].rom_flags = 0
self.bdt[0].images[image_index].hab_flags = self.IMG_TYPE_SCD
self.bdt[0].images[image_index].scfw_flags = 0x1
self.bdt[0].images_count += 1
self._app[0][image_index].data = data
self._app[0][image_index].padding = self._compute_padding(len(data), self.SECTOR_SIZE)
else:
raise Exception('Unknown image type!')
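# A minimal usage sketch (hypothetical file names and load address; not part
# of the original source):
#   img = BootImg3a(offset=0x400)
#   with open('scfw.bin', 'rb') as f:
#       img.add_image(f.read(), img_type=EnumAppType.SCFW)
#   with open('m4_app.bin', 'rb') as f:
#       img.add_image(f.read(), img_type=EnumAppType.M4_0, address=0x34FE0000)
#   with open('flash.bin', 'wb') as f:
#       f.write(img.export())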
def export(self):
''' Export Image as binary blob
:return:
'''
self._update()
data = bytes()
data += self.ivt[0].export(True)
data += self.ivt[1].export(True)
data += self.bdt[0].export(True)
data += self.bdt[1].export(True)
data += self.dcd.export(True)
data += self.csf.export(True)
data += bytes([self.PADDING_VAL] * self._compute_padding(len(data), self.APP_ALIGN - self.offset))
for container in range(self.COUNT_OF_CONTAINERS):
for image in range(self.bdt[container].images_count):
data += self.app[container][image].export(True)
return data
@classmethod
def parse(cls, stream, step=0x100, size=None):
""" Parse image from stream buffer or bytes array
:param stream: The stream buffer or bytes array
:param step: Image searching step
:param size: parsing size
:return: BootImg3a object
"""
if isinstance(stream, (bytes, bytearray)):
stream = BytesIO(stream)
if not isinstance(stream, (BufferedReader, BytesIO)):
raise TypeError(" Not correct value type: \"{}\" !".format(type(stream)))
header = None
start_index = stream.tell()
last_index = stream.seek(0, SEEK_END)
stream.seek(start_index)
if size:
last_index = min(start_index + size, last_index)
imx_image = False
while start_index < (last_index - Header.SIZE):
header = Header.parse(read_raw_data(stream, Header.SIZE, no_seek=True))
if header.tag == SegTag.IVT3 and header.length == SegIVT3a.SIZE and \
header.param in (0x43,):
imx_image = True
break
else:
start_index = stream.seek(step, SEEK_CUR)
if not imx_image:
raise Exception(' Not an i.MX Boot Image !')
obj = cls(version=header.param)
img_size = last_index - start_index
if start_index > 0:
obj.offset = start_index
# Parse IVT
obj.ivt[0] = SegIVT3a.parse(read_raw_segment(stream, SegTag.IVT3))
obj.ivt[1] = SegIVT3a.parse(read_raw_segment(stream, SegTag.IVT3))
# Parse BDT
obj.bdt[0] = SegBDS3a.parse(read_raw_data(stream, SegBDS3a.SIZE))
obj.bdt[1] = SegBDS3a.parse(read_raw_data(stream, SegBDS3a.SIZE))
# Parse DCD
if obj.ivt[0].dcd_address:
stream.seek(start_index + (obj.ivt[0].dcd_address - obj.ivt[0].ivt_address), 0)
obj.dcd = SegDCD.parse(read_raw_segment(stream, SegTag.DCD))
# Parse CSF
if obj.ivt[0].csf_address:
stream.seek(start_index + (obj.ivt[0].csf_address - obj.ivt[0].ivt_address), 0)
obj.csf = SegCSF.parse(read_raw_segment(stream, SegTag.CSF))
# Parse IMAGES
for container in range(obj.COUNT_OF_CONTAINERS):
for i in range(obj.bdt[container].images_count):
stream.seek(obj.bdt[container].images[i].image_source - obj.offset, 0)
obj.app[container][i].data = read_raw_data(stream, obj.bdt[container].images[i].image_size)
return obj
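# A minimal parsing sketch (hypothetical file name; not part of the original
# source):
#   with open('flash.bin', 'rb') as f:
#       img = BootImg3a.parse(f.read())
#   print(img.info())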
########################################################################################################################
# Boot Image V3b: i.MX8QM-A0
########################################################################################################################
class BootImg3b(BootImgBase):
""" IMX Boot Image v3b """
IMG_TYPE_CSF = 0x01
IMG_TYPE_SCD = 0x02
IMG_TYPE_EXEC = 0x03
IMG_TYPE_DATA = 0x04
SCFW_FLAGS_A53 = 0x1354014
SCFW_FLAGS_A72 = 0x1354065
SCFW_FLAGS_M4_0 = 0x4a5162
SCFW_FLAGS_M4_1 = 0x4f52a3
SCFW_FLAGS_SCFW = 0x1
INITIAL_LOAD_ADDR_SCU_ROM = 0x2000e000
INITIAL_LOAD_ADDR_AP_ROM = 0x00110000
INITIAL_LOAD_ADDR_FLEXSPI = 0x08000000
# The value of CSF segment size
CSF_SIZE = 0x2000
# The align value for img
IMG_AUTO_ALIGN = 0x10
# The align value for sector
SECTOR_SIZE = 0x200
# The align value of APP segment
APP_ALIGN = 0x1200
PADDING_VAL = 0x00
# The value of img head size
# offset | size
HEAD_SIZE = {0x400: 0xC400,
0x1000: 0x1400}
COUNT_OF_CONTAINERS = 2
@property
def plg(self):
return self._plg
@plg.setter
def plg(self, value):
assert isinstance(value, bool)
self._plg = value
@property
def ivt(self):
return self._ivt
@ivt.setter
def ivt(self, value):
assert isinstance(value, list)
assert len(value) == self.COUNT_OF_CONTAINERS
assert isinstance(value[0], SegIVT3b)
self._ivt = value
@property
def bdt(self):
return self._bdt
@bdt.setter
def bdt(self, value):
assert isinstance(value, list)
assert len(value) == self.COUNT_OF_CONTAINERS
assert isinstance(value[0], SegBDS3b)
self._bdt = value
@property
def app(self):
return self._app
@app.setter
def app(self, value):
self._app = value
@property
def scd(self):
return self._scd
@scd.setter
def scd(self, value):
self._scd = value
@property
def csf(self):
return self._csf
@csf.setter
def csf(self, value):
assert isinstance(value, SegCSF)
self._csf = value
def __init__(self, address=0, offset=0x400, version=0x43):
""" Initialize boot image object
:param address: The start address of img in target memory
:param offset: The IVT offset
:param version: The version of boot img format
:return: BootImage object
"""
super().__init__(address, offset)
self._ivt = [SegIVT3b(version), SegIVT3b(version)]
self._bdt = [SegBDS3b(), SegBDS3b()]
self._app = [[SegAPP() for _ in range(SegBDS3b.IMAGES_MAX_COUNT)],
[SegAPP() for _ in range(SegBDS3b.IMAGES_MAX_COUNT)]]
self._dcd = SegDCD()
self._scd = SegAPP()
self._csf = SegCSF()
self._plg = False
self._scd_address = 0
if not isinstance(self.address, list):
self.address = [self.INITIAL_LOAD_ADDR_SCU_ROM, self.INITIAL_LOAD_ADDR_AP_ROM]
@staticmethod
def _compute_padding(image_size, sector_size):
return ((image_size // sector_size + (image_size % sector_size > 0)) * sector_size) - image_size
def _update(self):
# Set zero padding for IVT and BDT sections
for container in range(self.COUNT_OF_CONTAINERS):
self.ivt[container].padding = 0
self.bdt[container].padding = 0
# Set IVT section
self.ivt[container].ivt_address = self.address[container] + self.offset + \
container * self.ivt[container].size
self.ivt[container].bdt_address = self.ivt[container].ivt_address + \
self.ivt[container].space * (2 - container) + \
container * self.bdt[container].size
if container == 0:
if self.dcd.enabled:
self.ivt[container].dcd_address = self.ivt[container].bdt_address + self.bdt[container].space * 2
if self.csf.enabled:
self.ivt[container].csf_address = self.ivt[container].dcd_address + self.dcd.space
else:
self.ivt[container].csf_address = 0
else:
self.ivt[container].dcd_address = 0
if self.csf.enabled:
self.ivt[container].csf_address = self.ivt[container].bdt_address + \
self.bdt[container].space * 2
else:
self.ivt[container].csf_address = 0
else:
self.ivt[container].dcd_address = 0
self.ivt[container].csf_address = 0
self.app[container][0].padding = self._compute_padding(self.bdt[container].images[0].image_size,
self.SECTOR_SIZE)
if self.bdt[container].images_count != 0:
self.bdt[container].boot_data_size = self.bdt[container].size
if container == 0:
self.bdt[container].images[0].image_source = self.APP_ALIGN
else:
last_image_index = self.bdt[container - 1].images_count - 1
last_image_address = self.bdt[container - 1].images[last_image_index].image_source
self.bdt[container].images[0].image_source = last_image_address + \
self.app[container - 1][last_image_index].space
next_image_address = 0
for i in range(self.bdt[container].images_count - 1):
self.bdt[container].images[i + 1].image_source = self.bdt[container].images[i].image_source + \
self.app[container][i].space
self.app[container][i + 1].padding = self._compute_padding(
self.bdt[container].images[i + 1].image_size, self.SECTOR_SIZE)
next_image_address = self.bdt[container].images[i + 1].image_source + self.app[container][i + 1].space
if container == 0:
if self.bdt[container].scd.image_destination != 0:
self.bdt[container].scd.image_source = next_image_address
self.scd.padding = self._compute_padding(self.bdt[0].scd.image_size, self.SECTOR_SIZE)
next_image_address += self.scd.space
# Set BDT section
if self.csf.enabled:
self.bdt[container].csf.image_source = next_image_address
self.csf.padding = self._compute_padding(self.bdt[0].csf.image_size, self.SECTOR_SIZE)
next_image_address += self.csf.space
# Set BDT section
def info(self):
self._update()
# Print IVT
msg = "#" * 60 + "\n"
msg += "# IVT (Image Vector Table)\n"
msg += "#" * 60 + "\n\n"
for index, ivt in enumerate(self.ivt):
msg += "-" * 60 + "\n"
msg += "- IVT[{}]\n".format(index)
msg += "-" * 60 + "\n\n"
msg += ivt.info()
# Print BDI
msg += "#" * 60 + "\n"
msg += "# BDI (Boot Data Info)\n"
msg += "#" * 60 + "\n\n"
for index, bdi in enumerate(self.bdt):
msg += "-" * 60 + "\n"
msg += "- BDI[{}]\n".format(index)
msg += "-" * 60 + "\n\n"
msg += bdi.info()
# Print DCD
if self.dcd.enabled:
msg += "#" * 60 + "\n"
msg += "# DCD (Device Config Data)\n"
msg += "#" * 60 + "\n\n"
msg += self.dcd.info()
# Print CSF
if self.csf.enabled:
msg += "#" * 60 + "\n"
msg += "# CSF (Code Signing Data)\n"
msg += "#" * 60 + "\n\n"
msg += self.csf.info()
return msg
def add_image(self, data, img_type=EnumAppType.APP, address=0):
""" Add specific image into the main boot image
:param data: Raw data of image
:param img_type: Type of image
:param address: address in RAM
"""
if img_type == EnumAppType.A53 or img_type == EnumAppType.A72:
image_index = self.bdt[1].images_count
self.app[1][image_index].data = data
self.bdt[1].images[image_index].image_destination = address
self.bdt[1].images[image_index].image_entry = address
self.bdt[1].images[image_index].image_size = len(data)
if img_type == EnumAppType.A53:
self.bdt[1].images[image_index].flags = self.SCFW_FLAGS_A53
elif img_type == EnumAppType.A72:
self.bdt[1].images[image_index].flags = self.SCFW_FLAGS_A72
self.app[1][image_index].padding = self._compute_padding(len(data), self.SECTOR_SIZE)
self.bdt[1].images_count += 1
elif img_type == EnumAppType.M4_0 or img_type == EnumAppType.M4_1:
image_index = self.bdt[0].images_count
self.app[0][image_index].data = data
self.bdt[0].images[image_index].image_destination = address
self.bdt[0].images[image_index].image_entry = address
self.bdt[0].images[image_index].image_size = len(data)
if img_type == EnumAppType.M4_0:
self.bdt[0].images[image_index].flags = self.SCFW_FLAGS_M4_0
elif img_type == EnumAppType.M4_1:
self.bdt[0].images[image_index].flags = self.SCFW_FLAGS_M4_1
self.app[0][image_index].padding = ((len(data) // self.SECTOR_SIZE + (
len(data) % self.SECTOR_SIZE > 0)) * self.SECTOR_SIZE) - len(data)
self.bdt[0].images_count += 1
elif img_type == EnumAppType.SCFW:
image_index = self.bdt[0].images_count
self.bdt[0].images[image_index].image_destination = 0x30fe0000
self.bdt[0].images[image_index].image_entry = 0x1ffe0000
self.bdt[0].images[image_index].image_size = len(data)
self.bdt[0].images[image_index].flags = self.SCFW_FLAGS_SCFW
self._scd_address = self.bdt[0].images[image_index].image_destination + len(data) + \
self._compute_padding(len(data), self.IMG_AUTO_ALIGN)
self.bdt[0].images_count += 1
self.app[0][image_index].data = data
self.app[0][image_index].padding = self._compute_padding(len(data), self.SECTOR_SIZE)
elif img_type == EnumAppType.SCD:
if self._scd_address == 0:
raise Exception('SCFW has to be defined before SCD!')
self.scd.data = data
self.scd.padding = self._compute_padding(len(data), self.SECTOR_SIZE)
self.bdt[0].scd.image_destination = self._scd_address
self.bdt[0].scd.image_entry = 0
self.bdt[0].scd.image_size = len(data)
self.ivt[0].scd_address = self.bdt[0].scd.image_destination
else:
raise Exception('Unknown image type!')
def export(self):
self._update()
# data = bytearray(self._offset)
data = bytes()
data += self.ivt[0].export(True)
data += self.ivt[1].export(True)
data += self.bdt[0].export(True)
data += self.bdt[1].export(True)
data += self.dcd.export(True)
data += bytes([self.PADDING_VAL] * self._compute_padding(len(data), self.APP_ALIGN - self.offset))
for container in range(self.COUNT_OF_CONTAINERS):
for i in range(self.bdt[container].images_count):
data += self.app[container][i].export(True)
if self.bdt[0].scd.image_source != 0:
data += self.scd.export(True)
if self.bdt[0].csf.image_source != 0:
data += self.csf.export(True)
return data
@classmethod
def parse(cls, stream, step=0x100, size=None):
""" Parse image from stream buffer or bytes array
:param stream: The stream buffer or bytes array
:param step: Image searching step
:param size: parsing size
:return: BootImg3b object
"""
if isinstance(stream, (bytes, bytearray)):
stream = BytesIO(stream)
if not isinstance(stream, (BufferedReader, BytesIO)):
raise TypeError(" Not correct value type: \"{}\" !".format(type(stream)))
header = None
start_index = stream.tell()
last_index = stream.seek(0, SEEK_END)
stream.seek(start_index)
if size:
last_index = min(start_index + size, last_index)
imx_image = False
while start_index < (last_index - Header.SIZE):
header = Header.parse(read_raw_data(stream, Header.SIZE, no_seek=True))
if header.tag == SegTag.IVT2 and header.length == SegIVT3b.SIZE and \
header.param in (0x43,):
imx_image = True
break
else:
start_index = stream.seek(step, SEEK_CUR)
if not imx_image:
raise Exception(' Not an i.MX Boot Image !')
obj = cls(version=header.param)
img_size = last_index - start_index
if start_index > 0:
obj.offset = start_index
# Parse IVT
obj.ivt[0] = SegIVT3b.parse(read_raw_segment(stream, SegTag.IVT2))
obj.ivt[1] = SegIVT3b.parse(read_raw_segment(stream, SegTag.IVT2))
# Parse BDT
obj.bdt[0] = SegBDS3b.parse(read_raw_data(stream, SegBDS3b.SIZE))
obj.bdt[1] = SegBDS3b.parse(read_raw_data(stream, SegBDS3b.SIZE))
# Parse DCD
if obj.ivt[0].dcd_address:
stream.seek(start_index + (obj.ivt[0].dcd_address - obj.ivt[0].ivt_address), 0)
obj.dcd = SegDCD.parse(read_raw_segment(stream, SegTag.DCD))
# Parse IMAGES
for container in range(obj.COUNT_OF_CONTAINERS):
for i in range(obj.bdt[container].images_count):
stream.seek(obj.bdt[container].images[i].image_source - obj.offset, 0)
obj.app[container][i].data = read_raw_data(stream, obj.bdt[container].images[i].image_size)
# Parse SCD
if obj.bdt[0].scd.image_source != 0:
stream.seek(obj.bdt[0].scd.image_source - obj.offset, 0)
obj.scd.data = read_raw_data(stream, obj.bdt[0].scd.image_size)
# Parse CSF
if obj.bdt[0].csf.image_source != 0:
stream.seek(obj.bdt[0].csf.image_source - obj.offset, 0)
obj.csf = SegCSF.parse(read_raw_segment(stream, SegTag.CSF))
return obj
########################################################################################################################
# Boot Image V4: i.MX8DM, i.MX8QM_B0, i.MX8QXP_B0
########################################################################################################################
class BootImg4(BootImgBase):
""" i.MX Boot Image v4 """
def __init__(self, address=0, offset=0x400):
""" Initialize boot image object
:param address: The start address of image in target memory
:param offset: The image offset
:return: BootImage object
"""
super().__init__(address, offset)
self._dcd = SegDCD()
self._cont1_header = SegBIC1()
self._cont2_header = SegBIC1()
self._cont1_data = []
self._cont2_data = []
def _update(self):
pass
def info(self):
self._update()
msg = ""
msg += "#" * 60 + "\n"
msg += "# Boot Images Container 1\n"
msg += "#" * 60 + "\n\n"
msg += self._cont1_header.info()
msg += "#" * 60 + "\n"
msg += "# Boot Images Container 2\n"
msg += "#" * 60 + "\n\n"
msg += self._cont2_header.info()
if self.dcd.enabled:
msg += "#" * 60 + "\n"
msg += "# DCD (Device Config Data)\n"
msg += "#" * 60 + "\n\n"
msg += self.dcd.info()
return msg
def add_image(self, data, img_type, address):
raise NotImplementedError()
def export(self):
self._update()
data = bytes()
data += self._cont1_header.export(True)
data += self._cont2_header.export(True)
# TODO: Complete Implementation
return data
@classmethod
def parse(cls, stream, step=0x100, size=None):
""" Parse image from stream buffer or bytes array
:param stream: The stream buffer or bytes array
:param step: Image searching step
:param size: parsing size
:return: BootImg4 object
"""
if isinstance(stream, (bytes, bytearray)):
stream = BytesIO(stream)
if not isinstance(stream, (BufferedReader, BytesIO)):
raise TypeError(" Not correct value type: \"{}\" !".format(type(stream)))
start_index = stream.tell()
last_index = stream.seek(0, SEEK_END)
stream.seek(start_index)
if size:
last_index = min(start_index + size, last_index)
imx_image = False
while start_index < (last_index - Header.SIZE):
header = Header2.parse(read_raw_data(stream, Header2.SIZE, no_seek=True))
if header.tag == SegTag.BIC1:
imx_image = True
break
else:
start_index = stream.seek(step, SEEK_CUR)
if not imx_image:
raise Exception(' Not an i.MX Boot Image !')
img_size = last_index - start_index
obj = cls()
if start_index > 0:
obj.offset = start_index
# Parse Containers
obj._cont1_header = SegBIC1.parse(read_raw_data(stream, 0x400))
obj._cont2_header = SegBIC1.parse(read_raw_data(stream, 0x400))
# TODO: Complete Implementation
return obj
########################################################################################################################
# i.MX Kernel Image Classes
########################################################################################################################
class KernelImg(object):
""" IMX Kernel Image """
IMAGE_MIN_SIZE = 0x1000
@property
def address(self):
return self._ivt.app_address
@address.setter
def address(self, value):
self._ivt.app_address = value
@property
def version(self):
return self._ivt.version
@version.setter
def version(self, value):
self._ivt.version = value
@property
def app(self):
return self._app.data
@app.setter
def app(self, value):
assert isinstance(value, (bytes, bytearray))
self._app.data = value
@property
def csf(self):
return self._csf
@csf.setter
def csf(self, value):
assert isinstance(value, SegCSF)
self._csf = value
def __init__(self, address=0, app=None, csf=None, version=0x41):
self._ivt = SegIVT2(version)
self._ivt.app_address = address
self._app = SegAPP(app)
self._csf = SegCSF() if csf is None else csf
def __str__(self):
return self.info()
def __repr__(self):
return self.info()
def _update(self):
pass
def info(self):
pass
def export(self):
self._update()
data = self._app.export(True)
data += self._ivt.export(True)
data += self._csf.export(True)
return data
@classmethod
def parse(cls, data):
assert type(data) in (str, bytes, bytearray)
assert len(data) > cls.IMAGE_MIN_SIZE
pass
|
mammadori/pyglet
|
refs/heads/master
|
tests/window/CONTEXT_SHARE.py
|
27
|
#!/usr/bin/env python
'''Test that multiple windows share objects by default.
This test is non-interactive.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
from ctypes import *
from pyglet import window
from pyglet.gl import *
__noninteractive = True
class CONTEXT_SHARE(unittest.TestCase):
def create_context(self, share):
display = window.get_platform().get_default_display()
screen = display.get_default_screen()
config = screen.get_best_config()
return config.create_context(share)
def test_context_share_list(self):
w1 = window.Window(200, 200)
try:
w1.switch_to()
glist = glGenLists(1)
glNewList(glist, GL_COMPILE)
glLoadIdentity()
glEndList()
self.assertTrue(glIsList(glist))
except:
w1.close()
raise
w2 = window.Window(200, 200)
try:
w2.switch_to()
self.assertTrue(glIsList(glist))
finally:
w1.close()
w2.close()
def test_context_noshare_list(self):
w1 = window.Window(200, 200)
try:
w1.switch_to()
glist = glGenLists(1)
glNewList(glist, GL_COMPILE)
glLoadIdentity()
glEndList()
self.assertTrue(glIsList(glist))
except:
w1.close()
raise
w2 = window.Window(200, 200, context=self.create_context(None))
try:
w2.set_visible(True)
w2.switch_to()
self.assertTrue(not glIsList(glist))
finally:
w1.close()
w2.close()
def test_context_share_texture(self):
w1 = window.Window(200, 200)
try:
w1.switch_to()
textures = c_uint()
glGenTextures(1, byref(textures))
texture = textures.value
glBindTexture(GL_TEXTURE_2D, texture)
data = (c_ubyte * 4)()
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA,
GL_UNSIGNED_BYTE, data)
self.assertTrue(glIsTexture(texture))
except:
w1.close()
raise
w2 = window.Window(200, 200)
try:
w2.switch_to()
self.assertTrue(glIsTexture(texture))
glDeleteTextures(1, byref(textures))
self.assertTrue(not glIsTexture(texture))
w1.switch_to()
self.assertTrue(not glIsTexture(texture))
finally:
w1.close()
w2.close()
if __name__ == '__main__':
unittest.main()
|
geordanr/pylint
|
refs/heads/master
|
test/input/func_r0903.py
|
10
|
"""test min methods"""
__revision__ = None
class Aaaa:
"""yo"""
def __init__(self):
pass
def meth1(self):
"""hehehe"""
print self
def _dontcount(self):
"""not public"""
print self
|
Dumbaz/ProjectEuler
|
refs/heads/master
|
Problem2/Problem_2.py
|
1
|
# Project Euler, Problem 2: sum the even-valued Fibonacci terms below four million.
fib_numbers = [1, 2]
def fib(first, second):
if first not in fib_numbers:
fib_numbers.append(first)
if second not in fib_numbers:
fib_numbers.append(second)
fib_numbers.append(first + second)
return first + second
# Extend the sequence until the last term reaches the four million limit.
while fib_numbers[-1] < 4000000:
fib(fib_numbers[-1], fib_numbers[-2])
print(fib_numbers)
# Sum the even-valued terms (use "total" to avoid shadowing the built-in sum()).
total = 0
for term in fib_numbers:
if term % 2 == 0:
total += term
print(total)
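# An equivalent, more compact approach (illustrative sketch only, same result):
#   total = 0
#   a, b = 1, 2
#   while b < 4000000:
#       if b % 2 == 0:
#           total += b
#       a, b = b, a + b
#   print(total)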
|
kimrutherford/intermine
|
refs/heads/master
|
testmodel/webapp/selenium/test/account-login-openid-test.py
|
18
|
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from test.testmodeltestcase import TestModelTestCase as Super
import unittest, time, re, os
class AccountLoginOpenID(Super):
def setUp(self):
Super.setUp(self)
@unittest.skip("Google OpenID has been shut down")
def test_account_login_openid(self):
name = os.getenv('TESTMODEL_OPENID_NAME')
password = os.getenv('TESTMODEL_OPENID_PASSWORD')
# Runs test assuming that google is our openid provider
browser = self.browser
browser.get("https://accounts.google.com/Logout")
browser.get(self.base_url + "/begin.do")
browser.find_element_by_link_text("Log in").click()
browser.find_element_by_css_selector("a.google").click()
browser.find_element_by_id("Email").clear()
browser.find_element_by_id("Email").send_keys(name)
browser.find_element_by_id("Passwd").clear()
browser.find_element_by_id("Passwd").send_keys(password)
browser.find_element_by_id("signIn").click()
# TBD until we can deploy testmodel on a server with access to openid
return
self.assertEqual("Log out", browser.find_element_by_link_text("Log out").text)
self.assertLoggedIn()
self.upload_list_and_save_it()
def assertLoggedIn(self):
self.assertEqual("Log out", browser.find_element_by_link_text("Log out").text)
def upload_list_and_save_it(self):
browser.get(self.base_url + "//bag.do?subtab=upload")
browser.find_element_by_link_text("Lists").click()
browser.find_element_by_link_text("Upload").click()
Select(browser.find_element_by_id("typeSelector")).select_by_visible_text("Company")
browser.find_element_by_id("pasteInput").click()
browser.find_element_by_id("pasteInput").clear()
browser.find_element_by_id("pasteInput").send_keys("CompanyA,CompanyB,Dunder-Mifflin")
browser.find_element_by_id("submitBag").click()
browser.find_element_by_id("newBagName").clear()
browser.find_element_by_id("newBagName").send_keys("Company List 1")
header_link = self.browser.find_element_by_xpath("//*[@id=\"target\"]/div[1]/header/a")
header_link.click()
browser.get(self.base_url + "/bag.do?subtab=view.do")
# Create a list and save it
|
rcocetta/kano-profile
|
refs/heads/master
|
kano_world/config.py
|
1
|
#!/usr/bin/env python
# config.py
#
# Copyright (C) 2014, 2015 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU General Public License v2
#
import os
import yaml
CONF_FILE = '/etc/kano-world.conf'
def load_conf():
conf = None
if os.path.exists(CONF_FILE):
with open(CONF_FILE, 'r') as f:
conf = yaml.load(f)
if conf is None:
conf = {}
if 'api_url' not in conf:
conf['api_url'] = 'https://api.kano.me'
if 'world_url' not in conf:
conf['world_url'] = 'http://world.kano.me'
return conf
CONF = load_conf()
API_URL = CONF['api_url']
WORLD_URL = CONF['world_url']
def get_world_url(path):
return "{}/{}".format(WORLD_URL, path)
def get_api_url(path):
return "{}/{}".format(API_URL, path)
|
allenlavoie/tensorflow
|
refs/heads/master
|
tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
|
9
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A decoder that performs beam search."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import beam_search_ops
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import nest
__all__ = [
"BeamSearchDecoderOutput",
"BeamSearchDecoderState",
"BeamSearchDecoder",
"FinalBeamSearchDecoderOutput",
"tile_batch",
]
class BeamSearchDecoderState(
collections.namedtuple("BeamSearchDecoderState",
("cell_state", "log_probs", "finished", "lengths"))):
pass
class BeamSearchDecoderOutput(
collections.namedtuple("BeamSearchDecoderOutput",
("scores", "predicted_ids", "parent_ids"))):
pass
class FinalBeamSearchDecoderOutput(
collections.namedtuple("FinalBeamDecoderOutput",
["predicted_ids", "beam_search_decoder_output"])):
"""Final outputs returned by the beam search after all decoding is finished.
Args:
predicted_ids: The final prediction. A tensor of shape
`[batch_size, T, beam_width]` (or `[T, batch_size, beam_width]` if
`output_time_major` is True). Beams are ordered from best to worst.
beam_search_decoder_output: An instance of `BeamSearchDecoderOutput` that
describes the state of the beam search.
"""
pass
def _tile_batch(t, multiplier):
"""Core single-tensor implementation of tile_batch."""
t = ops.convert_to_tensor(t, name="t")
shape_t = array_ops.shape(t)
if t.shape.ndims is None or t.shape.ndims < 1:
raise ValueError("t must have statically known rank")
tiling = [1] * (t.shape.ndims + 1)
tiling[1] = multiplier
tiled_static_batch_size = (
t.shape[0].value * multiplier if t.shape[0].value is not None else None)
tiled = array_ops.tile(array_ops.expand_dims(t, 1), tiling)
tiled = array_ops.reshape(tiled,
array_ops.concat(
([shape_t[0] * multiplier], shape_t[1:]), 0))
tiled.set_shape(
tensor_shape.TensorShape([tiled_static_batch_size]).concatenate(
t.shape[1:]))
return tiled
def tile_batch(t, multiplier, name=None):
"""Tile the batch dimension of a (possibly nested structure of) tensor(s) t.
For each tensor t in a (possibly nested structure) of tensors,
this function takes a tensor t shaped `[batch_size, s0, s1, ...]` composed of
minibatch entries `t[0], ..., t[batch_size - 1]` and tiles it to have a shape
`[batch_size * multiplier, s0, s1, ...]` composed of minibatch entries
`t[0], t[0], ..., t[1], t[1], ...` where each minibatch entry is repeated
`multiplier` times.
Args:
t: `Tensor` shaped `[batch_size, ...]`.
multiplier: Python int.
name: Name scope for any created operations.
Returns:
A (possibly nested structure of) `Tensor` shaped
`[batch_size * multiplier, ...]`.
Raises:
ValueError: if tensor(s) `t` do not have a statically known rank or
the rank is < 1.
"""
flat_t = nest.flatten(t)
with ops.name_scope(name, "tile_batch", flat_t + [multiplier]):
return nest.map_structure(lambda t_: _tile_batch(t_, multiplier), t)
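# Illustrative example (comment only, not part of the original source): with a
# [2, 2] input and multiplier=3, tile_batch repeats each batch entry three
# times along the batch dimension:
#   t = tf.constant([[1, 2], [3, 4]])      # shape [2, 2]
#   tiled = tile_batch(t, multiplier=3)    # shape [6, 2]
#   # rows: [1,2], [1,2], [1,2], [3,4], [3,4], [3,4]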
def gather_tree_from_array(t, parent_ids, sequence_length):
"""Calculates the full beams for `TensorArray`s.
Args:
t: A stacked `TensorArray` of size `max_time` that contains `Tensor`s of
shape `[batch_size, beam_width, s]` or `[batch_size * beam_width, s]`
where `s` is the depth shape.
parent_ids: The parent ids of shape `[max_time, batch_size, beam_width]`.
sequence_length: The sequence length of shape `[batch_size, beam_width]`.
Returns:
A `Tensor` which is a stacked `TensorArray` of the same size and type as
`t` and where beams are sorted in each `Tensor` according to `parent_ids`.
"""
max_time = parent_ids.shape[0].value or array_ops.shape(parent_ids)[0]
batch_size = parent_ids.shape[1].value or array_ops.shape(parent_ids)[1]
beam_width = parent_ids.shape[2].value or array_ops.shape(parent_ids)[2]
# Generate beam ids that will be reordered by gather_tree.
beam_ids = array_ops.expand_dims(
array_ops.expand_dims(math_ops.range(beam_width), 0), 0)
beam_ids = array_ops.tile(beam_ids, [max_time, batch_size, 1])
mask = array_ops.sequence_mask(
sequence_length, maxlen=max_time, dtype=dtypes.int32)
mask = array_ops.transpose(mask, perm=[2, 0, 1])
# Use beam_width + 1 to mark the end of beam.
masked_beam_ids = (beam_ids * mask) + (1 - mask) * (beam_width + 1)
max_sequence_lengths = math_ops.to_int32(
math_ops.reduce_max(sequence_length, axis=1))
sorted_beam_ids = beam_search_ops.gather_tree(
step_ids=masked_beam_ids,
parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=beam_width + 1)
# For out of range steps, simply copy the same beam.
sorted_beam_ids = array_ops.where(
math_ops.cast(mask, dtypes.bool), x=sorted_beam_ids, y=beam_ids)
# Generate indices for gather_nd.
time_ind = array_ops.tile(array_ops.reshape(
math_ops.range(max_time), [-1, 1, 1]), [1, batch_size, beam_width])
batch_ind = array_ops.tile(array_ops.reshape(
math_ops.range(batch_size), [-1, 1, 1]), [1, max_time, beam_width])
batch_ind = array_ops.transpose(batch_ind, perm=[1, 0, 2])
indices = array_ops.stack([time_ind, batch_ind, sorted_beam_ids], -1)
# Gather from a tensor with collapsed additional dimensions.
gather_from = t
final_shape = array_ops.shape(gather_from)
gather_from = array_ops.reshape(
gather_from, [max_time, batch_size, beam_width, -1])
ordered = array_ops.gather_nd(gather_from, indices)
ordered = array_ops.reshape(ordered, final_shape)
return ordered
def _check_maybe(t):
if t.shape.ndims is None:
raise ValueError(
"Expected tensor (%s) to have known rank, but ndims == None." % t)
def _check_static_batch_beam_maybe(shape, batch_size, beam_width):
"""Raises an exception if dimensions are known statically and can not be
reshaped to [batch_size, beam_size, -1].
"""
reshaped_shape = tensor_shape.TensorShape([batch_size, beam_width, None])
if (batch_size is not None and shape[0].value is not None
and (shape[0] != batch_size * beam_width
or (shape.ndims >= 2 and shape[1].value is not None
and (shape[0] != batch_size or shape[1] != beam_width)))):
tf_logging.warn("TensorArray reordering expects elements to be "
"reshapable to %s which is incompatible with the "
"current shape %s. Consider setting "
"reorder_tensor_arrays to False to disable TensorArray "
"reordering during the beam search."
% (reshaped_shape, shape))
return False
return True
def _check_batch_beam(t, batch_size, beam_width):
"""Returns an Assert operation checking that the elements of the stacked
TensorArray can be reshaped to [batch_size, beam_size, -1]. At this point,
the TensorArray elements have a known rank of at least 1.
"""
error_message = ("TensorArray reordering expects elements to be "
"reshapable to [batch_size, beam_size, -1] which is "
"incompatible with the dynamic shape of %s elements. "
"Consider setting reorder_tensor_arrays to False to disable "
"TensorArray reordering during the beam search."
% (t.name))
rank = t.shape.ndims
shape = array_ops.shape(t)
if rank == 2:
condition = math_ops.equal(shape[1], batch_size * beam_width)
else:
condition = math_ops.logical_or(
math_ops.equal(shape[1], batch_size * beam_width),
math_ops.logical_and(
math_ops.equal(shape[1], batch_size),
math_ops.equal(shape[2], beam_width)))
return control_flow_ops.Assert(condition, [error_message])
class BeamSearchDecoder(decoder.Decoder):
"""BeamSearch sampling decoder.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
@{tf.contrib.seq2seq.tile_batch} (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
memory=tiled_encoder_outputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
"""
def __init__(self,
cell,
embedding,
start_tokens,
end_token,
initial_state,
beam_width,
output_layer=None,
length_penalty_weight=0.0,
reorder_tensor_arrays=True):
"""Initialize the BeamSearchDecoder.
Args:
cell: An `RNNCell` instance.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
beam_width: Python integer, the number of beams.
output_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
`tf.layers.Dense`. Optional layer to apply to the RNN output prior
to storing the result or sampling.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
reorder_tensor_arrays: If `True`, `TensorArray`s' elements within the cell
state will be reordered according to the beam search path. If the
`TensorArray` can be reordered, the stacked form will be returned.
Otherwise, the `TensorArray` will be returned as is. Set this flag to
`False` if the cell state contains `TensorArray`s that are not amenable
to reordering.
Raises:
TypeError: if `cell` is not an instance of `RNNCell`,
or `output_layer` is not an instance of `tf.layers.Layer`.
ValueError: If `start_tokens` is not a vector or
`end_token` is not a scalar.
"""
rnn_cell_impl.assert_like_rnncell("cell", cell) # pylint: disable=protected-access
if (output_layer is not None and
not isinstance(output_layer, layers_base.Layer)):
raise TypeError(
"output_layer must be a Layer, received: %s" % type(output_layer))
self._cell = cell
self._output_layer = output_layer
self._reorder_tensor_arrays = reorder_tensor_arrays
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._batch_size = array_ops.size(start_tokens)
self._beam_width = beam_width
self._length_penalty_weight = length_penalty_weight
self._initial_cell_state = nest.map_structure(
self._maybe_split_batch_beams, initial_state, self._cell.state_size)
self._start_tokens = array_ops.tile(
array_ops.expand_dims(self._start_tokens, 1), [1, self._beam_width])
self._start_inputs = self._embedding_fn(self._start_tokens)
self._finished = array_ops.one_hot(
array_ops.zeros([self._batch_size], dtype=dtypes.int32),
depth=self._beam_width,
on_value=False,
off_value=True,
dtype=dtypes.bool)
@property
def batch_size(self):
return self._batch_size
def _rnn_output_size(self):
size = self._cell.output_size
if self._output_layer is None:
return size
else:
# To use layer's compute_output_shape, we need to convert the
# RNNCell's output_size entries into shapes with an unknown
# batch size. We then pass this through the layer's
# compute_output_shape and read off all but the first (batch)
# dimensions to get the output size of the rnn with the layer
# applied to the top.
output_shape_with_unknown_batch = nest.map_structure(
lambda s: tensor_shape.TensorShape([None]).concatenate(s), size)
layer_output_shape = self._output_layer.compute_output_shape(
output_shape_with_unknown_batch)
return nest.map_structure(lambda s: s[1:], layer_output_shape)
@property
def tracks_own_finished(self):
"""The BeamSearchDecoder shuffles its beams and their finished state.
For this reason, it conflicts with the `dynamic_decode` function's
tracking of finished states. Setting this property to true avoids
early stopping of decoding due to mismanagement of the finished state
in `dynamic_decode`.
Returns:
`True`.
"""
return True
@property
def output_size(self):
# Return the cell output and the id
return BeamSearchDecoderOutput(
scores=tensor_shape.TensorShape([self._beam_width]),
predicted_ids=tensor_shape.TensorShape([self._beam_width]),
parent_ids=tensor_shape.TensorShape([self._beam_width]))
@property
def output_dtype(self):
# Assume the dtype of the cell is the output_size structure
# containing the input_state's first component's dtype.
# Return that structure and int32 (the id)
dtype = nest.flatten(self._initial_cell_state)[0].dtype
return BeamSearchDecoderOutput(
scores=nest.map_structure(lambda _: dtype, self._rnn_output_size()),
predicted_ids=dtypes.int32,
parent_ids=dtypes.int32)
def initialize(self, name=None):
"""Initialize the decoder.
Args:
name: Name scope for any created operations.
Returns:
`(finished, start_inputs, initial_state)`.
"""
finished, start_inputs = self._finished, self._start_inputs
dtype = nest.flatten(self._initial_cell_state)[0].dtype
log_probs = array_ops.one_hot( # shape(batch_sz, beam_sz)
array_ops.zeros([self._batch_size], dtype=dtypes.int32),
depth=self._beam_width,
on_value=ops.convert_to_tensor(0.0, dtype=dtype),
off_value=ops.convert_to_tensor(-np.Inf, dtype=dtype),
dtype=dtype)
initial_state = BeamSearchDecoderState(
cell_state=self._initial_cell_state,
log_probs=log_probs,
finished=finished,
lengths=array_ops.zeros(
[self._batch_size, self._beam_width], dtype=dtypes.int64))
return (finished, start_inputs, initial_state)
def finalize(self, outputs, final_state, sequence_lengths):
"""Finalize and return the predicted_ids.
Args:
outputs: An instance of BeamSearchDecoderOutput.
final_state: An instance of BeamSearchDecoderState. Passed through to the
output.
sequence_lengths: An `int64` tensor shaped `[batch_size, beam_width]`.
The sequence lengths determined for each beam during decode.
**NOTE** These are ignored; the updated sequence lengths are stored in
`final_state.lengths`.
Returns:
outputs: An instance of `FinalBeamSearchDecoderOutput` where the
predicted_ids are the result of calling _gather_tree.
final_state: The same input instance of `BeamSearchDecoderState`.
"""
del sequence_lengths
# Get max_sequence_length across all beams for each batch.
max_sequence_lengths = math_ops.to_int32(
math_ops.reduce_max(final_state.lengths, axis=1))
predicted_ids = beam_search_ops.gather_tree(
outputs.predicted_ids,
outputs.parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=self._end_token)
if self._reorder_tensor_arrays:
final_state = final_state._replace(cell_state=nest.map_structure(
lambda t: self._maybe_sort_array_beams(
t, outputs.parent_ids, final_state.lengths),
final_state.cell_state))
outputs = FinalBeamSearchDecoderOutput(
beam_search_decoder_output=outputs, predicted_ids=predicted_ids)
return outputs, final_state
def _merge_batch_beams(self, t, s=None):
"""Merges the tensor from a batch of beams into a batch by beams.
More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
reshape this into [batch_size*beam_width, s]
Args:
t: Tensor of dimension [batch_size, beam_width, s]
s: (Possibly known) depth shape.
Returns:
A reshaped version of t with dimension [batch_size * beam_width, s].
"""
if isinstance(s, ops.Tensor):
s = tensor_shape.as_shape(tensor_util.constant_value(s))
else:
s = tensor_shape.TensorShape(s)
t_shape = array_ops.shape(t)
static_batch_size = tensor_util.constant_value(self._batch_size)
batch_size_beam_width = (
None
if static_batch_size is None else static_batch_size * self._beam_width)
reshaped_t = array_ops.reshape(
t,
array_ops.concat(([self._batch_size * self._beam_width], t_shape[2:]),
0))
reshaped_t.set_shape(
(tensor_shape.TensorShape([batch_size_beam_width]).concatenate(s)))
return reshaped_t
def _split_batch_beams(self, t, s=None):
"""Splits the tensor from a batch by beams into a batch of beams.
More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
reshape this into [batch_size, beam_width, s]
Args:
t: Tensor of dimension [batch_size*beam_width, s].
s: (Possibly known) depth shape.
Returns:
A reshaped version of t with dimension [batch_size, beam_width, s].
Raises:
ValueError: If, after reshaping, the new tensor is not shaped
`[batch_size, beam_width, s]` (assuming batch_size and beam_width
are known statically).
"""
if isinstance(s, ops.Tensor):
s = tensor_shape.TensorShape(tensor_util.constant_value(s))
else:
s = tensor_shape.TensorShape(s)
t_shape = array_ops.shape(t)
reshaped_t = array_ops.reshape(
t,
array_ops.concat(([self._batch_size, self._beam_width], t_shape[1:]),
0))
static_batch_size = tensor_util.constant_value(self._batch_size)
expected_reshaped_shape = tensor_shape.TensorShape(
[static_batch_size, self._beam_width]).concatenate(s)
if not reshaped_t.shape.is_compatible_with(expected_reshaped_shape):
raise ValueError("Unexpected behavior when reshaping between beam width "
"and batch size. The reshaped tensor has shape: %s. "
"We expected it to have shape "
"(batch_size, beam_width, depth) == %s. Perhaps you "
"forgot to create a zero_state with "
"batch_size=encoder_batch_size * beam_width?" %
(reshaped_t.shape, expected_reshaped_shape))
reshaped_t.set_shape(expected_reshaped_shape)
return reshaped_t
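# Shape round-trip note (illustrative, not part of the original source): with
# batch_size=4 and beam_width=3, _merge_batch_beams maps a tensor of shape
# [4, 3, 128] to [12, 128], while _split_batch_beams maps [12, 128] back to
# [4, 3, 128].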
def _maybe_split_batch_beams(self, t, s):
"""Maybe splits the tensor from a batch by beams into a batch of beams.
We do this so that we can use nest and not run into problems with shapes.
Args:
t: `Tensor`, either scalar or shaped `[batch_size * beam_width] + s`.
s: `Tensor`, Python int, or `TensorShape`.
Returns:
If `t` is a matrix or higher order tensor, then the return value is
`t` reshaped to `[batch_size, beam_width] + s`. Otherwise `t` is
returned unchanged.
Raises:
ValueError: If the rank of `t` is not statically known.
"""
if isinstance(t, tensor_array_ops.TensorArray):
return t
_check_maybe(t)
if t.shape.ndims >= 1:
return self._split_batch_beams(t, s)
else:
return t
def _maybe_merge_batch_beams(self, t, s):
"""Splits the tensor from a batch by beams into a batch of beams.
More exactly, `t` is a tensor of dimension `[batch_size * beam_width] + s`,
then we reshape it to `[batch_size, beam_width] + s`.
Args:
t: `Tensor` of dimension `[batch_size * beam_width] + s`.
s: `Tensor`, Python int, or `TensorShape`.
Returns:
A reshaped version of t with shape `[batch_size, beam_width] + s`.
Raises:
ValueError: If the rank of `t` is not statically known.
"""
if isinstance(t, tensor_array_ops.TensorArray):
return t
_check_maybe(t)
if t.shape.ndims >= 2:
return self._merge_batch_beams(t, s)
else:
return t
def _maybe_sort_array_beams(self, t, parent_ids, sequence_length):
"""Maybe sorts beams within a `TensorArray`.
Args:
t: A `TensorArray` of size `max_time` that contains `Tensor`s of shape
`[batch_size, beam_width, s]` or `[batch_size * beam_width, s]` where
`s` is the depth shape.
parent_ids: The parent ids of shape `[max_time, batch_size, beam_width]`.
sequence_length: The sequence length of shape `[batch_size, beam_width]`.
Returns:
A `TensorArray` where beams are sorted in each `Tensor` or `t` itself if
it is not a `TensorArray` or does not meet shape requirements.
"""
if not isinstance(t, tensor_array_ops.TensorArray):
return t
# pylint: disable=protected-access
if (not t._infer_shape or not t._element_shape
or t._element_shape[0].ndims is None
or t._element_shape[0].ndims < 1):
shape = (
t._element_shape[0] if t._infer_shape and t._element_shape
else tensor_shape.TensorShape(None))
tf_logging.warn("The TensorArray %s in the cell state is not amenable to "
"sorting based on the beam search result. For a "
"TensorArray to be sorted, its elements shape must be "
"defined and have at least a rank of 1, but saw shape: %s"
% (t.handle.name, shape))
return t
shape = t._element_shape[0]
# pylint: enable=protected-access
if not _check_static_batch_beam_maybe(
shape, tensor_util.constant_value(self._batch_size), self._beam_width):
return t
t = t.stack()
with ops.control_dependencies(
[_check_batch_beam(t, self._batch_size, self._beam_width)]):
return gather_tree_from_array(t, parent_ids, sequence_length)
def step(self, time, inputs, state, name=None):
"""Perform a decoding step.
Args:
time: scalar `int32` tensor.
inputs: A (structure of) input tensors.
state: A (structure of) state tensors and TensorArrays.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
batch_size = self._batch_size
beam_width = self._beam_width
end_token = self._end_token
length_penalty_weight = self._length_penalty_weight
with ops.name_scope(name, "BeamSearchDecoderStep", (time, inputs, state)):
cell_state = state.cell_state
inputs = nest.map_structure(
lambda inp: self._merge_batch_beams(inp, s=inp.shape[2:]), inputs)
cell_state = nest.map_structure(self._maybe_merge_batch_beams, cell_state,
self._cell.state_size)
cell_outputs, next_cell_state = self._cell(inputs, cell_state)
cell_outputs = nest.map_structure(
lambda out: self._split_batch_beams(out, out.shape[1:]), cell_outputs)
next_cell_state = nest.map_structure(
self._maybe_split_batch_beams, next_cell_state, self._cell.state_size)
if self._output_layer is not None:
cell_outputs = self._output_layer(cell_outputs)
beam_search_output, beam_search_state = _beam_search_step(
time=time,
logits=cell_outputs,
next_cell_state=next_cell_state,
beam_state=state,
batch_size=batch_size,
beam_width=beam_width,
end_token=end_token,
length_penalty_weight=length_penalty_weight)
finished = beam_search_state.finished
sample_ids = beam_search_output.predicted_ids
next_inputs = control_flow_ops.cond(
math_ops.reduce_all(finished), lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (beam_search_output, beam_search_state, next_inputs, finished)
def _beam_search_step(time, logits, next_cell_state, beam_state, batch_size,
beam_width, end_token, length_penalty_weight):
"""Performs a single step of Beam Search Decoding.
Args:
time: Beam search time step, should start at 0. At time 0 we assume
that all beams are equal and consider only the first beam for
continuations.
logits: Logits at the current time step. A tensor of shape
`[batch_size, beam_width, vocab_size]`
next_cell_state: The next state from the cell, e.g. an instance of
AttentionWrapperState if the cell is attentional.
beam_state: Current state of the beam search.
An instance of `BeamSearchDecoderState`.
batch_size: The batch size for this input.
beam_width: Python int. The size of the beams.
end_token: The int32 end token.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
Returns:
A tuple `(output, next_state)`, where `output` is a `BeamSearchDecoderOutput`
for this step and `next_state` is the updated `BeamSearchDecoderState`.
"""
static_batch_size = tensor_util.constant_value(batch_size)
# Calculate the current lengths of the predictions
prediction_lengths = beam_state.lengths
previously_finished = beam_state.finished
# Calculate the total log probs for the new hypotheses
# Final Shape: [batch_size, beam_width, vocab_size]
step_log_probs = nn_ops.log_softmax(logits)
step_log_probs = _mask_probs(step_log_probs, end_token, previously_finished)
total_probs = array_ops.expand_dims(beam_state.log_probs, 2) + step_log_probs
# Calculate the continuation lengths by adding to all continuing beams.
vocab_size = logits.shape[-1].value or array_ops.shape(logits)[-1]
lengths_to_add = array_ops.one_hot(
indices=array_ops.fill([batch_size, beam_width], end_token),
depth=vocab_size,
on_value=np.int64(0),
off_value=np.int64(1),
dtype=dtypes.int64)
add_mask = math_ops.to_int64(math_ops.logical_not(previously_finished))
lengths_to_add *= array_ops.expand_dims(add_mask, 2)
new_prediction_lengths = (
lengths_to_add + array_ops.expand_dims(prediction_lengths, 2))
# Calculate the scores for each beam
scores = _get_scores(
log_probs=total_probs,
sequence_lengths=new_prediction_lengths,
length_penalty_weight=length_penalty_weight)
time = ops.convert_to_tensor(time, name="time")
# During the first time step we only consider the initial beam
scores_flat = array_ops.reshape(scores, [batch_size, -1])
# Pick the next beams according to the specified successors function
next_beam_size = ops.convert_to_tensor(
beam_width, dtype=dtypes.int32, name="beam_width")
next_beam_scores, word_indices = nn_ops.top_k(scores_flat, k=next_beam_size)
next_beam_scores.set_shape([static_batch_size, beam_width])
word_indices.set_shape([static_batch_size, beam_width])
# Pick out the probs, beam_ids, and states according to the chosen predictions
next_beam_probs = _tensor_gather_helper(
gather_indices=word_indices,
gather_from=total_probs,
batch_size=batch_size,
range_size=beam_width * vocab_size,
gather_shape=[-1],
name="next_beam_probs")
# Note: just doing the following
# math_ops.to_int32(word_indices % vocab_size,
# name="next_beam_word_ids")
# would be a lot cleaner but for reasons unclear, that hides the results of
# the op which prevents capturing it with tfdbg debug ops.
raw_next_word_ids = math_ops.mod(
word_indices, vocab_size, name="next_beam_word_ids")
next_word_ids = math_ops.to_int32(raw_next_word_ids)
next_beam_ids = math_ops.to_int32(
word_indices / vocab_size, name="next_beam_parent_ids")
# Append new ids to current predictions
previously_finished = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=previously_finished,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_finished = math_ops.logical_or(
previously_finished,
math_ops.equal(next_word_ids, end_token),
name="next_beam_finished")
# Calculate the length of the next predictions.
# 1. Finished beams remain unchanged.
# 2. Beams that are now finished (EOS predicted) have their length
# increased by 1.
# 3. Beams that are not yet finished have their length increased by 1.
lengths_to_add = math_ops.to_int64(math_ops.logical_not(previously_finished))
next_prediction_len = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=beam_state.lengths,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_prediction_len += lengths_to_add
# Pick out the cell_states according to the next_beam_ids. We use a
# different gather_shape here because the cell_state tensors, i.e.
# the tensors that would be gathered from, all have dimension
# greater than two and we need to preserve those dimensions.
# pylint: disable=g-long-lambda
next_cell_state = nest.map_structure(
lambda gather_from: _maybe_tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=gather_from,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[batch_size * beam_width, -1]),
next_cell_state)
# pylint: enable=g-long-lambda
next_state = BeamSearchDecoderState(
cell_state=next_cell_state,
log_probs=next_beam_probs,
lengths=next_prediction_len,
finished=next_finished)
output = BeamSearchDecoderOutput(
scores=next_beam_scores,
predicted_ids=next_word_ids,
parent_ids=next_beam_ids)
return output, next_state
def _get_scores(log_probs, sequence_lengths, length_penalty_weight):
"""Calculates scores for beam search hypotheses.
Args:
log_probs: The log probabilities with shape
`[batch_size, beam_width, vocab_size]`.
sequence_lengths: The array of sequence lengths.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
Returns:
The scores normalized by the length_penalty.
"""
length_penalty_ = _length_penalty(
sequence_lengths=sequence_lengths, penalty_factor=length_penalty_weight)
return log_probs / length_penalty_
def _length_penalty(sequence_lengths, penalty_factor):
"""Calculates the length penalty. See https://arxiv.org/abs/1609.08144.
Returns the length penalty tensor:
```
[(5+sequence_lengths)/6]**penalty_factor
```
where all operations are performed element-wise.
Args:
sequence_lengths: `Tensor`, the sequence lengths of each hypotheses.
penalty_factor: A scalar that weights the length penalty.
Returns:
If the penalty is `0`, returns the scalar `1.0`. Otherwise returns
the length penalty factor, a tensor with the same shape as
`sequence_lengths`.
"""
penalty_factor = ops.convert_to_tensor(penalty_factor, name="penalty_factor")
penalty_factor.set_shape(()) # penalty should be a scalar.
static_penalty = tensor_util.constant_value(penalty_factor)
if static_penalty is not None and static_penalty == 0:
return 1.0
return math_ops.div((5. + math_ops.to_float(sequence_lengths))
**penalty_factor, (5. + 1.)**penalty_factor)
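# Illustrative note (values assumed, not part of the original file): with
# penalty_factor=0.6 and a hypothesis of length 7, the penalty is
# ((5. + 7.) / 6.) ** 0.6 = 2 ** 0.6 ~= 1.516, so in _get_scores the summed
# log probability is divided by ~1.516 rather than by the raw length.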
def _mask_probs(probs, eos_token, finished):
"""Masks log probabilities.
The result is that finished beams allocate all probability mass to eos and
unfinished beams remain unchanged.
Args:
probs: Log probabilities of shape `[batch_size, beam_width, vocab_size]`
eos_token: An int32 id corresponding to the EOS token to allocate
probability to.
finished: A boolean tensor of shape `[batch_size, beam_width]` that
specifies which elements in the beam are finished already.
Returns:
A tensor of shape `[batch_size, beam_width, vocab_size]`, where unfinished
beams stay unchanged and finished beams are replaced with a tensor with all
probability on the EOS token.
"""
vocab_size = array_ops.shape(probs)[2]
# All finished examples are replaced with a vector that has all
# probability on EOS
finished_row = array_ops.one_hot(
eos_token,
vocab_size,
dtype=probs.dtype,
on_value=ops.convert_to_tensor(0., dtype=probs.dtype),
off_value=probs.dtype.min)
finished_probs = array_ops.tile(
array_ops.reshape(finished_row, [1, 1, -1]),
array_ops.concat([array_ops.shape(finished), [1]], 0))
finished_mask = array_ops.tile(
array_ops.expand_dims(finished, 2), [1, 1, vocab_size])
return array_ops.where(finished_mask, finished_probs, probs)
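# Illustrative note (not part of the original file): for a finished beam the
# replacement row is a one-hot on eos_token with log probability 0.0 there and
# dtype.min everywhere else, so the only continuation beam search can select
# for that beam is another EOS, which leaves its accumulated score unchanged.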
def _maybe_tensor_gather_helper(gather_indices, gather_from, batch_size,
range_size, gather_shape):
"""Maybe applies _tensor_gather_helper.
  This applies _tensor_gather_helper when gather_from has at least as many
  dimensions as the length of gather_shape. This is used in conjunction with
  nest so that we don't apply _tensor_gather_helper to inapplicable values
  like scalars.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
or the original tensor if its dimensions are too small.
"""
if isinstance(gather_from, tensor_array_ops.TensorArray):
return gather_from
_check_maybe(gather_from)
if gather_from.shape.ndims >= len(gather_shape):
return _tensor_gather_helper(
gather_indices=gather_indices,
gather_from=gather_from,
batch_size=batch_size,
range_size=range_size,
gather_shape=gather_shape)
else:
return gather_from
def _tensor_gather_helper(gather_indices,
gather_from,
batch_size,
range_size,
gather_shape,
name=None):
"""Helper for gathering the right indices from the tensor.
This works by reshaping gather_from to gather_shape (e.g. [-1]) and then
gathering from that according to the gather_indices, which are offset by
the right amounts in order to preserve the batch order.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The input batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
name: The tensor name for set of operations. By default this is
'tensor_gather_helper'. The final output is named 'output'.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
"""
with ops.name_scope(name, "tensor_gather_helper"):
range_ = array_ops.expand_dims(math_ops.range(batch_size) * range_size, 1)
gather_indices = array_ops.reshape(gather_indices + range_, [-1])
output = array_ops.gather(
array_ops.reshape(gather_from, gather_shape), gather_indices)
final_shape = array_ops.shape(gather_from)[:1 + len(gather_shape)]
static_batch_size = tensor_util.constant_value(batch_size)
final_static_shape = (
tensor_shape.TensorShape([static_batch_size]).concatenate(
gather_from.shape[1:1 + len(gather_shape)]))
output = array_ops.reshape(output, final_shape, name="output")
output.set_shape(final_static_shape)
return output
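# A minimal sketch of the index arithmetic above (values assumed, illustrative
# only): with batch_size=2, range_size=3 and gather_indices=[[0, 2], [1, 0]],
# range_ is [[0], [3]] and the flattened indices become [0, 2, 4, 3], so each
# batch row gathers only from its own contiguous block of range_size entries
# in the reshaped gather_from tensor.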
|
andrewsmedina/django
|
refs/heads/master
|
tests/get_earliest_or_latest/tests.py
|
8
|
from __future__ import absolute_import
from datetime import datetime
from django.test import TestCase
from .models import Article, Person
class EarliestOrLatestTests(TestCase):
"""Tests for the earliest() and latest() objects methods"""
def tearDown(self):
"""Makes sure Article has a get_latest_by"""
if not Article._meta.get_latest_by:
Article._meta.get_latest_by = 'pub_date'
def test_earliest(self):
# Because no Articles exist yet, earliest() raises ArticleDoesNotExist.
self.assertRaises(Article.DoesNotExist, Article.objects.earliest)
a1 = Article.objects.create(
headline="Article 1", pub_date=datetime(2005, 7, 26),
expire_date=datetime(2005, 9, 1)
)
a2 = Article.objects.create(
headline="Article 2", pub_date=datetime(2005, 7, 27),
expire_date=datetime(2005, 7, 28)
)
a3 = Article.objects.create(
headline="Article 3", pub_date=datetime(2005, 7, 28),
expire_date=datetime(2005, 8, 27)
)
a4 = Article.objects.create(
headline="Article 4", pub_date=datetime(2005, 7, 28),
expire_date=datetime(2005, 7, 30)
)
# Get the earliest Article.
self.assertEqual(Article.objects.earliest(), a1)
# Get the earliest Article that matches certain filters.
self.assertEqual(
Article.objects.filter(pub_date__gt=datetime(2005, 7, 26)).earliest(),
a2
)
# Pass a custom field name to earliest() to change the field that's used
# to determine the earliest object.
self.assertEqual(Article.objects.earliest('expire_date'), a2)
self.assertEqual(Article.objects.filter(
pub_date__gt=datetime(2005, 7, 26)).earliest('expire_date'), a2)
# Ensure that earliest() overrides any other ordering specified on the
# query. Refs #11283.
self.assertEqual(Article.objects.order_by('id').earliest(), a1)
# Ensure that error is raised if the user forgot to add a get_latest_by
# in the Model.Meta
Article.objects.model._meta.get_latest_by = None
self.assertRaisesMessage(
AssertionError,
"earliest() and latest() require either a field_name parameter or "
"'get_latest_by' in the model",
lambda: Article.objects.earliest(),
)
def test_latest(self):
# Because no Articles exist yet, latest() raises ArticleDoesNotExist.
self.assertRaises(Article.DoesNotExist, Article.objects.latest)
a1 = Article.objects.create(
headline="Article 1", pub_date=datetime(2005, 7, 26),
expire_date=datetime(2005, 9, 1)
)
a2 = Article.objects.create(
headline="Article 2", pub_date=datetime(2005, 7, 27),
expire_date=datetime(2005, 7, 28)
)
a3 = Article.objects.create(
headline="Article 3", pub_date=datetime(2005, 7, 27),
expire_date=datetime(2005, 8, 27)
)
a4 = Article.objects.create(
headline="Article 4", pub_date=datetime(2005, 7, 28),
expire_date=datetime(2005, 7, 30)
)
# Get the latest Article.
self.assertEqual(Article.objects.latest(), a4)
# Get the latest Article that matches certain filters.
self.assertEqual(
Article.objects.filter(pub_date__lt=datetime(2005, 7, 27)).latest(),
a1
)
# Pass a custom field name to latest() to change the field that's used
# to determine the latest object.
self.assertEqual(Article.objects.latest('expire_date'), a1)
self.assertEqual(
Article.objects.filter(pub_date__gt=datetime(2005, 7, 26)).latest('expire_date'),
a3,
)
# Ensure that latest() overrides any other ordering specified on the query. Refs #11283.
self.assertEqual(Article.objects.order_by('id').latest(), a4)
# Ensure that error is raised if the user forgot to add a get_latest_by
# in the Model.Meta
Article.objects.model._meta.get_latest_by = None
self.assertRaisesMessage(
AssertionError,
"earliest() and latest() require either a field_name parameter or "
"'get_latest_by' in the model",
lambda: Article.objects.latest(),
)
def test_latest_manual(self):
# You can still use latest() with a model that doesn't have
# "get_latest_by" set -- just pass in the field name manually.
p1 = Person.objects.create(name="Ralph", birthday=datetime(1950, 1, 1))
p2 = Person.objects.create(name="Stephanie", birthday=datetime(1960, 2, 3))
self.assertRaises(AssertionError, Person.objects.latest)
self.assertEqual(Person.objects.latest("birthday"), p2)
|
jaruba/chromium.src
|
refs/heads/nw12
|
build/toolchain/win/setup_toolchain.py
|
9
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import errno
import os
import re
import subprocess
import sys
"""
Copies the given "win tool" (which the toolchain uses to wrap compiler
invocations) and the environment blocks for the 32-bit and 64-bit builds on
Windows to the build directory.
The arguments are the visual studio install location and the location of the
win tool. The script assumes that the root build directory is the current dir
and the files will be written to the current directory.
"""
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
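# Illustrative sketch (paths assumed, not from the original file): a line such
# as "Path=C:\some\dir;C:\Windows" in the 'set' dump is split on the first '='
# and stored as env['PATH'], with the directory of the running python
# prepended (e.g. "C:\python;C:\some\dir;C:\Windows"), so gyp-win-tool can
# still find python when ninja runs later.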
def _SetupScript(target_arch, sdk_dir):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
if bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', 1))) and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We only support x64-hosted tools.
# TODO(scottmg|dpranke): Non-depot_tools toolchain: need to get Visual
# Studio install location from registry.
return [os.path.normpath(os.path.join(os.environ['GYP_MSVS_OVERRIDE_PATH'],
'VC/vcvarsall.bat')),
'amd64_x86' if target_arch == 'x86' else 'amd64']
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
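# Illustrative sketch (assumed input, not from the original file): for a dict
# like {'TMP': 'C:\\t', 'TEMP': 'C:\\t'} this returns the pairs
# 'TMP=C:\\t\x00TEMP=C:\\t\x00' (in dict iteration order) followed by a final
# '\x00' terminator, which is the layout CreateProcess expects for its
# lpEnvironment block.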
def _CopyTool(source_path):
"""Copies the given tool to the current directory, including a warning not
to edit it."""
with open(source_path) as source_file:
tool_source = source_file.readlines()
# Add header and write it out to the current directory (which should be the
# root build dir).
with open("gyp-win-tool", 'w') as tool_file:
tool_file.write(''.join([tool_source[0],
'# Generated by setup_toolchain.py do not edit.\n']
+ tool_source[1:]))
def main():
if len(sys.argv) != 6:
    print('Usage: setup_toolchain.py '
'<visual studio path> <win tool path> <win sdk path> '
'<runtime dirs> <cpu_arch>')
sys.exit(2)
vs_path = sys.argv[1]
tool_source = sys.argv[2]
win_sdk_path = sys.argv[3]
runtime_dirs = sys.argv[4]
cpu_arch = sys.argv[5]
_CopyTool(tool_source)
archs = ('x86', 'x64')
assert cpu_arch in archs
vc_bin_dir = ''
# TODO(scottmg|goma): Do we need an equivalent of
# ninja_use_custom_environment_files?
for arch in archs:
# Extract environment variables for subprocesses.
args = _SetupScript(arch, win_sdk_path)
args.extend(('&&', 'set'))
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
variables, _ = popen.communicate()
env = _ExtractImportantEnvironment(variables)
env['PATH'] = runtime_dirs + ';' + env['PATH']
if arch == cpu_arch:
for path in env['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(path, 'cl.exe')):
vc_bin_dir = os.path.realpath(path)
break
# The Windows SDK include directories must be first. They both have a sal.h,
# and the SDK one is newer and the SDK uses some newer features from it not
# present in the Visual Studio one.
if win_sdk_path:
additional_includes = ('{sdk_dir}\\Include\\shared;' +
'{sdk_dir}\\Include\\um;' +
'{sdk_dir}\\Include\\winrt;').format(
sdk_dir=win_sdk_path)
env['INCLUDE'] = additional_includes + env['INCLUDE']
env_block = _FormatAsEnvironmentBlock(env)
with open('environment.' + arch, 'wb') as f:
f.write(env_block)
assert vc_bin_dir
print 'vc_bin_dir = "%s"' % vc_bin_dir
if __name__ == '__main__':
main()
|
happy5214/pywikibot-core
|
refs/heads/master
|
scripts/welcome.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
u"""
Script to welcome new users.
This script works out of the box for Wikis that
have been defined in the script. It is currently used on the Dutch, Norwegian,
Albanian, Italian Wikipedia, Wikimedia Commons and English Wikiquote.
Ensure you have community support before running this bot!
Everything that needs customisation to support additional projects is
indicated by comments.
Description of basic functionality:
* Request a list of new users every period (default: 3600 seconds)
You can choose to break the script after the first check (see arguments)
* Check if new user has passed a threshold for a number of edits
(default: 1 edit)
* Optional: check the username for bad words or for a username that consists
solely of numbers; log this somewhere on the wiki (default: False)
Update: Added a whitelist (explanation below).
* If user has made enough edits (it can also be 0), check if user has an empty
talk page
* If user has an empty talk page, add a welcome message.
* Optional: Once the set number of users have been welcomed, add this to the
configured log page, one for each day (default: True)
* If no log page exists, create a header for the log page first.
This script (by default not yet implemented) uses two templates that need to
be on the local wiki:
* {{WLE}}: contains mark up code for log entries (just copy it from Commons)
* {{welcome}}: contains the information for new users
This script understands the following command-line arguments:
-edit[:#] Define how many edits a new user needs to be welcomed
(default: 1, max: 50)
-time[:#] Define how many seconds the bot sleeps before restart
(default: 3600)
-break Use this if you don't want the bot to restart at the end
(it will stop instead) (default: False)
-nlog Use this parameter if you do not want the bot to log all
welcomed users (default: False)
-limit[:#] Use this parameter to define how many users should be
checked (default: 50)
-offset[:TIME] Skip the latest new users (those newer than TIME)
to give interactive users a chance to welcome the
new users (default: now)
Timezone is the server timezone, GMT for Wikimedia
TIME format : yyyymmddhhmmss
-timeoffset[:#] Skip the latest new users, accounts newer than
# minutes
-numberlog[:#] The number of users to welcome before refreshing the
welcome log (default: 4)
-filter Enable the username checks for bad names (default: False)
-ask Use this parameter if you want to confirm each possible
bad username (default: False)
-random Use a random signature, taking the signatures from a wiki
page (for instructions, see below).
-file[:#] Use a file instead of a wiki page to take the random signatures
from. If you use this parameter, you don't need to use -random.
-sign Use one signature from command line instead of the default
-savedata This feature saves the random signature index so welcoming can
continue with the last signature used.
-sul Welcome the auto-created users (default: False)
-quiet Prevents users without contributions from being displayed
********************************* GUIDE ***********************************
Report, Bad and white list guide:
1) Set in the code which pages it will use to load the badwords, the
whitelist and the report
2) On these pages you have to add a "tuple" with the names that you want to
add to the two lists. For example: ('cat', 'mouse', 'dog')
You can also write other text on the page; it will work without problems.
3) What do the two pages do? Well, the bot will check if a badword is in
the username and set the "warning" flag to True. Then the bot checks if a
word from the whitelist is in the username. If so, it removes the word and
rechecks the bad word list to see if there are other badwords in the
username.
Example:
* dio is a badword
* Claudio is a normal name
* The username is "Claudio90 fuck!"
* The Bot finds dio and sets "warning"
* The Bot finds Claudio and sets "ok"
* The Bot finds fuck at the end and sets "warning"
* Result: The username is reported.
4) When a user is reported you have to check them and do the following:
* If they're ok, put the {{welcome}}
* If they're not, block them
* You can decide whether or not to put a "you are blocked, choose another
username" template.
* Delete the username from the page.
IMPORTANT: The bot checks the user in this order:
* Check if they have a talk page (if yes, skip)
* Check if they're blocked; if yes they will be skipped
* Check if they're on the report page; if yes they will be skipped
* Otherwise, they will be reported.
Random signature guide:
Some welcomed users will answer to the one who has signed the welcome message.
When you welcome many new users, you might be overwhelmed with such answers.
Therefore you can define usernames of other users who are willing to receive
some of these messages from newbies.
1) Set the page that the bot will load
2) Add the signatures in this way:
*<SPACE>SIGNATURE
<NEW LINE>
Example:
<pre>
* [[User:Filnik|Filnik]]
* [[User:Rock|Rock]]
</pre>
NOTE: The white space and <pre></pre> aren't required, but I suggest you
use them.
******************************** Badwords **********************************
The list of badwords in the code is open. If you think that a word is
international and must be blocked in all projects, feel free to add it.
Likewise, if you think that a word isn't so international, feel free to delete
it. However, there is a dynamic wiki page for loading the badwords of your
project, or you can add them directly in the source code that you are using
without adding or deleting them here.
Some words, like "Administrator" or "Dio" (God in Italian) or "Jimbo", aren't
badwords at all but can be used in some bad nicknames.
"""
#
# (C) Alfio, 2005
# (C) Kyle/Orgullomoore, 2006-2007
# (C) Siebrand Mazeland, 2006-2009
# (C) Filnik, 2007-2011
# (C) Daniel Herding, 2007
# (C) Alex Shih-Han Lin, 2009-2010
# (C) xqt, 2009-2017
# (C) Pywikibot team, 2008-2017
#
# Distributed under the terms of the MIT license.
#
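# Example invocation (illustrative only; the options are documented in the
# module docstring above):
#   python pwb.py welcome -edit:3 -time:7200 -filter -limit:100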
from __future__ import absolute_import, unicode_literals
import codecs
from datetime import timedelta
import locale
import re
import sys
import time
from random import choice
import pywikibot
from pywikibot import config, i18n
from pywikibot.tools.formatter import color_format
from pywikibot.tools import issue_deprecation_warning, UnicodeType
locale.setlocale(locale.LC_ALL, '')
# Script uses the method i18n.translate() to find the right
# page/user/summary/etc so the need to specify language and project have
# been eliminated.
# FIXME: Not all language/project combinations have been defined yet.
# Add the following strings to customise for a language:
# logbook, netext, report_page, bad_pag, report_text, random_sign,
# whitelist_pg, final_new_text_additions, logpage_header
############################################################################
# The page where the bot will save the log (e.g. Wikipedia:Welcome log).
#
# ATTENTION: Projects not listed won't write a log to the wiki.
logbook = {
'ar': u'Project:سجل الترحيب',
'fr': ('Wikipedia:Prise de décision/'
'Accueil automatique des nouveaux par un robot/log'),
'ga': u'Project:Log fáilte',
'it': u'Project:Benvenuto Bot/Log',
'ja': u'利用者:Alexbot/Welcomebotログ',
'nl': u'Project:Logboek welkom',
'no': u'Project:Velkomstlogg',
'sq': u'Project:Tung log',
'sr': u'Project:Добродошлице',
'zh': u'User:Welcomebot/欢迎日志',
'commons': u'Project:Welcome log',
}
# The text for the welcome message (e.g. {{welcome}}) and %s at the end
# that is your signature (the bot has a random parameter to add different
# sign, so in this way it will change according to your parameters).
netext = {
'commons': '{{subst:welcome}} %s',
'wikipedia': {
'am': u'{{subst:Welcome}} %s',
'ar': u'{{subst:ترحيب}} %s',
'ba': '{{Hello}} %s',
'bn': u'{{subst:স্বাগতম/বট}} %s',
'da': u'{{velkommen|%s}}',
'en': u'{{subst:welcome}} %s',
'fa': u'{{جا:خوشامد}} %s',
'fr': u'{{subst:Discussion Projet:Aide/Bienvenue}} %s',
'ga': u'{{subst:fáilte}} %s',
'he': u'{{ס:ברוך הבא}} %s',
'id': u'{{subst:sdbot2}}\n%s',
'it': u'<!-- inizio template di benvenuto -->\n{{subst:Benvebot}}\n%s',
'ja': u'{{subst:Welcome/intro}}\n{{subst:welcome|%s}}',
'ka': u'{{ახალი მომხმარებელი}}--%s',
'kn': '{{subst:ಸುಸ್ವಾಗತ}} %s',
'ml': u'{{ബദൽ:സ്വാഗതം/bot}} %s',
'nap': u'{{Bemmenuto}}%s',
'nl': u'{{hola|bot|%s}}',
'no': u'{{subst:bruker:jhs/vk}} %s',
'pdc': u'{{subst:Wilkum}}%s',
'pt': u'{{subst:bem vindo}} %s',
'roa-tara': u'{{Bovègne}} %s',
'ru': u'{{Hello}} %s',
'sq': u'{{subst:tung}} %s',
'sr': u'{{Добродошлица}} %s',
'vec': u'{{subst:Benvegnù|%s}}',
'vo': u'{{benokömö}} %s',
'zh': u'{{subst:welcome|sign=%s}}',
'zh-yue': u'{{歡迎}}--%s',
},
'wikinews': {
'ar': '{{subst:ترحيب}} %s',
'fa': u'{{خوشامد۲|%s}}',
'it': u'{{subst:benvenuto}}',
'zh': u'{{subst:welcome}} %s',
},
'wiktionary': {
'ar': u'{{subst:ترحيب}} %s',
'bn': u'{{subst:স্বাগতম|%s}}',
'fa': u'{{جا:خوشامد|%s}}',
'it': u'{{subst:Utente:Filnik/Benve|firma=%s}}',
},
'wikiversity': {
'ar': u'{{subst:ترحيب}} %s',
'de': u'{{subst:Willkommen|%s}}',
'el': u'{{subst:καλωσόρισμα}} %s',
'en': u'{{subst:Welcome}}\n\n{{subst:Talktome}} %s',
'es': u'{{subst:bienvenido usuario}} %s',
'fr': u'{{Bienvenue}} %s',
'it': u'{{subst:Benvenuto}} %s',
},
}
# The page where the bot will report users with a possibly bad username.
report_page = {
'commons': ("Project:Administrators'noticeboard/User problems/Usernames"
"to be checked"),
'wikipedia': {
'am': u'User:Beria/Report',
'ar': 'Project:إخطار الإداريين/أسماء مستخدمين للفحص',
'da': u'Bruger:Broadbot/Report',
'en': u'Project:Administrator intervention against vandalism',
'fa': u'Project:تابلوی اعلانات مدیران/گزارش ربات',
'ga': u'Project:Log fáilte/Drochainmneacha',
'it': u'Project:Benvenuto_Bot/Report',
'ja': u'利用者:Alexbot/report',
'nl': ('Project:Verzoekpagina voor moderatoren'
'/RegBlok/Te controleren gebruikersnamen'),
'no': u'Bruker:JhsBot II/Rapport',
'pdc': u'Benutzer:Xqt/Report',
'ru': u'Участник:LatitudeBot/Рапорт',
'sq': u'User:EagleBot/Report',
'sr': u'User:SashatoBot/Записи',
'zh': u'User:Welcomebot/report',
'zh-yue': u'User:Alexbot/report',
}
}
# The page where the bot reads the real-time bad words page
# (this parameter is optional).
bad_pag = {
'commons': 'Project:Welcome log/Bad_names',
'wikipedia': {
'am': u'User:Beria/Bad_names',
'ar': u'Project:سجل الترحيب/أسماء سيئة',
'en': u'Project:Welcome log/Bad_names',
'fa': u'Project:سیاهه خوشامد/نام بد',
'it': u'Project:Benvenuto_Bot/Lista_Badwords',
'ja': u'Project:不適切な名前の利用者',
'nl': u'Project:Logboek_welkom/Bad_names',
'no': u'Bruker:JhsBot/Daarlige ord',
'ru': u'Участник:LatitudeBot/Чёрный список',
'sq': u'User:Eagleal/Bad_names',
'sr': u'Додавање корисника за проверу',
'zh': u'User:Welcomebot/badname',
'zh-yue': u'User:Welcomebot/badname',
}
}
timeselected = u' ~~~~~' # Defining the time used after the signature
# The text for reporting a possibly bad username
# e.g. *[[Talk_page:Username|Username]]).
report_text = {
'commons': '\n*{{user3|%s}}' + timeselected,
'wikipedia': {
'am': u"\n*[[User talk:%s]]" + timeselected,
'ar': u"\n*{{user13|%s}}" + timeselected,
'da': u'\n*[[Bruger Diskussion:%s]] ' + timeselected,
'de': u'\n*[[Benutzer Diskussion:%s]] ' + timeselected,
'en': u'\n*{{Userlinks|%s}} ' + timeselected,
'fa': u'\n*{{کاربر|%s}}' + timeselected,
'fr': u'\n*{{u|%s}} ' + timeselected,
'ga': u'\n*[[Plé úsáideora:%s]] ' + timeselected,
'it': u"\n{{Reported|%s|",
'ja': u"\n*{{User2|%s}}" + timeselected,
'nl': u'\n*{{linkgebruiker%s}} ' + timeselected,
'no': u'\n*{{bruker|%s}} ' + timeselected,
'pdc': u'\n*[[Benutzer Diskussion:%s]] ' + timeselected,
'sq': u'\n*[[User:%s]] ' + timeselected,
'zh': u"\n*{{User|%s}}" + timeselected
}
}
# Set where you load your list of signatures that the bot will load if you use
# the random argument (this parameter is optional).
random_sign = {
'am': u'User:Beria/Signatures',
'ar': u'Project:سجل الترحيب/توقيعات',
'ba': 'Ҡатнашыусы:Salamat bot/Ярҙам',
'da': u'Wikipedia:Velkommen/Signaturer',
'en': u'Project:Welcome log/Sign',
'fa': u'Project:سیاهه خوشامد/امضاها',
'fr': u'Projet:Service de Parrainage Actif/Signatures',
'it': u'Project:Benvenuto_Bot/Firme',
# jawiki: Don't localize. Community discussion oppose to this feature
# [[ja:Wikipedia:Bot作業依頼/ウェルカムメッセージ貼り付け依頼]]
'nap': u'User:Cellistbot/Firme',
'roa-tara': u'Wikipedia:Bovègne Bot/Firme',
'ru': u'Участник:LatitudeBot/Sign',
'vec': u'Utente:FriBot/Firme',
'zh': u'User:Welcomebot/欢迎日志/用户',
}
# The page where the bot reads the real-time whitelist page.
# (this parameter is optional).
whitelist_pg = {
'ar': u'Project:سجل الترحيب/قائمة بيضاء',
'en': u'User:Filnik/whitelist',
'ga': u'Project:Log fáilte/Bánliosta',
'it': u'Project:Benvenuto_Bot/Lista_Whitewords',
'ru': u'Участник:LatitudeBot/Белый_список',
}
# Text after the {{welcome}} template, if you want to add something
# Default (en): nothing.
final_new_text_additions = {
'it': u'\n<!-- fine template di benvenuto -->',
'zh': '<small>(via ~~~)</small>',
}
#
#
logpage_header = {
'_default': u'{|border="2" cellpadding="4" cellspacing="0" style="margin: '
u'0.5em 0.5em 0.5em 1em; padding: 0.5em; background: #bfcda5; '
u'border: 1px #b6fd2c solid; border-collapse: collapse; '
u'font-size: 95%;"',
'no': u'[[Kategori:Velkomstlogg|{{PAGENAME}}]]\n{| class="wikitable"',
'it': u'[[Categoria:Benvenuto log|{{subst:PAGENAME}}]]\n{|border="2" '
u'cellpadding="4" cellspacing="0" style="margin: 0.5em 0.5em 0.5em '
u'1em; padding: 0.5em; background: #bfcda5; border: 1px #b6fd2c '
u'solid; border-collapse: collapse; font-size: 95%;"'
}
# Ok, that's all. What is below, is the rest of code, now the code is fixed
# and it will run correctly in your project ;)
############################################################################
class FilenameNotSet(pywikibot.Error):
    """An exception indicating that a signature filename was not specified."""
class Global(object):
"""Container class for global settings."""
    attachEditCount = 1     # edit count that a user needs to be welcomed
    dumpToLog = 15          # number of welcomed users before the log is updated
offset = None # skip users newer than that timestamp
timeoffset = 0 # skip users newer than # minutes
recursive = True # define if the Bot is recursive or not
timeRecur = 3600 # how much time (sec.) the bot waits before restart
makeWelcomeLog = True # create the welcome log or not
confirm = False # should bot ask to add user to bad-username list
welcomeAuto = False # should bot welcome auto-created users
filtBadName = False # check if the username is ok or not
randomSign = False # should signature be random or not
saveSignIndex = False # should save the signature index or not
signFileName = None # File name, default: None
defaultSign = '--~~~~' # default signature
    queryLimit = 50         # number of users that the bot loads to check
quiet = False # Users without contributions aren't displayed
class WelcomeBot(object):
"""Bot to add welcome messages on User pages."""
def __init__(self):
"""Constructor."""
self.site = pywikibot.Site()
self.check_managed_sites()
self.bname = {}
self._totallyCount = 0
self.welcomed_users = []
if globalvar.randomSign:
self.defineSign(True)
def check_managed_sites(self):
"""Check that site is managed by welcome.py."""
# Raises KeyError if site is not in netext dict.
site_netext = i18n.translate(self.site, netext)
if site_netext is None:
raise KeyError(
'welcome.py is not localized for site {0} in netext dict.'
.format(self.site))
self.welcome_text = site_netext
def badNameFilter(self, name, force=False):
"""Check for bad names."""
if not globalvar.filtBadName:
return False
# initialize blacklist
if not hasattr(self, '_blacklist') or force:
elenco = [
' ano', ' anus', 'anal ', 'babies', 'baldracca', 'balle',
'bastardo', 'bestiali', 'bestiale', 'bastarda', 'b.i.t.c.h.',
'bitch', 'boobie', 'bordello', 'breast', 'cacata', 'cacca',
'cachapera', 'cagata', 'cane', 'cazz', 'cazzo', 'cazzata',
'chiavare', 'chiavata', 'chick', 'christ ', 'cristo',
'clitoride', 'coione', 'cojdioonear', 'cojones', 'cojo',
'coglione', 'coglioni', 'cornuto', 'cula', 'culatone',
'culattone', 'culo', 'deficiente', 'deficente', 'dio', 'die ',
'died ', 'ditalino', 'ejackulate', 'enculer', 'eroticunt',
'fanculo', 'fellatio', 'fica ', 'ficken', 'figa', 'sfiga',
'fottere', 'fotter', 'fottuto', 'fuck', 'f.u.c.k.', 'funkyass',
'gay', 'hentai.com', 'horne', 'horney', 'virgin', 'hotties',
'idiot', '@alice.it', 'incest', 'jesus', 'gesu', 'gesù',
'kazzo', 'kill', 'leccaculo', 'lesbian', 'lesbica', 'lesbo',
'masturbazione', 'masturbare', 'masturbo', 'merda', 'merdata',
'merdoso', 'mignotta', 'minchia', 'minkia', 'minchione',
'mona', 'nudo', 'nuda', 'nudi', 'oral', 'sex', 'orgasmso',
'porc', 'pompa', 'pompino', 'porno', 'puttana', 'puzza',
'puzzone', 'racchia', 'sborone', 'sborrone', 'sborata',
'sborolata', 'sboro', 'scopata', 'scopare', 'scroto',
'scrotum', 'sega', 'sesso', 'shit', 'shiz', 's.h.i.t.',
'sadomaso', 'sodomist', 'stronzata', 'stronzo', 'succhiamelo',
'succhiacazzi', 'testicol', 'troia', 'universetoday.net',
'vaffanculo', 'vagina', 'vibrator', 'vacca', 'yiddiot',
'zoccola',
]
elenco_others = [
'@', ".com", ".sex", ".org", ".uk", ".en", ".it", "admin",
"administrator", "amministratore", '@yahoo.com', '@alice.com',
'amministratrice', 'burocrate', 'checkuser', 'developer',
'http://', 'jimbo', 'mediawiki', 'on wheals', 'on wheal',
'on wheel', 'planante', 'razinger', 'sysop', 'troll', 'vandal',
' v.f. ', 'v. fighter', 'vandal f.', 'vandal fighter',
'wales jimmy', 'wheels', 'wales', 'www.',
]
# blacklist from wikipage
badword_page = pywikibot.Page(self.site,
i18n.translate(self.site,
bad_pag))
list_loaded = []
if badword_page.exists():
pywikibot.output(u'\nLoading the bad words list from %s...'
% self.site)
list_loaded = load_word_function(badword_page.get())
else:
showStatus(4)
pywikibot.output(u'The bad word page doesn\'t exist!')
self._blacklist = elenco + elenco_others + list_loaded
del elenco, elenco_others, list_loaded
if not hasattr(self, '_whitelist') or force:
# initialize whitelist
whitelist_default = ['emiliano']
wtlpg = i18n.translate(self.site, whitelist_pg)
list_white = []
if wtlpg:
whitelist_page = pywikibot.Page(self.site, wtlpg)
if whitelist_page.exists():
pywikibot.output(u'\nLoading the whitelist from %s...'
% self.site)
list_white = load_word_function(whitelist_page.get())
else:
showStatus(4)
pywikibot.output(u"The whitelist's page doesn't exist!")
else:
showStatus(4)
pywikibot.warning(u"The whitelist hasn't been setted!")
# Join the whitelist words.
self._whitelist = list_white + whitelist_default
del list_white, whitelist_default
try:
for wname in self._whitelist:
if wname.lower() in str(name).lower():
name = name.lower().replace(wname.lower(), '')
for bname in self._blacklist:
self.bname[name] = bname
return bname.lower() in name.lower()
except UnicodeEncodeError:
pass
try:
for bname in self._blacklist:
if bname.lower() in str(name).lower(): # bad name positive
self.bname[name] = bname
return True
except UnicodeEncodeError:
pass
return False
def reportBadAccount(self, name=None, final=False):
"""Report bad account."""
# Queue process
if name:
if globalvar.confirm:
answer = pywikibot.input_choice(
u'%s may have an unwanted username, do you want to report '
u'this user?' % name,
[('Yes', 'y'), ('No', 'n'), ('All', 'a')], 'n',
automatic_quit=False)
if answer in ['a', 'all']:
answer = 'y'
globalvar.confirm = False
else:
answer = 'y'
if answer.lower() in ['yes', 'y'] or not globalvar.confirm:
showStatus()
pywikibot.output(
'%s is possibly an unwanted username. It will be reported.'
% name)
if hasattr(self, '_BAQueue'):
self._BAQueue.append(name)
else:
self._BAQueue = [name]
if len(self._BAQueue) >= globalvar.dumpToLog or final:
rep_text = ''
            # The queue has reached its maximum size (or this is the final
            # run); put the details on the report page.
pywikibot.output("Updating badname accounts to report page...")
rep_page = pywikibot.Page(self.site,
i18n.translate(self.site,
report_page))
if rep_page.exists():
text_get = rep_page.get()
else:
text_get = ('This is a report page for the Bad-username, '
'please translate me. --~~~')
pos = 0
            # The talk page includes "_" between the two names, so replace
            # them with " ".
for usrna in self._BAQueue:
username = pywikibot.url2link(usrna, self.site, self.site)
n = re.compile(re.escape(username), re.UNICODE)
y = n.search(text_get, pos)
if y:
pywikibot.output(u'%s is already in the report page.'
% username)
else:
# Adding the log.
rep_text += i18n.translate(self.site,
report_text) % username
if self.site.code == 'it':
rep_text = "%s%s}}" % (rep_text, self.bname[username])
com = i18n.twtranslate(self.site, 'welcome-bad_username')
if rep_text != '':
rep_page.put(text_get + rep_text, summary=com, force=True,
minorEdit=True)
showStatus(5)
pywikibot.output(u'Reported')
            self._BAQueue = []
else:
return True
def makelogpage(self, queue=None):
"""Make log page."""
if queue is None:
queue = []
if not globalvar.makeWelcomeLog or len(queue) == 0:
return
text = u''
logg = i18n.translate(self.site, logbook)
if not logg:
return
target = logg + '/' + time.strftime('%Y/%m/%d',
time.localtime(time.time()))
if self.site.code == 'it':
target = logg + '/' + time.strftime('%d/%m/%Y',
time.localtime(time.time()))
logPage = pywikibot.Page(self.site, target)
if logPage.exists():
text = logPage.get()
else:
# make new log page
showStatus()
pywikibot.output(
                'Log page does not exist, getting information for page creation')
text = i18n.translate(self.site, logpage_header,
fallback=i18n.DEFAULT_FALLBACK)
text += u'\n!%s' % self.site.namespace(2)
text += u'\n!%s' % str.capitalize(
self.site.mediawiki_message('contribslink'))
for result in queue:
            # Adding the log entry (don't mind the variable's name).
luser = pywikibot.url2link(result.username, self.site, self.site)
text += u'\n{{WLE|user=%s|contribs=%d}}' % (
luser, result.editCount())
# update log page.
while True:
try:
logPage.put(text, i18n.twtranslate(self.site,
'welcome-updating'))
return True
except pywikibot.EditConflict:
pywikibot.output(u'An edit conflict has occurred. Pausing for '
u'10 seconds before continuing.')
time.sleep(10)
def parseNewUserLog(self):
"""Retrieve new users."""
if globalvar.timeoffset != 0:
start = self.site.server_time() - timedelta(
minutes=globalvar.timeoffset)
else:
start = globalvar.offset
for ue in self.site.logevents('newusers', total=globalvar.queryLimit,
start=start):
if ue.action == 'create' or (
ue.action == 'autocreate' and globalvar.welcomeAuto):
yield pywikibot.User(ue.page())
def defineSign(self, force=False):
"""Setup signature."""
if hasattr(self, '_randomSignature') and not force:
return self._randomSignature
signText = u''
creg = re.compile(r"^\* ?(.*?)$", re.M)
if not globalvar.signFileName:
signPageName = i18n.translate(self.site, random_sign)
if not signPageName:
showStatus(4)
pywikibot.output(
"%s doesn't allow random signature, force disable."
% self.site)
globalvar.randomSign = False
return
signPage = pywikibot.Page(self.site, signPageName)
if signPage.exists():
pywikibot.output('Loading signature list...')
signText = signPage.get()
else:
pywikibot.output('The signature list page does not exist, '
'random signature will be disabled.')
globalvar.randomSign = False
else:
try:
f = codecs.open(
pywikibot.config.datafilepath(globalvar.signFileName), 'r',
encoding=config.console_encoding)
except LookupError:
f = codecs.open(pywikibot.config.datafilepath(
globalvar.signFileName), 'r', encoding='utf-8')
except IOError:
pywikibot.error(u'No fileName!')
raise FilenameNotSet("No signature filename specified.")
signText = f.read()
f.close()
self._randomSignature = creg.findall(signText)
return self._randomSignature
def run(self):
"""Run the bot."""
while True:
welcomed_count = 0
for users in self.parseNewUserLog():
if users.isBlocked():
showStatus(3)
pywikibot.output('%s has been blocked!' % users.username)
continue
if 'bot' in users.groups():
showStatus(3)
pywikibot.output('%s is a bot!' % users.username)
continue
if 'bot' in users.username.lower():
showStatus(3)
pywikibot.output(u'%s might be a global bot!'
% users.username)
continue
if users.editCount() >= globalvar.attachEditCount:
showStatus(2)
pywikibot.output(u'%s has enough edits to be welcomed.'
% users.username)
ustp = users.getUserTalkPage()
if ustp.exists():
showStatus(3)
                        pywikibot.output(u'%s has already been welcomed.'
% users.username)
continue
else:
if self.badNameFilter(users.username):
self.reportBadAccount(users.username)
continue
welcome_text = self.welcome_text
if globalvar.randomSign:
if self.site.family.name != 'wikinews':
welcome_text = (welcome_text
% choice(self.defineSign()))
if self.site.family.name == 'wiktionary' and \
self.site.code == 'it':
pass
else:
welcome_text += timeselected
elif (self.site.family.name != 'wikinews' and
self.site.code != 'it'):
welcome_text = (welcome_text
% globalvar.defaultSign)
final_text = i18n.translate(
self.site, final_new_text_additions)
if final_text:
welcome_text += final_text
welcome_comment = i18n.twtranslate(self.site,
'welcome-welcome')
try:
# append welcomed, welcome_count++
ustp.put(welcome_text, welcome_comment,
minorEdit=False)
welcomed_count += 1
self._totallyCount += 1
self.welcomed_users.append(users)
except pywikibot.EditConflict:
showStatus(4)
pywikibot.output(u'An edit conflict has occurred, '
u'skipping this user.')
if globalvar.makeWelcomeLog and \
i18n.translate(self.site, logbook):
showStatus(5)
if welcomed_count == 1:
pywikibot.output(u'One user has been welcomed.')
elif welcomed_count == 0:
pywikibot.output(u'No users have been welcomed.')
else:
pywikibot.output(u'%s users have been welcomed.'
% welcomed_count)
if welcomed_count >= globalvar.dumpToLog:
if self.makelogpage(self.welcomed_users):
self.welcomed_users = []
welcomed_count = 0
else:
continue
                # If we don't have to report, do nothing.
else:
if users.editCount() == 0:
if not globalvar.quiet:
showStatus(1)
pywikibot.output(u'%s has no contributions.'
% users.username)
else:
showStatus(1)
pywikibot.output(u'%s has only %d contributions.'
% (users.username, users.editCount()))
# That user mustn't be welcomed.
continue
if globalvar.makeWelcomeLog and i18n.translate(
self.site, logbook) and welcomed_count > 0:
showStatus()
if welcomed_count == 1:
pywikibot.output(u'Putting the log of the latest user...')
else:
pywikibot.output(
u'Putting the log of the latest %d users...'
% welcomed_count)
if not self.makelogpage(self.welcomed_users):
continue
self.welcomed_users = []
if hasattr(self, '_BAQueue'):
showStatus()
                pywikibot.output("Putting bad names to the report page...")
self.reportBadAccount(None, final=True)
try:
if globalvar.recursive:
showStatus()
if locale.getlocale()[1]:
strfstr = time.strftime(
'%d %b %Y %H:%M:%S (UTC)', time.gmtime())
# py2-py3 compatibility
if not isinstance(strfstr, UnicodeType):
strfstr = strfstr.decode(locale.getlocale()[1])
else:
strfstr = time.strftime(
u"%d %b %Y %H:%M:%S (UTC)", time.gmtime())
pywikibot.output(u'Sleeping %d seconds before rerun. %s'
% (globalvar.timeRecur, strfstr))
pywikibot.stopme()
time.sleep(globalvar.timeRecur)
else:
raise KeyboardInterrupt
except KeyboardInterrupt:
break
def showStatus(n=0):
"""Output colorized status."""
staColor = {
0: 'lightpurple',
1: 'lightaqua',
2: 'lightgreen',
3: 'lightyellow',
4: 'lightred',
5: 'lightblue'
}
staMsg = {
0: 'MSG',
1: 'NoAct',
2: 'Match',
3: 'Skip',
4: 'Warning',
5: 'Done',
}
pywikibot.output(color_format('{color}[{0:5}]{default} ',
staMsg[n], color=staColor[n]), newline=False)
def load_word_function(raw):
"""Load the badword list and the whitelist."""
page = re.compile(r"(?:\"|\')(.*?)(?:\"|\')(?:, |\))", re.UNICODE)
list_loaded = page.findall(raw)
if len(list_loaded) == 0:
pywikibot.output(u'There was no input on the real-time page.')
return list_loaded
globalvar = Global()
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
for arg in pywikibot.handle_args(args):
arg, sep, val = arg.partition(':')
if arg == '-edit':
globalvar.attachEditCount = int(val or pywikibot.input(
'After how many edits would you like to welcome new users? '
'(0 is allowed)'))
elif arg == '-timeoffset':
globalvar.timeoffset = int(val or pywikibot.input(
'Which time offset (in minutes) for new users would you like '
'to use?'))
elif arg == '-time':
globalvar.timeRecur = int(val or pywikibot.input(
                'For how many seconds would you like the bot to sleep before '
'checking again?'))
elif arg == '-offset':
if not val:
val = pywikibot.input(
'Which time offset for new users would you like to use? '
'(yyyymmddhhmmss)')
try:
globalvar.offset = pywikibot.Timestamp.fromtimestampformat(val)
except ValueError:
# upon request, we could check for software version here
raise ValueError(
"Mediawiki has changed, -offset:# is not supported "
"anymore, but -offset:TIMESTAMP is, assuming TIMESTAMP "
"is yyyymmddhhmmss. -timeoffset is now also supported. "
"Please read this script source header for documentation.")
elif arg == '-file':
globalvar.randomSign = True
globalvar.signFileName = val or pywikibot.input(
'Where have you saved your signatures?')
elif arg == '-sign':
globalvar.defaultSign = val or pywikibot.input(
'Which signature to use?')
globalvar.defaultSign += timeselected
elif arg == '-break':
globalvar.recursive = False
elif arg == '-nlog':
globalvar.makeWelcomeLog = False
elif arg == '-ask':
globalvar.confirm = True
elif arg == '-filter':
globalvar.filtBadName = True
elif arg == '-savedata':
globalvar.saveSignIndex = True
elif arg == '-random':
globalvar.randomSign = True
elif arg == '-sul':
globalvar.welcomeAuto = True
elif arg == '-limit':
globalvar.queryLimit = int(val or pywikibot.input(
u'How many of the latest new users would you like to load?'))
elif arg == '-numberlog':
globalvar.dumpToLog = int(val or pywikibot.input(
'After how many welcomed users would you like to update the '
'welcome log?'))
elif arg == '-quiet':
globalvar.quiet = True
elif arg == '-quick':
issue_deprecation_warning('The usage of "-quick" option', None, 2)
# Filename and Pywikibot path
# file where is stored the random signature index
filename = pywikibot.config.datafilepath('welcome-%s-%s.data'
% (pywikibot.Site().family.name,
pywikibot.Site().code))
if globalvar.offset and globalvar.timeoffset:
pywikibot.warning(
'both -offset and -timeoffset were provided, ignoring -offset')
globalvar.offset = 0
try:
bot = WelcomeBot()
except KeyError as error:
# site not managed by welcome.py
pywikibot.bot.suggest_help(exception=error)
return False
try:
bot.run()
except KeyboardInterrupt:
if bot.welcomed_users:
showStatus()
            pywikibot.output("Saving welcomed users before quitting...")
bot.makelogpage(bot.welcomed_users)
pywikibot.output("\nQuitting...")
finally:
        # If -savedata was given, the script must save the list of welcomed users.
if globalvar.randomSign and globalvar.saveSignIndex and \
bot.welcomed_users:
if sys.version_info[0] > 2:
import pickle as cPickle
else:
import cPickle
with open(filename, 'wb') as f:
cPickle.dump(bot.welcomed_users, f,
protocol=config.pickle_protocol)
if __name__ == "__main__":
main()
|
tommyip/zulip
|
refs/heads/master
|
analytics/urls.py
|
4
|
from django.conf.urls import include, url
import analytics.views
from zerver.lib.rest import rest_dispatch
i18n_urlpatterns = [
# Server admin (user_profile.is_staff) visible stats pages
url(r'^activity$', analytics.views.get_activity,
name='analytics.views.get_activity'),
url(r'^activity/support$', analytics.views.support,
name='analytics.views.support'),
url(r'^realm_activity/(?P<realm_str>[\S]+)/$', analytics.views.get_realm_activity,
name='analytics.views.get_realm_activity'),
url(r'^user_activity/(?P<email>[\S]+)/$', analytics.views.get_user_activity,
name='analytics.views.get_user_activity'),
url(r'^stats/realm/(?P<realm_str>[\S]+)/$', analytics.views.stats_for_realm,
name='analytics.views.stats_for_realm'),
url(r'^stats/installation$', analytics.views.stats_for_installation,
name='analytics.views.stats_for_installation'),
url(r'^stats/remote/(?P<remote_server_id>[\S]+)/installation$',
analytics.views.stats_for_remote_installation,
name='analytics.views.stats_for_remote_installation'),
url(r'^stats/remote/(?P<remote_server_id>[\S]+)/realm/(?P<remote_realm_id>[\S]+)/$',
analytics.views.stats_for_remote_realm,
name='analytics.views.stats_for_remote_realm'),
# User-visible stats page
url(r'^stats$', analytics.views.stats,
name='analytics.views.stats'),
]
# These endpoints are a part of the API (V1), which uses:
# * REST verbs
# * Basic auth (username:password is email:apiKey)
# * Takes and returns json-formatted data
#
# See rest_dispatch in zerver.lib.rest for an explanation of auth methods used
#
# All of these paths are accessed by either a /json or /api prefix
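# For example (illustrative, not an exhaustive list): the chart data behind
# /stats can be requested as GET /json/analytics/chart_data from a logged-in
# web session, or as GET /api/v1/analytics/chart_data with HTTP Basic auth
# where the username is the email and the password is the API key.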
v1_api_and_json_patterns = [
# get data for the graphs at /stats
url(r'^analytics/chart_data$', rest_dispatch,
{'GET': 'analytics.views.get_chart_data'}),
url(r'^analytics/chart_data/realm/(?P<realm_str>[\S]+)$', rest_dispatch,
{'GET': 'analytics.views.get_chart_data_for_realm'}),
url(r'^analytics/chart_data/installation$', rest_dispatch,
{'GET': 'analytics.views.get_chart_data_for_installation'}),
url(r'^analytics/chart_data/remote/(?P<remote_server_id>[\S]+)/installation$', rest_dispatch,
{'GET': 'analytics.views.get_chart_data_for_remote_installation'}),
url(r'^analytics/chart_data/remote/(?P<remote_server_id>[\S]+)/realm/(?P<remote_realm_id>[\S]+)$',
rest_dispatch,
{'GET': 'analytics.views.get_chart_data_for_remote_realm'}),
]
i18n_urlpatterns += [
url(r'^api/v1/', include(v1_api_and_json_patterns)),
url(r'^json/', include(v1_api_and_json_patterns)),
]
urlpatterns = i18n_urlpatterns
|
BuildingLink/sentry
|
refs/heads/master
|
tests/sentry/interfaces/test_contexts.py
|
3
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry.interfaces.contexts import Contexts
from sentry.testutils import TestCase
class ContextsTest(TestCase):
def test_os(self):
ctx = Contexts.to_python({
'os': {
'name': 'Windows',
'version': '95',
'rooted': True,
},
})
assert sorted(ctx.iter_tags()) == [
('os', 'Windows 95'),
('os.name', 'Windows'),
('os.rooted', 'yes'),
]
assert ctx.to_json() == {
'os': {
'type': 'os',
'name': 'Windows',
'version': '95',
'rooted': True,
}
}
def test_runtime(self):
ctx = Contexts.to_python({
'runtime': {
'name': 'Java',
'version': '1.2.3',
'build': 'BLAH',
},
})
assert sorted(ctx.iter_tags()) == [
('runtime', 'Java 1.2.3'),
('runtime.name', 'Java'),
]
assert ctx.to_json() == {
'runtime': {
'type': 'runtime',
'name': 'Java',
'version': '1.2.3',
'build': 'BLAH',
}
}
def test_device(self):
ctx = Contexts.to_python({
'device': {
'name': 'My iPad',
'model': 'iPad',
'model_id': '1234AB',
'version': '1.2.3',
'arch': 'arm64',
},
})
assert sorted(ctx.iter_tags()) == [
('device', 'iPad'),
]
assert ctx.to_json() == {
'device': {
'type': 'device',
'name': 'My iPad',
'model': 'iPad',
'model_id': '1234AB',
'version': '1.2.3',
'arch': 'arm64',
}
}
def test_device_with_alias(self):
ctx = Contexts.to_python({
'my_device': {
'type': 'device',
'title': 'My Title',
'name': 'My iPad',
'model': 'iPad',
'model_id': '1234AB',
'version': '1.2.3',
'arch': 'arm64',
},
})
assert sorted(ctx.iter_tags()) == [
('my_device', 'iPad')
]
assert ctx.to_json() == {
'my_device': {
'type': 'device',
'title': 'My Title',
'name': 'My iPad',
'model': 'iPad',
'model_id': '1234AB',
'version': '1.2.3',
'arch': 'arm64',
}
}
def test_default(self):
ctx = Contexts.to_python({
'whatever': {
'foo': 'bar',
'blub': 'blah',
'biz': [1, 2, 3],
'baz': {'foo': 'bar'},
},
})
assert sorted(ctx.iter_tags()) == []
assert ctx.to_json() == {
'whatever': {
'type': 'default',
'foo': 'bar',
'blub': 'blah',
'biz': [1, 2, 3],
'baz': {'foo': 'bar'},
}
}
def test_path(self):
assert Contexts().get_path() == 'contexts'
def test_app(self):
ctx = Contexts.to_python({
'app': {
'app_id': '1234',
'device_app_hash': '5678',
},
})
assert sorted(ctx.iter_tags()) == [
('app.device', '5678'),
]
assert ctx.to_json() == {
'app': {
'type': 'app',
'app_id': '1234',
'device_app_hash': '5678',
}
}
|
ezpuzz/supysonic
|
refs/heads/master
|
api/browse.py
|
1
|
# coding: utf-8
# This file is part of Supysonic.
#
# Supysonic is a Python implementation of the Subsonic server API.
# Copyright (C) 2013 Alban 'spl0k' Féron
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from flask import request
from web import app
from db import Folder, Artist, Album, Track, func, session
from . import get_entity
import uuid, time, string
import os.path
@app.route('/rest/getMusicFolders.view', methods = [ 'GET', 'POST' ])
def list_folders():
return request.formatter({
'musicFolders': {
'musicFolder': [ {
'id': f.id,
'name': f.name
} for f in session.query(Folder).filter(Folder.root == True).order_by(Folder.path).all() ]
}
})
@app.route('/rest/getIndexes.view', methods = [ 'GET', 'POST' ])
def list_indexes():
musicFolderId = request.args.get('musicFolderId')
ifModifiedSince = request.args.get('ifModifiedSince')
if ifModifiedSince:
try:
ifModifiedSince = int(ifModifiedSince) / 1000
except:
return request.error_formatter(0, 'Invalid timestamp')
if musicFolderId is None:
folder = session.query(Folder).filter(Folder.root == True).all()
else:
try:
mfid = uuid.UUID(musicFolderId)
except:
return request.error_formatter(0, 'Invalid id')
folder = session.query(Folder).get(mfid)
if not folder or (type(folder) is not list and not folder.root):
return request.error_formatter(70, 'Folder not found')
last_modif = max(map(lambda f: f.last_scan, folder)) if type(folder) is list else folder.last_scan
if (not ifModifiedSince is None) and last_modif < ifModifiedSince:
return request.formatter({ 'indexes': { 'lastModified': last_modif * 1000 } })
# The XSD lies, we don't return artists but a directory structure
if type(folder) is list:
artists = []
childs = []
for f in folder:
artists += f.get_children()
childs += f.tracks
else:
artists = folder.get_children()
childs = folder.tracks
indexes = {}
for artist in artists:
index = artist.name[0].upper()
if index in map(str, xrange(10)):
index = '#'
elif index not in string.letters:
index = '?'
if index not in indexes:
indexes[index] = []
indexes[index].append(artist)
return request.formatter({
'indexes': {
'lastModified': last_modif * 1000,
'index': [ {
'name': k,
'artist': [ {
'id': a.id,
'name': a.name
} for a in sorted(v, key = lambda a: a.name.lower()) ]
} for k, v in sorted(indexes.iteritems()) ],
'child': [ c.as_subsonic_child(request.user) for c in sorted(childs, key = lambda t: t.sort_key()) ]
}
})
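# Illustrative note (assumed data, not part of the original file): with this
# grouping, an artist named "3 Doors Down" lands under the '#' index, "ABBA"
# under 'A', and a name starting with a non-letter such as "!!!" falls back to
# the '?' index.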
@app.route('/rest/getMusicDirectory.view', methods = [ 'GET', 'POST' ])
def show_directory():
status, res = get_entity(request, Folder)
if not status:
return res
res.tracks = [t for t in res.tracks if os.path.isfile(t.path)]
directory = {
'id': res.id,
'name': res.name,
'child': [ f.as_subsonic_child(request.user) for f in res.get_children() ] + [ t.as_subsonic_child(request.user) for t in sorted(res.tracks, key = lambda t: t.sort_key()) ]
}
if not res.root:
parent = session.query(Folder).with_entities(Folder.id) \
.filter(Folder.path.like(res.path[:len(res.path)-len(res.name)-1])) \
.order_by(func.length(Folder.path).desc()).first()
if parent:
directory['parent'] = parent.id
return request.formatter({ 'directory': directory })
@app.route('/rest/getArtists.view', methods = [ 'GET', 'POST' ])
def list_artists():
# According to the API page, there are no parameters?
indexes = {}
# Optimized query instead of using backrefs, is there a way to speed up the backref?
c = session.query(Album.artist_id, func.count(Album.artist_id).label('c')).group_by(Album.artist_id).subquery(name='c')
for artist in session.query(Artist.name, Artist.id, c.c.c.label('albums')).join(c).order_by(Artist.name).all():
index = artist.name[0].upper() if artist.name else '?'
if index in map(str, xrange(10)):
index = '#'
elif index not in string.letters:
index = '?'
if index not in indexes:
indexes[index] = []
indexes[index].append(artist)
return request.formatter({
'artists': {
'index': [ {
'name': k,
'artist': [ {
'id': a.id,
'name': a.name.strip(),
'albumCount': a.albums
} for a in v ]
} for k, v in sorted(indexes.iteritems()) ]
}
})
@app.route('/rest/getArtist.view', methods = [ 'GET', 'POST' ])
def artist_info():
status, res = get_entity(request, Artist)
if not status:
return res
info = res.as_subsonic_artist(request.user)
info['album'] = [ a.as_subsonic_album(request.user) for a in sorted(res.albums, key = lambda a: a.sort_key()) ]
return request.formatter({ 'artist': info })
@app.route('/rest/getAlbum.view', methods = [ 'GET', 'POST' ])
def album_info():
status, res = get_entity(request, Album)
if not status:
return res
info = res.as_subsonic_album(request.user)
info['song'] = [ t.as_subsonic_child(request.user) for t in sorted(res.tracks, key = lambda t: t.sort_key()) ]
return request.formatter({ 'album': info })
@app.route('/rest/getSong.view', methods = [ 'GET', 'POST' ])
def track_info():
status, res = get_entity(request, Track)
if not status:
return res
return request.formatter({ 'song': res.as_subsonic_child(request.user) })
@app.route('/rest/getVideos.view', methods = [ 'GET', 'POST' ])
def list_videos():
return request.error_formatter(0, 'Video streaming not supported')
@app.route('/rest/validateLicense.view', methods = [ 'GET' ])
def validate():
	return request.formatter({ 'license': { 'valid': True } })
|
mxOBS/deb-pkg_trusty_chromium-browser
|
refs/heads/master
|
third_party/scons-2.0.1/engine/SCons/Variables/BoolVariable.py
|
61
|
"""engine.SCons.Variables.BoolVariable
This file defines the option type for SCons implementing true/false values.
Usage example:
opts = Variables()
opts.Add(BoolVariable('embedded', 'build for an embedded system', 0))
...
if env['embedded'] == 1:
...
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/BoolVariable.py 5134 2010/08/16 23:02:40 bdeegan"
__all__ = ['BoolVariable',]
import SCons.Errors
__true_strings = ('y', 'yes', 'true', 't', '1', 'on' , 'all' )
__false_strings = ('n', 'no', 'false', 'f', '0', 'off', 'none')
def _text2bool(val):
"""
Converts strings to True/False depending on the 'truth' expressed by
the string. If the string can't be converted, the original value
will be returned.
See '__true_strings' and '__false_strings' for values considered
'true' or 'false respectivly.
This is usable as 'converter' for SCons' Variables.
"""
lval = val.lower()
if lval in __true_strings: return True
if lval in __false_strings: return False
raise ValueError("Invalid value for boolean option: %s" % val)
def _validator(key, val, env):
"""
Validates the given value to be either '0' or '1'.
This is usable as 'validator' for SCons' Variables.
"""
if not env[key] in (True, False):
raise SCons.Errors.UserError(
'Invalid value for boolean option %s: %s' % (key, env[key]))
def BoolVariable(key, help, default):
"""
The input parameters describe a boolen option, thus they are
returned with the correct converter and validator appended. The
'help' text will by appended by '(yes|no) to show the valid
valued. The result is usable for input to opts.Add().
"""
return (key, '%s (yes|no)' % help, default,
_validator, _text2bool)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
vatsalparekh/tweet-csv
|
refs/heads/master
|
twitter_profile_csv.py
|
1
|
import tweepy
import csv
import webbrowser
import time
def get_all_tweets(tweepy_user):
alltweets = []
screen_name = input(
'Enter the screenname of the profile : @')
new_tweets = tweepy_user.user_timeline(screen_name=screen_name, count=200)
alltweets.extend(new_tweets)
oldest = alltweets[-1].id - 1
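# user_timeline returns at most ~200 tweets per call, so keep paging
# backwards with max_id until an empty batch signals the end of the timeline.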
while len(new_tweets) > 0:
print ("getting tweets before " + str(oldest))
new_tweets = tweepy_user.user_timeline(
screen_name=screen_name, count=200, max_id=oldest)
alltweets.extend(new_tweets)
oldest = alltweets[-1].id - 1
print ("..." + str(len(alltweets)) + ' tweets downloaded so far')
outtweets = [[tweet.id_str, tweet.created_at,
tweet.text.encode("utf-8")] for tweet in alltweets]
with open('%s_tweets.csv' % screen_name, 'w') as f:
writer = csv.writer(f)
writer.writerow(["id", "created_at", "text"])
writer.writerows(outtweets)
def get_auth():
print('To paste -> Ctrl+Shift+V \n')
consumer_token = input('Enter your consumer token :')
consumer_secret = input('Enter your consumer secret :')
auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
try:
redirect_url = auth.get_authorization_url()
print ('\nOpen this link : ' + redirect_url)
time.sleep(1)
webbrowser.open(redirect_url)
except tweepy.TweepError:
print ('Error! Failed to get auth_url.')
time.sleep(2)
pin = input("Enter Authorization Pin :").strip()
auth.get_access_token(verifier=pin)
tweepy_user = tweepy.API(auth)
get_all_tweets(tweepy_user)
if __name__ == '__main__':
get_auth()
|
BehavioralInsightsTeam/edx-platform
|
refs/heads/release-bit
|
lms/djangoapps/course_api/blocks/transformers/milestones.py
|
14
|
"""
Milestones Transformer
"""
import logging
from django.conf import settings
from edx_proctoring.api import get_attempt_status_summary
from edx_proctoring.exceptions import ProctoredExamNotFoundException
from openedx.core.djangoapps.content.block_structure.transformer import (
BlockStructureTransformer,
)
from student.models import EntranceExamConfiguration
from util import milestones_helpers
log = logging.getLogger(__name__)
class MilestonesAndSpecialExamsTransformer(BlockStructureTransformer):
"""
A transformer that handles both milestones and special (timed) exams.
It includes or excludes all unfulfilled milestones from the student view based on the value of `include_gated_sections`.
An entrance exam is considered a milestone, and is not considered a "special exam".
It also includes or excludes all special (timed) exams (timed, proctored, practice proctored) in/from the
student view, based on the value of `include_special_exams`.
"""
WRITE_VERSION = 1
READ_VERSION = 1
@classmethod
def name(cls):
return "milestones"
def __init__(self, include_special_exams=True, include_gated_sections=True):
self.include_special_exams = include_special_exams
self.include_gated_sections = include_gated_sections
@classmethod
def collect(cls, block_structure):
"""
Computes any information for each XBlock that's necessary to execute
this transformer's transform method.
Arguments:
block_structure (BlockStructureCollectedData)
"""
block_structure.request_xblock_fields('is_proctored_enabled')
block_structure.request_xblock_fields('is_practice_exam')
block_structure.request_xblock_fields('is_timed_exam')
block_structure.request_xblock_fields('entrance_exam_id')
def transform(self, usage_info, block_structure):
"""
Modify block structure according to the behavior of milestones and special exams.
"""
required_content = self.get_required_content(usage_info, block_structure)
def user_gated_from_block(block_key):
"""
Checks whether the user is gated from accessing this block, first via special exams,
then via a general milestones check.
"""
if usage_info.has_staff_access:
return False
elif self.gated_by_required_content(block_key, block_structure, required_content):
return True
elif not self.include_gated_sections and self.has_pending_milestones_for_user(block_key, usage_info):
return True
elif (settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
(self.is_special_exam(block_key, block_structure) and
not self.include_special_exams)):
return True
return False
for block_key in block_structure.topological_traversal():
if user_gated_from_block(block_key):
block_structure.remove_block(block_key, False)
elif self.is_special_exam(block_key, block_structure):
self.add_special_exam_info(block_key, block_structure, usage_info)
@staticmethod
def is_special_exam(block_key, block_structure):
"""
Test whether the block is a special exam.
"""
return (
block_structure.get_xblock_field(block_key, 'is_proctored_enabled') or
block_structure.get_xblock_field(block_key, 'is_practice_exam') or
block_structure.get_xblock_field(block_key, 'is_timed_exam')
)
@staticmethod
def has_pending_milestones_for_user(block_key, usage_info):
"""
Test whether the current user has any unfulfilled milestones preventing
them from accessing this block.
"""
return bool(milestones_helpers.get_course_content_milestones(
unicode(block_key.course_key),
unicode(block_key),
'requires',
usage_info.user.id
))
# TODO: As part of a cleanup effort, this transformer should be split into
# MilestonesTransformer and SpecialExamsTransformer, which are completely independent.
def add_special_exam_info(self, block_key, block_structure, usage_info):
"""
For special exams, add the special exam information to the course blocks.
"""
special_exam_attempt_context = None
try:
# Calls into edx_proctoring subsystem to get relevant special exam information.
# This will return None, if (user, course_id, content_id) is not applicable.
special_exam_attempt_context = get_attempt_status_summary(
usage_info.user.id,
unicode(block_key.course_key),
unicode(block_key)
)
except ProctoredExamNotFoundException as ex:
log.exception(ex)
if special_exam_attempt_context:
# This user has special exam context for this block so add it.
block_structure.set_transformer_block_field(
block_key,
self,
'special_exam_info',
special_exam_attempt_context,
)
@staticmethod
def get_required_content(usage_info, block_structure):
"""
Get the required content for the course.
This takes into account if the user can skip the entrance exam.
"""
course_key = block_structure.root_block_usage_key.course_key
user_can_skip_entrance_exam = EntranceExamConfiguration.user_can_skip_entrance_exam(usage_info.user, course_key)
required_content = milestones_helpers.get_required_content(course_key, usage_info.user)
if not required_content:
return required_content
if user_can_skip_entrance_exam:
# remove the entrance exam from required content
entrance_exam_id = block_structure.get_xblock_field(block_structure.root_block_usage_key, 'entrance_exam_id')
required_content = [content for content in required_content if not content == entrance_exam_id]
return required_content
@staticmethod
def gated_by_required_content(block_key, block_structure, required_content):
"""
Returns True if the current block associated with the block_key should be gated by the given required_content.
Returns False otherwise.
"""
if not required_content:
return False
if block_key.block_type == 'chapter' and unicode(block_key) not in required_content:
return True
return False
|
sajuptpm/contrail-controller
|
refs/heads/master
|
src/config/utils/provision_linklocal.py
|
10
|
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import argparse
import ConfigParser
from vnc_api.vnc_api import *
class MetadataProvisioner(object):
def __init__(self, args_str=None):
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
self._vnc_lib = VncApi(
self._args.admin_user, self._args.admin_password,
self._args.admin_tenant_name,
self._args.api_server_ip,
self._args.api_server_port, '/')
linklocal_obj=LinklocalServiceEntryType(
linklocal_service_name=self._args.linklocal_service_name,
linklocal_service_ip=self._args.linklocal_service_ip,
linklocal_service_port=self._args.linklocal_service_port,
ip_fabric_DNS_service_name=self._args.ipfabric_dns_service_name,
ip_fabric_service_port=self._args.ipfabric_service_port,
ip_fabric_service_ip=[self._args.ipfabric_service_ip])
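# Read the current global vrouter config; if it does not exist yet and the
# operation is "add", create it below with just this link-local entry,
# otherwise merge in or drop the entry matching linklocal_service_name.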
try:
current_config=self._vnc_lib.global_vrouter_config_read(
fq_name=['default-global-system-config',
'default-global-vrouter-config'])
except Exception as e:
if self._args.oper == "add":
linklocal_services_obj=LinklocalServicesTypes([linklocal_obj])
conf_obj=GlobalVrouterConfig(linklocal_services=linklocal_services_obj)
result=self._vnc_lib.global_vrouter_config_create(conf_obj)
print 'Created.UUID is %s'%(result)
return
current_linklocal=current_config.get_linklocal_services()
if current_linklocal is None:
obj = {'linklocal_service_entry': []}
else:
obj = current_linklocal.__dict__
new_linklocal=[]
for key, value in obj.iteritems():
found=False
for vl in value:
entry = vl.__dict__
if ('linklocal_service_name' in entry and
entry['linklocal_service_name'] == self._args.linklocal_service_name):
if self._args.oper == "add":
new_linklocal.append(linklocal_obj)
found=True
else:
new_linklocal.append(vl)
if not found and self._args.oper == "add":
new_linklocal.append(linklocal_obj)
obj[key] = new_linklocal
conf_obj=GlobalVrouterConfig(linklocal_services=obj)
result=self._vnc_lib.global_vrouter_config_update(conf_obj)
print 'Updated.%s'%(result)
# end __init__
def _parse_args(self, args_str):
'''
Eg. python provision_linklocal.py
--api_server_ip 127.0.0.1
--api_server_port 8082
--linklocal_service_name name
--linklocal_service_ip 1.2.3.4
--linklocal_service_port 1234
--ipfabric_dns_service_name fabric_server_name
--ipfabric_service_ip 10.1.1.1
--ipfabric_service_port 5775
--oper <add | delete>
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'api_server_ip': '127.0.0.1',
'api_server_port': '8082',
'linklocal_service_name': '',
'linklocal_service_ip': '',
'linklocal_service_port': 0,
'ipfabric_dns_service_name': '',
'ipfabric_service_ip': [],
'ipfabric_service_port': 0,
'oper': 'add',
}
ksopts = {
'admin_user': 'user1',
'admin_password': 'password1',
'admin_tenant_name': 'admin'
}
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("DEFAULTS")))
if 'KEYSTONE' in config.sections():
ksopts.update(dict(config.items("KEYSTONE")))
# Override with CLI options
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
defaults.update(ksopts)
parser.set_defaults(**defaults)
parser.add_argument(
"--api_server_ip", help="IP address of api server")
parser.add_argument("--api_server_port", help="Port of api server")
parser.add_argument(
"--linklocal_service_name", help="Service Name")
parser.add_argument(
"--linklocal_service_ip", help="Link Local Service IP")
parser.add_argument(
"--linklocal_service_port", type=int, help="Link Local Service Port")
parser.add_argument(
"--ipfabric_dns_service_name", help="IP Fabric DNS Service Name")
parser.add_argument(
"--ipfabric_service_ip", help="IP Fabric Service IP")
parser.add_argument(
"--ipfabric_service_port", type=int, help="IP Fabric Service Port")
parser.add_argument(
"--oper", default='add', help="Provision operation to be done(add or delete)")
parser.add_argument(
"--admin_tenant_name", help="Tenant to create the Link Local Service")
parser.add_argument(
"--admin_user", help="Name of keystone admin user")
parser.add_argument(
"--admin_password", help="Password of keystone admin user")
self._args = parser.parse_args(remaining_argv)
if not self._args.linklocal_service_name:
parser.error('linklocal_service_name is required')
# end _parse_args
# end class MetadataProvisioner
def main(args_str=None):
MetadataProvisioner(args_str)
# end main
if __name__ == "__main__":
main()
|
alcobar/asuswrt-merlin
|
refs/heads/master
|
release/src/router/samba-3.0.25b/source/python/examples/tdbpack/test_tdbpack.py
|
55
|
#! /usr/bin/env python2.2
__doc__ = """test case for samba.tdbpack functions
tdbpack provides a means of pickling values into binary formats
compatible with that used by the samba tdbpack()/tdbunpack()
functions.
Numbers are always stored in little-endian format; strings are stored
in either DOS or Unix codepage as appropriate.
The format for any particular element is encoded as a short ASCII
string, with one character per field."""
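# Illustration only, taken from the symm_cases table below: format 'wd' packs
# [42, 256] into '\x2a\x00' + '\x00\x01\x00\x00', i.e. a little-endian 16-bit
# word followed by a little-endian 32-bit dword.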
# Copyright (C) 2002 Hewlett-Packard.
__author__ = 'Martin Pool <mbp@sourcefrog.net>'
import unittest
import oldtdbutil
import samba.tdbpack
both_unpackers = (samba.tdbpack.unpack, oldtdbutil.unpack)
both_packers = (samba.tdbpack.pack, oldtdbutil.pack)
# # ('B', [10, 'hello'], '\x0a\0\0\0hello'),
# ('BB', [11, 'hello\0world', 3, 'now'],
# '\x0b\0\0\0hello\0world\x03\0\0\0now'),
# ('pd', [1, 10], '\x01\0\0\0\x0a\0\0\0'),
# ('BBB', [5, 'hello', 0, '', 5, 'world'],
# '\x05\0\0\0hello\0\0\0\0\x05\0\0\0world'),
# strings are sequences in Python, there's no getting away
# from it
# ('ffff', 'evil', 'e\0v\0i\0l\0'),
# ('BBBB', 'evil',
# '\x01\0\0\0e'
# '\x01\0\0\0v'
# '\x01\0\0\0i'
# '\x01\0\0\0l'),
# ('', [], ''),
# # exercise some long strings
# ('PP', ['hello' * 255, 'world' * 255],
# 'hello' * 255 + '\0' + 'world' * 255 + '\0'),
# ('PP', ['hello' * 40000, 'world' * 50000],
# 'hello' * 40000 + '\0' + 'world' * 50000 + '\0'),
# ('B', [(5*51), 'hello' * 51], '\xff\0\0\0' + 'hello' * 51),
# ('BB', [(5 * 40000), 'hello' * 40000,
# (5 * 50000), 'world' * 50000],
# '\x40\x0d\x03\0' + 'hello' * 40000 + '\x90\xd0\x03\x00' + 'world' * 50000),
class PackTests(unittest.TestCase):
symm_cases = [
('w', [42], '\x2a\0'),
('www', [42, 2, 69], '\x2a\0\x02\0\x45\0'),
('wd', [42, 256], '\x2a\0\0\x01\0\0'),
('w', [0], '\0\0'),
('w', [255], '\xff\0'),
('w', [256], '\0\x01'),
('w', [0xdead], '\xad\xde'),
('w', [0xffff], '\xff\xff'),
('p', [0], '\0\0\0\0'),
('p', [1], '\x01\0\0\0'),
('d', [0x01020304], '\x04\x03\x02\x01'),
('d', [0x7fffffff], '\xff\xff\xff\x7f'),
('d', [0x80000000L], '\x00\x00\x00\x80'),
('d', [0x80000069L], '\x69\x00\x00\x80'),
('d', [0xffffffffL], '\xff\xff\xff\xff'),
('d', [0xffffff00L], '\x00\xff\xff\xff'),
('ddd', [1, 10, 50], '\x01\0\0\0\x0a\0\0\0\x32\0\0\0'),
('ff', ['hello', 'world'], 'hello\0world\0'),
('fP', ['hello', 'world'], 'hello\0world\0'),
('PP', ['hello', 'world'], 'hello\0world\0'),
('B', [0, ''], '\0\0\0\0'),
# old implementation is weird when string is not the right length
# ('B', [2, 'hello'], '\x0a\0\0\0hello'),
('B', [5, 'hello'], '\x05\0\0\0hello'),
]
def test_symmetric(self):
"""Cookbook of symmetric pack/unpack tests
"""
for packer in [samba.tdbpack.pack]: # both_packers:
for unpacker in both_unpackers:
for format, values, expected in self.symm_cases:
out_packed = packer(format, values)
self.assertEquals(out_packed, expected)
out, rest = unpacker(format, expected)
self.assertEquals(rest, '')
self.assertEquals(list(values), list(out))
def test_large(self):
"""Test large pack/unpack strings"""
large_cases = [('w' * 1000, xrange(1000)), ]
for packer in both_packers:
for unpacker in both_unpackers:
for format, values in large_cases:
packed = packer(format, values)
out, rest = unpacker(format, packed)
self.assertEquals(rest, '')
self.assertEquals(list(values), list(out))
def test_pack(self):
"""Cookbook of expected pack values
These can't be used for the symmetric test because the unpacked value is
not "canonical".
"""
cases = [('w', (42,), '\x2a\0'),
]
for packer in both_packers:
for format, values, expected in cases:
self.assertEquals(packer(format, values), expected)
def test_unpack_extra(self):
# Test leftover data
for unpacker in both_unpackers:
for format, values, packed in self.symm_cases:
out, rest = unpacker(format, packed + 'hello sailor!')
self.assertEquals(rest, 'hello sailor!')
self.assertEquals(list(values), list(out))
def test_pack_extra(self):
"""Leftover values when packing"""
cases = [
('d', [10, 20], [10]),
('d', [10, 'hello'], [10]),
('ff', ['hello', 'world', 'sailor'], ['hello', 'world']),
]
for unpacker in both_unpackers:
for packer in both_packers:
for format, values, chopped in cases:
bin = packer(format, values)
out, rest = unpacker(format, bin)
self.assertEquals(list(out), list(chopped))
self.assertEquals(rest, '')
def test_unpack(self):
"""Cookbook of tricky unpack tests"""
cases = [
# Apparently I couldn't think of any tests that weren't
# symmetric :-/
]
for unpacker in both_unpackers:
for format, values, expected in cases:
out, rest = unpacker(format, expected)
self.assertEquals(rest, '')
self.assertEquals(list(values), list(out))
def test_pack_failures(self):
"""Expected errors for incorrect packing"""
cases = [('w', []),
# ('w', ()),
# ('w', {}),
('ww', [2]),
('w', 2),
# ('w', None),
('wwwwwwwwwwww', []),
# ('w', [0x60A15EC5L]),
# ('w', [None]),
('d', []),
('p', []),
('f', [2]),
('P', [None]),
('P', ()),
('f', [hex]),
('fw', ['hello']),
# ('f', [u'hello']),
('B', [2]),
(None, [2, 3, 4]),
(ord('f'), [20]),
# old code doesn't distinguish string from seq-of-char
# (['w', 'w'], [2, 2]),
# old code just ignores invalid characters
# ('Q', [2]),
# ('fQ', ['2', 3]),
# ('fQ', ['2']),
(2, [2]),
# old code doesn't typecheck format
# ({}, {})
]
for packer in both_packers:
for format, values in cases:
try:
packer(format, values)
except StandardError:
pass
else:
raise AssertionError("didn't get exception: format %s, values %s, packer %s"
% (`format`, `values`, `packer`))
def test_unpack_failures(self):
"""Expected errors for incorrect unpacking"""
cases = [
# This ought to be illegal, but the old code doesn't prohibit it
# ('$', '', ValueError),
# ('Q', '', ValueError),
# ('Q$', '', ValueError),
('f', '', IndexError),
('d', '', IndexError),
# This is an illegal packing, but the old code doesn't trap
# ('d', '2', IndexError),
# ('d', '22', IndexError),
# ('d', '222', IndexError),
# ('p', '\x01\0', IndexError),
# ('w', '2', IndexError),
# ('B', '\xff\0\0\0hello', IndexError),
# ('B', '\xff\0', IndexError),
('w', '', IndexError),
('f', 'hello', IndexError),
('f', '', IndexError),
# ('B', '\x01\0\0\0', IndexError),
# ('B', '\x05\0\0\0hell', IndexError),
('B', '\xff\xff\xff\xff', ValueError),
# ('B', 'foobar', IndexError),
# ('BB', '\x01\0\0\0a\x01', IndexError),
]
for unpacker in both_unpackers:
for format, values, throwable_class in cases:
try:
unpacker(format, values)
except StandardError:
pass
else:
raise AssertionError("didn't get exception: format %s, values %s, unpacker %s"
% (`format`, `values`, `unpacker`))
def test_unpack_repeated(self):
cases = [(('df$',
'\x00\x00\x00\x00HP C LaserJet 4500-PS\x00Windows 4.0\x00\\print$\\WIN40\\0\\PSCRIPT.DRV\x00\\print$\\WIN40\\0\\PSCRIPT.DRV\x00\\print$\\WIN40\\0\\PSCRIPT.DRV\x00\\print$\\WIN40\\0\\PSCRIPT.HLP\x00\x00RAW\x00\\print$\\WIN40\\0\\readme.wri\x00\\print$\\WIN40\\0\\pscript.drv\x00\\print$\\WIN40\\0\\pscript.hlp\x00'),
([0L, 'HP C LaserJet 4500-PS', 'Windows 4.0', '\\print$\\WIN40\\0\\PSCRIPT.DRV', '\\print$\\WIN40\\0\\PSCRIPT.DRV', '\\print$\\WIN40\\0\\PSCRIPT.DRV', '\\print$\\WIN40\\0\\PSCRIPT.HLP', '', 'RAW', '\\print$\\WIN40\\0\\readme.wri', '\\print$\\WIN40\\0\\pscript.drv', '\\print$\\WIN40\\0\\pscript.hlp'], ''))]
for unpacker in both_unpackers:
for input, expected in cases:
result = apply(unpacker, input)
if result != expected:
raise AssertionError("%s:\n input: %s\n output: %s\n expected: %s" % (`unpacker`, `input`, `result`, `expected`))
if __name__ == '__main__':
unittest.main()
|
krunal3103/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/html5lib/html5lib/filters/lint.py
|
979
|
from __future__ import absolute_import, division, unicode_literals
from gettext import gettext
_ = gettext
from . import _base
from ..constants import cdataElements, rcdataElements, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class LintError(Exception):
pass
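# The filter below yields every token through unchanged but raises LintError
# on malformed ones: non-string tag/attribute names or values, void elements
# reported as StartTag/EndTag, end tags that do not match the innermost open
# element, start tags/comments/doctypes seen outside the PCDATA content
# model, and unknown token types.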
class Filter(_base.Filter):
def __iter__(self):
open_elements = []
contentModelFlag = "PCDATA"
for token in _base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("StartTag not in PCDATA content model flag: %(tag)s") % {"tag": name})
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
if not name:
raise LintError(_("Empty tag name"))
if type == "StartTag" and name in voidElements:
raise LintError(_("Void element reported as StartTag token: %(tag)s") % {"tag": name})
elif type == "EmptyTag" and name not in voidElements:
raise LintError(_("Non-void element reported as EmptyTag token: %(tag)s") % {"tag": token["name"]})
if type == "StartTag":
open_elements.append(name)
for name, value in token["data"]:
if not isinstance(name, str):
raise LintError(_("Attribute name is not a string: %(name)r") % {"name": name})
if not name:
raise LintError(_("Empty attribute name"))
if not isinstance(value, str):
raise LintError(_("Attribute value is not a string: %(value)r") % {"value": value})
if name in cdataElements:
contentModelFlag = "CDATA"
elif name in rcdataElements:
contentModelFlag = "RCDATA"
elif name == "plaintext":
contentModelFlag = "PLAINTEXT"
elif type == "EndTag":
name = token["name"]
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
if not name:
raise LintError(_("Empty tag name"))
if name in voidElements:
raise LintError(_("Void element reported as EndTag token: %(tag)s") % {"tag": name})
start_name = open_elements.pop()
if start_name != name:
raise LintError(_("EndTag (%(end)s) does not match StartTag (%(start)s)") % {"end": name, "start": start_name})
contentModelFlag = "PCDATA"
elif type == "Comment":
if contentModelFlag != "PCDATA":
raise LintError(_("Comment not in PCDATA content model flag"))
elif type in ("Characters", "SpaceCharacters"):
data = token["data"]
if not isinstance(data, str):
raise LintError(_("Attribute name is not a string: %(name)r") % {"name": data})
if not data:
raise LintError(_("%(type)s token with empty data") % {"type": type})
if type == "SpaceCharacters":
data = data.strip(spaceCharacters)
if data:
raise LintError(_("Non-space character(s) found in SpaceCharacters token: %(token)r") % {"token": data})
elif type == "Doctype":
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("Doctype not in PCDATA content model flag: %(name)s") % {"name": name})
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
# XXX: what to do with token["data"] ?
elif type in ("ParseError", "SerializeError"):
pass
else:
raise LintError(_("Unknown token type: %(type)s") % {"type": type})
yield token
|
Homeloc/validictory
|
refs/heads/master
|
validictory/tests/test_defaults.py
|
3
|
from unittest import TestCase
import validictory
class TestItems(TestCase):
def test_property(self):
schema = {
"type": "object",
"properties": {
"foo": {
"default": "bar"
},
"baz": {
"type": "integer"
}
}
}
data = {'baz': 2}
result = validictory.validate(data, schema, required_by_default=False)
self.assertEqual(result, {"foo": "bar", "baz": 2})
def test_item(self):
schema = {
'type': 'array',
'items': [
{
'type': 'any'
},
{
'type': 'string'
},
{
'default': 'baz'
},
]
}
data = ['foo', 'bar']
result = validictory.validate(data, schema, required_by_default=False)
self.assertEqual(result, ["foo", "bar", "baz"])
|
hmen89/odoo
|
refs/heads/master
|
openerp/addons/base/ir/ir_model.py
|
12
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import re
import time
import types
import openerp
import openerp.modules.registry
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import fields, osv
from openerp.osv.orm import BaseModel, Model, MAGIC_COLUMNS, except_orm
from openerp.tools import config
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def _get_fields_type(self, cr, uid, context=None):
# Avoid too many nested `if`s below, as RedHat's Python 2.6
# breaks on it. See bug 939653.
return sorted([(k,k) for k,v in fields.__dict__.iteritems()
if type(v) == types.TypeType and \
issubclass(v, fields._column) and \
v != fields._column and \
not v._deprecated and \
not issubclass(v, fields.function)])
def _in_modules(self, cr, uid, ids, field_name, arg, context=None):
#pseudo-method used by fields.function in ir.model/ir.model.fields
module_pool = self.pool["ir.module.module"]
installed_module_ids = module_pool.search(cr, uid, [('state','=','installed')])
installed_module_names = module_pool.read(cr, uid, installed_module_ids, ['name'], context=context)
installed_modules = set(x['name'] for x in installed_module_names)
result = {}
xml_ids = osv.osv._get_xml_ids(self, cr, uid, ids)
for k,v in xml_ids.iteritems():
result[k] = ', '.join(sorted(installed_modules & set(xml_id.split('.')[0] for xml_id in v)))
return result
class ir_model(osv.osv):
_name = 'ir.model'
_description = "Models"
_order = 'model'
def _is_osv_memory(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids, context=context)
res = dict.fromkeys(ids)
for model in models:
if model.model in self.pool:
res[model.id] = self.pool[model.model].is_transient()
else:
_logger.error('Missing model %s' % (model.model, ))
return res
def _search_osv_memory(self, cr, uid, model, name, domain, context=None):
if not domain:
return []
__, operator, value = domain[0]
if operator not in ['=', '!=']:
raise osv.except_osv(_("Invalid Search Criteria"), _('The osv_memory field can only be compared with = and != operator.'))
value = bool(value) if operator == '=' else not bool(value)
all_model_ids = self.search(cr, uid, [], context=context)
is_osv_mem = self._is_osv_memory(cr, uid, all_model_ids, 'osv_memory', arg=None, context=context)
return [('id', 'in', [id for id in is_osv_mem if bool(is_osv_mem[id]) == value])]
def _view_ids(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids)
res = {}
for model in models:
res[model.id] = self.pool["ir.ui.view"].search(cr, uid, [('model', '=', model.model)])
return res
_columns = {
'name': fields.char('Model Description', translate=True, required=True),
'model': fields.char('Model', required=True, select=1),
'info': fields.text('Information'),
'field_id': fields.one2many('ir.model.fields', 'model_id', 'Fields', required=True, copy=True),
'state': fields.selection([('manual','Custom Object'),('base','Base Object')],'Type', readonly=True),
'access_ids': fields.one2many('ir.model.access', 'model_id', 'Access'),
'osv_memory': fields.function(_is_osv_memory, string='Transient Model', type='boolean',
fnct_search=_search_osv_memory,
help="This field specifies whether the model is transient or not (i.e. if records are automatically deleted from the database or not)"),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the object is defined or inherited'),
'view_ids': fields.function(_view_ids, type='one2many', obj='ir.ui.view', string='Views'),
}
_defaults = {
'model': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
}
def _check_model_name(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context=context):
if model.state=='manual':
if not model.model.startswith('x_'):
return False
if not re.match('^[a-z_A-Z0-9.]+$',model.model):
return False
return True
def _model_name_msg(self, cr, uid, ids, context=None):
return _('The Object name must start with x_ and not contain any special character !')
_constraints = [
(_check_model_name, _model_name_msg, ['model']),
]
_sql_constraints = [
('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
]
# overridden to allow searching both on model name (model field)
# and model description (name field)
def _name_search(self, cr, uid, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
if args is None:
args = []
domain = args + ['|', ('model', operator, name), ('name', operator, name)]
return self.name_get(cr, name_get_uid or uid,
super(ir_model, self).search(cr, uid, domain, limit=limit, context=context),
context=context)
def _drop_table(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context):
model_pool = self.pool[model.model]
cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
result = cr.fetchone()
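# pg_class.relkind is 'v' when the model is backed by a view and 'r' when it
# is a regular table; drop whichever kind actually exists.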
if result and result[0] == 'v':
cr.execute('DROP view %s' % (model_pool._table,))
elif result and result[0] == 'r':
cr.execute('DROP TABLE %s CASCADE' % (model_pool._table,))
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module tables
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG):
for model in self.browse(cr, user, ids, context):
if model.state != 'manual':
raise except_orm(_('Error'), _("Model '%s' contains module data and cannot be removed!") % (model.name,))
self._drop_table(cr, user, ids, context)
res = super(ir_model, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
# only reload pool for normal unlink. For module uninstall the
# reload is done independently in openerp.modules.loading
cr.commit() # must be committed before reloading registry in new cursor
openerp.modules.registry.RegistryManager.new(cr.dbname)
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context:
context = dict(context)
context.pop('__last_update', None)
# Filter out operations 4 link from field id, because openerp-web
# always write (4,id,False) even for non dirty items
if 'field_id' in vals:
vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
return super(ir_model,self).write(cr, user, ids, vals, context)
def create(self, cr, user, vals, context=None):
if context is None:
context = {}
if context and context.get('manual'):
vals['state']='manual'
res = super(ir_model,self).create(cr, user, vals, context)
if vals.get('state','base')=='manual':
self.instanciate(cr, user, vals['model'], context)
model = self.pool[vals['model']]
model._prepare_setup_fields(cr, SUPERUSER_ID)
model._setup_fields(cr, SUPERUSER_ID)
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
model._auto_init(cr, ctx)
model._auto_end(cr, ctx) # actually create FKs!
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def instanciate(self, cr, user, model, context=None):
class x_custom_model(osv.osv):
_custom = True
if isinstance(model, unicode):
model = model.encode('utf-8')
x_custom_model._name = model
x_custom_model._module = False
a = x_custom_model._build_model(self.pool, cr)
if not a._columns:
x_name = 'id'
elif 'x_name' in a._columns.keys():
x_name = 'x_name'
else:
x_name = a._columns.keys()[0]
x_custom_model._rec_name = x_name
a._rec_name = x_name
class ir_model_fields(osv.osv):
_name = 'ir.model.fields'
_description = "Fields"
_rec_name = 'field_description'
_columns = {
'name': fields.char('Name', required=True, select=1),
'complete_name': fields.char('Complete Name', select=1),
'model': fields.char('Object Name', required=True, select=1,
help="The technical name of the model this field belongs to"),
'relation': fields.char('Object Relation',
help="For relationship fields, the technical name of the target model"),
'relation_field': fields.char('Relation Field',
help="For one2many fields, the field on the target model that implement the opposite many2one relationship"),
'model_id': fields.many2one('ir.model', 'Model', required=True, select=True, ondelete='cascade',
help="The model this field belongs to"),
'field_description': fields.char('Field Label', required=True),
'ttype': fields.selection(_get_fields_type, 'Field Type', required=True),
'selection': fields.char('Selection Options', help="List of options for a selection field, "
"specified as a Python expression defining a list of (key, label) pairs. "
"For example: [('blue','Blue'),('yellow','Yellow')]"),
'required': fields.boolean('Required'),
'readonly': fields.boolean('Readonly'),
'select_level': fields.selection([('0','Not Searchable'),('1','Always Searchable'),('2','Advanced Search (deprecated)')],'Searchable', required=True),
'translate': fields.boolean('Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)"),
'size': fields.integer('Size'),
'state': fields.selection([('manual','Custom Field'),('base','Base Field')],'Type', required=True, readonly=True, select=1),
'on_delete': fields.selection([('cascade','Cascade'),('set null','Set NULL')], 'On Delete', help='On delete property for many2one fields'),
'domain': fields.char('Domain', help="The optional domain to restrict possible values for relationship fields, "
"specified as a Python expression defining a list of triplets. "
"For example: [('color','=','red')]"),
'groups': fields.many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id', 'Groups'),
'selectable': fields.boolean('Selectable'),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the field is defined'),
'serialization_field_id': fields.many2one('ir.model.fields', 'Serialization Field', domain = "[('ttype','=','serialized')]",
ondelete='cascade', help="If set, this field will be stored in the sparse "
"structure of the serialization field, instead "
"of having its own database column. This cannot be "
"changed after creation."),
}
_rec_name='field_description'
_defaults = {
'selection': "",
'domain': "[]",
'name': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
'on_delete': 'set null',
'select_level': '0',
'field_description': '',
'selectable': 1,
}
_order = "name"
def _check_selection(self, cr, uid, selection, context=None):
try:
selection_list = eval(selection)
except Exception:
_logger.warning('Invalid selection list definition for fields.selection', exc_info=True)
raise except_orm(_('Error'),
_("The Selection Options expression is not a valid Pythonic expression."
"Please provide an expression in the [('key','Label'), ...] format."))
check = True
if not (isinstance(selection_list, list) and selection_list):
check = False
else:
for item in selection_list:
if not (isinstance(item, (tuple,list)) and len(item) == 2):
check = False
break
if not check:
raise except_orm(_('Error'),
_("The Selection Options expression is must be in the [('key','Label'), ...] format!"))
return True
def _size_gt_zero_msg(self, cr, user, ids, context=None):
return _('Size of the field can never be less than 0 !')
_sql_constraints = [
('size_gt_zero', 'CHECK (size>=0)',_size_gt_zero_msg ),
]
def _drop_column(self, cr, uid, ids, context=None):
for field in self.browse(cr, uid, ids, context):
if field.name in MAGIC_COLUMNS:
continue
model = self.pool[field.model]
cr.execute('select relkind from pg_class where relname=%s', (model._table,))
result = cr.fetchone()
cr.execute("SELECT column_name FROM information_schema.columns WHERE table_name ='%s' and column_name='%s'" %(model._table, field.name))
column_name = cr.fetchone()
if column_name and (result and result[0] == 'r'):
cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
model._columns.pop(field.name, None)
# remove m2m relation table for custom fields
# we consider the m2m relation is only one way as it's not possible
# to specify the relation table in the interface for custom fields
# TODO master: maybe use ir.model.relations for custom fields
if field.state == 'manual' and field.ttype == 'many2many':
rel_name = self.pool[field.model]._all_columns[field.name].column._rel
cr.execute('DROP table "%s"' % (rel_name))
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module columns
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG) and \
any(field.state != 'manual' for field in self.browse(cr, user, ids, context)):
raise except_orm(_('Error'), _("This column contains module data and cannot be removed!"))
self._drop_column(cr, user, ids, context)
res = super(ir_model_fields, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
cr.commit()
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def create(self, cr, user, vals, context=None):
if 'model_id' in vals:
model_data = self.pool['ir.model'].browse(cr, user, vals['model_id'])
vals['model'] = model_data.model
if context is None:
context = {}
if context and context.get('manual',False):
vals['state'] = 'manual'
if vals.get('ttype', False) == 'selection':
if not vals.get('selection',False):
raise except_orm(_('Error'), _('For selection fields, the Selection Options must be given!'))
self._check_selection(cr, user, vals['selection'], context=context)
res = super(ir_model_fields,self).create(cr, user, vals, context)
if vals.get('state','base') == 'manual':
if not vals['name'].startswith('x_'):
raise except_orm(_('Error'), _("Custom fields must have a name that starts with 'x_' !"))
if vals.get('relation',False) and not self.pool['ir.model'].search(cr, user, [('model','=',vals['relation'])]):
raise except_orm(_('Error'), _("Model %s does not exist!") % vals['relation'])
if vals['model'] in self.pool:
model = self.pool[vals['model']]
if vals['model'].startswith('x_') and vals['name'] == 'x_name':
model._rec_name = 'x_name'
model.__init__(self.pool, cr)
model._prepare_setup_fields(cr, SUPERUSER_ID)
model._setup_fields(cr, SUPERUSER_ID)
#Added context to _auto_init for special treatment to custom field for select_level
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
model._auto_init(cr, ctx)
model._auto_end(cr, ctx) # actually create FKs!
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context is None:
context = {}
if context and context.get('manual',False):
vals['state'] = 'manual'
#For the moment renaming a sparse field or changing the storing system is not allowed. This may be done later
if 'serialization_field_id' in vals or 'name' in vals:
for field in self.browse(cr, user, ids, context=context):
if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
raise except_orm(_('Error!'), _('Changing the storing system for field "%s" is not allowed.')%field.name)
if field.serialization_field_id and (field.name != vals['name']):
raise except_orm(_('Error!'), _('Renaming sparse field "%s" is not allowed')%field.name)
column_rename = None # if set, *one* column can be renamed here
models_patch = {} # structs of (obj, [(field, prop, change_to),..])
# data to be updated on the orm model
# static table of properties
model_props = [ # (our-name, fields.prop, set_fn)
('field_description', 'string', tools.ustr),
('required', 'required', bool),
('readonly', 'readonly', bool),
('domain', '_domain', eval),
('size', 'size', int),
('on_delete', 'ondelete', str),
('translate', 'translate', bool),
('selectable', 'selectable', bool),
('select_level', 'select', int),
('selection', 'selection', eval),
]
if vals and ids:
checked_selection = False # need only check it once, so defer
for item in self.browse(cr, user, ids, context=context):
obj = self.pool.get(item.model)
if item.state != 'manual':
raise except_orm(_('Error!'),
_('Properties of base fields cannot be altered in this manner! '
'Please modify them through Python code, '
'preferably through a custom addon!'))
if item.ttype == 'selection' and 'selection' in vals \
and not checked_selection:
self._check_selection(cr, user, vals['selection'], context=context)
checked_selection = True
final_name = item.name
if 'name' in vals and vals['name'] != item.name:
# We need to rename the column
if column_rename:
raise except_orm(_('Error!'), _('Can only rename one column at a time!'))
if vals['name'] in obj._columns:
raise except_orm(_('Error!'), _('Cannot rename column to %s, because that column already exists!') % vals['name'])
if vals.get('state', 'base') == 'manual' and not vals['name'].startswith('x_'):
raise except_orm(_('Error!'), _('New column name must still start with x_ , because it is a custom field!'))
if '\'' in vals['name'] or '"' in vals['name'] or ';' in vals['name']:
raise ValueError('Invalid character in column name')
column_rename = (obj, (obj._table, item.name, vals['name']))
final_name = vals['name']
if 'model_id' in vals and vals['model_id'] != item.model_id:
raise except_orm(_("Error!"), _("Changing the model of a field is forbidden!"))
if 'ttype' in vals and vals['ttype'] != item.ttype:
raise except_orm(_("Error!"), _("Changing the type of a column is not yet supported. "
"Please drop it and create it again!"))
# We don't check the 'state', because it might come from the context
# (thus be set for multiple fields) and will be ignored anyway.
if obj is not None:
models_patch.setdefault(obj._name, (obj,[]))
# find out which properties (per model) we need to update
for field_name, field_property, set_fn in model_props:
if field_name in vals:
property_value = set_fn(vals[field_name])
if getattr(obj._columns[item.name], field_property) != property_value:
models_patch[obj._name][1].append((final_name, field_property, property_value))
# our dict is ready here, but no properties are changed so far
# These shall never be written (modified)
for column_name in ('model_id', 'model', 'state'):
if column_name in vals:
del vals[column_name]
res = super(ir_model_fields,self).write(cr, user, ids, vals, context=context)
if column_rename:
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % column_rename[1])
# This is VERY risky, but let us have this feature:
# we want to change the key of column in obj._columns dict
col = column_rename[0]._columns.pop(column_rename[1][1]) # take object out, w/o copy
column_rename[0]._columns[column_rename[1][2]] = col
if models_patch:
# We have to update _columns of the model(s) and then call their
# _auto_init to sync the db with the model. Hopefully, since write()
# was called earlier, they will be in-sync before the _auto_init.
# Anything we don't update in _columns now will be reset from
# the model into ir.model.fields (db).
ctx = dict(context, select=vals.get('select_level', '0'),
update_custom_fields=True)
for __, patch_struct in models_patch.items():
obj = patch_struct[0]
for col_name, col_prop, val in patch_struct[1]:
setattr(obj._columns[col_name], col_prop, val)
obj._auto_init(cr, ctx)
obj._auto_end(cr, ctx) # actually create FKs!
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
class ir_model_constraint(Model):
"""
This model tracks PostgreSQL foreign keys and constraints used by OpenERP
models.
"""
_name = 'ir.model.constraint'
_columns = {
'name': fields.char('Constraint', required=True, select=1,
help="PostgreSQL constraint or foreign key name."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'type': fields.char('Constraint Type', required=True, size=1, select=1,
help="Type of the constraint: `f` for a foreign key, "
"`u` for other constraints."),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)',
'Constraints with the same name are unique per module.'),
]
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL foreign keys and constraints tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
ids_set = set(ids)
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model.model
model_obj = self.pool[model]
name = openerp.tools.ustr(data.name)
typ = data.type
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
if typ == 'f':
# test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('f', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped FK CONSTRAINT %s@%s', name, model)
if typ == 'u':
# test if constraint exists
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('u', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped CONSTRAINT %s@%s', name, model)
self.unlink(cr, uid, ids, context)
class ir_model_relation(Model):
"""
This model tracks PostgreSQL tables used to implement OpenERP many2many
relations.
"""
_name = 'ir.model.relation'
_columns = {
'name': fields.char('Relation Name', required=True, select=1,
help="PostgreSQL table name implementing a many2many relation."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL many2many relations tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
ids_set = set(ids)
to_drop_table = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
name = openerp.tools.ustr(data.name)
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
if cr.fetchone() and not name in to_drop_table:
to_drop_table.append(name)
self.unlink(cr, uid, ids, context)
# drop m2m relation tables
for table in to_drop_table:
cr.execute('DROP TABLE %s CASCADE'% table,)
_logger.info('Dropped table %s', table)
cr.commit()
class ir_model_access(osv.osv):
_name = 'ir.model.access'
_columns = {
'name': fields.char('Name', required=True, select=True),
'active': fields.boolean('Active', help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module).'),
'model_id': fields.many2one('ir.model', 'Object', required=True, domain=[('osv_memory','=', False)], select=True, ondelete='cascade'),
'group_id': fields.many2one('res.groups', 'Group', ondelete='cascade', select=True),
'perm_read': fields.boolean('Read Access'),
'perm_write': fields.boolean('Write Access'),
'perm_create': fields.boolean('Create Access'),
'perm_unlink': fields.boolean('Delete Access'),
}
_defaults = {
'active': True,
}
def check_groups(self, cr, uid, group):
grouparr = group.split('.')
if not grouparr:
return False
cr.execute("select 1 from res_groups_users_rel where uid=%s and gid IN (select res_id from ir_model_data where module=%s and name=%s)", (uid, grouparr[0], grouparr[1],))
return bool(cr.fetchone())
def check_group(self, cr, uid, model, mode, group_ids):
""" Check if a specific group has the access mode to the specified model"""
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.name
else:
model_name = model
if isinstance(group_ids, (int, long)):
group_ids = [group_ids]
for group_id in group_ids:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id = %s", (model_name, group_id)
)
r = cr.fetchone()
if r is None:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id IS NULL", (model_name, )
)
r = cr.fetchone()
access = bool(r and r[0])
if access:
return True
# pass no groups -> no access
return False
def group_names_with_access(self, cr, model_name, access_mode):
"""Returns the names of visible groups which have been granted ``access_mode`` on
the model ``model_name``.
:rtype: list
"""
assert access_mode in ['read','write','create','unlink'], 'Invalid access mode: %s' % access_mode
cr.execute('''SELECT
c.name, g.name
FROM
ir_model_access a
JOIN ir_model m ON (a.model_id=m.id)
JOIN res_groups g ON (a.group_id=g.id)
LEFT JOIN ir_module_category c ON (c.id=g.category_id)
WHERE
m.model=%s AND
a.active IS True AND
a.perm_''' + access_mode, (model_name,))
return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]
@tools.ormcache()
def check(self, cr, uid, model, mode='read', raise_exception=True, context=None):
if uid==1:
# User root has all accesses
# TODO: exclude xml-rpc requests
return True
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.model
else:
model_name = model
# TransientModel records have no access rights, only an implicit access rule
if model_name not in self.pool:
_logger.error('Missing model %s' % (model_name, ))
elif self.pool[model_name].is_transient():
return True
# We check if a specific rule exists
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' JOIN res_groups_users_rel gu ON (gu.gid = a.group_id) '
' WHERE m.model = %s '
' AND gu.uid = %s '
' AND a.active IS True '
, (model_name, uid,)
)
r = cr.fetchone()[0]
if r is None:
# there is no specific rule. We check the generic rule
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' WHERE a.group_id IS NULL '
' AND m.model = %s '
' AND a.active IS True '
, (model_name,)
)
r = cr.fetchone()[0]
if not r and raise_exception:
groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(cr, model_name, mode))
msg_heads = {
# Messages are declared in extenso so they are properly exported in translation terms
'read': _("Sorry, you are not allowed to access this document."),
'write': _("Sorry, you are not allowed to modify this document."),
'create': _("Sorry, you are not allowed to create this kind of document."),
'unlink': _("Sorry, you are not allowed to delete this document."),
}
if groups:
msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
msg_params = (groups, model_name)
else:
msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
msg_params = (model_name,)
_logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, uid, model_name)
msg = '%s %s' % (msg_heads[mode], msg_tail)
raise openerp.exceptions.AccessError(msg % msg_params)
return r or False
__cache_clearing_methods = []
def register_cache_clearing_method(self, model, method):
self.__cache_clearing_methods.append((model, method))
def unregister_cache_clearing_method(self, model, method):
try:
i = self.__cache_clearing_methods.index((model, method))
del self.__cache_clearing_methods[i]
except ValueError:
pass
def call_cache_clearing_methods(self, cr):
self.invalidate_cache(cr, SUPERUSER_ID)
self.check.clear_cache(self) # clear the cache of check function
for model, method in self.__cache_clearing_methods:
if model in self.pool:
getattr(self.pool[model], method)()
#
# Check rights on actions
#
def write(self, cr, uid, ids, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).write(cr, uid, ids, values, context=context)
return res
def create(self, cr, uid, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).create(cr, uid, values, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).unlink(cr, uid, ids, context=context)
return res
class ir_model_data(osv.osv):
"""Holds external identifier keys for records in the database.
This has two main uses:
* allows easy data integration with third-party systems,
making import/export/sync of data possible, as records
can be uniquely identified across multiple systems
* allows tracking the origin of data installed by OpenERP
modules themselves, thus making it possible to later
update them seamlessly.
"""
_name = 'ir.model.data'
_order = 'module,model,name'
def name_get(self, cr, uid, ids, context=None):
result = {}
result2 = []
for res in self.browse(cr, uid, ids, context=context):
if res.id:
result.setdefault(res.model, {})
result[res.model][res.res_id] = res.id
for model in result:
try:
r = dict(self.pool[model].name_get(cr, uid, result[model].keys(), context=context))
for key,val in result[model].items():
result2.append((val, r.get(key, False)))
except:
                # some objects have no valid name_get implemented; we accept this
pass
return result2
def _complete_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
for res in self.browse(cr, uid, ids, context=context):
result[res.id] = (res.module and (res.module + '.') or '')+res.name
return result
_columns = {
'name': fields.char('External Identifier', required=True, select=1,
help="External Key/Identifier that can be used for "
"data integration with third-party systems"),
'complete_name': fields.function(_complete_name_get, type='char', string='Complete ID'),
'model': fields.char('Model Name', required=True, select=1),
'module': fields.char('Module', required=True, select=1),
'res_id': fields.integer('Record ID', select=1,
help="ID of the target record in the database"),
'noupdate': fields.boolean('Non Updatable'),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Init Date')
}
_defaults = {
'date_init': fields.datetime.now,
'date_update': fields.datetime.now,
'noupdate': False,
'module': ''
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)', 'You cannot have multiple records with the same external ID in the same module!'),
]
def __init__(self, pool, cr):
osv.osv.__init__(self, pool, cr)
# also stored in pool to avoid being discarded along with this osv instance
if getattr(pool, 'model_data_reference_ids', None) is None:
self.pool.model_data_reference_ids = {}
# put loads on the class, in order to share it among all instances
type(self).loads = self.pool.model_data_reference_ids
def _auto_init(self, cr, context=None):
super(ir_model_data, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_model_data_module_name_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_model_data_module_name_index ON ir_model_data (module, name)')
# NEW V8 API
@tools.ormcache(skiparg=3)
def xmlid_lookup(self, cr, uid, xmlid):
"""Low level xmlid lookup
Return (id, res_model, res_id) or raise ValueError if not found
"""
module, name = xmlid.split('.', 1)
ids = self.search(cr, uid, [('module','=',module), ('name','=', name)])
if not ids:
raise ValueError('External ID not found in the system: %s' % (xmlid))
# the sql constraints ensure us we have only one result
res = self.read(cr, uid, ids[0], ['model', 'res_id'])
if not res['res_id']:
raise ValueError('External ID not found in the system: %s' % (xmlid))
return ids[0], res['model'], res['res_id']
def xmlid_to_res_model_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Return (res_model, res_id)"""
try:
return self.xmlid_lookup(cr, uid, xmlid)[1:3]
except ValueError:
if raise_if_not_found:
raise
return (False, False)
def xmlid_to_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Returns res_id """
return self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)[1]
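    # Usage sketch (illustrative; assumes a valid cursor ``cr`` and user ``uid``):
    #   imd = self.pool['ir.model.data']
    #   res_model, res_id = imd.xmlid_to_res_model_res_id(cr, uid, 'base.user_root')
    #   res_id = imd.xmlid_to_res_id(cr, uid, 'base.user_root', raise_if_not_found=True)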
def xmlid_to_object(self, cr, uid, xmlid, raise_if_not_found=False, context=None):
""" Return a browse_record
if not found and raise_if_not_found is True return None
"""
t = self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)
res_model, res_id = t
if res_model and res_id:
record = self.pool[res_model].browse(cr, uid, res_id, context=context)
if record.exists():
return record
if raise_if_not_found:
                raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xmlid))
return None
# OLD API
def _get_id(self, cr, uid, module, xml_id):
"""Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[0]
def get_object_reference(self, cr, uid, module, xml_id):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[1:3]
def check_object_reference(self, cr, uid, module, xml_id, raise_on_access_error=False):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached), if and only if the user has the necessary access rights
to see that object, otherwise raise a ValueError if raise_on_access_error is True or returns a tuple (model found, False)"""
model, res_id = self.get_object_reference(cr, uid, module, xml_id)
#search on id found in result to check if current user has read access right
check_right = self.pool.get(model).search(cr, uid, [('id', '=', res_id)])
if check_right:
return model, res_id
if raise_on_access_error:
raise ValueError('Not enough access rights on the external ID: %s.%s' % (module, xml_id))
return model, False
def get_object(self, cr, uid, module, xml_id, context=None):
""" Returns a browsable record for the given module name and xml_id.
If not found, raise a ValueError or return None, depending
on the value of `raise_exception`.
"""
return self.xmlid_to_object(cr, uid, "%s.%s" % (module, xml_id), raise_if_not_found=True, context=context)
def _update_dummy(self,cr, uid, model, module, xml_id=False, store=True):
if not xml_id:
return False
try:
id = self.read(cr, uid, [self._get_id(cr, uid, module, xml_id)], ['res_id'])[0]['res_id']
self.loads[(module,xml_id)] = (model,id)
except:
id = False
return id
def clear_caches(self):
""" Clears all orm caches on the object's methods
:returns: itself
"""
self.xmlid_lookup.clear_cache(self)
return self
def unlink(self, cr, uid, ids, context=None):
""" Regular unlink method, but make sure to clear the caches. """
self.clear_caches()
return super(ir_model_data,self).unlink(cr, uid, ids, context=context)
def _update(self,cr, uid, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False, context=None):
model_obj = self.pool[model]
if not context:
context = {}
# records created during module install should not display the messages of OpenChatter
context = dict(context, install_mode=True)
if xml_id and ('.' in xml_id):
assert len(xml_id.split('.'))==2, _("'%s' contains too many dots. XML ids should not contain dots ! These are used to refer to other modules data, as in module.reference_id") % xml_id
module, xml_id = xml_id.split('.')
action_id = False
if xml_id:
cr.execute('''SELECT imd.id, imd.res_id, md.id, imd.model, imd.noupdate
FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
WHERE imd.module=%%s AND imd.name=%%s''' % model_obj._table,
(module, xml_id))
results = cr.fetchall()
for imd_id2,res_id2,real_id2,real_model,noupdate_imd in results:
# In update mode, do not update a record if it's ir.model.data is flagged as noupdate
if mode == 'update' and noupdate_imd:
return res_id2
if not real_id2:
self.clear_caches()
cr.execute('delete from ir_model_data where id=%s', (imd_id2,))
res_id = False
else:
assert model == real_model, "External ID conflict, %s already refers to a `%s` record,"\
" you can't define a `%s` record with this ID." % (xml_id, real_model, model)
res_id,action_id = res_id2,imd_id2
if action_id and res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
self.write(cr, uid, [action_id], {
'date_update': time.strftime('%Y-%m-%d %H:%M:%S'),
},context=context)
elif res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, uid, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, uid, {
'name': xml_id,
'model': model,
'module':module,
'res_id':res_id,
'noupdate': noupdate,
},context=context)
else:
if mode=='init' or (mode=='update' and xml_id):
res_id = model_obj.create(cr, uid, values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, uid, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, uid, {
'name': xml_id,
'model': model,
'module': module,
'res_id': res_id,
'noupdate': noupdate
},context=context)
if xml_id and res_id:
self.loads[(module, xml_id)] = (model, res_id)
for table, inherit_field in model_obj._inherits.iteritems():
inherit_id = model_obj.read(cr, uid, [res_id],
[inherit_field])[0][inherit_field]
self.loads[(module, xml_id + '_' + table.replace('.', '_'))] = (table, inherit_id)
return res_id
def ir_set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=None, xml_id=False):
if isinstance(models[0], (list, tuple)):
model,res_id = models[0]
else:
res_id=None
model = models[0]
if res_id:
where = ' and res_id=%s' % (res_id,)
else:
where = ' and (res_id is null)'
if key2:
where += ' and key2=\'%s\'' % (key2,)
else:
where += ' and (key2 is null)'
cr.execute('select * from ir_values where model=%s and key=%s and name=%s'+where,(model, key, name))
res = cr.fetchone()
ir_values_obj = openerp.registry(cr.dbname)['ir.values']
if not res:
ir_values_obj.set(cr, uid, key, key2, name, models, value, replace, isobject, meta)
elif xml_id:
cr.execute('UPDATE ir_values set value=%s WHERE model=%s and key=%s and name=%s'+where,(value, model, key, name))
ir_values_obj.invalidate_cache(cr, uid, ['value'])
return True
def _module_data_uninstall(self, cr, uid, modules_to_remove, context=None):
"""Deletes all the records referenced by the ir.model.data entries
``ids`` along with their corresponding database backed (including
dropping tables, columns, FKs, etc, as long as there is no other
ir.model.data entry holding a reference to them (which indicates that
they are still owned by another module).
Attempts to perform the deletion in an appropriate order to maximize
the chance of gracefully deleting all records.
This step is performed as part of the full uninstallation of a module.
"""
ids = self.search(cr, uid, [('module', 'in', modules_to_remove)])
if uid != 1 and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
context[MODULE_UNINSTALL_FLAG] = True # enable model/field deletion
ids_set = set(ids)
wkf_todo = []
to_unlink = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
res_id = data.res_id
pair_to_unlink = (model, res_id)
if pair_to_unlink not in to_unlink:
to_unlink.append(pair_to_unlink)
if model == 'workflow.activity':
# Special treatment for workflow activities: temporarily revert their
# incoming transition and trigger an update to force all workflow items
# to move out before deleting them
cr.execute('select res_type,res_id from wkf_instance where id IN (select inst_id from wkf_workitem where act_id=%s)', (res_id,))
wkf_todo.extend(cr.fetchall())
cr.execute("update wkf_transition set condition='True', group_id=NULL, signal=NULL,act_to=act_from,act_from=%s where act_to=%s", (res_id,res_id))
self.invalidate_cache(cr, uid, context=context)
for model,res_id in wkf_todo:
try:
openerp.workflow.trg_write(uid, model, res_id, cr)
except Exception:
_logger.info('Unable to force processing of workflow for item %s@%s in order to leave activity to be deleted', res_id, model, exc_info=True)
def unlink_if_refcount(to_unlink):
for model, res_id in to_unlink:
external_ids = self.search(cr, uid, [('model', '=', model),('res_id', '=', res_id)])
if set(external_ids)-ids_set:
# if other modules have defined this record, we must not delete it
continue
if model == 'ir.model.fields':
# Don't remove the LOG_ACCESS_COLUMNS unless _log_access
# has been turned off on the model.
field = self.pool[model].browse(cr, uid, [res_id], context=context)[0]
if not field.exists():
_logger.info('Deleting orphan external_ids %s', external_ids)
self.unlink(cr, uid, external_ids)
continue
if field.name in openerp.models.LOG_ACCESS_COLUMNS and self.pool[field.model]._log_access:
continue
if field.name == 'id':
continue
_logger.info('Deleting %s@%s', res_id, model)
try:
cr.execute('SAVEPOINT record_unlink_save')
self.pool[model].unlink(cr, uid, [res_id], context=context)
except Exception:
_logger.info('Unable to delete %s@%s', res_id, model, exc_info=True)
cr.execute('ROLLBACK TO SAVEPOINT record_unlink_save')
else:
cr.execute('RELEASE SAVEPOINT record_unlink_save')
# Remove non-model records first, then model fields, and finish with models
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model not in ('ir.model','ir.model.fields','ir.model.constraint'))
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.constraint')
ir_module_module = self.pool['ir.module.module']
ir_model_constraint = self.pool['ir.model.constraint']
modules_to_remove_ids = ir_module_module.search(cr, uid, [('name', 'in', modules_to_remove)], context=context)
constraint_ids = ir_model_constraint.search(cr, uid, [('module', 'in', modules_to_remove_ids)], context=context)
ir_model_constraint._module_data_uninstall(cr, uid, constraint_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.fields')
ir_model_relation = self.pool['ir.model.relation']
relation_ids = ir_model_relation.search(cr, uid, [('module', 'in', modules_to_remove_ids)])
ir_model_relation._module_data_uninstall(cr, uid, relation_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model')
cr.commit()
self.unlink(cr, uid, ids, context)
def _process_end(self, cr, uid, modules):
""" Clear records removed from updated module data.
This method is called at the end of the module loading process.
        It is meant to remove records that are no longer present in the
        updated data. Such records are recognised as the ones that have an xml id
        and a module in ir_model_data, have noupdate set to false, but are not
        present in self.loads.
"""
if not modules:
return True
to_unlink = []
cr.execute("""SELECT id,name,model,res_id,module FROM ir_model_data
WHERE module IN %s AND res_id IS NOT NULL AND noupdate=%s ORDER BY id DESC""",
(tuple(modules), False))
for (id, name, model, res_id, module) in cr.fetchall():
if (module,name) not in self.loads:
to_unlink.append((model,res_id))
if not config.get('import_partial'):
for (model, res_id) in to_unlink:
if model in self.pool:
_logger.info('Deleting %s@%s', res_id, model)
self.pool[model].unlink(cr, uid, [res_id])
class wizard_model_menu(osv.osv_memory):
_name = 'wizard.ir.model.menu.create'
_columns = {
'menu_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
'name': fields.char('Menu Name', required=True),
}
def menu_create(self, cr, uid, ids, context=None):
if not context:
context = {}
model_pool = self.pool.get('ir.model')
for menu in self.browse(cr, uid, ids, context):
model = model_pool.browse(cr, uid, context.get('model_id'), context=context)
val = {
'name': menu.name,
'res_model': model.model,
'view_type': 'form',
'view_mode': 'tree,form'
}
action_id = self.pool.get('ir.actions.act_window').create(cr, uid, val)
self.pool.get('ir.ui.menu').create(cr, uid, {
'name': menu.name,
'parent_id': menu.menu_id.id,
'action': 'ir.actions.act_window,%d' % (action_id,),
'icon': 'STOCK_INDENT'
}, context)
return {'type':'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
pchrista/AliPhysics
|
refs/heads/master
|
PWGJE/EMCALJetTasks/macros/JetQA/plotPWGJEQA.py
|
27
|
#! /usr/bin/env python
# Macro to plot PWGJE QA histograms, using AliAnalysisTaskPWGJEQA.
#
# It automatically detects what to plot, based on the content of your analysis output file:
# whether to do track/calo/jet/event QA, as well as MC vs. data, PbPb vs. pp, Run1 vs. Run2, Phos vs. no Phos.
#
# To run:
# python plotPWGJEQA.py -f "/my/dir/AnalysisResults.root" -o "/my/dir/outputQA/" -i ".png"
#
# (or, run without options: defaults are "AnalysisResults.root" and "./outputQA/" and ".pdf")
#
# If not using standard AOD collections, you need to set the list names in the config below.
# You may need to set some of the displayed ranges on the plots.
#
# Note: It is possible you will have to change the scaling on a couple plots, to give them reasonable ranges.
#
# Note: AliAnalysisTaskPWGJEQA uses variable binning for centrality, track pT, track pT-res, and cluster E.
# Relevant histograms are plotted below using "width" scaling option to divide by bin width, when applicable.
#
# Note: Changing the binning in the analysis task may break some functionality here.
#
# Author: James Mulligan (james.mulligan@yale.edu)
# Track plotting based in part on code from plotJETrackQA.C
# General
import os
import sys
import argparse
import itertools
import math
# ROOT
import ROOT
# Prevent ROOT from stealing focus when plotting
ROOT.gROOT.SetBatch(True)
def plotPWGJEQA(inputFile, outputDir, referenceFile, fileFormat):
# Open input file and get relevant lists
f = ROOT.TFile(inputFile)
# Set directory for QA output
if not outputDir.endswith("/"):
outputDir = outputDir + "/"
if not os.path.exists(outputDir):
os.makedirs(outputDir)
# Detect whether this is a Pt-hard production (only returns true if the histos have been scaled, with scalePtHardHistos.py)
isPtHard = False
for key in f.GetListOfKeys():
if "Scaled" in key.GetName():
isPtHard = True
print("Is Pt-hard: %s" % isPtHard)
# Configure the plotting macro
qaTaskBaseName = "AliAnalysisTaskPWGJEQA"
# Input names
tracksListName = "tracks"
generatorTrackThnName = "tracks_PhysPrim"
matchedTrackThnName = "tracks_Matched"
# Handles older QA task
if "EmcalTrackingQA" in qaTaskBaseName:
tracksListName = "fTracks"
generatorTrackThnName = "fParticlesPhysPrim"
matchedTrackThnName = "fParticlesMatched"
# Get the output list of AliAnalysisTaskPWGJEQA
qaTaskName = determineQATaskName(qaTaskBaseName, f, isPtHard)
print("Found qaTaskName \"{0}\"".format(qaTaskName))
qaList = f.Get(qaTaskName)
# If not a Pt-hard production (since it is done already), we need to set Sumw2 since we will scale and divide histograms
if not isPtHard:
print("Setting Sumw2 on histograms.")
for obj in qaList:
SetSumw2(obj)
# Get the lists for tracks, cells, clusters, full jets, charged jets, and event QA
trackTHnSparse = qaList.FindObject(tracksListName)
cellQAList = qaList.FindObject("emcalCells")
clusterQAList = qaList.FindObject("caloClusters")
chargedJetList = qaList.FindObject("Jet_AKTChargedR020_tracks_pT0150_pt_scheme")
fullJetList = qaList.FindObject("Jet_AKTFullR020_tracks_pT0150_caloClusters_E0300_pt_scheme")
nEventsRef = 0
# If reference file provided, get its analysis lists
qaListRef = ""
trackTHnSparseRef = ""
clusterQAListRef = ""
cellQAListRef = ""
chargedJetListRef = ""
fullJetListRef = ""
if referenceFile:
fRef = ROOT.TFile(referenceFile)
qaListRef = fRef.Get(qaTaskName)
if not isPtHard:
print("Setting Sumw2 on reference histograms.")
for obj in qaListRef:
SetSumw2(obj)
trackTHnSparseRef = qaListRef.FindObject(tracksListName)
trackTHnSparseRef.SetName("trackRef")
clusterQAListRef = qaListRef.FindObject("caloClusters")
clusterQAListRef.SetName("caloClustersRef")
cellQAListRef = qaListRef.FindObject("emcalCells")
cellQAListRef.SetName("emcalCellsRef")
chargedJetListRef = qaListRef.FindObject("Jet_AKTChargedR020_tracks_pT0150_pt_scheme")
chargedJetListRef.SetName("chargedJetListRef")
fullJetListRef = qaListRef.FindObject("Jet_AKTFullR020_tracks_pT0150_caloClusters_E0300_pt_scheme")
fullJetListRef.SetName("fullJetListRef")
histNEventRef = qaListRef.FindObject("fHistEventCount")
nEventsRef = histNEventRef.GetBinContent(1)
print("N events ref: %d" % nEventsRef)
# Get number of events
histNEvent = qaList.FindObject("fHistEventCount")
nEvents = histNEvent.GetBinContent(1)
print("N events: %d" % nEvents)
# Set config: ispp, isMC, isRun2, includePhos
if qaList.FindObject("fHistCentrality"):
ispp = False
else:
ispp = True
print("Is pp: %s" % ispp)
if qaList.FindObject(generatorTrackThnName):
isMC = True
else:
isMC = False
print("Is MC: %s" % isMC)
if clusterQAList:
clusterTHnSparse = clusterQAList.FindObject("clusterObservables")
if ispp:
hClusterType = clusterTHnSparse.Projection(3)
else:
hClusterType = clusterTHnSparse.Projection(4)
isRun2 = hClusterType.GetBinContent(2) > 0
includePhos = hClusterType.GetBinContent(3) > 0
print("Is Run 2: %s" % isRun2)
print("Include Phos: %s" % includePhos)
else:
isRun2 = False
includePhos = False
# Plotting options
ROOT.gStyle.SetOptStat(0)
ROOT.gStyle.SetOptTitle(0)
# Plot QA
print("Plotting QA...")
if trackTHnSparse:
plotTrackQA(ispp, isMC, trackTHnSparse, generatorTrackThnName, matchedTrackThnName, qaList, nEvents, outputDir, qaListRef, trackTHnSparseRef, nEventsRef, fileFormat)
if clusterQAList:
plotCaloQA(ispp, isRun2, includePhos, clusterQAList, cellQAList, nEvents, outputDir, clusterQAListRef, cellQAListRef, nEventsRef, fileFormat)
if chargedJetList:
plotChargedJetQA(ispp, isPtHard, chargedJetList, outputDir, chargedJetListRef, nEvents, nEventsRef, fileFormat)
if fullJetList:
plotFullJetQA(ispp, isPtHard, isRun2, includePhos, fullJetList, outputDir, fullJetListRef, nEvents, nEventsRef, fileFormat)
if qaList.FindObject("eventQA"):
plotEventQA(ispp, isRun2, includePhos, qaList, outputDir, fileFormat)
if isPtHard:
    plotPtHard(f, qaList, nEvents, qaListRef, nEventsRef, outputDir, fileFormat)
def determineQATaskName(qaTaskBaseName, f, isPtHard):
""" Determine the task name based on a wide variety of possible names.
Since the task name varies depending on what input objects are included,
we need to guess the name.
Args:
qaTaskBaseName (str): Base name of the QA task without any of the input object names
    f (TFile): ROOT file containing the QA task
    isPtHard (bool): True if this is a Pt-hard production whose histograms have already been scaled
"""
# Get all task names stored in the input file
possibleTaskNames = [key.GetName() for key in f.GetListOfKeys()]
# Possible input object names
tracksName = "tracks"
mcTracksName = "mcparticles"
cellsName = "emcalCells"
clustersName = "caloClusters"
# Compile into a list for easy processing
possibleNames = [tracksName, mcTracksName, cellsName, clustersName]
suffix = "histos"
if isPtHard:
suffix = "histosScaled"
for length in range(0, len(possibleNames)+1):
for elements in itertools.permutations(possibleNames, length):
joined = "_".join(elements)
testTaskName = qaTaskBaseName
if joined:
testTaskName += "_" + joined
# Also Try ESD
testTaskNameESD = testTaskName.replace("emcalCells", "EMCALCells").replace("caloClusters", "CaloClusters").replace("tracks", "Tracks").replace("mcparticles", "MCParticles")
for taskName in [testTaskName, testTaskNameESD]:
taskName = "{0}_{1}".format(taskName, suffix)
if taskName in possibleTaskNames:
return taskName
print("Could not determine QA task name! Please check your spelling!")
  sys.exit(1)
########################################################################################################
# Plot track histograms ##############################################################################
########################################################################################################
def plotTrackQA(ispp, isMC, trackTHnSparse, generatorTrackThnName, matchedTrackThnName, qaList, nEvents, outputDir, qaListRef, trackTHnSparseRef, nEventsRef, fileFormat):
# Create subdirectory for Tracks
outputDirTracks = outputDir + "Tracks/"
if not os.path.exists(outputDirTracks):
os.makedirs(outputDirTracks)
# trackTHnSparse consists of (Centrality, Pt, Eta, Phi, Track type, sigma(pT)/pT)
if isMC:
generatorTHnSparse = qaList.FindObject(generatorTrackThnName) # (Centrality, Pt, Eta, Phi, findable)
matchedTHnSparse = qaList.FindObject(matchedTrackThnName) # (Pt-gen, Eta-gen, Phi-gen, Pt-det, Eta-det, Phi-det, (pT-gen - pT-det)/pT-det, Track type)
#---------------------------------------------------------------------------------------------------
# phi distribution of hybrid tracks
#---------------------------------------------------------------------------------------------------
c1 = ROOT.TCanvas("c1","c1: Phi",600,450)
c1.cd()
# Project to (Phi, Track type)
if ispp:
hPhiTracktype = trackTHnSparse.Projection(2,3)
else:
hPhiTracktype = trackTHnSparse.Projection(3,4)
hPhiGlobal = hPhiTracktype.ProjectionY("PhiGlobal", 1, 1)
hPhiComplementary = hPhiTracktype.ProjectionY("PhiComplementary", 2, 2)
hPhiGlobal.SetLineColor(2)
hPhiGlobal.SetLineWidth(3)
hPhiGlobal.SetLineStyle(1)
hPhiComplementary.SetLineStyle(1)
hPhiComplementary.SetLineColor(4)
hPhiComplementary.SetLineWidth(3)
hPhiSum = hPhiGlobal.Clone()
hPhiSum.Add(hPhiComplementary)
hPhiSum.SetTitle("hPhiSum")
hPhiSum.SetName("hPhiSum")
hPhiSum.SetLineColor(1)
hPhiSum.SetMarkerColor(1)
hPhiSum.SetLineStyle(1)
hPhiGlobal.Scale(1./nEvents)
hPhiComplementary.Scale(1./nEvents)
hPhiSum.Scale(1./nEvents)
hPhiGlobal.SetTitle("#phi Distribution of Hybrid Tracks")
hPhiGlobal.GetYaxis().SetTitle("#frac{1}{N_{evts}} #frac{dN}{d#phi}")
hPhiGlobal.GetYaxis().SetTitleSize(0.06)
hPhiGlobal.GetXaxis().SetTitleSize(0.06)
hPhiGlobal.GetXaxis().SetTitleOffset(0.5)
hPhiGlobal.GetYaxis().SetRangeUser(0,15.)
if ispp:
hPhiGlobal.GetYaxis().SetRangeUser(0,0.2)
if isMC:
hPhiGlobal.GetYaxis().SetRangeUser(0,0.25)
ROOT.gPad.SetLeftMargin(0.15)
ROOT.gPad.SetRightMargin(0.05)
ROOT.gPad.SetBottomMargin(0.13)
ROOT.gPad.SetTopMargin(0.05)
hPhiGlobal.Draw("hist")
hPhiComplementary.Draw("hist same")
hPhiSum.Draw("hist same")
leg1 = ROOT.TLegend(0.17,0.7,0.83,0.93,"Hybrid tracks")
leg1.SetFillColor(10)
leg1.SetBorderSize(0)
leg1.SetFillStyle(0)
leg1.SetTextSize(0.04)
leg1.AddEntry(hPhiGlobal, "w/ SPD & ITSrefit", "l")
leg1.AddEntry(hPhiComplementary, "w/o SPD & w/ ITSrefit", "l")
leg1.AddEntry(hPhiSum, "sum", "l")
leg1.Draw("same")
textNEvents = ROOT.TLatex()
textNEvents.SetNDC()
c1.cd()
textNEvents.DrawLatex(0.52,0.68,"#it{N}_{events} = %d" % nEvents)
outputFilename = os.path.join(outputDirTracks, "hTrackPhi" + fileFormat)
c1.SaveAs(outputFilename)
# Also plot the TH2 phi vs. pT -- make sure that phi is uniform at all pT
# Project to (Pt, Phi)
if ispp:
hPhiPtSum = trackTHnSparse.Projection(2,0)
else:
hPhiPtSum = trackTHnSparse.Projection(3,1)
hPhiPtSum.Scale(1.,"width")
hPhiPtSum.GetZaxis().SetRangeUser(1e-7, 3e5)
outputFilename = os.path.join(outputDirTracks, "hTrackPhiPt" + fileFormat)
plotHist(hPhiPtSum, outputFilename, "colz", False, True)
#---------------------------------------------------------------------------------------------------
# pT distribution of hybrid tracks
#---------------------------------------------------------------------------------------------------
# Project to (Pt, Track type)
if ispp:
hPtTracktype = trackTHnSparse.Projection(0,3)
else:
hPtTracktype = trackTHnSparse.Projection(1,4)
hPtGlobal = hPtTracktype.ProjectionY("PtGlobal", 1, 1)
hPtComplementary = hPtTracktype.ProjectionY("PtComplementary", 2, 2)
hPtSum = hPtGlobal.Clone()
hPtSum.Add(hPtComplementary)
# If reference distribution supplied, project to (Pt, Track type)
hPtSumRef = ""
if trackTHnSparseRef and qaListRef:
if ispp:
hPtTracktypeRef = trackTHnSparseRef.Projection(0,3)
else:
hPtTracktypeRef = trackTHnSparseRef.Projection(1,4)
hPtGlobalRef = hPtTracktypeRef.ProjectionY("PtGlobalRef", 1, 1)
hPtComplementaryRef = hPtTracktypeRef.ProjectionY("PtComplementaryRef", 2, 2)
hPtSumRef = hPtGlobalRef.Clone()
hPtSumRef.Add(hPtComplementaryRef)
outputFilename = os.path.join(outputDirTracks, "hTrackPt" + fileFormat)
xRangeMax = 100
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Track selection"
legendRunLabel = "Hybrid tracks"
legendRefLabel = "Hybrid tracks, all runs"
ratioYAxisTitle = "Ratio: run / all runs"
plotSpectra(hPtSum, hPtSumRef, hPtGlobal, hPtComplementary, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "width", "w/ SPD & ITSrefit", "w/o SPD & w/ ITSrefit")
# Plot also ratio of central track spectrum to peripheral track spectrum
trackTHnSparse.GetAxis(0).SetRangeUser(0,10)
hPt010 = trackTHnSparse.Projection(1)
hPt010.SetName("hPt010")
trackTHnSparse.GetAxis(0).SetRangeUser(50,90)
hPt5090 = trackTHnSparse.Projection(1)
hPt5090.SetName("hPt5090")
outputFilename = os.path.join(outputDirTracks, "hTrackPtRatio" + fileFormat)
xRangeMax = 75
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dE_{T}} [GeV^{-1}]"
legendTitle = "Tracks"
h1legendLabel = "50-90 %"
h2legendLabel = "0-10 %"
ratioYAxisTitle = "Central / Peripheral"
yRatioMax = 12
plotSpectraCent(hPt5090, hPt010, "", nEvents, ispp, outputFilename, xRangeMax, yAxisTitle, ratioYAxisTitle, legendTitle, h1legendLabel, h2legendLabel, "", "width", yRatioMax)
trackTHnSparse.GetAxis(0).SetRangeUser(0,90)
#---------------------------------------------------------------------------------------------------
# pT resolution of hybrid tracks -- from track fitting
#---------------------------------------------------------------------------------------------------
c5 = ROOT.TCanvas("c5","c5: pT resolution",600,450)
c5.cd()
# Project to (Pt, Track type, pT resolution)
if ispp:
hPtTracktypePtSigma1Pt = trackTHnSparse.Projection(0,3,4)
else:
hPtTracktypePtSigma1Pt = trackTHnSparse.Projection(1,4,5)
# Project to global tracks and take profile, to get the pT resolution as a function of pT (Profile of pT vs pT*sigma(1/pT), i.e. pT vs sigma(pT)/pT)
# Note: No need to scale by bin width (despite pt-res having variable binning), since we take a profile (rather than e.g. plot a spectrum).
hPtTracktypePtSigma1Pt.GetYaxis().SetRange(1,1)
hPtPtSigma1PtGlobal = hPtTracktypePtSigma1Pt.Project3D("zx")
hPtPtSigma1PtGlobal.SetName("hPtPtSigma1PtGlobal")
profPtPtSigma1PtGlobal = hPtPtSigma1PtGlobal.ProfileX()
profPtPtSigma1PtGlobal.SetName("profPtPtSigma1PtGlobal")
profPtPtSigma1PtGlobal.SetLineColor(2)
profPtPtSigma1PtGlobal.SetLineWidth(3)
profPtPtSigma1PtGlobal.SetMarkerStyle(21)
profPtPtSigma1PtGlobal.SetMarkerColor(2)
profPtPtSigma1PtGlobal.SetMaximum(0.3)
#profPtPtSigma1PtGlobal.GetYaxis().SetTitle("#it{p}_{T} #times #sigma(1/#it{p}_{T})")
profPtPtSigma1PtGlobal.GetYaxis().SetTitle(" #sigma(#it{p}_{T}) / #it{p}_{T}")
profPtPtSigma1PtGlobal.GetXaxis().SetTitleSize(0.06)
profPtPtSigma1PtGlobal.GetYaxis().SetTitleSize(0.06)
profPtPtSigma1PtGlobal.GetYaxis().SetRangeUser(0,0.15)
ROOT.gPad.SetLeftMargin(0.15)
ROOT.gPad.SetRightMargin(0.05)
ROOT.gPad.SetBottomMargin(0.14)
ROOT.gPad.SetTopMargin(0.05)
#profPtPtSigma1PtGlobal.GetXaxis().SetRangeUser(0, 100)
profPtPtSigma1PtGlobal.Draw()
# Project to complementary tracks and take profile
hPtTracktypePtSigma1Pt.GetYaxis().SetRange(2,2)
hPtPtSigma1PtComplementary = hPtTracktypePtSigma1Pt.Project3D("zx")
hPtPtSigma1PtComplementary.SetName("hPtPtSigma1PtComplementary")
profPtPtSigma1PtComplementary = hPtPtSigma1PtComplementary.ProfileX()
profPtPtSigma1PtComplementary.SetName("profPtPtSigma1PtComplementary")
profPtPtSigma1PtComplementary.SetLineColor(4)
profPtPtSigma1PtComplementary.SetLineWidth(3)
profPtPtSigma1PtComplementary.SetMarkerStyle(24)
profPtPtSigma1PtComplementary.SetMarkerColor(4)
profPtPtSigma1PtComplementary.Draw("same")
leg3 = ROOT.TLegend(0.21,0.6,0.88,0.88,"Hybrid tracks")
leg3.SetFillColor(10)
leg3.SetBorderSize(0)
leg3.SetFillStyle(0)
leg3.SetTextSize(0.04)
leg3.AddEntry(profPtPtSigma1PtGlobal, "w/ SPD & ITSrefit", "lp")
leg3.AddEntry(profPtPtSigma1PtComplementary, "w/o SPD & w/ ITSrefit", "lp")
leg3.Draw("same")
outputFilename = os.path.join(outputDirTracks, "profTrackPtResolution" + fileFormat)
c5.SaveAs(outputFilename)
#---------------------------------------------------------------------------------------------------
# pT resolution of hybrid tracks -- from MC
#---------------------------------------------------------------------------------------------------
# (the error bars on this histogram, which denote the resolution, are not working at present...)
if isMC:
# Plot distribution (pT-gen - pT-det)/pT-det
c25 = ROOT.TCanvas("c25","c25: pT Res Dist MC",600,450)
c25.cd()
c25.SetLogy()
if ispp:
hPtRes = matchedTHnSparse.Projection(6)
else:
hPtRes = matchedTHnSparse.Projection(7)
hPtRes.GetYaxis().SetTitle("counts")
hPtRes.Draw("hist E")
outputFilename = os.path.join(outputDirTracks, "hTrackPtResolutionMC" + fileFormat)
c25.SaveAs(outputFilename)
# Plot mean of the distribution as a function of pT, with error bars as the standard deviation of the distribution
c24 = ROOT.TCanvas("c24","c24: pT Resolution MC",600,450)
c24.cd()
# Project to (Pt, pT resolution, Track type)
if ispp:
hPtTracktypePtRes = matchedTHnSparse.Projection(3,7,6)
else:
hPtTracktypePtRes = matchedTHnSparse.Projection(4,8,7)
# Project to global tracks and take profile, to get the pT resolution as a function of pT
hPtTracktypePtRes.GetYaxis().SetRange(1,1)
hPtPtResGlobal = hPtTracktypePtRes.Project3D("zx")
hPtPtResGlobal.SetName("hPtPtResGlobal")
profPtPtResGlobal = hPtPtResGlobal.ProfileX("prof",1,-1,"s") # set errors to standard deviation (rather than standard error on mean)
profPtPtResGlobal.SetName("profPtPtResGlobal")
profPtPtResGlobal.SetLineColor(2)
profPtPtResGlobal.SetLineWidth(3)
profPtPtResGlobal.SetMarkerStyle(21)
profPtPtResGlobal.SetMarkerColor(2)
profPtPtResGlobal.SetMaximum(0.3)
profPtPtResGlobal.GetYaxis().SetTitle("(#it{p}_{T}^{gen} - #it{p}_{T}^{det}) / #it{p}_{T}^{det}")
profPtPtResGlobal.GetXaxis().SetTitleSize(0.06)
profPtPtResGlobal.GetYaxis().SetTitleSize(0.06)
ROOT.gPad.SetLeftMargin(0.15)
ROOT.gPad.SetRightMargin(0.05)
ROOT.gPad.SetBottomMargin(0.14)
ROOT.gPad.SetTopMargin(0.05)
profPtPtResGlobal.GetYaxis().SetRangeUser(-0.5, 1)
profPtPtResGlobal.GetXaxis().SetRangeUser(0,100)
profPtPtResGlobal.Draw("E")
# Project to complementary tracks and take profile
hPtTracktypePtRes.GetYaxis().SetRange(2,2)
hPtPtResComplementary = hPtTracktypePtRes.Project3D("zx")
hPtPtResComplementary.SetName("hPtPtResComplementary")
profPtPtResComplementary = hPtPtResComplementary.ProfileX()
profPtPtResComplementary.SetName("profPtPtResComplementary")
profPtPtResComplementary.SetLineColor(4)
profPtPtResComplementary.SetLineWidth(3)
profPtPtResComplementary.SetMarkerStyle(24)
profPtPtResComplementary.SetMarkerColor(4)
profPtPtResComplementary.Draw("same E")
leg3 = ROOT.TLegend(0.21,0.6,0.88,0.88,"Hybrid tracks")
leg3.SetFillColor(10)
leg3.SetBorderSize(0)
leg3.SetFillStyle(0)
leg3.SetTextSize(0.04)
leg3.AddEntry(profPtPtResGlobal, "w/ SPD & ITSrefit", "lp")
leg3.AddEntry(profPtPtResComplementary, "w/o SPD & w/ ITSrefit", "lp")
leg3.Draw("hist same")
textPtRes = ROOT.TLatex()
textPtRes.SetNDC()
textPtRes.DrawLatex(0.45,0.9,"Data points: mean value")
textPtRes.DrawLatex(0.45, 0.8,"Error bars: stdev (resolution)")
outputFilename = os.path.join(outputDirTracks, "profTrackPtResolutionMC" + fileFormat)
c24.SaveAs(outputFilename)
#---------------------------------------------------------------------------------------------------
# Tracking efficiency
#---------------------------------------------------------------------------------------------------
if isMC:
# Plot ratio of pT-gen-matched to pT-gen
c26 = ROOT.TCanvas("c26","c26: TrackingEfficiency",600,450)
c26.cd()
for dim in ["1D", "2D"]:
if dim == "1D":
# 1D case
if ispp:
hPtGenMatched = matchedTHnSparse.Projection(0)
hPtGen1D = generatorTHnSparse.Projection(0, 3)
else:
hPtGenMatched = matchedTHnSparse.Projection(1)
hPtGen1D = generatorTHnSparse.Projection(1, 4)
hPtGenFindable = hPtGen1D.ProjectionY("trackEff", 2, 2)
elif dim == "2D":
# 2D case
if ispp:
hPtGenMatched = matchedTHnSparse.Projection(1, 0)
hPtGen2D = generatorTHnSparse.Projection(0, 1, 3)
else:
hPtGenMatched = matchedTHnSparse.Projection(2, 1)
hPtGen2D = generatorTHnSparse.Projection(1, 2, 4)
hPtGen2D.GetZaxis().SetRange(2, 2)
hPtGenFindable = hPtGen2D.Project3D("yx")
hTrackingEfficiency = hPtGenMatched.Clone()
hTrackingEfficiency.Divide(hPtGenMatched, hPtGenFindable, 1., 1., "B")
hTrackingEfficiency.SetMarkerStyle(21)
hTrackingEfficiency.SetMarkerColor(2)
if hTrackingEfficiency.InheritsFrom(ROOT.TH2.Class()):
hTrackingEfficiency.Draw("colz")
else:
hTrackingEfficiency.GetYaxis().SetTitle("Tracking Efficiency")
hTrackingEfficiency.GetYaxis().SetRangeUser(0.6,1)
hTrackingEfficiency.GetXaxis().SetRangeUser(0,50)
hTrackingEfficiency.Draw("P")
outputFilename = os.path.join(outputDirTracks, "hTrackingEfficiency{0}".format(dim) + fileFormat)
c26.SaveAs(outputFilename)
#---------------------------------------------------------------------------------------------------
# eta distribution of hybrid tracks
#---------------------------------------------------------------------------------------------------
c6 = ROOT.TCanvas("c6","c6: Eta",600,450)
c6.cd()
# Project to (Eta, Track type)
if ispp:
hEtaTracktype = trackTHnSparse.Projection(1,3)
else:
hEtaTracktype = trackTHnSparse.Projection(2,4)
hEtaGlobal = hEtaTracktype.ProjectionY("EtaGlobal", 1, 1)
hEtaComplementary = hEtaTracktype.ProjectionY("EtaComplementary", 2, 2)
hEtaGlobal.SetLineColor(2)
hEtaGlobal.SetLineWidth(3)
hEtaGlobal.SetLineStyle(1)
hEtaComplementary.SetLineStyle(1)
hEtaComplementary.SetLineColor(4)
hEtaComplementary.SetLineWidth(3)
hEtaSum = hEtaGlobal.Clone()
hEtaSum.Add(hEtaComplementary)
hEtaSum.SetTitle("hEtaSum")
hEtaSum.SetName("hEtaSum")
hEtaSum.SetLineColor(1)
hEtaSum.SetMarkerColor(1)
hEtaSum.SetLineStyle(1)
hEtaGlobal.Scale(1./nEvents)
hEtaComplementary.Scale(1./nEvents)
hEtaSum.Scale(1./nEvents)
hEtaGlobal.SetTitle("#eta Distribution of Hybrid Tracks")
hEtaGlobal.GetYaxis().SetTitle("#frac{1}{N_{evts}} #frac{dN}{d#eta}")
hEtaGlobal.GetYaxis().SetTitleSize(0.06)
hEtaGlobal.GetXaxis().SetTitleSize(0.06)
hEtaGlobal.GetXaxis().SetTitleOffset(0.7)
hEtaGlobal.GetYaxis().SetRangeUser(0,20.)
if ispp:
hEtaGlobal.GetYaxis().SetRangeUser(0,0.2)
if isMC:
hEtaGlobal.GetYaxis().SetRangeUser(0,0.3)
ROOT.gPad.SetLeftMargin(0.15)
ROOT.gPad.SetRightMargin(0.05)
ROOT.gPad.SetBottomMargin(0.13)
ROOT.gPad.SetTopMargin(0.05)
hEtaGlobal.Draw("hist")
hEtaComplementary.Draw("hist same")
hEtaSum.Draw("hist same")
leg1 = ROOT.TLegend(0.17,0.7,0.83,0.93,"Hybrid tracks")
leg1.SetFillColor(10)
leg1.SetBorderSize(0)
leg1.SetFillStyle(0)
leg1.SetTextSize(0.04)
leg1.AddEntry(hEtaGlobal, "w/ SPD & ITSrefit", "l")
leg1.AddEntry(hEtaComplementary, "w/o SPD & w/ ITSrefit", "l")
leg1.AddEntry(hEtaSum, "sum", "l")
leg1.Draw("same")
textNEvents = ROOT.TLatex()
textNEvents.SetNDC()
textNEvents.DrawLatex(0.65,0.87,"#it{N}_{events} = %d" % nEvents)
outputFilename = os.path.join(outputDirTracks, "hTrackEta" + fileFormat)
c6.SaveAs(outputFilename)
# Also plot the TH2 eta vs. pT -- make sure that eta is uniform at all pT
# Project to (Pt, Eta)
if ispp:
hEtaPtSum = trackTHnSparse.Projection(1,0)
else:
hEtaPtSum = trackTHnSparse.Projection(2,1)
hEtaPtSum.Scale(1.,"width")
hEtaPtSum.GetZaxis().SetRangeUser(1e-7, 3e5)
outputFilename = os.path.join(outputDirTracks, "hTrackEtaPt" + fileFormat)
plotHist(hEtaPtSum, outputFilename, "colz", False, True)
#---------------------------------------------------------------------------------------------------
# eta-phi distribution of hybrid tracks
#---------------------------------------------------------------------------------------------------
# Project to (Eta, Phi)
if ispp:
hEtaPhiSum = trackTHnSparse.Projection(1,2)
else:
hEtaPhiSum = trackTHnSparse.Projection(2,3)
hEtaPhiSum.SetName("hEtaPhiSum")
outputFilename = os.path.join(outputDirTracks, "hTrackEtaPhi" + fileFormat)
plotHist(hEtaPhiSum, outputFilename, "colz")
# And plot the eta-phi distribution for high-pT tracks
ROOT.gStyle.SetOptTitle(1)
if ispp:
trackTHnSparse.GetAxis(0).SetRangeUser(10,150)
hTrackEtaPhiHighPt = trackTHnSparse.Projection(1,2)
else:
trackTHnSparse.GetAxis(1).SetRangeUser(10,150)
hTrackEtaPhiHighPt = trackTHnSparse.Projection(2,3)
hTrackEtaPhiHighPt.SetTitle("Track Occupancy, p_{T} > 10 GeV")
outputFilename = os.path.join(outputDirTracks, "hTrackEtaPhiHighPt" + fileFormat)
plotHist(hTrackEtaPhiHighPt, outputFilename, "colz")
if ispp:
trackTHnSparse.GetAxis(0).SetRangeUser(0,150)
else:
trackTHnSparse.GetAxis(1).SetRangeUser(0,150)
ROOT.gStyle.SetOptTitle(0)
########################################################################################################
# Plot cluster histograms ##############################################################################
########################################################################################################
def plotCaloQA(ispp, isRun2, includePhos, clusterQAList, cellQAList, nEvents, outputDir, clusterQAListRef, cellQAListRef, nEventsRef, fileFormat):
# Create subdirectory for Cells, Clusters
outputDirCells = outputDir + "Cells/"
if not os.path.exists(outputDirCells):
os.makedirs(outputDirCells)
outputDirClusters = outputDir + "Clusters/"
if not os.path.exists(outputDirClusters):
os.makedirs(outputDirClusters)
clusterTHnSparse = clusterQAList.FindObject("clusterObservables")
# (Centrality, E_clus, eta, phi, clusterType)
if clusterQAListRef:
clusterTHnSparseRef = clusterQAListRef.FindObject("clusterObservables")
# Plot Eta-Phi of ALL CLUSTERS -----------------------------------------------------
# Project to (Eta, Phi)
if ispp:
hClusPhiEta = clusterTHnSparse.Projection(2,1)
else:
hClusPhiEta = clusterTHnSparse.Projection(3,2)
hClusPhiEta.SetName("clusterEMCalObservables_proj_eta_phi")
outputFilename = os.path.join(outputDirClusters, "hClusPhiEta" + fileFormat)
hClusPhiEta.GetXaxis().SetRangeUser(-1.5,0.8)#ELIANE -0.8,0.8
hClusPhiEta.GetYaxis().SetRangeUser(1.2,5.8)
plotHist(hClusPhiEta, outputFilename, "colz")
# Plot ratio to reference run, if supplied
if clusterQAListRef:
if ispp:
hClusPhiEtaRef = clusterTHnSparseRef.Projection(2,1)
else:
hClusPhiEtaRef = clusterTHnSparseRef.Projection(3,2)
hClusPhiEta.Scale(1./nEvents)
hClusPhiEtaRef.Scale(1./nEventsRef)
hClusPhiEtaRatio = hClusPhiEta.Clone()
hClusPhiEtaRatio.Divide(hClusPhiEtaRef)
ROOT.gStyle.SetOptTitle(1)
hClusPhiEtaRatio.SetTitle("Cluster Occupancy (per event): Current Run / All Runs")
outputFilename = os.path.join(outputDirClusters, "hClusPhiEtaRatio" + fileFormat)
plotHist(hClusPhiEtaRatio, outputFilename, "colz", False, True)
ROOT.gStyle.SetOptTitle(0)
# Plot EMCAL CLUSTERS --------------------------------------------------------------
# Project to (Energy, Eta, Phi, EMCal Cluster type)
if ispp:
clusterTHnSparse.GetAxis(3).SetRange(1,1)
hClusEMCalEta = clusterTHnSparse.Projection(1)
hClusEMCalPhi = clusterTHnSparse.Projection(2)
hClusEMCalEnergy = clusterTHnSparse.Projection(0)
else:
clusterTHnSparse.GetAxis(4).SetRange(1,1)
hClusEMCalEta = clusterTHnSparse.Projection(2)
hClusEMCalPhi = clusterTHnSparse.Projection(3)
hClusEMCalEnergy = clusterTHnSparse.Projection(1)
hClusEMCalEta.SetName("ClusEtaEmcal")
hClusEMCalPhi.SetName("ClusPhiEmcal")
hClusEMCalEnergy.SetName("ClusEnergyEmcal")
# Plot phi distribution
outputFilename = os.path.join(outputDirClusters, "hClusEMCalPhi" + fileFormat)
plotHist(hClusEMCalPhi, outputFilename, "hist E")
# Plot eta distribution
outputFilename = os.path.join(outputDirClusters, "hClusEMCalEta" + fileFormat)
plotHist(hClusEMCalEta, outputFilename, "hist E")
# Plot energy distribution
hClusEMCalEnergy.SetName("hClusEMCalEnergy")
hClusEMCalEnergyRef = ""
if clusterQAListRef:
if ispp:
clusterTHnSparseRef.GetAxis(3).SetRange(1,1)
hClusEMCalEnergyRef = clusterTHnSparseRef.Projection(0)
else:
clusterTHnSparseRef.GetAxis(4).SetRange(1,1)
hClusEMCalEnergyRef = clusterTHnSparseRef.Projection(1)
hClusEMCalEnergyRef.SetName("clusterEMCalObservablesRef_proj_energy")
outputFilename = os.path.join(outputDirClusters, "hClusEMCalEnergy" + fileFormat)
xRangeMax = 100
if ispp:
xRangeMax = 80
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dE_{T}} [GeV^{-1}]"
legendTitle = "EMCal Clusters"
legendRunLabel = "Current run"
legendRefLabel = "All runs"
ratioYAxisTitle = "Ratio: run / all runs"
plotSpectra(hClusEMCalEnergy, hClusEMCalEnergyRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "width")
# Plot DCAL CLUSTERS (if isRun2) ----------------------------------------------------
if isRun2:
# Project to (Energy, Eta, Phi, DCal Cluster type)
if ispp:
clusterTHnSparse.GetAxis(3).SetRange(2,2)
hClusDCalEta = clusterTHnSparse.Projection(1)
hClusDCalPhi = clusterTHnSparse.Projection(2)
hClusDCalEnergy = clusterTHnSparse.Projection(0)
else:
clusterTHnSparse.GetAxis(4).SetRange(2,2)
hClusDCalEta = clusterTHnSparse.Projection(2)
hClusDCalPhi = clusterTHnSparse.Projection(3)
hClusDCalEnergy = clusterTHnSparse.Projection(1)
hClusDCalEta.SetName("ClusEtaDcal")
hClusDCalPhi.SetName("ClusPhiDcal")
hClusDCalEnergy.SetName("ClusEnergyDcal")
# Plot phi distribution
outputFilename = os.path.join(outputDirClusters, "hClusDCalPhi" + fileFormat)
plotHist(hClusDCalPhi, outputFilename, "hist E")
# Plot eta distribution
outputFilename = os.path.join(outputDirClusters, "hClusDCalEta" + fileFormat)
plotHist(hClusDCalEta, outputFilename, "hist E")
# Plot energy distribution
hClusDCalEnergy.SetName("hClusDCalEnergy")
hClusDCalEnergyRef = ""
if clusterQAListRef:
if ispp:
clusterTHnSparseRef.GetAxis(3).SetRange(2,2)
hClusDCalEnergyRef = clusterTHnSparseRef.Projection(0)
else:
clusterTHnSparseRef.GetAxis(4).SetRange(2,2)
hClusDCalEnergyRef = clusterTHnSparseRef.Projection(1)
hClusDCalEnergyRef.SetName("clusterDCalObservablesRef_proj_energy")
outputFilename = os.path.join(outputDirClusters, "hClusDCalEnergy" + fileFormat)
xRangeMax = 100
if ispp:
xRangeMax = 50
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dE_{T}} [GeV^{-1}]"
legendTitle = "DCal Clusters"
legendRunLabel = "Current run"
legendRefLabel = "All runs"
ratioYAxisTitle = "Ratio: run / all runs"
plotSpectra(hClusDCalEnergy, hClusDCalEnergyRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "width")
# Plot PHOS CLUSTERS (if includePhos) -----------------------------------------------
if includePhos:
# Project to (Energy, Eta, Phi, PHOS Cluster type)
if ispp:
clusterTHnSparse.GetAxis(3).SetRange(3,3)
hClusPHOSEta = clusterTHnSparse.Projection(1)
hClusPHOSPhi = clusterTHnSparse.Projection(2)
hClusPHOSEnergy = clusterTHnSparse.Projection(0)
else:
clusterTHnSparse.GetAxis(4).SetRange(3,3)
hClusPHOSEta = clusterTHnSparse.Projection(2)
hClusPHOSPhi = clusterTHnSparse.Projection(3)
hClusPHOSEnergy = clusterTHnSparse.Projection(1)
hClusPHOSEta.SetName("ClusEtaPHOS")
hClusPHOSPhi.SetName("ClusPhiPHOS")
hClusPHOSEnergy.SetName("ClusEnergyPHOS")
# Plot phi distribution
outputFilename = os.path.join(outputDirClusters, "hClusPHOSPhi" + fileFormat)
plotHist(hClusPHOSPhi, outputFilename, "hist E")
# Plot eta distribution
outputFilename = os.path.join(outputDirClusters, "hClusPHOSEta" + fileFormat)
plotHist(hClusPHOSEta, outputFilename, "hist E")
# Plot energy distribution
hClusPHOSEnergy.SetName("hClusPHOSEnergy")
hClusPHOSEnergyRef = ""
if clusterQAListRef:
if ispp:
clusterTHnSparseRef.GetAxis(3).SetRange(3,3)
hClusPHOSEnergyRef = clusterTHnSparseRef.Projection(0)
else:
clusterTHnSparseRef.GetAxis(4).SetRange(3,3)
hClusPHOSEnergyRef = clusterTHnSparseRef.Projection(1)
hClusPHOSEnergyRef.SetName("clusterPHOSObservablesRef_proj_energy")
outputFilename = os.path.join(outputDirClusters, "hClusPHOSEnergy" + fileFormat)
xRangeMax = 100
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dE_{T}} [GeV^{-1}]"
legendTitle = "PHOS Clusters"
legendRunLabel = "Current run"
legendRefLabel = "All runs"
ratioYAxisTitle = "Ratio: run / all runs"
plotSpectra(hClusPHOSEnergy, hClusPHOSEnergyRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "width")
# Plot the ratio of cluster spectra in EMCal/DCal/PHOS
if isRun2 and includePhos:
outputFilename = os.path.join(outputDirClusters, "hClusEnergyRatio" + fileFormat)
xRangeMax = 250
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dE_{T}} [GeV^{-1}]"
legendTitle = "Calo clusters"
legendRunLabel = "EMCal clusters"
legendRefLabel = "PHOS clusters"
ratioYAxisTitle = "Ratio to PHOS"
h2LegendLabel = "DCal clusters"
# Note: the spectra already have been scaled by nEvents, bin width
plotSpectra(hClusEMCalEnergy, hClusPHOSEnergy, hClusDCalEnergy, "", 1., 1., ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "", h2LegendLabel)
# Plot also the ratio of DCal to EMCal
if isRun2:
outputFilename = os.path.join(outputDirClusters, "hClusEnergyRatioEMC" + fileFormat)
xRangeMax = 250
if ispp:
xRangeMax = 80
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dE_{T}} [GeV^{-1}]"
legendTitle = "Calo clusters"
legendRunLabel = "DCal clusters"
legendRefLabel = "EMCal clusters"
ratioYAxisTitle = "DCal / EMCal"
# Note: the spectra already have been scaled by nEvents, bin width
plotSpectra(hClusDCalEnergy, hClusEMCalEnergy, "", "", 1., 1., ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename)
# Plot some PHOS QA plots
if includePhos:
# Plot also PHOS SM spectra
SMlist = clusterQAList.FindObject("BySM")
c2 = ROOT.TCanvas("c2","c2: hist",600,450)
c2.cd()
c2.SetLogy()
leg = ROOT.TLegend(0.3,0.6,0.88,0.83,"PHOS SM")
leg.SetFillColor(10)
leg.SetBorderSize(0)
leg.SetFillStyle(0)
leg.SetTextSize(0.04)
for sm in range(1,5):
hSM = SMlist.FindObject("hPhosClusEnergy_SM" + str(sm))
hSM.SetLineColor(sm)
hSM.SetLineStyle(1)
hSM.GetXaxis().SetRangeUser(0,100)
      if sm == 1:
hSM.Draw("hist E")
else:
hSM.Draw("hist E same")
leg.AddEntry(hSM, "SM " + str(sm), "l")
leg.Draw("same")
outputFilename = os.path.join(outputDirClusters, "hClusPHOSEnergyBySM" + fileFormat)
c2.SaveAs(outputFilename)
c2.Close()
# Plot some PHOS QA plots
if includePhos:
hPhosNCellsVsEnergy = clusterQAList.FindObject("hPhosNCellsVsEnergy")
outputFilename = os.path.join(outputDirClusters, "hClusPHOSNCellsVsEnergy" + fileFormat)
plotHist(hPhosNCellsVsEnergy, outputFilename, "colz", True, True)
hPhosM02VsEnergy = clusterQAList.FindObject("hPhosM02VsEnergy")
outputFilename = os.path.join(outputDirClusters, "hClusPHOSM02VsEnergy" + fileFormat)
plotHist(hPhosM02VsEnergy, outputFilename, "colz", True, True)
hPhosCellIdVsEnergy = clusterQAList.FindObject("hPhosCellIdVsEnergy")
outputFilename = os.path.join(outputDirClusters, "hClusPHOSCellIdVsEnergy" + fileFormat)
plotHist(hPhosCellIdVsEnergy, outputFilename, "colz", True, True)
# Plot EMCAL CELLS --------------------------------------------------------------
hCellEnergy = cellQAList.FindObject("fHistCellEnergy")
outputFilename = os.path.join(outputDirCells, "hCellEnergy" + fileFormat)
plotHist(hCellEnergy, outputFilename, "hist E", True)
profCellAbsIdEnergy = cellQAList.FindObject("fProfCellAbsIdEnergy")
outputFilename = os.path.join(outputDirCells, "profCellAbsIdEnergy" + fileFormat)
plotHist(profCellAbsIdEnergy, outputFilename)
hCellTime = cellQAList.FindObject("fHistCellTime")
outputFilename = os.path.join(outputDirCells, "hCellTime" + fileFormat)
plotHist(hCellTime, outputFilename, "hist E")
profCellAbsIdTime = cellQAList.FindObject("fProfCellAbsIdTime")
outputFilename = os.path.join(outputDirCells, "profCellAbsIdTime" + fileFormat)
profCellAbsIdTime.GetYaxis().SetRangeUser(-0.2e-6,0.2e-6)
plotHist(profCellAbsIdTime, outputFilename)
# Plot the CELL energy spectrum with and without timing cuts
hCellEnergyTall = cellQAList.FindObject("fHistCellEvsTime")
hCellEnergyTall = hCellEnergyTall.ProjectionY()
hCellEnergyTall.SetName("cell_Allproj_energy")
hCellEnergyTall.GetXaxis().SetTitle("E_{Cell} [GeV]")
outputFilename = os.path.join(outputDirCells, "hCellEnergyTall" + fileFormat)
plotHist(hCellEnergyTall, outputFilename, "hist E", True)
hCellEnergyTsel = cellQAList.FindObject("fHistCellEvsTime")
  hCellEnergyTsel.GetXaxis().SetRangeUser(-50e-9,50e-9) # recommended time cut
hCellEnergyTsel = hCellEnergyTsel.ProjectionY()
hCellEnergyTsel.SetName("cell_Selproj_energy")
hCellEnergyTsel.GetXaxis().SetTitle("E_{Cell} |t_{cell}|<50ns [GeV]")
outputFilename = os.path.join(outputDirCells, "hCellEnergyTsel" + fileFormat)
plotHist(hCellEnergyTsel, outputFilename, "hist E", True)
  # reference histograms
if cellQAListRef:
hCellEnergyTallRef = cellQAListRef.FindObject("fHistCellEvsTime")
hCellEnergyTallRef = hCellEnergyTallRef.ProjectionY()
hCellEnergyTallRef.SetName("cellRef_Allproj_energy")
hCellEnergyTselRef = cellQAListRef.FindObject("fHistCellEvsTime")
hCellEnergyTselRef.GetXaxis().SetRangeUser(-50e-9,50e-9)
hCellEnergyTselRef = hCellEnergyTselRef.ProjectionY()
hCellEnergyTselRef.SetName("cellRef_Selproj_energy")
xRangeMax = 100
if ispp:
xRangeMax = 80
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dE_{Cell}} [GeV^{-1}]"
legendTitle = "EMCal Cells"
legendRunLabel = "Current run"
legendRefLabel = "All runs"
ratioYAxisTitle = "Ratio: run / all runs"
outputFilename = os.path.join(outputDirCells, "hCellEnergyTallRatio" + fileFormat)
plotSpectra(hCellEnergyTall, hCellEnergyTallRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "width")
outputFilename = os.path.join(outputDirCells, "hCellEnergyTselRatio" + fileFormat)
plotSpectra(hCellEnergyTsel, hCellEnergyTselRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "width")
########################################################################################################
# Plot charged jet histograms #######################################################################
########################################################################################################
def plotChargedJetQA(ispp, isPtHard, chargedJetList, outputDir, chargedJetListRef, nEvents, nEventsRef, fileFormat):
# Create subdirectory for Jets
outputDirJets = outputDir + "Jets/"
if not os.path.exists(outputDirJets):
os.makedirs(outputDirJets)
chargedJetTHnSparse = chargedJetList.FindObject("fHistJetObservables")
# (Centrality, eta, phi, pT, pTcorr, pT leading particle)
if chargedJetListRef:
chargedJetTHnSparseRef = chargedJetListRef.FindObject("fHistJetObservables")
ROOT.gStyle.SetOptTitle(1)
if not ispp:
# Plot charged jet rho vs. centrality
hChargedJetRhoVsCent = chargedJetList.FindObject("fHistRhoVsCent")
hChargedJetRhoVsCent.SetTitle("Rho vs. Centrality, Charged Jets")
outputFilename = os.path.join(outputDirJets, "hChargedJetRhoVsCent" + fileFormat)
plotHist(hChargedJetRhoVsCent, outputFilename, "colz", False, True)
# Plot charged jet eta-phi, for jet pT > threshold
# there are ceil(250/3)=84 jet pt bins
# (5,84) means (~12 GeV < jet pT < 250 GeV)
# (11,84) means (~30 GeV < jet pT < 250 GeV)
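  # (worked example: with 3 GeV-wide bins, bin i has a lower edge of (i-1)*3 GeV,
  # which gives the 12 GeV and 30 GeV thresholds quoted above)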
minJetPtBin = 5
maxJetPtBin = 84
if ispp:
chargedJetTHnSparse.GetAxis(2).SetRange(minJetPtBin, maxJetPtBin)
else:
minJetPtBin = 11
chargedJetTHnSparse.GetAxis(3).SetRange(minJetPtBin, maxJetPtBin)
if ispp:
hChargedJetEtaPhi = chargedJetTHnSparse.Projection(1,0)
else:
hChargedJetEtaPhi = chargedJetTHnSparse.Projection(2,1)
hChargedJetEtaPhi.SetName("ChargedJetEtaPhi")
hChargedJetEtaPhi.SetTitle("Charged Jet Occupancy, p_{T,jet} > " + str((minJetPtBin-1)*3) + " GeV")
hChargedJetEtaPhi.GetXaxis().SetRangeUser(-0.8,0.8)
outputFilename = os.path.join(outputDirJets, "hChargedJetEtaPhi" + fileFormat)
plotHist(hChargedJetEtaPhi, outputFilename, "colz", False)
# Plot ratio to reference run, if supplied
if chargedJetListRef:
if ispp:
chargedJetTHnSparseRef.GetAxis(2).SetRange(minJetPtBin, maxJetPtBin)
hChargedJetEtaPhiRef = chargedJetTHnSparseRef.Projection(1,0)
else:
chargedJetTHnSparseRef.GetAxis(3).SetRange(minJetPtBin, maxJetPtBin)
hChargedJetEtaPhiRef = chargedJetTHnSparseRef.Projection(2,1)
hChargedJetEtaPhiRef.SetName("ChargedJetEtaPhiRef")
hChargedJetEtaPhi.Scale(1./nEvents)
hChargedJetEtaPhiRef.Scale(1./nEventsRef)
hChargedJetEtaPhiRatio = hChargedJetEtaPhi.Clone()
hChargedJetEtaPhiRatio.Divide(hChargedJetEtaPhiRef)
hChargedJetEtaPhiRatio.SetTitle("Charged Jet p_{T,jet} > " + str((minJetPtBin-1)*3) + " GeV Occupancy (per event): Current Run / All Runs")
outputFilename = os.path.join(outputDirJets, "hChargedJetEtaPhiRatio" + fileFormat)
plotHist(hChargedJetEtaPhiRatio, outputFilename, "colz", False, True)
if ispp:
chargedJetTHnSparseRef.GetAxis(2).SetRange(1, maxJetPtBin)
else:
chargedJetTHnSparseRef.GetAxis(3).SetRange(1, maxJetPtBin)
if ispp:
chargedJetTHnSparse.GetAxis(2).SetRange(1, maxJetPtBin)
else:
chargedJetTHnSparse.GetAxis(3).SetRange(1, maxJetPtBin)
# Plot charged jet pT
if ispp:
hChargedJetPt = chargedJetTHnSparse.Projection(2)
else:
hChargedJetPt = chargedJetTHnSparse.Projection(3)
hChargedJetPt.SetName("hChargedJetPt")
hChargedJetPtRef = ""
if chargedJetListRef:
if ispp:
hChargedJetPtRef = chargedJetTHnSparseRef.Projection(2)
else:
hChargedJetPtRef = chargedJetTHnSparseRef.Projection(3)
hChargedJetPtRef.SetName("hChargedJetPt")
outputFilename = os.path.join(outputDirJets, "hChargedJetPt" + fileFormat)
xRangeMax = 250
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Charged jets"
legendRunLabel = "Current run"
legendRefLabel = "All runs"
ratioYAxisTitle = "Ratio: run / all runs"
plotSpectra(hChargedJetPt, hChargedJetPtRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename)
# Plot charged jet pT leading particle vs. jet pT
if ispp:
hChargedJetPtLeadjetPt = chargedJetTHnSparse.Projection(3,2)
else:
hChargedJetPtLeadjetPt = chargedJetTHnSparse.Projection(5,3)
hChargedJetPtLeadjetPt.SetName("fHistChJetObservables_proj_pt_leadpt")
hChargedJetPtLeadjetPt.SetTitle("Leading pT vs. Jet pT, Charged Jets")
outputFilename = os.path.join(outputDirJets, "hChargedJetPtLeadjetPt" + fileFormat)
if isPtHard:
yMin= hChargedJetPt.GetBinContent(hChargedJetPt.FindBin(200)) #find entry in bin at 200 GeV to get the right y-Axis scale
yMax= hChargedJetPt.GetBinContent(hChargedJetPt.GetMaximumBin()) #maximum bin content, used to set the upper edge of the y-axis
hChargedJetPt.GetYaxis().SetRangeUser(yMin,yMax*1.1)
plotHist(hChargedJetPtLeadjetPt, outputFilename, "colz", "", True)
else:
plotHist(hChargedJetPtLeadjetPt, outputFilename, "colz", "", True)
ROOT.gStyle.SetOptTitle(0)
# Plot charged jet pT, background-subtracted
if not ispp:
hChargedJetPtCorr = chargedJetTHnSparse.Projection(4)
hChargedJetPtCorr.SetName("hChargedJetPtCorr")
hChargedJetPtCorrRef = ""
if chargedJetListRef:
hChargedJetPtCorrRef = chargedJetTHnSparseRef.Projection(4)
hChargedJetPtCorrRef.SetName("hChargedJetPtCorr")
outputFilename = os.path.join(outputDirJets, "hChargedJetPtCorr" + fileFormat)
xRangeMax = 150
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Charged jets, background subtracted"
legendRunLabel = "Current run"
legendRefLabel = "All runs"
ratioYAxisTitle = "Ratio: run / all runs"
plotSpectra(hChargedJetPtCorr, hChargedJetPtCorrRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename)
# Plot charged jet pT, background-subtracted, by centrality
chargedJetTHnSparse.GetAxis(0).SetRange(1, 1)
hChargedJetPtCorr010 = chargedJetTHnSparse.Projection(4)
hChargedJetPtCorr010.SetName("hChargedJetPtCorr010")
chargedJetTHnSparse.GetAxis(0).SetRange(2, 2)
hChargedJetPtCorr1030 = chargedJetTHnSparse.Projection(4)
hChargedJetPtCorr1030.SetName("hChargedJetPtCorr1030")
chargedJetTHnSparse.GetAxis(0).SetRange(3, 3)
hChargedJetPtCorr3050 = chargedJetTHnSparse.Projection(4)
hChargedJetPtCorr3050.SetName("hChargedJetPtCorr3050")
chargedJetTHnSparse.GetAxis(0).SetRange(4, 4)
hChargedJetPtCorr5090 = chargedJetTHnSparse.Projection(4)
hChargedJetPtCorr5090.SetName("hChargedJetPtCorr5090")
outputFilename = os.path.join(outputDirJets, "hChargedJetPtCorrCentral" + fileFormat)
xRangeMax = 150
yAxisTitle = "#frac{1}{N_{evts}N_{coll}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Charged jets, background subtracted"
legendRunLabel = "0-10%"
legendRefLabel = "50-90%"
DCalLegendLabel = "10-30%"
PHOSLegendLabel = "30-50%"
ratioYAxisTitle = "R_{CP}"
# Scale by Ncoll, to compare different centralities
# Values taken from https://twiki.cern.ch/twiki/bin/view/ALICE/CentralityCodeSnippets
Ncoll010 = 1636.
Ncoll1030 = 801.
Ncoll3050 = 264.
Ncoll5090 = 38.1
Ncoll090 = 435.3
hChargedJetPtCorr010.Scale(4.) # the 50-90% class spans 4x the centrality width of 0-10% (and 2x that of 10-30% and 30-50% below), so scale to a per-event-in-class basis
hChargedJetPtCorr1030.Scale(Ncoll010/Ncoll1030 * 2.)
hChargedJetPtCorr3050.Scale(Ncoll010/Ncoll3050 * 2.)
hChargedJetPtCorr5090.Scale(Ncoll010/Ncoll5090)
plotSpectra(hChargedJetPtCorr010, hChargedJetPtCorr5090, 0, 0, nEvents, nEvents, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "", DCalLegendLabel, PHOSLegendLabel)
chargedJetTHnSparse.GetAxis(0).SetRange(1,4)
########################################################################################################
# Plot full jet histograms ##############################################################################
########################################################################################################
def plotFullJetQA(ispp, isPtHard, isRun2, includePhos, fullJetList, outputDir, fullJetListRef, nEvents, nEventsRef, fileFormat):
# Create subdirectory for Jets
outputDirJets = outputDir + "Jets/"
if not os.path.exists(outputDirJets):
os.makedirs(outputDirJets)
fullJetTHnSparse = fullJetList.FindObject("fHistJetObservables")
# Axes: (Centrality, eta, phi, pT, pTcorr, pT leading particle) for Pb-Pb; in pp the centrality and pTcorr axes are absent
if fullJetListRef:
fullJetTHnSparseRef = fullJetListRef.FindObject("fHistJetObservables")
ROOT.gStyle.SetOptTitle(1)
if not ispp:
# Plot full jet rho vs. centrality
hFullJetRhoVsCent = fullJetList.FindObject("fHistRhoVsCent")
hFullJetRhoVsCent.SetTitle("Rho vs. Centrality, Full Jets")
outputFilename = os.path.join(outputDirJets, "hFullJetRhoVsCent" + fileFormat)
plotHist(hFullJetRhoVsCent, outputFilename, "colz", False, True)
# Plot Neutral Energy Fraction
hFullJetNEF = fullJetList.FindObject("hNEFVsPtEMC")
if not ispp:
if hFullJetNEF:
hFullJetNEF = hFullJetNEF.Project3D("zy") # assumes the Pb-Pb histogram is stored as a TH3; project it down to 2D before taking the NEF projection below
else:
print("hFullJetNEF not saved for PbPb in this version")
hFullJetNEF = hFullJetNEF.ProjectionY()
hFullJetNEFDCal = fullJetList.FindObject("hNEFVsPtDCal")
hFullJetNEFDCal = hFullJetNEFDCal.ProjectionY()
hFullJetNEF.SetTitle("NEF vs. p_{T,jet}, Full Jets")
outputFilename = os.path.join(outputDirJets, "hFullJetNEF" + fileFormat)
# plotHist(hFullJetNEF, outputFilename, "colz", True, False)
plotNEFSpectra(hFullJetNEF,hFullJetNEFDCal, 0,nEvents, ispp, 1, "1/N_{Evt} dN/dNEF", "EMCal", outputFilename,"", "DCal")
if ispp:
# Plot Delta HadCorr vs pT
hFullJetDeltaHcorr = fullJetList.FindObject("hDeltaEHadCorr")
hFullJetDeltaHcorr.GetXaxis().SetRangeUser(0., 150.)
hFullJetDeltaHcorr.SetTitle("#Delta E vs. p_{T,jet}, Full Jets")
#outputFilename = os.path.join(outputDirJets, "hFullJetDeltaHcorr" + fileFormat)
#plotHist(hFullJetDeltaHcorr, outputFilename, "colz", False, True)
hFullJetDeltaHcorr.SetTitle("<#DeltaE> vs. p_{T,jet}, Full Jets")
hDeltaEHadCorrProf = hFullJetDeltaHcorr.ProfileX()
hDeltaEHadCorrProf.GetYaxis().SetRangeUser(0.08, 15.)
hDeltaEHadCorrProf.SetLineColor(1)
hDeltaEHadCorrProf.SetMarkerStyle(20)
hDeltaEHadCorrProf.GetYaxis().SetTitleOffset(1.2)
hDeltaEHadCorrProf.GetYaxis().SetTitle("< #sum#it{E}_{nonlincorr} - #it{E}_{hadcorr} >")
outputFilename = os.path.join(outputDirJets, "hDeltaEHadCorrProf" + fileFormat)
plotHist(hDeltaEHadCorrProf, outputFilename, "E", True, False)
else:
print("hFullJetDeltaHcorr not saved for PbPb yet") #need to project the TH3 down to 2D
# Plot full jet eta-phi, for jet pT > threshold
# there are ceil(250/3)=84 jet pt bins
# (5,84) means (~12 GeV < jet pT < 250 GeV)
# (11,84) means (~30 GeV < jet pT < 250 GeV)
minJetPtBin = 5
maxJetPtBin = 84
if ispp:
fullJetTHnSparse.GetAxis(2).SetRange(minJetPtBin, maxJetPtBin)
else:
minJetPtBin = 11
fullJetTHnSparse.GetAxis(3).SetRange(minJetPtBin, maxJetPtBin)
# Plot full jet eta-phi
if ispp:
hFullJetEtaPhi = fullJetTHnSparse.Projection(1,0)
else:
hFullJetEtaPhi = fullJetTHnSparse.Projection(2,1)
hFullJetEtaPhi.SetName("FullJetEtaPhi")
hFullJetEtaPhi.SetTitle("Full Jet Occupancy, p_{T,jet} > " + str((minJetPtBin-1)*3) + " GeV")
outputFilename = os.path.join(outputDirJets, "hFullJetEtaPhi" + fileFormat)
hFullJetEtaPhi.GetXaxis().SetRangeUser(-0.8,0.8)
hFullJetEtaPhi.GetYaxis().SetRangeUser(1.2,5.8)
plotHist(hFullJetEtaPhi, outputFilename, "colz", False)
# Plot ratio to reference run, if supplied
if fullJetListRef:
if ispp:
fullJetTHnSparseRef.GetAxis(2).SetRange(minJetPtBin, maxJetPtBin)
hFullJetEtaPhiRef = fullJetTHnSparseRef.Projection(1,0)
else:
fullJetTHnSparseRef.GetAxis(3).SetRange(minJetPtBin, maxJetPtBin)
hFullJetEtaPhiRef = fullJetTHnSparseRef.Projection(2,1)
hFullJetEtaPhiRef.SetName("FullJetEtaPhiRef")
hFullJetEtaPhi.Scale(1./nEvents)
hFullJetEtaPhiRef.Scale(1./nEventsRef)
hFullJetEtaPhiRatio = hFullJetEtaPhi.Clone()
hFullJetEtaPhiRatio.Divide(hFullJetEtaPhiRef)
hFullJetEtaPhiRatio.SetTitle("Full Jet p_{T,jet} > " + str((minJetPtBin-1)*3) + " GeV Occupancy (per event): Current Run / All Runs")
outputFilename = os.path.join(outputDirJets, "hFullJetEtaPhiRatio" + fileFormat)
plotHist(hFullJetEtaPhiRatio, outputFilename, "colz", False)
if ispp:
fullJetTHnSparseRef.GetAxis(2).SetRange(1, maxJetPtBin)
else:
fullJetTHnSparseRef.GetAxis(3).SetRange(1, maxJetPtBin)
if ispp:
fullJetTHnSparse.GetAxis(2).SetRange(1, maxJetPtBin)
else:
fullJetTHnSparse.GetAxis(3).SetRange(1, maxJetPtBin)
ROOT.gStyle.SetOptTitle(0)
# Plot full jet pT
if ispp:
hFullJetPt = fullJetTHnSparse.Projection(2)
else:
hFullJetPt = fullJetTHnSparse.Projection(3)
hFullJetPt.SetName("hFullJetPt")
hFullJetPtRef = ""
if fullJetListRef:
if ispp:
hFullJetPtRef = fullJetTHnSparseRef.Projection(2)
else:
hFullJetPtRef = fullJetTHnSparseRef.Projection(3)
hFullJetPtRef.SetName("hFullJetPt")
outputFilename = os.path.join(outputDirJets, "hFullJetPt" + fileFormat)
xRangeMax = 250
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Full jets"
legendRunLabel = "Current run"
legendRefLabel = "All runs"
ratioYAxisTitle = "Ratio: run / all runs"
plotSpectra(hFullJetPt, hFullJetPtRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename)
# Plot full jet pT leading particle vs. jet pT
if ispp:
hFullJetPtLeadjetPt = fullJetTHnSparse.Projection(3,2)
else:
hFullJetPtLeadjetPt = fullJetTHnSparse.Projection(5,3)
hFullJetPtLeadjetPt.SetName("fHistFuJetObservables_proj_pt_leadpt")
hFullJetPtLeadjetPt.SetTitle("Leading pT vs. Jet pT, Full Jets")
outputFilename = os.path.join(outputDirJets, "hFullJetPtLeadjetPt" + fileFormat)
if ispp:
hFullJetPtLeadjetPt.GetXaxis().SetRangeUser(0,200)
hFullJetPtLeadjetPt.GetYaxis().SetRangeUser(0,100)
if isPtHard:
yMin = hFullJetPt.GetBinContent(hFullJetPt.FindBin(200)) #find entry in bin at 200 GeV to get the right y-Axis scale
maxBin= hFullJetPt.GetBinContent(hFullJetPt.GetMaximumBin())
hFullJetPt.SetMinimum(yMin);
hFullJetPt.SetMaximum(maxBin*1.1);
plotHist(hFullJetPtLeadjetPt, outputFilename, "colz", "", True)
else:
plotHist(hFullJetPtLeadjetPt, outputFilename, "colz", "", True)
# Plot full jet pT, background subtracted
if not ispp:
hFullJetPtCorr = fullJetTHnSparse.Projection(4)
hFullJetPtCorr.SetName("hFullJetPtCorr")
hFullJetPtCorrRef = ""
if fullJetListRef:
hFullJetPtCorrRef = fullJetTHnSparseRef.Projection(4)
hFullJetPtCorrRef.SetName("hFullJetPtCorrRef")
outputFilename = os.path.join(outputDirJets, "hFullJetPtCorr" + fileFormat)
xRangeMax = 150
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Full jets, background subtracted"
legendRunLabel = "Current run"
legendRefLabel = "All runs"
ratioYAxisTitle = "Ratio: run / all runs"
plotSpectra(hFullJetPtCorr, hFullJetPtCorrRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename)
# Plot full jet pT, background-subtracted, by centrality
fullJetTHnSparse.GetAxis(0).SetRange(1, 1)
hFullJetPtCorr010 = fullJetTHnSparse.Projection(4)
hFullJetPtCorr010.SetName("hFullJetPtCorr010")
fullJetTHnSparse.GetAxis(0).SetRange(2, 2)
hFullJetPtCorr1030 = fullJetTHnSparse.Projection(4)
hFullJetPtCorr1030.SetName("hFullJetPtCorr1030")
fullJetTHnSparse.GetAxis(0).SetRange(3, 3)
hFullJetPtCorr3050 = fullJetTHnSparse.Projection(4)
hFullJetPtCorr3050.SetName("hFullJetPtCorr3050")
fullJetTHnSparse.GetAxis(0).SetRange(4, 4)
hFullJetPtCorr5090 = fullJetTHnSparse.Projection(4)
hFullJetPtCorr5090.SetName("hFullJetPtCorr5090")
outputFilename = os.path.join(outputDirJets, "hFullJetPtCorrCentral" + fileFormat)
xRangeMax = 150
yAxisTitle = "#propto#frac{1}{N_{evts}N_{coll}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Full jets, background subtracted"
legendRunLabel = "0-10%"
legendRefLabel = "50-90%"
DCalLegendLabel = "10-30%"
PHOSLegendLabel = "30-50%"
ratioYAxisTitle = "R_{CP}"
# Scale by Ncoll, to compare different centralities
# Values taken from https://twiki.cern.ch/twiki/bin/view/ALICE/CentralityCodeSnippets
Ncoll010 = 1636.
Ncoll1030 = 801.
Ncoll3050 = 264.
Ncoll5090 = 38.1
Ncoll090 = 435.3
hFullJetPtCorr010.Scale(4.) # the 50-90% class spans 4x the centrality width of 0-10% (and 2x that of 10-30% and 30-50% below), so scale to a per-event-in-class basis
hFullJetPtCorr1030.Scale(Ncoll010/Ncoll1030 * 2.)
hFullJetPtCorr3050.Scale(Ncoll010/Ncoll3050 * 2.)
hFullJetPtCorr5090.Scale(Ncoll010/Ncoll5090)
plotSpectra(hFullJetPtCorr010, hFullJetPtCorr5090, 0, 0, nEvents, nEvents, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "", DCalLegendLabel, PHOSLegendLabel)
fullJetTHnSparse.GetAxis(0).SetRange(1,4)
# Plot full jet pT spectra separately for EMCal, DCal, PHOS jets
if isRun2 and includePhos:
#EMCal jets -- divide from DCal/PHOS by phi cut
if ispp:
phiDivideBin = fullJetTHnSparse.GetAxis(1).FindBin(4.)
fullJetTHnSparse.GetAxis(1).SetRange(0, phiDivideBin)
hFullJetEMCalEtaPhiPt = fullJetTHnSparse.Projection(0,1,2, "o") # "o" keep the original axis range
else:
phiDivideBin = fullJetTHnSparse.GetAxis(2).FindBin(4.)
fullJetTHnSparse.GetAxis(2).SetRange(0, phiDivideBin)
hFullJetEMCalEtaPhiPt = fullJetTHnSparse.Projection(1,2,3, "o")
hFullJetEMCalEtaPhiPtCorr = fullJetTHnSparse.Projection(1,2,4, "o")
hFullJetEMCalEtaPhiPtCorr.SetName("FullJetEMCalEtaPhiPtCorr");
hFullJetEMCalEtaPhiPt.SetName("FullJetEMCalEtaPhiPt");
hFullJetEMCalEtaPhi = hFullJetEMCalEtaPhiPt.Project3D("yx")
outputFilename = os.path.join(outputDirJets, "hFullJetEtaPhiEMCal" + fileFormat)
plotHist(hFullJetEMCalEtaPhi, outputFilename, "colz")
hFullJetEMCalPt = hFullJetEMCalEtaPhiPt.Project3D("z")
if not ispp:
hFullJetEMCalPtCorr = hFullJetEMCalEtaPhiPtCorr.Project3D("z")
# DCal jets -- divide from EMCal by phi cut, and divide from PHOS by |eta| > 0.22 (no fiducial cut on inner eta)
if ispp:
etaMinDCalBinNeg = fullJetTHnSparse.GetAxis(0).FindBin(-0.22)
etaMinDCalBinPos = fullJetTHnSparse.GetAxis(0).FindBin(0.22)
fullJetTHnSparse.GetAxis(1).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(0).SetRange(1, etaMinDCalBinNeg)
hFullJetDCalEtaPhiPtNeg = fullJetTHnSparse.Projection(0,1,2, "o")
else:
etaMinDCalBinNeg = fullJetTHnSparse.GetAxis(1).FindBin(-0.22)
etaMinDCalBinPos = fullJetTHnSparse.GetAxis(1).FindBin(0.22)
fullJetTHnSparse.GetAxis(2).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(1).SetRange(1, etaMinDCalBinNeg)
hFullJetDCalEtaPhiPtNeg = fullJetTHnSparse.Projection(1,2,3, "o")
hFullJetDCalEtaPhiPtCorrNeg = fullJetTHnSparse.Projection(1,2,4, "o")
hFullJetDCalEtaPhiPtCorrNeg.SetName("FullJetDCalEtaPhiPtCorrNeg");
hFullJetDCalEtaPhiPtNeg.SetName("FullJetDCalEtaPhiPtNeg");
if ispp:
fullJetTHnSparse.GetAxis(1).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(0).SetRange(etaMinDCalBinPos, 70)
hFullJetDCalEtaPhiPtPos = fullJetTHnSparse.Projection(0,1,2, "o")
else:
fullJetTHnSparse.GetAxis(2).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(1).SetRange(etaMinDCalBinPos, 70)
hFullJetDCalEtaPhiPtPos = fullJetTHnSparse.Projection(1,2,3, "o")
hFullJetDCalEtaPhiPtCorrPos = fullJetTHnSparse.Projection(1,2,4, "o")
hFullJetDCalEtaPhiPtCorrPos.SetName("FullJetDCalEtaPhiPtCorrPos");
hFullJetDCalEtaPhiPtPos.SetName("FullJetDCalEtaPhiPtPos");
# Add the TH3s
hFullJetDCalEtaPhiPt = hFullJetDCalEtaPhiPtNeg.Clone()
hFullJetDCalEtaPhiPt.Add(hFullJetDCalEtaPhiPtPos)
if not ispp:
hFullJetDCalEtaPhiPtCorr = hFullJetDCalEtaPhiPtCorrNeg.Clone()
hFullJetDCalEtaPhiPtCorr.Add(hFullJetDCalEtaPhiPtCorrPos)
# Project to TH2 for eta-phi, and TH1 of pT
hFullJetDCalEtaPhi = hFullJetDCalEtaPhiPt.Project3D("yx")
outputFilename = os.path.join(outputDirJets, "hFullJetEtaPhiDCal" + fileFormat)
plotHist(hFullJetDCalEtaPhi, outputFilename, "colz")
hFullJetDCalPt = hFullJetDCalEtaPhiPt.Project3D("z")
if not ispp:
hFullJetDCalPtCorr = hFullJetDCalEtaPhiPtCorr.Project3D("z")
# Gap jets -- divide from EMCal by phi cut, and divide from PHOS by |eta| > 0.13 and DCal by |eta| < 0.22
if ispp:
etaMinPHOSBin = fullJetTHnSparse.GetAxis(0).FindBin(-0.13)
etaMaxPHOSBin = fullJetTHnSparse.GetAxis(0).FindBin(0.13)
fullJetTHnSparse.GetAxis(1).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(0).SetRange(etaMinDCalBinNeg, etaMinPHOSBin)
hFullJetGapEtaPhiPtNeg = fullJetTHnSparse.Projection(0,1,2, "o")
else:
etaMinPHOSBin = fullJetTHnSparse.GetAxis(1).FindBin(-0.13)
etaMaxPHOSBin = fullJetTHnSparse.GetAxis(1).FindBin(0.13)
fullJetTHnSparse.GetAxis(2).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(1).SetRange(etaMinDCalBinNeg, etaMinPHOSBin)
hFullJetGapEtaPhiPtNeg = fullJetTHnSparse.Projection(1,2,3, "o")
hFullJetGapEtaPhiPtCorrNeg = fullJetTHnSparse.Projection(1,2,4, "o")
hFullJetGapEtaPhiPtCorrNeg.SetName("FullJetGapEtaPhiPtCorrNeg");
hFullJetGapEtaPhiPtNeg.SetName("FullJetGapEtaPhiPtNeg");
if ispp:
fullJetTHnSparse.GetAxis(1).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(0).SetRange(etaMaxPHOSBin, etaMinDCalBinPos)
hFullJetGapEtaPhiPtPos = fullJetTHnSparse.Projection(0,1,2, "o")
else:
fullJetTHnSparse.GetAxis(2).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(1).SetRange(etaMaxPHOSBin, etaMinDCalBinPos)
hFullJetGapEtaPhiPtPos = fullJetTHnSparse.Projection(1,2,3, "o")
hFullJetGapEtaPhiPtCorrPos = fullJetTHnSparse.Projection(1,2,4, "o")
hFullJetGapEtaPhiPtCorrPos.SetName("FullJetGapEtaPhiPtCorrPos");
hFullJetGapEtaPhiPtPos.SetName("FullJetGapEtaPhiPtPos");
# Add the TH3s
hFullJetGapEtaPhiPt = hFullJetGapEtaPhiPtNeg.Clone()
hFullJetGapEtaPhiPt.Add(hFullJetGapEtaPhiPtPos)
if not ispp:
hFullJetGapEtaPhiPtCorr = hFullJetGapEtaPhiPtCorrNeg.Clone()
hFullJetGapEtaPhiPtCorr.Add(hFullJetGapEtaPhiPtCorrPos)
# Project to TH2 for eta-phi, and TH1 of pT
hFullJetGapEtaPhi = hFullJetGapEtaPhiPt.Project3D("yx")
outputFilename = os.path.join(outputDirJets, "hFullJetEtaPhiGap" + fileFormat)
plotHist(hFullJetGapEtaPhi, outputFilename, "colz")
hFullJetGapPt = hFullJetGapEtaPhiPt.Project3D("z")
if not ispp:
hFullJetGapPtCorr = hFullJetGapEtaPhiPtCorr.Project3D("z")
# PHOS jets -- divide from EMCal by phi cut, and divide from DCal by eta < 0.13 (no fiducial cut on inner eta)
# fiducial cut on DCal (kDCALfid) ensures that remaining region is only PHOS
if ispp:
fullJetTHnSparse.GetAxis(1).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(0).SetRange(etaMinPHOSBin, etaMaxPHOSBin)
hFullJetPHOSEtaPhiPt = fullJetTHnSparse.Projection(0,1,2, "o")
else:
fullJetTHnSparse.GetAxis(2).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(1).SetRange(etaMinPHOSBin, etaMaxPHOSBin)
hFullJetPHOSEtaPhiPt = fullJetTHnSparse.Projection(1,2,3, "o")
hFullJetPHOSEtaPhiPtCorr = fullJetTHnSparse.Projection(1,2,4, "o")
hFullJetPHOSEtaPhiPtCorr.SetName("FullJetPHOSEtaPhiPtCorr");
hFullJetPHOSEtaPhiPt.SetName("FullJetPHOSEtaPhiPt");
hFullJetPHOSEtaPhi = hFullJetPHOSEtaPhiPt.Project3D("yx")
outputFilename = os.path.join(outputDirJets, "hFullJetEtaPhiPHOS" + fileFormat)
plotHist(hFullJetPHOSEtaPhi, outputFilename, "colz")
hFullJetPtRef = ""
if fullJetListRef:
if ispp:
hFullJetPtRef = fullJetTHnSparseRef.Projection(2)
else:
hFullJetPtRef = fullJetTHnSparseRef.Projection(3)
hFullJetPtCorrRef = fullJetTHnSparseRef.Projection(4)
hFullJetPtCorrRef.SetName("hFullJetPtCorr")
hFullJetPtRef.SetName("hFullJetPt")
hFullJetPHOSPt = hFullJetPHOSEtaPhiPt.Project3D("z")
if not ispp:
hFullJetPHOSPtCorr = hFullJetPHOSEtaPhiPtCorr.Project3D("z")
# Now plot the EMCal/DCal/PHOS jet pT spectra and their ratio to the reference
outputFilename = os.path.join(outputDirJets, "hFullJetPtCalo" + fileFormat)
xRangeMax = 250
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Full jets"
legendRunLabel = "EMCal jets"
if fullJetListRef:
legendRefLabel = "All full jets"
ratioYAxisTitle = "Ratio to all"
DCalLegendLabel = "DCal jets"
PHOSLegendLabel = "PHOS jets"
plotSpectra(hFullJetEMCalPt, hFullJetPtRef, hFullJetDCalPt, hFullJetPHOSPt, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "", DCalLegendLabel, PHOSLegendLabel)
else:
legendRefLabel = "PHOS jets"
ratioYAxisTitle = "Ratio to PHOS"
h2LegendLabel = "DCal jets"
plotSpectra(hFullJetEMCalPt, hFullJetPHOSPt, hFullJetDCalPt, "", nEvents, nEvents, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "", h2LegendLabel)
# And plot the background subtracted EMCal/DCal/PHOS jet pT spectra and their ratio to the reference
if not ispp:
outputFilename = os.path.join(outputDirJets, "hFullJetPtCorrCalo" + fileFormat)
xRangeMax = 250
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Full jets, background subtracted"
legendRunLabel = "EMCal jets"
if fullJetListRef:
legendRefLabel = "All full jets"
ratioYAxisTitle = "Ratio to all"
DCalLegendLabel = "DCal jets"
PHOSLegendLabel = "PHOS jets"
plotSpectra(hFullJetEMCalPtCorr, hFullJetPtCorrRef, hFullJetDCalPtCorr, hFullJetPHOSPtCorr, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "", DCalLegendLabel, PHOSLegendLabel)
else:
legendRefLabel = "PHOS jets"
ratioYAxisTitle = "Ratio to PHOS"
h2LegendLabel = "DCal jets"
plotSpectra(hFullJetEMCalPtCorr, hFullJetPHOSPtCorr, hFullJetDCalPtCorr, "", nEvents, nEvents, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "", h2LegendLabel)
########################################################################################################
# Plot event histograms ##############################################################################
########################################################################################################
def plotEventQA(ispp, isRun2, includePhos, qaList, outputDir, fileFormat):
histNEvent = qaList.FindObject("fHistEventCount")
nEvents = histNEvent.GetBinContent(1)
#print("N events: %d" % nEvents)
# Create subdirectory for EventQA
outputDirEventQA = outputDir + "EventQA/"
if not os.path.exists(outputDirEventQA):
os.makedirs(outputDirEventQA)
eventQATHnSparse = qaList.FindObject("eventQA")
# Axes: (Centrality, N tracks, pT leading track, N clusters, leading cluster E) for Pb-Pb; in pp the centrality axis is absent, so each index shifts down by one
if ispp:
# N tracks
hEventNtracks = eventQATHnSparse.Projection(0)
outputFilename = os.path.join(outputDirEventQA, "hEventNtracks" + fileFormat)
plotHist(hEventNtracks, outputFilename, "hist E")
# N clusters
hEventNclusters = eventQATHnSparse.Projection(2)
outputFilename = os.path.join(outputDirEventQA, "hEventNclusters" + fileFormat)
plotHist(hEventNclusters, outputFilename, "hist E")
else:
# N tracks vs. Centrality
hEventNtracksCentrality = eventQATHnSparse.Projection(1,0)
outputFilename = os.path.join(outputDirEventQA, "hEventNtracksCentrality" + fileFormat)
plotHist(hEventNtracksCentrality, outputFilename, "colz", False, True)
# N clusters vs. Centrality
hEventNclustersCentrality = eventQATHnSparse.Projection(3,0)
outputFilename = os.path.join(outputDirEventQA, "hEventNclustersCentrality" + fileFormat)
plotHist(hEventNclustersCentrality, outputFilename, "colz", False, True)
if ispp:
# Plot leading cluster energy
hEventEmcalLeadClusE = eventQATHnSparse.Projection(3)
outputFilename = os.path.join(outputDirEventQA, "hEventLeadClusE" + fileFormat)
plotHist(hEventEmcalLeadClusE, outputFilename, "hist E", True)
else:
# Plot leading cluster energy vs. Centrality
hEventLeadClusECentrality = eventQATHnSparse.Projection(4,0)
outputFilename = os.path.join(outputDirEventQA, "hEventLeadClusECentrality" + fileFormat)
plotHist(hEventLeadClusECentrality, outputFilename, "colz", False, True)
# Event rejection reasons
EventCutList = qaList.FindObject("EventCutOutput")
hEventReject = EventCutList.FindObject("fCutStats")
hEventReject.GetYaxis().SetTitle("N events accepted")
outputFilename = os.path.join(outputDirEventQA, "hEventReject" + fileFormat)
textNEvents = ROOT.TLatex()
textNEvents.SetNDC()
textNEvents.DrawLatex(0.65,0.87,"#it{N}_{events} = %d" % nEvents)
plotHist(hEventReject, outputFilename, "hist", False)
########################################################################################################
# Plot Pt-hard histograms ##############################################################################
########################################################################################################
def plotPtHard(f, qaList, nEvents, qaListRef, nEventsRef, outputDir, fileFormat):
# Note: errors have not been propagated correctly for Pt-hard histos, so we do not plot them.
# Create subdirectory for PtHard
outputDirPtHard = outputDir + "PtHard/"
if not os.path.exists(outputDirPtHard):
os.makedirs(outputDirPtHard)
ROOT.gStyle.SetOptTitle(1)
hNEvents = f.Get("hNEventsAcc")
outputFilename = os.path.join(outputDirPtHard, "hPtHardNEvents" + fileFormat)
plotHist(hNEvents, outputFilename, "hist")
hXSecPerEvent = f.Get("hXSecPerEvent")
if hXSecPerEvent:
outputFilename = os.path.join(outputDirPtHard, "hPtHardXSecPerEvent" + fileFormat)
plotHist(hXSecPerEvent, outputFilename, "hist", True)
hNTrialsPerEvent = f.Get("hNTrialsPerEvent")
if hNTrialsPerEvent:
outputFilename = os.path.join(outputDirPtHard, "hPtHardNTrialsPerEvent" + fileFormat)
plotHist(hNTrialsPerEvent, outputFilename, "hist")
hScaleFactor = f.Get("hScaleFactor")
if hScaleFactor:
outputFilename = os.path.join(outputDirPtHard, "hPtHardScaleFactor" + fileFormat)
plotHist(hScaleFactor, outputFilename, "hist", True)
hPtHard = qaList.FindObject("hPtHard")
outputFilename = os.path.join(outputDirPtHard, "hPtHard" + fileFormat)
plotHist(hPtHard, outputFilename, "hist", True)
#if a reference is provided
if qaListRef:
hPtHardRef = qaListRef.FindObject("hPtHard")
outputFilename = os.path.join(outputDirPtHard, "hPtHard_Ratio" + fileFormat)
xRangeMax = 100
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "pT Hard production"
legendRunLabel = "hPtHard this run"
legendRefLabel = "hPtHard all runs"
ratioYAxisTitle = "Ratio: run / all runs"
hPtHardRef.SetLineColor(1)
ispp=1
if nEventsRef!=0:
plotSpectra(hPtHard, hPtHardRef,0x0, 0x0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "1", "2", "3")
########################################################################################################
# Plot basic histogram ##############################################################################
########################################################################################################
def plotHist(h, outputFilename, drawOptions = "", setLogy = False, setLogz = False):
c = ROOT.TCanvas("c","c: hist",600,450)
c.cd()
if setLogy:
c.SetLogy()
if setLogz:
c.SetLogz()
h.Draw(drawOptions)
c.SaveAs(outputFilename)
c.Close()
########################################################################################################
# Plot spectra (and ratio, if reference file supplied) ################################################
########################################################################################################
def plotSpectra(h, hRef, h2, h3, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, scalingOptions = "", h2LegendLabel = "", h3LegendLabel = ""):
h.SetLineColor(1)
h.SetLineWidth(2)
h.SetLineStyle(1)
h.Scale(1./nEvents, scalingOptions)
h.GetYaxis().SetTitle(yAxisTitle)
h.GetYaxis().SetTitleSize(0.06)
h.GetXaxis().SetRangeUser(0,xRangeMax)
h.GetYaxis().SetRangeUser(2e-9,2e3)
if ispp:
h.GetYaxis().SetRangeUser(2e-11,20)
h.GetYaxis().SetLabelFont(43)
h.GetYaxis().SetLabelSize(20)
if h2:
h2.SetLineColor(2)
h2.SetLineWidth(2)
h2.SetLineStyle(1)
h2.Scale(1./nEvents, scalingOptions)
h2.GetYaxis().SetTitle(yAxisTitle)
h2.GetYaxis().SetTitleSize(0.06)
h2.GetXaxis().SetRangeUser(0,xRangeMax)
h2.GetYaxis().SetRangeUser(2e-9,2e3)
if ispp:
h2.GetYaxis().SetRangeUser(2e-11,20)
h2.GetYaxis().SetLabelFont(43)
h2.GetYaxis().SetLabelSize(20)
h2.GetXaxis().SetTitleOffset(1.4)
if h3:
h3.SetLineStyle(1)
h3.SetLineColor(4)
h3.SetLineWidth(2)
h3.Scale(1./nEvents, scalingOptions)
if not hRef:
c = ROOT.TCanvas("c","c: pT",600,450)
c.cd()
ROOT.gPad.SetLeftMargin(0.16)
ROOT.gPad.SetRightMargin(0.05)
ROOT.gPad.SetBottomMargin(0.14)
ROOT.gPad.SetTopMargin(0.05)
ROOT.gPad.SetLogy()
if h2 and h3:
h2.Draw("hist E")
h3.Draw("hist E same")
h.Draw("hist E same")
elif h2:
h2.Draw("hist E")
h.Draw("hist E same")
else:
h.Draw("hist E")
if hRef:
c = ROOT.TCanvas("c","c: pT",800,850)
c.cd()
pad1 = ROOT.TPad("pad1", "pad1", 0, 0.3, 1, 1.0)
pad1.SetBottomMargin(0)
pad1.SetLeftMargin(0.15)
pad1.SetRightMargin(0.05)
pad1.SetTopMargin(0.05)
pad1.SetLogy()
pad1.Draw()
pad1.cd()
if h2 and h3:
h2.Draw("hist")
h3.Draw("hist same")
h.Draw("hist same")
elif h2:
h2.Draw("hist E")
h.Draw("hist E same")
else:
h.Draw("hist E")
hRef.SetLineColor(8)
if h2 and not h3: # hack to keep color scheme consistent in cluster spectra ratio
hRef.SetLineColor(4)
hRef.SetMarkerColor(1)
hRef.SetLineStyle(1)
hRef.Scale(1./nEventsRef, scalingOptions)
hRef.Draw("hist E same")
c.cd()
pad2 = ROOT.TPad("pad2", "pad2", 0, 0.05, 1, 0.3)
pad2.SetTopMargin(0)
pad2.SetBottomMargin(0.35)
pad2.SetLeftMargin(0.15)
pad2.SetRightMargin(0.05)
pad2.Draw()
pad2.cd()
hRatio = h.Clone()
hRatio.Divide(hRef)
hRatio.SetMarkerStyle(20)
hRatio.SetMarkerSize(0.5)
hRatio.SetMarkerColor(1)
if h2:
hRatio2 = h2.Clone()
hRatio2.Divide(hRef)
hRatio2.SetMarkerStyle(21)
hRatio2.SetMarkerColor(2)
hRatio2.GetYaxis().SetTitle(ratioYAxisTitle)
hRatio2.GetYaxis().SetTitleSize(20)
hRatio2.GetYaxis().SetTitleFont(43)
hRatio2.GetYaxis().SetTitleOffset(2.2)
hRatio2.GetYaxis().SetLabelFont(43)
hRatio2.GetYaxis().SetLabelSize(20)
hRatio2.GetYaxis().SetNdivisions(505)
hRatio2.GetYaxis().SetRangeUser(0,2.2)
if ratioYAxisTitle == "Ratio to all":
hRatio2.GetYaxis().SetRangeUser(0,1.2)
hRatio2.GetXaxis().SetRangeUser(0,xRangeMax)
hRatio2.GetXaxis().SetTitleSize(30)
hRatio2.GetXaxis().SetTitleFont(43)
hRatio2.GetXaxis().SetTitleOffset(4.)
hRatio2.GetXaxis().SetLabelFont(43)
hRatio2.GetXaxis().SetLabelSize(20)
if h3:
hRatio3 = h3.Clone()
hRatio3.Divide(hRef)
hRatio3.SetMarkerStyle(21)
hRatio3.SetMarkerColor(4)
if h2 and h3:
hRatio2.Draw("P E")
hRatio3.Draw("P E same")
hRatio.Draw("P E same")
elif h2:
hRatio2.GetYaxis().SetRangeUser(0,25)
hRatio2.Draw("P E")
hRatio.Draw("P E same")
if not h2 and not h3:
hRatio.GetYaxis().SetTitle(ratioYAxisTitle)
hRatio.GetYaxis().SetTitleSize(20)
hRatio.GetYaxis().SetTitleFont(43)
hRatio.GetYaxis().SetTitleOffset(2.2)
hRatio.GetYaxis().SetLabelFont(43)
hRatio.GetYaxis().SetLabelSize(20)
hRatio.GetYaxis().SetNdivisions(505)
hRatio.GetYaxis().SetRangeUser(0,2.2)
hRatio.GetXaxis().SetRangeUser(0,xRangeMax)
hRatio.GetXaxis().SetTitleSize(30)
hRatio.GetXaxis().SetTitleFont(43)
hRatio.GetXaxis().SetTitleOffset(4.)
hRatio.GetXaxis().SetLabelFont(43)
hRatio.GetXaxis().SetLabelSize(20)
hRatio.Draw("P E")
pad1.cd()
if nEvents > 1:
textNEvents = ROOT.TLatex()
textNEvents.SetNDC()
textNEvents.DrawLatex(0.55,0.6,"#it{N}_{events} = %d" % nEvents)
leg2 = ROOT.TLegend(0.3,0.7,0.88,0.93,legendTitle)
leg2.SetFillColor(10)
leg2.SetBorderSize(0)
leg2.SetFillStyle(0)
leg2.SetTextSize(0.04)
leg2.AddEntry(h, legendRunLabel, "l")
if h2:
leg2.AddEntry(h2, h2LegendLabel, "l")
if h3:
leg2.AddEntry(h3, h3LegendLabel, "l")
if hRef:
leg2.AddEntry(hRef, legendRefLabel, "l")
leg2.Draw("same")
c.SaveAs(outputFilename)
c.Close()
########################################################################################################
# Plot spectra and ratio (h2,h3 will be divided by h) ################################################
########################################################################################################
def plotSpectraCent(h, h2, h3, nEvents, ispp, outputFilename, xRangeMax, yAxisTitle, ratioYAxisTitle, legendTitle, h1legendLabel, h2legendLabel, h3legendLabel = "", scalingOptions = "", yRatioMax = 32):
h.SetLineColor(4)
if not h3:
h.SetLineColor(2)
h.SetLineWidth(2)
h.SetLineStyle(1)
h.Scale(1./nEvents, scalingOptions)
h.GetYaxis().SetTitle(yAxisTitle)
h.GetYaxis().SetTitleSize(0.06)
h.GetXaxis().SetRangeUser(0,xRangeMax)
h.GetYaxis().SetRangeUser(2e-9,2e3)
if ispp:
h.GetYaxis().SetRangeUser(2e-11,20)
h.GetYaxis().SetLabelFont(43)
h.GetYaxis().SetLabelSize(20)
h2.SetLineColor(1)
h2.SetLineWidth(2)
h2.SetLineStyle(1)
h2.Scale(1./nEvents, scalingOptions)
h2.GetYaxis().SetTitle(yAxisTitle)
h2.GetYaxis().SetTitleSize(0.06)
h2.GetXaxis().SetRangeUser(0,xRangeMax)
h2.GetYaxis().SetRangeUser(2e-9,2e3)
if ispp:
h2.GetYaxis().SetRangeUser(2e-11,20)
h2.GetYaxis().SetLabelFont(43)
h2.GetYaxis().SetLabelSize(20)
if h3:
h3.SetLineStyle(1)
h3.SetLineColor(2)
h3.SetLineWidth(2)
h3.Scale(1./nEvents, scalingOptions)
c = ROOT.TCanvas("c","c: pT",800,850)
c.cd()
pad1 = ROOT.TPad("pad1", "pad1", 0, 0.3, 1, 1.0)
pad1.SetBottomMargin(0)
pad1.SetLeftMargin(0.15)
pad1.SetRightMargin(0.05)
pad1.SetTopMargin(0.05)
pad1.SetLogy()
pad1.Draw()
pad1.cd()
if h3:
h2.Draw("hist E")
h3.Draw("hist E same")
h.Draw("hist E same")
else:
h2.Draw("hist E")
h.Draw("hist E same")
c.cd()
pad2 = ROOT.TPad("pad2", "pad2", 0, 0.05, 1, 0.3)
pad2.SetTopMargin(0)
pad2.SetBottomMargin(0.35)
pad2.SetLeftMargin(0.15)
pad2.SetRightMargin(0.05)
pad2.Draw()
pad2.cd()
hRatio = h2.Clone()
hRatio.Divide(h)
hRatio.SetMarkerStyle(21)
hRatio.SetMarkerColor(1)
if h3:
hRatio2 = h3.Clone()
hRatio2.Divide(h)
hRatio2.SetMarkerStyle(21)
hRatio2.SetMarkerColor(2)
hRatio2.GetYaxis().SetTitle(ratioYAxisTitle)
hRatio2.GetYaxis().SetTitleSize(20)
hRatio2.GetYaxis().SetTitleFont(43)
hRatio2.GetYaxis().SetTitleOffset(2.2)
hRatio2.GetYaxis().SetLabelFont(43)
hRatio2.GetYaxis().SetLabelSize(20)
hRatio2.GetYaxis().SetNdivisions(505)
hRatio2.GetYaxis().SetRangeUser(0,yRatioMax)
hRatio2.GetXaxis().SetRangeUser(0,xRangeMax)
hRatio2.GetXaxis().SetTitleSize(30)
hRatio2.GetXaxis().SetTitleFont(43)
hRatio2.GetXaxis().SetTitleOffset(4.)
hRatio2.GetXaxis().SetLabelFont(43)
hRatio2.GetXaxis().SetLabelSize(20)
hRatio2.Draw("P E")
hRatio.Draw("P E same")
else:
hRatio.GetYaxis().SetTitle(ratioYAxisTitle)
hRatio.GetYaxis().SetTitleSize(20)
hRatio.GetYaxis().SetTitleFont(43)
hRatio.GetYaxis().SetTitleOffset(2.2)
hRatio.GetYaxis().SetLabelFont(43)
hRatio.GetYaxis().SetLabelSize(20)
hRatio.GetYaxis().SetNdivisions(505)
hRatio.GetYaxis().SetRangeUser(0,yRatioMax)
hRatio.GetXaxis().SetRangeUser(0,xRangeMax)
hRatio.GetXaxis().SetTitleSize(30)
hRatio.GetXaxis().SetTitleFont(43)
hRatio.GetXaxis().SetTitleOffset(4.)
hRatio.GetXaxis().SetLabelFont(43)
hRatio.GetXaxis().SetLabelSize(20)
hRatio.Draw("P E")
pad1.cd()
leg = ROOT.TLegend(0.3,0.7,0.88,0.93,legendTitle)
leg.SetFillColor(10)
leg.SetBorderSize(0)
leg.SetFillStyle(0)
leg.SetTextSize(0.04)
leg.AddEntry(h2, h2legendLabel, "l")
if h3:
leg.AddEntry(h3, h3legendLabel, "l")
leg.AddEntry(h, h1legendLabel, "l")
leg.Draw("same")
c.SaveAs(outputFilename)
c.Close()
########################################################################################################
# Plot NEF spectra for EMCal and DCal (no ratio panel) ################################################
########################################################################################################
def plotNEFSpectra(h, h2, h3, nEvents, ispp, xRangeMax, yAxisTitle, h1legendLabel, outputFilename, scalingOptions = "", h2legendLabel = "", h3legendLabel = ""):
h.SetLineColor(1)
h.SetLineWidth(2)
h.SetLineStyle(1)
h.Scale(1./nEvents, scalingOptions)
if ispp:
h.GetYaxis().SetRangeUser(0.0000005, 0.05)
h.GetYaxis().SetTitle(yAxisTitle)
h.GetYaxis().SetTitleSize(0.06)
h.GetXaxis().SetRangeUser(0,xRangeMax)
h.GetYaxis().SetLabelFont(43)
h.GetYaxis().SetLabelSize(20)
if h2:
h2.SetLineColor(2)
h2.SetLineWidth(2)
h2.SetLineStyle(1)
h2.Scale(1./nEvents, scalingOptions)
h2.GetYaxis().SetTitle(yAxisTitle)
h2.GetYaxis().SetTitleSize(0.06)
h2.GetXaxis().SetRangeUser(0,xRangeMax)
#h2.GetYaxis().SetRangeUser(2e-9,2e3)
if ispp:
h2.GetYaxis().SetRangeUser(5e-7,0.05)
h2.GetYaxis().SetLabelFont(43)
h2.GetYaxis().SetLabelSize(20)
h2.GetXaxis().SetTitleOffset(1.4)
c = ROOT.TCanvas("c","c: hist",600,450)
c.cd().SetLogy()
ROOT.gPad.SetLeftMargin(0.16)
ROOT.gPad.SetRightMargin(0.05)
ROOT.gPad.SetBottomMargin(0.14)
ROOT.gPad.SetTopMargin(0.05)
if h3:
h2.Draw("hist E")
h3.Draw("hist E same")
h.Draw("hist E same")
else:
h2.Draw("hist E")
h.Draw("hist E same")
leg = ROOT.TLegend(0.3,0.7,0.88,0.93)
leg.SetFillColor(10)
leg.SetBorderSize(0)
leg.SetFillStyle(0)
leg.SetTextSize(0.04)
leg.AddEntry(h, h1legendLabel, "l")
if h3:
leg.AddEntry(h3, h3legendLabel, "l")
leg.AddEntry(h2, h2legendLabel, "l")
leg.Draw("same")
c.SaveAs(outputFilename)
c.Close()
#########################################################################################
# Function to iterate recursively through an object to set Sumw2 on all TH1/TH2/THnSparse
#########################################################################################
def SetSumw2(obj):
if obj.InheritsFrom(ROOT.TProfile.Class()):
pass
#print("Sumw2 not called for TProfile %s" % obj.GetName())
elif obj.InheritsFrom(ROOT.TH2.Class()):
obj.Sumw2()
#print("Sumw2 called on TH2 %s" % obj.GetName())
elif obj.InheritsFrom(ROOT.TH1.Class()):
obj.Sumw2()
#print("Sumw2 called on TH1 %s" % obj.GetName())
elif obj.InheritsFrom(ROOT.THnSparse.Class()):
obj.Sumw2()
#print("Sumw2 called on THnSparse %s" % obj.GetName())
else:
#print("Not a histogram!")
#print obj.GetName()
for subobj in obj:
SetSumw2(subobj)
#---------------------------------------------------------------------------------------------------
if __name__ == '__main__':
# Define arguments
parser = argparse.ArgumentParser(description="Compare histograms to test the new EMCal corrections framework")
parser.add_argument("-f", "--inputFile", action="store",
type=str, metavar="inputFile",
default="AnalysisResults.root",
help="Path of AnalysisResults.root file")
parser.add_argument("-o", "--outputDir", action="store",
type=str, metavar="outputDir",
default="./outputQA/",
help="Output directory for QA plots to be written to")
parser.add_argument("-r", "--referenceFile", action="store",
type=str, metavar="referenceFile",
default="",
help="Reference root file for the inputFile histos to be compared to (when doing run-by-run QA)")
parser.add_argument("-i", "--imageFormat", action="store",
type=str, metavar="imageFormat",
default=".pdf",
help="Image format to save plots in, e.g. \".pdf\" or \".png\"")
# Parse the arguments
args = parser.parse_args()
print("Configuring...")
print("inputFile: \"{0}\"".format(args.inputFile))
print("ouputDir: \"{0}\"".format(args.outputDir))
print("referenceFile: \"{0}\"".format(args.referenceFile))
print("imageFormat: \"{0}\"".format(args.imageFormat))
# If invalid inputFile is given, exit
if not os.path.exists(args.inputFile):
print("File \"{0}\" does not exist! Exiting!".format(args.inputFile))
sys.exit(0)
plotPWGJEQA(inputFile = args.inputFile, outputDir = args.outputDir, referenceFile = args.referenceFile, fileFormat = args.imageFormat)
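# Example invocation (illustrative only; the script and file names are placeholders):
#   python plotPWGJEQA.py -f AnalysisResults.root -r allRunsReference.root -o ./outputQA/ -i ".png"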
|
trungnt13/scikit-learn
|
refs/heads/master
|
examples/linear_model/plot_ard.py
|
248
|
"""
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit a regression model with Automatic Relevance Determination (ARD), a sparsity-inducing Bayesian regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha_ of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
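# (For reference, the generative model built above is y = X.dot(w) + noise, with
#  w_i ~ N(0, 1/lambda_) for the 10 relevant features, w_i = 0 otherwise, and
#  noise ~ N(0, 1/alpha_), where lambda_ and alpha_ are precisions.)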
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
|
marctc/django-blog-zinnia
|
refs/heads/develop
|
zinnia/views/mixins/callable_queryset.py
|
16
|
"""Callable Queryset mixins for Zinnia views"""
from django.core.exceptions import ImproperlyConfigured
class CallableQuerysetMixin(object):
"""
Mixin for handling a callable queryset,
which will force the update of the queryset.
Related to issue http://code.djangoproject.com/ticket/8378
"""
queryset = None
def get_queryset(self):
"""
Check that the queryset is defined and call it.
"""
if self.queryset is None:
raise ImproperlyConfigured(
"'%s' must define 'queryset'" % self.__class__.__name__)
return self.queryset()
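# Illustrative sketch (not part of the original module): DemoView and the fake
# queryset below are made-up names, only meant to show that get_queryset() calls
# the stored callable on every access instead of reusing a queryset cached at
# import time.
if __name__ == '__main__':
    class DemoView(CallableQuerysetMixin):
        queryset = staticmethod(lambda: ['entry-1', 'entry-2'])
    print(DemoView().get_queryset())  # -> ['entry-1', 'entry-2'], re-evaluated on every call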
|
pypot/scikit-learn
|
refs/heads/master
|
sklearn/metrics/scorer.py
|
211
|
"""
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
|
MotiurRahman/AppTest1
|
refs/heads/master
|
modules/android/com.soasta.touchtest/1.0/hooks/add.py
|
365
|
#!/usr/bin/env python
#
# This is the module project add hook that will be
# called when your module is added to a project
#
import os, sys
def dequote(s):
if s[0:1] == '"':
return s[1:-1]
return s
def main(args,argc):
# You will get the following command line arguments
# in the following order:
#
# project_dir = the full path to the project root directory
# project_type = the type of project (desktop, mobile, ipad)
# project_name = the name of the project
#
project_dir = dequote(os.path.expanduser(args[1]))
project_type = dequote(args[2])
project_name = dequote(args[3])
# TODO: write your add hook here (optional)
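# For example (illustrative only), a hook could simply log where the module was added:
# print "TouchTest module added to %s project '%s' in %s" % (project_type, project_name, project_dir)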
# exit
sys.exit(0)
if __name__ == '__main__':
main(sys.argv,len(sys.argv))
|
skipzone/Illumicone
|
refs/heads/master
|
simulator/openpixelcontrol/python/miami.py
|
4
|
#!/usr/bin/env python
"""A demo client for Open Pixel Control
http://github.com/zestyping/openpixelcontrol
Creates moving blobby colors with sparkles on top.
To run:
First start the gl simulator using, for example, the included "wall" layout
make
bin/gl_server layouts/wall.json
Then run this script in another shell to send colors to the simulator
python_clients/miami.py --layout layouts/wall.json
"""
from __future__ import division
import time
import sys
import optparse
import random
try:
import json
except ImportError:
import simplejson as json
import opc
import color_utils
#-------------------------------------------------------------------------------
# command line
parser = optparse.OptionParser()
parser.add_option('-l', '--layout', dest='layout',
action='store', type='string',
help='layout file')
parser.add_option('-s', '--server', dest='server', default='127.0.0.1:7890',
action='store', type='string',
help='ip and port of server')
parser.add_option('-f', '--fps', dest='fps', default=20,
action='store', type='int',
help='frames per second')
options, args = parser.parse_args()
if not options.layout:
parser.print_help()
print
print 'ERROR: you must specify a layout file using --layout'
print
sys.exit(1)
#-------------------------------------------------------------------------------
# parse layout file
print
print ' parsing layout file'
print
coordinates = []
for item in json.load(open(options.layout)):
if 'point' in item:
coordinates.append(tuple(item['point']))
#-------------------------------------------------------------------------------
# connect to server
client = opc.Client(options.server)
if client.can_connect():
print ' connected to %s' % options.server
else:
# can't connect, but keep running in case the server appears later
print ' WARNING: could not connect to %s' % options.server
print
#-------------------------------------------------------------------------------
# color function
def pixel_color(t, coord, ii, n_pixels, random_values):
"""Compute the color of a given pixel.
t: time in seconds since the program started.
coord: the (x, y, z) position of the pixel as a tuple
ii: which pixel this is, starting at 0
n_pixels: the total number of pixels
random_values: a list containing a constant random value for each pixel
Returns an (r, g, b) tuple in the range 0-255
"""
# make moving stripes for x, y, and z
x, y, z = coord
y += color_utils.cos(x + 0.2*z, offset=0, period=1, minn=0, maxx=0.6)
z += color_utils.cos(x, offset=0, period=1, minn=0, maxx=0.3)
x += color_utils.cos(y + z, offset=0, period=1.5, minn=0, maxx=0.2)
# rotate
x, y, z = y, z, x
# # shift some of the pixels to a new xyz location
# if ii % 17 == 0:
# x += ((ii*123)%5) / n_pixels * 32.12 + 0.1
# y += ((ii*137)%5) / n_pixels * 22.23 + 0.1
# z += ((ii*147)%7) / n_pixels * 44.34 + 0.1
# make x, y, z -> r, g, b sine waves
r = color_utils.cos(x, offset=t / 4, period=2.5, minn=0, maxx=1)
g = color_utils.cos(y, offset=t / 4, period=2.5, minn=0, maxx=1)
b = color_utils.cos(z, offset=t / 4, period=2.5, minn=0, maxx=1)
r, g, b = color_utils.contrast((r, g, b), 0.5, 1.4)
clampdown = (r + g + b)/2
clampdown = color_utils.remap(clampdown, 0.4, 0.5, 0, 1)
clampdown = color_utils.clamp(clampdown, 0, 1)
clampdown *= 0.9
r *= clampdown
g *= clampdown
b *= clampdown
# # shift the color of a few outliers
# if random_values[ii] < 0.03:
# r, g, b = b, g, r
# black out regions
r2 = color_utils.cos(x, offset=t / 10 + 12.345, period=4, minn=0, maxx=1)
g2 = color_utils.cos(y, offset=t / 10 + 24.536, period=4, minn=0, maxx=1)
b2 = color_utils.cos(z, offset=t / 10 + 34.675, period=4, minn=0, maxx=1)
clampdown = (r2 + g2 + b2)/2
clampdown = color_utils.remap(clampdown, 0.2, 0.3, 0, 1)
clampdown = color_utils.clamp(clampdown, 0, 1)
r *= clampdown
g *= clampdown
b *= clampdown
# color scheme: fade towards blue-and-orange
# g = (r+b) / 2
g = g * 0.6 + ((r+b) / 2) * 0.4
# # stretched vertical smears
# v = color_utils.cos(ii / n_pixels, offset=t*0.1, period = 0.07, minn=0, maxx=1) ** 5 * 0.3
# r += v
# g += v
# b += v
# fade behind twinkle
fade = color_utils.cos(t - ii/n_pixels, offset=0, period=7, minn=0, maxx=1) ** 20
fade = 1 - fade*0.2
r *= fade
g *= fade
b *= fade
# twinkle occasional LEDs
twinkle_speed = 0.07
twinkle_density = 0.1
twinkle = (random_values[ii]*7 + time.time()*twinkle_speed) % 1
twinkle = abs(twinkle*2 - 1)
twinkle = color_utils.remap(twinkle, 0, 1, -1/twinkle_density, 1.1)
twinkle = color_utils.clamp(twinkle, -0.5, 1.1)
twinkle **= 5
twinkle *= color_utils.cos(t - ii/n_pixels, offset=0, period=7, minn=0, maxx=1) ** 20
twinkle = color_utils.clamp(twinkle, -0.3, 1)
r += twinkle
g += twinkle
b += twinkle
# apply gamma curve
# only do this on live leds, not in the simulator
#r, g, b = color_utils.gamma((r, g, b), 2.2)
return (r*256, g*256, b*256)
#-------------------------------------------------------------------------------
# send pixels
print ' sending pixels forever (control-c to exit)...'
print
n_pixels = len(coordinates)
random_values = [random.random() for ii in range(n_pixels)]
start_time = time.time()
while True:
t = time.time() - start_time
pixels = [pixel_color(t*0.6, coord, ii, n_pixels, random_values) for ii, coord in enumerate(coordinates)]
client.put_pixels(pixels, channel=0)
time.sleep(1 / options.fps)
|
dushu1203/chromium.src
|
refs/heads/nw12
|
third_party/jinja2/nodes.py
|
623
|
# -*- coding: utf-8 -*-
"""
jinja2.nodes
~~~~~~~~~~~~
This module implements additional nodes derived from the ast base node.
It also provides some node tree helper functions like `in_lineno` and
`get_nodes` used by the parser and translator in order to normalize
python and jinja nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import operator
from collections import deque
from jinja2.utils import Markup
from jinja2._compat import next, izip, with_metaclass, text_type, \
method_type, function_type
#: the types we support for context functions
_context_function_types = (function_type, method_type)
_binop_to_func = {
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod,
'+': operator.add,
'-': operator.sub
}
_uaop_to_func = {
'not': operator.not_,
'+': operator.pos,
'-': operator.neg
}
_cmpop_to_func = {
'eq': operator.eq,
'ne': operator.ne,
'gt': operator.gt,
'gteq': operator.ge,
'lt': operator.lt,
'lteq': operator.le,
'in': lambda a, b: a in b,
'notin': lambda a, b: a not in b
}
class Impossible(Exception):
"""Raised if the node could not perform a requested action."""
class NodeType(type):
"""A metaclass for nodes that handles the field and attribute
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
def __new__(cls, name, bases, d):
for attr in 'fields', 'attributes':
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
assert len(bases) == 1, 'multiple inheritance not allowed'
assert len(storage) == len(set(storage)), 'layout conflict'
d[attr] = tuple(storage)
d.setdefault('abstract', False)
return type.__new__(cls, name, bases, d)
class EvalContext(object):
"""Holds evaluation time information. Custom attributes can be attached
to it in extensions.
"""
def __init__(self, environment, template_name=None):
self.environment = environment
if callable(environment.autoescape):
self.autoescape = environment.autoescape(template_name)
else:
self.autoescape = environment.autoescape
self.volatile = False
def save(self):
return self.__dict__.copy()
def revert(self, old):
self.__dict__.clear()
self.__dict__.update(old)
def get_eval_context(node, ctx):
if ctx is None:
if node.environment is None:
raise RuntimeError('if no eval context is passed, the '
'node must have an attached '
'environment.')
return EvalContext(node.environment)
return ctx
class Node(with_metaclass(NodeType, object)):
"""Baseclass for all Jinja2 nodes. There are a number of nodes available
of different types. There are four major types:
- :class:`Stmt`: statements
- :class:`Expr`: expressions
- :class:`Helper`: helper nodes
- :class:`Template`: the outermost wrapper node
All nodes have fields and attributes. Fields may be other nodes, lists,
or arbitrary values. Fields are passed to the constructor as regular
positional arguments, attributes as keyword arguments. Each node has
two attributes: `lineno` (the line number of the node) and `environment`.
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
"""
fields = ()
attributes = ('lineno', 'environment')
abstract = True
def __init__(self, *fields, **attributes):
if self.abstract:
            raise TypeError('abstract nodes are not instantiable')
if fields:
if len(fields) != len(self.fields):
if not self.fields:
raise TypeError('%r takes 0 arguments' %
self.__class__.__name__)
raise TypeError('%r takes 0 or %d argument%s' % (
self.__class__.__name__,
len(self.fields),
len(self.fields) != 1 and 's' or ''
))
for name, arg in izip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
raise TypeError('unknown attribute %r' %
next(iter(attributes)))
def iter_fields(self, exclude=None, only=None):
"""This method iterates over all fields that are defined and yields
        ``(key, value)`` tuples.  By default all fields are returned, but
it's possible to limit that to some fields by providing the `only`
parameter or to exclude some using the `exclude` parameter. Both
should be sets or tuples of field names.
"""
for name in self.fields:
if (exclude is only is None) or \
(exclude is not None and name not in exclude) or \
(only is not None and name in only):
try:
yield name, getattr(self, name)
except AttributeError:
pass
def iter_child_nodes(self, exclude=None, only=None):
"""Iterates over all direct child nodes of the node. This iterates
        over all fields and yields the values if they are nodes.  If the value
of a field is a list all the nodes in that list are returned.
"""
for field, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
yield n
elif isinstance(item, Node):
yield item
def find(self, node_type):
"""Find the first node of a given type. If no such node exists the
return value is `None`.
"""
for result in self.find_all(node_type):
return result
def find_all(self, node_type):
"""Find all the nodes of a given type. If the type is a tuple,
the check is performed for any of the tuple items.
"""
for child in self.iter_child_nodes():
if isinstance(child, node_type):
yield child
for result in child.find_all(node_type):
yield result
def set_ctx(self, ctx):
"""Reset the context of a node and all child nodes. Per default the
parser will all generate nodes that have a 'load' context as it's the
most common one. This method is used in the parser to set assignment
targets and other nodes to a store context.
"""
todo = deque([self])
while todo:
node = todo.popleft()
if 'ctx' in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
return self
def set_lineno(self, lineno, override=False):
"""Set the line numbers of the node and children."""
todo = deque([self])
while todo:
node = todo.popleft()
if 'lineno' in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
return self
def set_environment(self, environment):
"""Set the environment for all nodes."""
todo = deque([self])
while todo:
node = todo.popleft()
node.environment = environment
todo.extend(node.iter_child_nodes())
return self
def __eq__(self, other):
return type(self) is type(other) and \
tuple(self.iter_fields()) == tuple(other.iter_fields())
def __ne__(self, other):
return not self.__eq__(other)
# Restore Python 2 hashing behavior on Python 3
__hash__ = object.__hash__
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
arg in self.fields)
)
class Stmt(Node):
"""Base node for all statements."""
abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
abstract = True
class Template(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
fields = ('body',)
class Output(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
fields = ('nodes',)
class Extends(Stmt):
"""Represents an extends statement."""
fields = ('template',)
class For(Stmt):
"""The for loop. `target` is the target for the iteration (usually a
:class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
of nodes that are used as loop-body, and `else_` a list of nodes for the
`else` block. If no else node exists it has to be an empty list.
For filtered nodes an expression can be stored as `test`, otherwise `None`.
"""
fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
fields = ('test', 'body', 'else_')
class Macro(Stmt):
"""A macro definition. `name` is the name of the macro, `args` a list of
arguments and `defaults` a list of defaults if there are any. `body` is
a list of nodes for the macro body.
"""
fields = ('name', 'args', 'defaults', 'body')
class CallBlock(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro as `caller` argument this node holds.
"""
fields = ('call', 'args', 'defaults', 'body')
class FilterBlock(Stmt):
"""Node for filter sections."""
fields = ('body', 'filter')
class Block(Stmt):
"""A node that represents a block."""
fields = ('name', 'body', 'scoped')
class Include(Stmt):
"""A node that represents the include tag."""
fields = ('template', 'with_context', 'ignore_missing')
class Import(Stmt):
"""A node that represents the import tag."""
fields = ('template', 'target', 'with_context')
class FromImport(Stmt):
"""A node that represents the from import tag. It's important to not
pass unsafe names to the name attribute. The compiler translates the
attribute lookups directly into getattr calls and does *not* use the
subscript callback of the interface. As exported variables may not
start with double underscores (which the parser asserts) this is not a
problem for regular Jinja code, but if this node is used in an extension
extra care must be taken.
The list of names may contain tuples if aliases are wanted.
"""
fields = ('template', 'names', 'with_context')
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
fields = ('node',)
class Assign(Stmt):
"""Assigns an expression to a target."""
fields = ('target', 'node')
class Expr(Node):
"""Baseclass for all expressions."""
abstract = True
def as_const(self, eval_ctx=None):
"""Return the value of the expression as constant or raise
:exc:`Impossible` if this was not possible.
An :class:`EvalContext` can be provided, if none is given
a default context is created which requires the nodes to have
an attached environment.
.. versionchanged:: 2.4
the `eval_ctx` parameter was added.
"""
raise Impossible()
def can_assign(self):
"""Check if it's possible to assign something to this node."""
return False
class BinExpr(Expr):
"""Baseclass for all binary expressions."""
fields = ('left', 'right')
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_binops:
raise Impossible()
f = _binop_to_func[self.operator]
try:
return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
except Exception:
raise Impossible()
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
fields = ('node',)
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_unops:
raise Impossible()
f = _uaop_to_func[self.operator]
try:
return f(self.node.as_const(eval_ctx))
except Exception:
raise Impossible()
class Name(Expr):
"""Looks up a name or stores a value in a name.
The `ctx` of the node can be one of the following values:
- `store`: store a value in the name
- `load`: load that name
- `param`: like `store` but if the name was defined as function parameter.
"""
fields = ('name', 'ctx')
def can_assign(self):
return self.name not in ('true', 'false', 'none',
'True', 'False', 'None')
class Literal(Expr):
"""Baseclass for literals."""
abstract = True
class Const(Literal):
"""All constant values. The parser will return this node for simple
constants such as ``42`` or ``"foo"`` but it can be used to store more
    complex values such as lists too.  Only constants with a safe
    representation (objects where ``eval(repr(x)) == x`` is true) can be
    stored this way.
"""
fields = ('value',)
def as_const(self, eval_ctx=None):
return self.value
@classmethod
def from_untrusted(cls, value, lineno=None, environment=None):
"""Return a const object if the value is representable as
constant value in the generated code, otherwise it will raise
an `Impossible` exception.
"""
from .compiler import has_safe_repr
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment)
class TemplateData(Literal):
"""A constant template string."""
fields = ('data',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
if eval_ctx.autoescape:
return Markup(self.data)
return self.data
class Tuple(Literal):
"""For loop unpacking and some other things like multiple arguments
for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
is used for loading the names or storing.
"""
fields = ('items', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return tuple(x.as_const(eval_ctx) for x in self.items)
def can_assign(self):
for item in self.items:
if not item.can_assign():
return False
return True
class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return [x.as_const(eval_ctx) for x in self.items]
class Dict(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return dict(x.as_const(eval_ctx) for x in self.items)
class Pair(Helper):
"""A key, value pair for dicts."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
fields = ('test', 'expr1', 'expr2')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.test.as_const(eval_ctx):
return self.expr1.as_const(eval_ctx)
# if we evaluate to an undefined object, we better do that at runtime
if self.expr2 is None:
raise Impossible()
return self.expr2.as_const(eval_ctx)
class Filter(Expr):
"""This node applies a filter on an expression. `name` is the name of
the filter, the rest of the fields are the same as for :class:`Call`.
If the `node` of a filter is `None` the contents of the last buffer are
filtered. Buffers are created by macros and filter blocks.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile or self.node is None:
raise Impossible()
# we have to be careful here because we call filter_ below.
        # if this variable were called filter, 2to3 would wrap the
        # call in a list because it assumes we are talking about the
# builtin filter function here which no longer returns a list in
# python 3. because of that, do not rename filter_ to filter!
filter_ = self.environment.filters.get(self.name)
if filter_ is None or getattr(filter_, 'contextfilter', False):
raise Impossible()
obj = self.node.as_const(eval_ctx)
args = [x.as_const(eval_ctx) for x in self.args]
if getattr(filter_, 'evalcontextfilter', False):
args.insert(0, eval_ctx)
elif getattr(filter_, 'environmentfilter', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return filter_(obj, *args, **kwargs)
except Exception:
raise Impossible()
class Test(Expr):
"""Applies a test on an expression. `name` is the name of the test, the
rest of the fields are the same as for :class:`Call`.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Call(Expr):
"""Calls an expression. `args` is a list of arguments, `kwargs` a list
of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
and `dyn_kwargs` has to be either `None` or a node that is used as
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
obj = self.node.as_const(eval_ctx)
# don't evaluate context functions
args = [x.as_const(eval_ctx) for x in self.args]
if isinstance(obj, _context_function_types):
if getattr(obj, 'contextfunction', False):
raise Impossible()
elif getattr(obj, 'evalcontextfunction', False):
args.insert(0, eval_ctx)
elif getattr(obj, 'environmentfunction', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return obj(*args, **kwargs)
except Exception:
raise Impossible()
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
fields = ('node', 'arg', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.ctx != 'load':
raise Impossible()
try:
return self.environment.getitem(self.node.as_const(eval_ctx),
self.arg.as_const(eval_ctx))
except Exception:
raise Impossible()
def can_assign(self):
return False
class Getattr(Expr):
"""Get an attribute or item from an expression that is a ascii-only
bytestring and prefer the attribute.
"""
fields = ('node', 'attr', 'ctx')
def as_const(self, eval_ctx=None):
if self.ctx != 'load':
raise Impossible()
try:
eval_ctx = get_eval_context(self, eval_ctx)
return self.environment.getattr(self.node.as_const(eval_ctx),
self.attr)
except Exception:
raise Impossible()
def can_assign(self):
return False
class Slice(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
fields = ('start', 'stop', 'step')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
def const(obj):
if obj is None:
return None
return obj.as_const(eval_ctx)
return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
"""Concatenates the list of expressions provided after converting them to
unicode.
"""
fields = ('nodes',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\s.
"""
fields = ('expr', 'ops')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
value = new_value
except Exception:
raise Impossible()
return result
class Operand(Helper):
"""Holds an operator and an expression."""
fields = ('op', 'expr')
if __debug__:
Operand.__doc__ += '\nThe following operators are available: ' + \
', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
set(_uaop_to_func) | set(_cmpop_to_func)))
class Mul(BinExpr):
"""Multiplies the left with the right node."""
operator = '*'
class Div(BinExpr):
"""Divides the left by the right node."""
operator = '/'
class FloorDiv(BinExpr):
"""Divides the left by the right node and truncates conver the
result into an integer by truncating.
"""
operator = '//'
class Add(BinExpr):
"""Add the left to the right node."""
operator = '+'
class Sub(BinExpr):
"""Substract the right from the left node."""
operator = '-'
class Mod(BinExpr):
"""Left modulo right."""
operator = '%'
class Pow(BinExpr):
"""Left to the power of right."""
operator = '**'
class And(BinExpr):
"""Short circuited AND."""
operator = 'and'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
class Or(BinExpr):
"""Short circuited OR."""
operator = 'or'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
class Not(UnaryExpr):
"""Negate the expression."""
operator = 'not'
class Neg(UnaryExpr):
"""Make the expression negative."""
operator = '-'
class Pos(UnaryExpr):
"""Make the expression positive (noop for most expressions)"""
operator = '+'
# Helpers for extensions
class EnvironmentAttribute(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
fields = ('name',)
class ExtensionAttribute(Expr):
"""Returns the attribute of an extension bound to the environment.
The identifier is the identifier of the :class:`Extension`.
This node is usually constructed by calling the
:meth:`~jinja2.ext.Extension.attr` method on an extension.
"""
fields = ('identifier', 'name')
class ImportedName(Expr):
"""If created with an import name the import name is returned on node
access. For example ``ImportedName('cgi.escape')`` returns the `escape`
function from the cgi module on evaluation. Imports are optimized by the
compiler so there is no need to assign them to local variables.
"""
fields = ('importname',)
class InternalName(Expr):
"""An internal name in the compiler. You cannot create these nodes
yourself but the parser provides a
:meth:`~jinja2.parser.Parser.free_identifier` method that creates
a new identifier for you. This identifier is not available from the
    template and is not treated specially by the compiler.
"""
fields = ('name',)
def __init__(self):
raise TypeError('Can\'t create internal names. Use the '
'`free_identifier` method on a parser.')
class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return Markup(self.expr.as_const(eval_ctx))
class MarkSafeIfAutoescape(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`) but
only if autoescaping is active.
.. versionadded:: 2.5
"""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
expr = self.expr.as_const(eval_ctx)
if eval_ctx.autoescape:
return Markup(expr)
return expr
class ContextReference(Expr):
"""Returns the current template context. It can be used like a
:class:`Name` node, with a ``'load'`` ctx and will return the
current :class:`~jinja2.runtime.Context` object.
Here an example that assigns the current template name to a
variable named `foo`::
Assign(Name('foo', ctx='store'),
Getattr(ContextReference(), 'name'))
"""
class Continue(Stmt):
"""Continue a loop."""
class Break(Stmt):
"""Break a loop."""
class Scope(Stmt):
"""An artificial scope."""
fields = ('body',)
class EvalContextModifier(Stmt):
"""Modifies the eval context. For each option that should be modified,
a :class:`Keyword` has to be added to the :attr:`options` list.
Example to change the `autoescape` setting::
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
fields = ('options',)
class ScopedEvalContextModifier(EvalContextModifier):
"""Modifies the eval context and reverts it later. Works exactly like
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
fields = ('body',)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
raise TypeError('can\'t create custom node types')
NodeType.__new__ = staticmethod(_failing_new); del _failing_new
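# Illustrative sketch (not part of the original module): hand-building a tiny
# expression tree and constant-folding it.  The plain jinja2 Environment used
# here is only needed so ``as_const`` can create an EvalContext; nothing else
# in the demo is taken from this file.
if __name__ == '__main__':
    from jinja2 import Environment
    env = Environment()
    # (1 + 2) * 3 as nodes; set_environment attaches the environment that
    # as_const needs when no explicit EvalContext is passed.
    tree = Mul(Add(Const(1), Const(2)), Const(3))
    tree.set_environment(env)
    print(tree.as_const())                            # 9
    # find_all walks the tree and yields nodes of the requested type.
    print([c.value for c in tree.find_all(Const)])    # [1, 2, 3]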
|
mrkn/iTerm2
|
refs/heads/master
|
tests/esctest/esctypes.py
|
31
|
class ChecksumException(Exception):
def __init__(self, points, actual, expected):
message = "Checksum failed at the following locations:\n%s\nActual:\n%s\n\nExpected:\n%s" % (
"\n".join(map(str, points)),
"\n".join(actual),
"\n".join(expected))
super(ChecksumException, self).__init__(message)
class BadResponse(Exception):
def __init__(self, actual, expected):
message = "Bad response from server. Expected '%s' but got '%s'" % (expected, actual)
super(BadResponse, self).__init__(message)
class TestFailure(Exception):
def __init__(self, actual, expected, details=None):
message = "Test failed: expected '%s' but got '%s'" % (str(expected), str(actual))
if details is not None:
message += ". " + details
super(TestFailure, self).__init__(message)
class InternalError(Exception):
def __init__(self, message):
super(InternalError, self).__init__(message)
class KnownBug(Exception):
def __init__(self, reason):
super(KnownBug, self).__init__(reason)
class BrokenTest(Exception):
def __init__(self, reason):
super(BrokenTest, self).__init__(reason)
class InsufficientVTLevel(Exception):
  def __init__(self, actualLevel, minimumLevel):
reason = "Terminal implements VT level %d but %d is needed." % (
actualLevel, minimumLevel)
super(InsufficientVTLevel, self).__init__(reason)
class Point(object):
def __init__(self, x, y):
self._x = x
self._y = y
def __str__(self):
return "Point(x=%d, y=%d)" % (self._x, self._y)
def x(self):
return self._x
def y(self):
return self._y
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
class Size(object):
def __init__(self, width, height):
self._width = width
self._height = height
def __str__(self):
return "Size(width=%d, height=%d)" % (self._width, self._height)
def width(self):
return self._width
def height(self):
return self._height
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
class Rect(object):
def __init__(self, left, top, right, bottom):
self._left = left
self._top = top
self._right = right
self._bottom = bottom
def __str__(self):
return "Rect(left=%d, top=%d, right=%d, bottom=%d)" % (
self._left, self._top, self._right, self._bottom)
def left(self):
return self._left
def top(self):
return self._top
def right(self):
return self._right
def bottom(self):
return self._bottom
def width(self):
return self._right - self._left + 1
def height(self):
return self._bottom - self._top + 1
def params(self):
return [ self._top, self._left, self._bottom, self._right ]
def points(self):
y = self._top
while y <= self._bottom:
x = self._left
while x <= self._right:
yield Point(x, y)
x += 1
y += 1
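# Illustrative sketch (not part of the original module): poking at the Rect
# helper.  The [top, left, bottom, right] ordering of params() matches how
# the rectangle-area tests pass regions to the terminal (an assumption about
# its intended use, not stated in this file).
if __name__ == "__main__":
  r = Rect(left=1, top=2, right=3, bottom=4)
  print(r.params())                            # [2, 1, 4, 3]
  print("%d x %d" % (r.width(), r.height()))   # 3 x 3
  # points() yields every cell of the rectangle, row by row.
  print([str(p) for p in r.points()][:2])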
|
teichopsia-/take_brake
|
refs/heads/master
|
lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.py
|
1002
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
from . import tarfile
try:
import bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format",
"get_unpack_formats", "register_unpack_format",
"unregister_unpack_format", "unpack_archive", "ignore_patterns"]
class Error(EnvironmentError):
pass
class SpecialFileError(EnvironmentError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(EnvironmentError):
"""Raised when a command could not be executed"""
class ReadError(EnvironmentError):
"""Raised when an archive cannot be read"""
class RegistryError(Exception):
"""Raised when a registery operation with the archiving
and unpacking registeries fails"""
try:
WindowsError
except NameError:
WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst):
"""Copy data from src to dst"""
if _samefile(src, dst):
raise Error("`%s` and `%s` are the same file" % (src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
raise SpecialFileError("`%s` is a named pipe" % fn)
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst)
def copymode(src, dst):
"""Copy mode bits from src to dst"""
if hasattr(os, 'chmod'):
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
os.chmod(dst, mode)
def copystat(src, dst):
"""Copy all stat info (mode bits, atime, mtime, flags) from src to dst"""
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
if hasattr(os, 'utime'):
os.utime(dst, (st.st_atime, st.st_mtime))
if hasattr(os, 'chmod'):
os.chmod(dst, mode)
if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
try:
os.chflags(dst, st.st_flags)
except OSError as why:
if (not hasattr(errno, 'EOPNOTSUPP') or
why.errno != errno.EOPNOTSUPP):
raise
def copy(src, dst):
"""Copy data and mode bits ("cp src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copymode(src, dst)
def copy2(src, dst):
"""Copy data and all stat info ("cp -p src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copystat(src, dst)
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
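# Illustrative sketch (not part of the original backport): how the
# ignore_patterns() factory above plugs into copytree() below.  The source
# and destination paths are hypothetical placeholders and this helper is
# never called at import time.
def _example_copytree_with_ignore():
    # Skip compiled files and editor backups; the callable returned by
    # ignore_patterns receives (directory, names) and returns the names
    # to leave out of that directory.
    ignore = ignore_patterns('*.pyc', '*~')
    copytree('/tmp/example-src', '/tmp/example-dst', ignore=ignore)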
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
"""Recursively copy a directory tree.
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
os.symlink(linkto, dstname)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
                    # otherwise let the copy occur. copy2 will raise an error
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.extend((src, dst, str(why)))
if errors:
raise Error(errors)
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
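# Illustrative sketch (not part of the original backport): the onerror hook
# of rmtree() receives the failing function, the offending path and
# sys.exc_info().  The handler below only reports and continues; the path is
# a hypothetical placeholder and the helper is never called at import time.
def _example_rmtree_onerror():
    def report_and_continue(func, path, exc_info):
        # func is os.listdir, os.remove or os.rmdir; exc_info is the usual
        # (type, value, traceback) triple describing the failure.
        print("%s failed on %s: %s" % (func.__name__, path, exc_info[1]))
    rmtree('/tmp/example-tree', onerror=report_and_continue)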
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
return os.path.basename(path.rstrip(os.path.sep))
def move(src, dst):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed.
    A lot more could be done here...  A look at mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except OSError:
if os.path.isdir(src):
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
copytree(src, real_dst, symlinks=True)
rmtree(src)
else:
copy2(src, real_dst)
os.unlink(src)
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
owner=None, group=None, logger=None):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'.
'compress' must be "gzip" (the default), "bzip2", or None.
'owner' and 'group' can be used to define an owner and a group for the
archive that is being built. If not provided, the current owner and group
will be used.
The output tar file will be named 'base_name' + ".tar", possibly plus
the appropriate compression extension (".gz", or ".bz2").
Returns the output filename.
"""
tar_compression = {'gzip': 'gz', None: ''}
compress_ext = {'gzip': '.gz'}
if _BZ2_SUPPORTED:
tar_compression['bzip2'] = 'bz2'
compress_ext['bzip2'] = '.bz2'
# flags for compression program, each element of list will be an argument
if compress is not None and compress not in compress_ext:
raise ValueError("bad value for 'compress', or compression format not "
"supported : {0}".format(compress))
archive_name = base_name + '.tar' + compress_ext.get(compress, '')
archive_dir = os.path.dirname(archive_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# creating the tarball
if logger is not None:
logger.info('Creating tar archive')
uid = _get_uid(owner)
gid = _get_gid(group)
def _set_uid_gid(tarinfo):
if gid is not None:
tarinfo.gid = gid
tarinfo.gname = group
if uid is not None:
tarinfo.uid = uid
tarinfo.uname = owner
return tarinfo
if not dry_run:
tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
tar.close()
return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
# XXX see if we want to keep an external call here
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
from distutils.errors import DistutilsExecError
from distutils.spawn import spawn
try:
spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
raise ExecError("unable to create zip file '%s': "
"could neither import the 'zipfile' module nor "
"find a standalone zip utility") % zip_filename
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
import zipfile
except ImportError:
zipfile = None
if zipfile is None:
_call_external_zip(base_dir, zip_filename, verbose, dry_run)
else:
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zip.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
zip.close()
return zip_filename
_ARCHIVE_FORMATS = {
'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (_make_zipfile, [], "ZIP file"),
}
if _BZ2_SUPPORTED:
_ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
"bzip2'ed tar-file")
def get_archive_formats():
"""Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
"""
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
def register_archive_format(name, function, extra_args=None, description=''):
"""Registers an archive format.
name is the name of the format. function is the callable that will be
used to create archives. If provided, extra_args is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_archive_formats() function.
"""
if extra_args is None:
extra_args = []
if not isinstance(function, collections.Callable):
raise TypeError('The %s object is not callable' % function)
if not isinstance(extra_args, (tuple, list)):
raise TypeError('extra_args needs to be a sequence')
for element in extra_args:
if not isinstance(element, (tuple, list)) or len(element) !=2:
raise TypeError('extra_args elements are : (arg_name, value)')
_ARCHIVE_FORMATS[name] = (function, extra_args, description)
def unregister_archive_format(name):
del _ARCHIVE_FORMATS[name]
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "bztar"
or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError("unknown archive format '%s'" % format)
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
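# Illustrative sketch (not part of the original backport): creating a gzipped
# tarball with make_archive().  The paths are hypothetical placeholders and
# the helper is never called at import time.
def _example_make_archive():
    # Produces /tmp/backup.tar.gz containing the 'project' directory,
    # archived relative to /tmp/work, and returns the archive filename.
    return make_archive('/tmp/backup', 'gztar',
                        root_dir='/tmp/work', base_dir='project')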
def get_unpack_formats():
"""Returns a list of supported formats for unpacking.
Each element of the returned sequence is a tuple
(name, extensions, description)
"""
formats = [(name, info[0], info[3]) for name, info in
_UNPACK_FORMATS.items()]
formats.sort()
return formats
def _check_unpack_options(extensions, function, extra_args):
"""Checks what gets registered as an unpacker."""
# first make sure no other unpacker is registered for this extension
existing_extensions = {}
for name, info in _UNPACK_FORMATS.items():
for ext in info[0]:
existing_extensions[ext] = name
for extension in extensions:
if extension in existing_extensions:
msg = '%s is already registered for "%s"'
raise RegistryError(msg % (extension,
existing_extensions[extension]))
if not isinstance(function, collections.Callable):
raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
description=''):
"""Registers an unpack format.
`name` is the name of the format. `extensions` is a list of extensions
corresponding to the format.
`function` is the callable that will be
used to unpack archives. The callable will receive archives to unpack.
If it's unable to handle an archive, it needs to raise a ReadError
exception.
If provided, `extra_args` is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_unpack_formats() function.
"""
if extra_args is None:
extra_args = []
_check_unpack_options(extensions, function, extra_args)
_UNPACK_FORMATS[name] = extensions, function, extra_args, description
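# Illustrative sketch (not part of the original backport): registering a
# custom unpacker so unpack_archive() recognises a new extension.  The format
# name, extension and unpacker below are made up for the example and the
# helper is never called at import time.
def _example_register_unpack_format():
    def _unpack_frob(filename, extract_dir):
        # A real unpacker would extract `filename` into `extract_dir` and
        # raise ReadError on unreadable input; this placeholder does nothing.
        pass
    register_unpack_format('frob', ['.frob'], _unpack_frob,
                           description='made-up frob archives')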
def unregister_unpack_format(name):
"""Removes the pack format from the registery."""
del _UNPACK_FORMATS[name]
def _ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _unpack_zipfile(filename, extract_dir):
"""Unpack zip `filename` to `extract_dir`
"""
try:
import zipfile
except ImportError:
raise ReadError('zlib not supported, cannot unpack this archive.')
if not zipfile.is_zipfile(filename):
raise ReadError("%s is not a zip file" % filename)
zip = zipfile.ZipFile(filename)
try:
for info in zip.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name:
continue
target = os.path.join(extract_dir, *name.split('/'))
if not target:
continue
_ensure_directory(target)
if not name.endswith('/'):
# file
data = zip.read(info.filename)
f = open(target, 'wb')
try:
f.write(data)
finally:
f.close()
del data
finally:
zip.close()
def _unpack_tarfile(filename, extract_dir):
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise ReadError(
"%s is not a compressed or uncompressed tar file" % filename)
try:
tarobj.extractall(extract_dir)
finally:
tarobj.close()
_UNPACK_FORMATS = {
'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
"bzip2'ed tar-file")
def _find_unpack_format(filename):
for name, info in _UNPACK_FORMATS.items():
for extension in info[0]:
if filename.endswith(extension):
return name
return None
def unpack_archive(filename, extract_dir=None, format=None):
"""Unpack an archive.
`filename` is the name of the archive.
`extract_dir` is the name of the target directory, where the archive
is unpacked. If not provided, the current working directory is used.
`format` is the archive format: one of "zip", "tar", or "gztar". Or any
other registered format. If not provided, unpack_archive will use the
filename extension and see if an unpacker was registered for that
extension.
In case none is found, a ValueError is raised.
"""
if extract_dir is None:
extract_dir = os.getcwd()
if format is not None:
try:
format_info = _UNPACK_FORMATS[format]
except KeyError:
raise ValueError("Unknown unpack format '{0}'".format(format))
func = format_info[1]
func(filename, extract_dir, **dict(format_info[2]))
else:
# we need to look at the registered unpackers supported extensions
format = _find_unpack_format(filename)
if format is None:
raise ReadError("Unknown archive format '{0}'".format(filename))
func = _UNPACK_FORMATS[format][1]
kwargs = dict(_UNPACK_FORMATS[format][2])
func(filename, extract_dir, **kwargs)
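# Illustrative sketch (not part of the original backport): a make_archive /
# unpack_archive round trip.  Guarded so nothing runs on import; all paths
# are hypothetical placeholders.
if __name__ == '__main__':
    archive = make_archive('/tmp/demo', 'zip',
                           root_dir='/tmp/work', base_dir='project')
    # With no explicit format, unpack_archive picks the unpacker from the
    # '.zip' extension via the registry above.
    unpack_archive(archive, extract_dir='/tmp/restored')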
|
be-cloud-be/horizon-addons
|
refs/heads/9.0
|
server-tools/email_template_template/model/__init__.py
|
67
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import email_template
|
proxysh/Safejumper-for-Desktop
|
refs/heads/master
|
buildlinux/env64/lib/python2.7/site-packages/twisted/test/test_usage.py
|
13
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.usage}, a command line option parsing library.
"""
from __future__ import division, absolute_import
from twisted.trial import unittest
from twisted.python import usage
class WellBehaved(usage.Options):
optParameters = [['long', 'w', 'default', 'and a docstring'],
['another', 'n', 'no docstring'],
['longonly', None, 'noshort'],
['shortless', None, 'except',
'this one got docstring'],
]
optFlags = [['aflag', 'f',
"""
flagallicious docstringness for this here
"""],
['flout', 'o'],
]
def opt_myflag(self):
self.opts['myflag'] = "PONY!"
def opt_myparam(self, value):
self.opts['myparam'] = "%s WITH A PONY!" % (value,)
class ParseCorrectnessTests(unittest.TestCase):
"""
Test L{usage.Options.parseOptions} for correct values under
good conditions.
"""
def setUp(self):
"""
Instantiate and parseOptions a well-behaved Options class.
"""
self.niceArgV = ("--long Alpha -n Beta "
"--shortless Gamma -f --myflag "
"--myparam Tofu").split()
self.nice = WellBehaved()
self.nice.parseOptions(self.niceArgV)
def test_checkParameters(self):
"""
Parameters have correct values.
"""
self.assertEqual(self.nice.opts['long'], "Alpha")
self.assertEqual(self.nice.opts['another'], "Beta")
self.assertEqual(self.nice.opts['longonly'], "noshort")
self.assertEqual(self.nice.opts['shortless'], "Gamma")
def test_checkFlags(self):
"""
Flags have correct values.
"""
self.assertEqual(self.nice.opts['aflag'], 1)
self.assertEqual(self.nice.opts['flout'], 0)
def test_checkCustoms(self):
"""
Custom flags and parameters have correct values.
"""
self.assertEqual(self.nice.opts['myflag'], "PONY!")
self.assertEqual(self.nice.opts['myparam'], "Tofu WITH A PONY!")
class TypedOptions(usage.Options):
optParameters = [
['fooint', None, 392, 'Foo int', int],
['foofloat', None, 4.23, 'Foo float', float],
['eggint', None, None, 'Egg int without default', int],
['eggfloat', None, None, 'Egg float without default', float],
]
def opt_under_score(self, value):
"""
This option has an underscore in its name to exercise the _ to -
translation.
"""
self.underscoreValue = value
opt_u = opt_under_score
class TypedTests(unittest.TestCase):
"""
Test L{usage.Options.parseOptions} for options with forced types.
"""
def setUp(self):
self.usage = TypedOptions()
def test_defaultValues(self):
"""
Default values are parsed.
"""
argV = []
self.usage.parseOptions(argV)
self.assertEqual(self.usage.opts['fooint'], 392)
self.assertIsInstance(self.usage.opts['fooint'], int)
self.assertEqual(self.usage.opts['foofloat'], 4.23)
self.assertIsInstance(self.usage.opts['foofloat'], float)
self.assertIsNone(self.usage.opts['eggint'])
self.assertIsNone(self.usage.opts['eggfloat'])
def test_parsingValues(self):
"""
int and float values are parsed.
"""
argV = ("--fooint 912 --foofloat -823.1 "
"--eggint 32 --eggfloat 21").split()
self.usage.parseOptions(argV)
self.assertEqual(self.usage.opts['fooint'], 912)
self.assertIsInstance(self.usage.opts['fooint'], int)
self.assertEqual(self.usage.opts['foofloat'], -823.1)
self.assertIsInstance(self.usage.opts['foofloat'], float)
self.assertEqual(self.usage.opts['eggint'], 32)
self.assertIsInstance(self.usage.opts['eggint'], int)
self.assertEqual(self.usage.opts['eggfloat'], 21.)
self.assertIsInstance(self.usage.opts['eggfloat'], float)
def test_underscoreOption(self):
"""
A dash in an option name is translated to an underscore before being
dispatched to a handler.
"""
self.usage.parseOptions(['--under-score', 'foo'])
self.assertEqual(self.usage.underscoreValue, 'foo')
def test_underscoreOptionAlias(self):
"""
An option name with a dash in it can have an alias.
"""
self.usage.parseOptions(['-u', 'bar'])
self.assertEqual(self.usage.underscoreValue, 'bar')
def test_invalidValues(self):
"""
Passing wrong values raises an error.
"""
argV = "--fooint egg".split()
self.assertRaises(usage.UsageError, self.usage.parseOptions, argV)
class WrongTypedOptions(usage.Options):
optParameters = [
['barwrong', None, None, 'Bar with wrong coerce', 'he']
]
class WeirdCallableOptions(usage.Options):
def _bar(value):
raise RuntimeError("Ouch")
def _foo(value):
raise ValueError("Yay")
optParameters = [
['barwrong', None, None, 'Bar with strange callable', _bar],
['foowrong', None, None, 'Foo with strange callable', _foo]
]
class WrongTypedTests(unittest.TestCase):
"""
Test L{usage.Options.parseOptions} for wrong coerce options.
"""
def test_nonCallable(self):
"""
Using a non-callable type fails.
"""
us = WrongTypedOptions()
argV = "--barwrong egg".split()
self.assertRaises(TypeError, us.parseOptions, argV)
def test_notCalledInDefault(self):
"""
The coerce functions are not called if no values are provided.
"""
us = WeirdCallableOptions()
argV = []
us.parseOptions(argV)
def test_weirdCallable(self):
"""
Errors raised by coerce functions are handled properly.
"""
us = WeirdCallableOptions()
argV = "--foowrong blah".split()
# ValueError is swallowed as UsageError
e = self.assertRaises(usage.UsageError, us.parseOptions, argV)
self.assertEqual(str(e), "Parameter type enforcement failed: Yay")
us = WeirdCallableOptions()
argV = "--barwrong blah".split()
# RuntimeError is not swallowed
self.assertRaises(RuntimeError, us.parseOptions, argV)
class OutputTests(unittest.TestCase):
def test_uppercasing(self):
"""
Error output case adjustment does not mangle options
"""
opt = WellBehaved()
e = self.assertRaises(usage.UsageError,
opt.parseOptions, ['-Z'])
self.assertEqual(str(e), 'option -Z not recognized')
class InquisitionOptions(usage.Options):
optFlags = [
('expect', 'e'),
]
optParameters = [
('torture-device', 't',
'comfy-chair',
'set preferred torture device'),
]
class HolyQuestOptions(usage.Options):
optFlags = [('horseback', 'h',
'use a horse'),
('for-grail', 'g'),
]
class SubCommandOptions(usage.Options):
optFlags = [('europian-swallow', None,
'set default swallow type to Europian'),
]
subCommands = [
('inquisition', 'inquest', InquisitionOptions,
'Perform an inquisition'),
('holyquest', 'quest', HolyQuestOptions,
'Embark upon a holy quest'),
]
class SubCommandTests(unittest.TestCase):
"""
Test L{usage.Options.parseOptions} for options with subcommands.
"""
def test_simpleSubcommand(self):
"""
A subcommand is recognized.
"""
o = SubCommandOptions()
o.parseOptions(['--europian-swallow', 'inquisition'])
self.assertTrue(o['europian-swallow'])
self.assertEqual(o.subCommand, 'inquisition')
self.assertIsInstance(o.subOptions, InquisitionOptions)
self.assertFalse(o.subOptions['expect'])
self.assertEqual(o.subOptions['torture-device'], 'comfy-chair')
def test_subcommandWithFlagsAndOptions(self):
"""
Flags and options of a subcommand are assigned.
"""
o = SubCommandOptions()
o.parseOptions(['inquisition', '--expect', '--torture-device=feather'])
self.assertFalse(o['europian-swallow'])
self.assertEqual(o.subCommand, 'inquisition')
self.assertIsInstance(o.subOptions, InquisitionOptions)
self.assertTrue(o.subOptions['expect'])
self.assertEqual(o.subOptions['torture-device'], 'feather')
def test_subcommandAliasWithFlagsAndOptions(self):
"""
Flags and options of a subcommand alias are assigned.
"""
o = SubCommandOptions()
o.parseOptions(['inquest', '--expect', '--torture-device=feather'])
self.assertFalse(o['europian-swallow'])
self.assertEqual(o.subCommand, 'inquisition')
self.assertIsInstance(o.subOptions, InquisitionOptions)
self.assertTrue(o.subOptions['expect'])
self.assertEqual(o.subOptions['torture-device'], 'feather')
def test_anotherSubcommandWithFlagsAndOptions(self):
"""
Flags and options of another subcommand are assigned.
"""
o = SubCommandOptions()
o.parseOptions(['holyquest', '--for-grail'])
self.assertFalse(o['europian-swallow'])
self.assertEqual(o.subCommand, 'holyquest')
self.assertIsInstance(o.subOptions, HolyQuestOptions)
self.assertFalse(o.subOptions['horseback'])
self.assertTrue(o.subOptions['for-grail'])
def test_noSubcommand(self):
"""
If no subcommand is specified and no default subcommand is assigned,
a subcommand will not be implied.
"""
o = SubCommandOptions()
o.parseOptions(['--europian-swallow'])
self.assertTrue(o['europian-swallow'])
self.assertIsNone(o.subCommand)
self.assertFalse(hasattr(o, 'subOptions'))
def test_defaultSubcommand(self):
"""
Flags and options in the default subcommand are assigned.
"""
o = SubCommandOptions()
o.defaultSubCommand = 'inquest'
o.parseOptions(['--europian-swallow'])
self.assertTrue(o['europian-swallow'])
self.assertEqual(o.subCommand, 'inquisition')
self.assertIsInstance(o.subOptions, InquisitionOptions)
self.assertFalse(o.subOptions['expect'])
self.assertEqual(o.subOptions['torture-device'], 'comfy-chair')
def test_subCommandParseOptionsHasParent(self):
"""
The parseOptions method from the Options object specified for the
given subcommand is called.
"""
class SubOpt(usage.Options):
def parseOptions(self, *a, **kw):
self.sawParent = self.parent
usage.Options.parseOptions(self, *a, **kw)
class Opt(usage.Options):
subCommands = [
('foo', 'f', SubOpt, 'bar'),
]
o = Opt()
o.parseOptions(['foo'])
self.assertTrue(hasattr(o.subOptions, 'sawParent'))
        self.assertEqual(o.subOptions.sawParent, o)
def test_subCommandInTwoPlaces(self):
"""
The .parent pointer is correct even when the same Options class is
used twice.
"""
class SubOpt(usage.Options):
pass
class OptFoo(usage.Options):
subCommands = [
('foo', 'f', SubOpt, 'quux'),
]
class OptBar(usage.Options):
subCommands = [
('bar', 'b', SubOpt, 'quux'),
]
oFoo = OptFoo()
oFoo.parseOptions(['foo'])
        oBar = OptBar()
oBar.parseOptions(['bar'])
self.assertTrue(hasattr(oFoo.subOptions, 'parent'))
self.assertTrue(hasattr(oBar.subOptions, 'parent'))
        self.assertIs(oFoo.subOptions.parent, oFoo)
        self.assertIs(oBar.subOptions.parent, oBar)
class HelpStringTests(unittest.TestCase):
"""
Test generated help strings.
"""
def setUp(self):
"""
Instantiate a well-behaved Options class.
"""
self.niceArgV = ("--long Alpha -n Beta "
"--shortless Gamma -f --myflag "
"--myparam Tofu").split()
self.nice = WellBehaved()
def test_noGoBoom(self):
"""
__str__ shouldn't go boom.
"""
try:
self.nice.__str__()
except Exception as e:
self.fail(e)
def test_whitespaceStripFlagsAndParameters(self):
"""
Extra whitespace in flag and parameters docs is stripped.
"""
        # We test this by making sure aflag and its help string are on the
# same line.
lines = [s for s in str(self.nice).splitlines() if s.find("aflag")>=0]
self.assertTrue(len(lines) > 0)
self.assertTrue(lines[0].find("flagallicious") >= 0)
class PortCoerceTests(unittest.TestCase):
"""
Test the behavior of L{usage.portCoerce}.
"""
def test_validCoerce(self):
"""
Test the answers with valid input.
"""
self.assertEqual(0, usage.portCoerce("0"))
self.assertEqual(3210, usage.portCoerce("3210"))
self.assertEqual(65535, usage.portCoerce("65535"))
def test_errorCoerce(self):
"""
Test error path.
"""
self.assertRaises(ValueError, usage.portCoerce, "")
self.assertRaises(ValueError, usage.portCoerce, "-21")
self.assertRaises(ValueError, usage.portCoerce, "212189")
self.assertRaises(ValueError, usage.portCoerce, "foo")
class ZshCompleterTests(unittest.TestCase):
"""
Test the behavior of the various L{twisted.usage.Completer} classes
for producing output usable by zsh tab-completion system.
"""
def test_completer(self):
"""
Completer produces zsh shell-code that produces no completion matches.
"""
c = usage.Completer()
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, ':some-option:')
c = usage.Completer(descr='some action', repeat=True)
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, '*:some action:')
def test_files(self):
"""
CompleteFiles produces zsh shell-code that completes file names
according to a glob.
"""
c = usage.CompleteFiles()
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, ':some-option (*):_files -g "*"')
c = usage.CompleteFiles('*.py')
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, ':some-option (*.py):_files -g "*.py"')
c = usage.CompleteFiles('*.py', descr="some action", repeat=True)
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, '*:some action (*.py):_files -g "*.py"')
def test_dirs(self):
"""
CompleteDirs produces zsh shell-code that completes directory names.
"""
c = usage.CompleteDirs()
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, ':some-option:_directories')
c = usage.CompleteDirs(descr="some action", repeat=True)
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, '*:some action:_directories')
def test_list(self):
"""
CompleteList produces zsh shell-code that completes words from a fixed
list of possibilities.
"""
c = usage.CompleteList('ABC')
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, ':some-option:(A B C)')
c = usage.CompleteList(['1', '2', '3'])
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, ':some-option:(1 2 3)')
c = usage.CompleteList(['1', '2', '3'], descr='some action',
repeat=True)
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, '*:some action:(1 2 3)')
def test_multiList(self):
"""
CompleteMultiList produces zsh shell-code that completes multiple
comma-separated words from a fixed list of possibilities.
"""
c = usage.CompleteMultiList('ABC')
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, ':some-option:_values -s , \'some-option\' A B C')
c = usage.CompleteMultiList(['1','2','3'])
got = c._shellCode('some-option', usage._ZSH)
self.assertEqual(got, ':some-option:_values -s , \'some-option\' 1 2 3')
c = usage.CompleteMultiList(['1','2','3'], descr='some action',
repeat=True)
got = c._shellCode('some-option', usage._ZSH)
expected = '*:some action:_values -s , \'some action\' 1 2 3'
self.assertEqual(got, expected)
def test_usernames(self):
"""
CompleteUsernames produces zsh shell-code that completes system
usernames.
"""
c = usage.CompleteUsernames()
out = c._shellCode('some-option', usage._ZSH)
self.assertEqual(out, ':some-option:_users')
c = usage.CompleteUsernames(descr='some action', repeat=True)
out = c._shellCode('some-option', usage._ZSH)
self.assertEqual(out, '*:some action:_users')
def test_groups(self):
"""
CompleteGroups produces zsh shell-code that completes system group
names.
"""
c = usage.CompleteGroups()
out = c._shellCode('some-option', usage._ZSH)
self.assertEqual(out, ':group:_groups')
c = usage.CompleteGroups(descr='some action', repeat=True)
out = c._shellCode('some-option', usage._ZSH)
self.assertEqual(out, '*:some action:_groups')
def test_hostnames(self):
"""
CompleteHostnames produces zsh shell-code that completes hostnames.
"""
c = usage.CompleteHostnames()
out = c._shellCode('some-option', usage._ZSH)
self.assertEqual(out, ':some-option:_hosts')
c = usage.CompleteHostnames(descr='some action', repeat=True)
out = c._shellCode('some-option', usage._ZSH)
self.assertEqual(out, '*:some action:_hosts')
def test_userAtHost(self):
"""
CompleteUserAtHost produces zsh shell-code that completes hostnames or
a word of the form <username>@<hostname>.
"""
c = usage.CompleteUserAtHost()
out = c._shellCode('some-option', usage._ZSH)
self.assertTrue(out.startswith(':host | user@host:'))
c = usage.CompleteUserAtHost(descr='some action', repeat=True)
out = c._shellCode('some-option', usage._ZSH)
self.assertTrue(out.startswith('*:some action:'))
def test_netInterfaces(self):
"""
CompleteNetInterfaces produces zsh shell-code that completes system
network interface names.
"""
c = usage.CompleteNetInterfaces()
out = c._shellCode('some-option', usage._ZSH)
self.assertEqual(out, ':some-option:_net_interfaces')
c = usage.CompleteNetInterfaces(descr='some action', repeat=True)
out = c._shellCode('some-option', usage._ZSH)
self.assertEqual(out, '*:some action:_net_interfaces')
class CompleterNotImplementedTests(unittest.TestCase):
"""
Using an unknown shell constant with the various Completer() classes
should raise NotImplementedError
"""
def test_unknownShell(self):
"""
Using an unknown shellType should raise NotImplementedError
"""
classes = [usage.Completer, usage.CompleteFiles,
usage.CompleteDirs, usage.CompleteList,
usage.CompleteMultiList, usage.CompleteUsernames,
usage.CompleteGroups, usage.CompleteHostnames,
usage.CompleteUserAtHost, usage.CompleteNetInterfaces]
for cls in classes:
try:
action = cls()
except:
action = cls(None)
self.assertRaises(NotImplementedError, action._shellCode,
None, "bad_shell_type")
class FlagFunctionTests(unittest.TestCase):
"""
Tests for L{usage.flagFunction}.
"""
class SomeClass(object):
"""
Dummy class for L{usage.flagFunction} tests.
"""
def oneArg(self, a):
"""
A one argument method to be tested by L{usage.flagFunction}.
@param a: a useless argument to satisfy the function's signature.
"""
def noArg(self):
"""
A no argument method to be tested by L{usage.flagFunction}.
"""
def manyArgs(self, a, b, c):
"""
A multiple arguments method to be tested by L{usage.flagFunction}.
@param a: a useless argument to satisfy the function's signature.
@param b: a useless argument to satisfy the function's signature.
@param c: a useless argument to satisfy the function's signature.
"""
def test_hasArg(self):
"""
L{usage.flagFunction} returns C{False} if the method checked allows
exactly one argument.
"""
self.assertIs(False, usage.flagFunction(self.SomeClass().oneArg))
def test_noArg(self):
"""
L{usage.flagFunction} returns C{True} if the method checked allows
exactly no argument.
"""
self.assertIs(True, usage.flagFunction(self.SomeClass().noArg))
def test_tooManyArguments(self):
"""
L{usage.flagFunction} raises L{usage.UsageError} if the method checked
allows more than one argument.
"""
exc = self.assertRaises(
usage.UsageError, usage.flagFunction, self.SomeClass().manyArgs)
self.assertEqual("Invalid Option function for manyArgs", str(exc))
def test_tooManyArgumentsAndSpecificErrorMessage(self):
"""
L{usage.flagFunction} uses the given method name in the error message
raised when the method allows too many arguments.
"""
exc = self.assertRaises(
usage.UsageError,
usage.flagFunction, self.SomeClass().manyArgs, "flubuduf")
self.assertEqual("Invalid Option function for flubuduf", str(exc))
class OptionsInternalTests(unittest.TestCase):
"""
Tests internal behavior of C{usage.Options}.
"""
def test_optionsAliasesOrder(self):
"""
Options which are synonyms to another option are aliases towards the
longest option name.
"""
class Opts(usage.Options):
def opt_very_very_long(self):
"""
This is an option method with a very long name, that is going to
be aliased.
"""
opt_short = opt_very_very_long
opt_s = opt_very_very_long
opts = Opts()
self.assertEqual(
dict.fromkeys(
["s", "short", "very-very-long"], "very-very-long"), {
"s": opts.synonyms["s"],
"short": opts.synonyms["short"],
"very-very-long": opts.synonyms["very-very-long"],
})
|
hgl888/chromium-crosswalk
|
refs/heads/master
|
tools/findit/common/http_client.py
|
74
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class HttpClient(object):
"""Represent a http client for sending request to a http[s] server.
If cookies need to be sent, they should be in a file pointed to by
COOKIE_FILE in the environment.
"""
@staticmethod
def Get(url, params={}, timeout=120, retries=5, retry_interval=0.5,
retry_if_not=None):
"""Send a GET request to the given url with the given parameters.
Args:
url: the url to send request to.
params: parameters to send as part of the http request.
timeout: timeout for the http request, default is 120 seconds.
retries: indicate how many retries before failing, default is 5.
retry_interval: interval in second to wait before retry, default is 0.5.
retry_if_not: a http status code. If set, retry only when the failed http
status code is a different value.
Returns:
(status_code, data)
      status_code: the http status code in the response.
data: the body of the response.
"""
    raise NotImplementedError()
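
# --- Illustrative sketch only (not part of the original Chromium source). ---
# A minimal concrete client built on the Python 2 standard library (urllib,
# urllib2), showing one way the Get() contract documented above could be
# satisfied. Cookie handling is omitted and error handling is simplified;
# treat this as a sketch, not the implementation findit actually uses.
import time
import urllib
import urllib2


class UrllibHttpClient(HttpClient):

  @staticmethod
  def Get(url, params={}, timeout=120, retries=5, retry_interval=0.5,
          retry_if_not=None):
    if params:
      url = '%s?%s' % (url, urllib.urlencode(params))
    last_status, last_body = None, None
    for _ in range(retries):
      try:
        response = urllib2.urlopen(url, timeout=timeout)
        return response.getcode(), response.read()
      except urllib2.HTTPError as error:
        last_status, last_body = error.code, error.read()
        if retry_if_not is not None and error.code == retry_if_not:
          # The failed status code matches retry_if_not, so do not retry.
          return last_status, last_body
      except urllib2.URLError:
        last_status, last_body = None, None
      time.sleep(retry_interval)
    return last_status, last_body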
|
jmartinm/invenio-master
|
refs/heads/master
|
modules/docextract/lib/refextract_config.py
|
11
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""RefExtract configuration"""
from invenio.config import CFG_VERSION, CFG_ETCDIR
# pylint: disable=C0301
CFG_REFEXTRACT_VERSION_NUM = '1.5.40'
# Version number:
CFG_REFEXTRACT_VERSION = "Invenio/%s refextract/%s" \
% (CFG_VERSION, CFG_REFEXTRACT_VERSION_NUM)
# Module config directory
CFG_CONF_DIR = '%s/docextract' % CFG_ETCDIR
CFG_REFEXTRACT_KBS = {
'journals' : "%s/journal-titles.kb" % CFG_CONF_DIR,
'journals-re' : "%s/journal-titles-re.kb" % CFG_CONF_DIR,
'report-numbers' : "%s/report-numbers.kb" % CFG_CONF_DIR,
'authors' : "%s/authors.kb" % CFG_CONF_DIR,
'collaborations' : "%s/collaborations.kb" % CFG_CONF_DIR,
'books' : "%s/books.kb" % CFG_CONF_DIR,
'conferences' : "%s/conferences.kb" % CFG_CONF_DIR,
'publishers' : "%s/publishers.kb" % CFG_CONF_DIR,
'special-journals': "%s/special-journals.kb" % CFG_CONF_DIR,
}
# Prefix for temp files
CFG_REFEXTRACT_FILENAME = "refextract"
## MARC Fields and subfields used by refextract:
# Reference fields:
CFG_REFEXTRACT_FIELDS = {
'misc': 'm',
'linemarker': 'o',
'doi': 'a',
'reportnumber': 'r',
'journal': 's',
'url': 'u',
'urldesc': 'z',
'author': 'h',
'title': 't',
'isbn': 'i',
'publisher': 'p',
'year': 'y',
'collaboration': 'c',
'recid': '0',
}
CFG_REFEXTRACT_TAG_ID_REFERENCE = "999" # ref field tag
CFG_REFEXTRACT_IND1_REFERENCE = "C" # ref field ind1
CFG_REFEXTRACT_IND2_REFERENCE = "5" # ref field ind2
## refextract statistics fields:
CFG_REFEXTRACT_TAG_ID_EXTRACTION_STATS = "999C6" # ref-stats tag
CFG_REFEXTRACT_SUBFIELD_EXTRACTION_STATS = "a" # ref-stats subfield
CFG_REFEXTRACT_SUBFIELD_EXTRACTION_TIME = "t" # ref-stats time subfield
CFG_REFEXTRACT_SUBFIELD_EXTRACTION_VERSION = "v" # ref-stats version subfield
## Internal tags are used by refextract to mark-up recognised citation
## information.
CFG_REFEXTRACT_MARKER_OPENING_REPORT_NUM = r"<cds.REPORTNUMBER>"
CFG_REFEXTRACT_MARKER_OPENING_TITLE = r"<cds.JOURNAL>"
CFG_REFEXTRACT_MARKER_OPENING_TITLE_IBID = r"<cds.JOURNALibid>"
CFG_REFEXTRACT_MARKER_OPENING_SERIES = r"<cds.SER>"
CFG_REFEXTRACT_MARKER_OPENING_VOLUME = r"<cds.VOL>"
CFG_REFEXTRACT_MARKER_OPENING_YEAR = r"<cds.YR>"
CFG_REFEXTRACT_MARKER_OPENING_PAGE = r"<cds.PG>"
CFG_REFEXTRACT_MARKER_OPENING_QUOTED = r"<cds.QUOTED>"
CFG_REFEXTRACT_MARKER_OPENING_ISBN = r"<cds.ISBN>"
CFG_REFEXTRACT_MARKER_OPENING_PUBLISHER = r"<cds.PUBLISHER>"
CFG_REFEXTRACT_MARKER_OPENING_COLLABORATION = r"<cds.COLLABORATION>"
# These are the "closing tags:
CFG_REFEXTRACT_MARKER_CLOSING_REPORT_NUM = r"</cds.REPORTNUMBER>"
CFG_REFEXTRACT_MARKER_CLOSING_TITLE = r"</cds.JOURNAL>"
CFG_REFEXTRACT_MARKER_CLOSING_TITLE_IBID = r"</cds.JOURNALibid>"
CFG_REFEXTRACT_MARKER_CLOSING_SERIES = r"</cds.SER>"
CFG_REFEXTRACT_MARKER_CLOSING_VOLUME = r"</cds.VOL>"
CFG_REFEXTRACT_MARKER_CLOSING_YEAR = r"</cds.YR>"
CFG_REFEXTRACT_MARKER_CLOSING_PAGE = r"</cds.PG>"
CFG_REFEXTRACT_MARKER_CLOSING_QUOTED = r"</cds.QUOTED>"
CFG_REFEXTRACT_MARKER_CLOSING_ISBN = r"</cds.ISBN>"
CFG_REFEXTRACT_MARKER_CLOSING_PUBLISHER = r"</cds.PUBLISHER>"
CFG_REFEXTRACT_MARKER_CLOSING_COLLABORATION = r"</cds.COLLABORATION>"
## Of the form '</cds.AUTHxxxx>' only
CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_STND = r"</cds.AUTHstnd>"
CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_ETAL = r"</cds.AUTHetal>"
CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_INCL = r"</cds.AUTHincl>"
## The minimum length of a reference's misc text to be deemed insignificant
## when comparing misc text with semi-colon defined sub-references.
## Values higher than this threshold reflect meaningful misc text.
## Hence, upon finding a correct semi-colon, but having current misc text
## shorter than this value (without other meaningful reference objects:
## report numbers, titles...), no split will occur.
## (A higher value will increase splitting strictness, i.e. fewer splits.)
CGF_REFEXTRACT_SEMI_COLON_MISC_TEXT_SENSITIVITY = 60
## The length of misc text between two adjacent authors which is
## deemed as insignificant. As such, when misc text of a length less
## than this value is found, then the latter author group is dumped into misc.
## (A higher value will increase splitting strictness. i.e. Fewer splits)
CGF_REFEXTRACT_ADJACENT_AUTH_MISC_SEPARATION = 10
## Maximum number of lines for a citation before it is considered invalid
CFG_REFEXTRACT_MAX_LINES = 25
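## Illustrative example (not part of the original configuration): a recognised
## citation marked up with the internal tags defined above might look like the
## following (journal name, volume, year and page are dummy values):
## <cds.JOURNAL>J. Example Phys.</cds.JOURNAL> <cds.VOL>12</cds.VOL>
## <cds.YR>(2001)</cds.YR> <cds.PG>345</cds.PG>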
|
tszym/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/panos/panos_nat_policy.py
|
72
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_nat_policy
short_description: create a policy NAT rule
description:
- Create a policy nat rule. Keep in mind that we can either end up configuring source NAT, destination NAT, or both. Instead of splitting it
into two we will make a fair attempt to determine which one the user wants.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
rule_name:
description:
- name of the SNAT rule
required: true
from_zone:
description:
- list of source zones
required: true
to_zone:
description:
- destination zone
required: true
source:
description:
- list of source addresses
required: false
default: ["any"]
destination:
description:
- list of destination addresses
required: false
default: ["any"]
service:
description:
- service
required: false
default: "any"
snat_type:
description:
- type of source translation
required: false
default: None
snat_address:
description:
- snat translated address
required: false
default: None
snat_interface:
description:
- snat interface
required: false
default: None
snat_interface_address:
description:
- snat interface address
required: false
default: None
snat_bidirectional:
description:
- bidirectional flag
required: false
default: "false"
dnat_address:
description:
- dnat translated address
required: false
default: None
dnat_port:
description:
- dnat translated port
required: false
default: None
override:
description:
- attempt to override rule if one with the same name already exists
required: false
default: "false"
commit:
description:
- commit if changed
required: false
default: true
'''
EXAMPLES = '''
# Create a source and destination nat rule
- name: create nat SSH221 rule for 10.0.1.101
panos_nat:
ip_address: "192.168.1.1"
password: "admin"
rule_name: "Web SSH"
from_zone: ["external"]
to_zone: "external"
source: ["any"]
destination: ["10.0.0.100"]
service: "service-tcp-221"
snat_type: "dynamic-ip-and-port"
snat_interface: "ethernet1/2"
dnat_address: "10.0.1.101"
dnat_port: "22"
commit: False
'''
RETURN = '''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
try:
import pan.xapi
from pan.xapi import PanXapiError
HAS_LIB = True
except ImportError:
HAS_LIB = False
_NAT_XPATH = "/config/devices/entry[@name='localhost.localdomain']" + \
"/vsys/entry[@name='vsys1']" + \
"/rulebase/nat/rules/entry[@name='%s']"
def nat_rule_exists(xapi, rule_name):
xapi.get(_NAT_XPATH % rule_name)
e = xapi.element_root.find('.//entry')
if e is None:
return False
return True
def dnat_xml(m, dnat_address, dnat_port):
if dnat_address is None and dnat_port is None:
return None
exml = ["<destination-translation>"]
if dnat_address is not None:
exml.append("<translated-address>%s</translated-address>" %
dnat_address)
if dnat_port is not None:
exml.append("<translated-port>%s</translated-port>" %
dnat_port)
exml.append('</destination-translation>')
return ''.join(exml)
def snat_xml(m, snat_type, snat_address, snat_interface,
snat_interface_address, snat_bidirectional):
if snat_type == 'static-ip':
if snat_address is None:
m.fail_json(msg="snat_address should be speicified "
"for snat_type static-ip")
exml = ["<source-translation>", "<static-ip>"]
if snat_bidirectional:
exml.append('<bi-directional>%s</bi-directional>' % 'yes')
else:
exml.append('<bi-directional>%s</bi-directional>' % 'no')
exml.append('<translated-address>%s</translated-address>' %
snat_address)
exml.append('</static-ip>')
exml.append('</source-translation>')
elif snat_type == 'dynamic-ip-and-port':
exml = ["<source-translation>",
"<dynamic-ip-and-port>"]
if snat_interface is not None:
exml = exml + [
"<interface-address>",
"<interface>%s</interface>" % snat_interface]
if snat_interface_address is not None:
exml.append("<ip>%s</ip>" % snat_interface_address)
exml.append("</interface-address>")
elif snat_address is not None:
exml.append("<translated-address>")
for t in snat_address:
exml.append("<member>%s</member>" % t)
exml.append("</translated-address>")
else:
m.fail_json(msg="no snat_interface or snat_address "
"specified for snat_type dynamic-ip-and-port")
exml.append('</dynamic-ip-and-port>')
exml.append('</source-translation>')
else:
m.fail_json(msg="unknown snat_type %s" % snat_type)
return ''.join(exml)
def add_nat(xapi, module, rule_name, from_zone, to_zone,
source, destination, service, dnatxml=None, snatxml=None):
exml = []
if dnatxml:
exml.append(dnatxml)
if snatxml:
exml.append(snatxml)
exml.append("<to><member>%s</member></to>" % to_zone)
exml.append("<from>")
exml = exml + ["<member>%s</member>" % e for e in from_zone]
exml.append("</from>")
exml.append("<source>")
exml = exml + ["<member>%s</member>" % e for e in source]
exml.append("</source>")
exml.append("<destination>")
exml = exml + ["<member>%s</member>" % e for e in destination]
exml.append("</destination>")
exml.append("<service>%s</service>" % service)
exml.append("<nat-type>ipv4</nat-type>")
exml = ''.join(exml)
xapi.set(xpath=_NAT_XPATH % rule_name, element=exml)
return True
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
username=dict(default='admin'),
rule_name=dict(required=True),
from_zone=dict(type='list', required=True),
to_zone=dict(required=True),
source=dict(type='list', default=["any"]),
destination=dict(type='list', default=["any"]),
service=dict(default="any"),
snat_type=dict(),
snat_address=dict(),
snat_interface=dict(),
snat_interface_address=dict(),
snat_bidirectional=dict(default=False),
dnat_address=dict(),
dnat_port=dict(),
override=dict(type='bool', default=False),
commit=dict(type='bool', default=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
rule_name = module.params['rule_name']
from_zone = module.params['from_zone']
to_zone = module.params['to_zone']
source = module.params['source']
destination = module.params['destination']
service = module.params['service']
snat_type = module.params['snat_type']
snat_address = module.params['snat_address']
snat_interface = module.params['snat_interface']
snat_interface_address = module.params['snat_interface_address']
snat_bidirectional = module.params['snat_bidirectional']
dnat_address = module.params['dnat_address']
dnat_port = module.params['dnat_port']
commit = module.params['commit']
override = module.params["override"]
if not override and nat_rule_exists(xapi, rule_name):
module.exit_json(changed=False, msg="rule exists")
try:
changed = add_nat(
xapi,
module,
rule_name,
from_zone,
to_zone,
source,
destination,
service,
dnatxml=dnat_xml(module, dnat_address, dnat_port),
snatxml=snat_xml(module, snat_type, snat_address,
snat_interface, snat_interface_address,
snat_bidirectional)
)
if changed and commit:
xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
module.exit_json(changed=changed, msg="okey dokey")
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
if __name__ == '__main__':
main()
|
imuntil/Python
|
refs/heads/master
|
L/django-p/mysite/polls/admin.py
|
1
|
from django.contrib import admin
from .models import Question
# Register your models here.
admin.site.register(Question)
|
KrishanBhasin/exercism
|
refs/heads/master
|
luhn/luhn.py
|
1
|
class Luhn:
"""
A class to calculate and verify numbers against the Luhn Formula
"""
def __init__(self, num):
self.input = num
self.addEndsList = []
def LuhnDouble(self,to_double):
"""
Handles numbers that exceed 9 during the 'doubling' phase of the Luhn Formula
"""
doubled = to_double*2
if doubled > 9:
doubled -= 9
return doubled
def addends(self):
"""
iterates through the provided number, doubling every second digit, starting from the RHS
"""
for_doubling = self.input
# while for_doubling:
# self.for_doubling,testdigit = divmod(for_doubling,10)
# if testdigit%2==0:
# self.new_value = self.LuhnDouble(testdigit)
# self.addEndsList.append(new_value)
self.input = str(self.input) #convert to string so we can cycle through it digit by digit
for i in range(1,len(self.input)+1):
if i%2==0:
self.new_value = self.LuhnDouble(int(self.input[-i]))
else:
self.new_value = int(self.input[-i])
self.addEndsList.append(self.new_value)
return self.addEndsList #return a list containing the calculated values after every second digit has been 'doubled'
def checksum(self):
list_after_adding = self.addends()
return sum(list_after_adding)%10 #return only the last digit of the checksum calculation
def is_valid(self):
if self.checksum() == 0:
return True
return False
@staticmethod
def create(num_in):
        # Add a '0' to the end of the string to ensure the correct digits are doubled
        # (Luhn's formula counts the check digit as the first digit, so if this '0' is
        # not added then the wrong digits are doubled).
checkitem = Luhn.checksum(Luhn(int( str(num_in)+'0')))
#the '0' we added is expected to be incorrect - we now create checkdigit to be the number required
#to ensure the sum ends in a zero
checkdigit = 10 - checkitem
num_out = int( str(num_in)+ str(checkdigit)[-1])
return num_out
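

# Illustrative usage only (not part of the original exercism solution): a quick
# self-check of the class above. "4539 1488 0343 6467" is a number that passes
# the Luhn check, and appending a check digit to 123 yields 1230.
if __name__ == '__main__':
    assert Luhn(4539148803436467).is_valid()
    assert not Luhn(4539148803436466).is_valid()
    assert Luhn.create(123) == 1230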
|
auready/django
|
refs/heads/master
|
django/db/backends/sqlite3/operations.py
|
2
|
import datetime
import uuid
from django.conf import settings
from django.core.exceptions import FieldError
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.models import aggregates, fields
from django.utils import timezone
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.duration import duration_string
class DatabaseOperations(BaseDatabaseOperations):
def bulk_batch_size(self, fields, objs):
"""
SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
999 variables per query.
If there's only a single field to insert, the limit is 500
(SQLITE_MAX_COMPOUND_SELECT).
"""
limit = 999 if len(fields) > 1 else 500
return (limit // len(fields)) if len(fields) > 0 else len(objs)
def check_expression_support(self, expression):
bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField)
bad_aggregates = (aggregates.Sum, aggregates.Avg, aggregates.Variance, aggregates.StdDev)
if isinstance(expression, bad_aggregates):
for expr in expression.get_source_expressions():
try:
output_field = expr.output_field
if isinstance(output_field, bad_fields):
raise NotImplementedError(
'You cannot use Sum, Avg, StdDev, and Variance '
'aggregations on date/time fields in sqlite3 '
'since date/time is saved as text.'
)
except FieldError:
# Not every subexpression has an output_field which is fine
# to ignore.
pass
def date_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_date_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)
def date_interval_sql(self, timedelta):
return "'%s'" % duration_string(timedelta), []
def format_for_duration_arithmetic(self, sql):
"""Do nothing here, we will handle it in the custom function."""
return sql
def date_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def time_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_time_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def datetime_cast_date_sql(self, field_name, tzname):
return "django_datetime_cast_date(%s, %%s)" % field_name, [tzname]
def datetime_cast_time_sql(self, field_name, tzname):
return "django_datetime_cast_time(%s, %%s)" % field_name, [tzname]
def datetime_extract_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_extract_sql.
return "django_datetime_extract('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_trunc_sql.
return "django_datetime_trunc('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def time_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_time_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_time_extract('%s', %s)" % (lookup_type.lower(), field_name)
def pk_default_value(self):
return "NULL"
def _quote_params_for_last_executed_query(self, params):
"""
Only for last_executed_query! Don't use this to execute SQL queries!
"""
# This function is limited both by SQLITE_LIMIT_VARIABLE_NUMBER (the
# number of parameters, default = 999) and SQLITE_MAX_COLUMN (the
# number of return values, default = 2000). Since Python's sqlite3
# module doesn't expose the get_limit() C API, assume the default
# limits are in effect and split the work in batches if needed.
BATCH_SIZE = 999
if len(params) > BATCH_SIZE:
results = ()
for index in range(0, len(params), BATCH_SIZE):
chunk = params[index:index + BATCH_SIZE]
results += self._quote_params_for_last_executed_query(chunk)
return results
sql = 'SELECT ' + ', '.join(['QUOTE(?)'] * len(params))
# Bypass Django's wrappers and use the underlying sqlite3 connection
# to avoid logging this query - it would trigger infinite recursion.
cursor = self.connection.connection.cursor()
# Native sqlite3 cursors cannot be used as context managers.
try:
return cursor.execute(sql, params).fetchone()
finally:
cursor.close()
def last_executed_query(self, cursor, sql, params):
# Python substitutes parameters in Modules/_sqlite/cursor.c with:
# pysqlite_statement_bind_parameters(self->statement, parameters, allow_8bit_chars);
# Unfortunately there is no way to reach self->statement from Python,
# so we quote and substitute parameters manually.
if params:
if isinstance(params, (list, tuple)):
params = self._quote_params_for_last_executed_query(params)
else:
keys = params.keys()
values = tuple(params.values())
values = self._quote_params_for_last_executed_query(values)
params = dict(zip(keys, values))
return sql % params
# For consistency with SQLiteCursorWrapper.execute(), just return sql
# when there are no parameters. See #13648 and #17158.
else:
return sql
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# NB: The generated SQL below is specific to SQLite
# Note: The DELETE FROM... SQL generated below works for SQLite databases
# because constraints don't exist
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Note: No requirement for reset of auto-incremented indices (cf. other
# sql_flush() implementations). Just return SQL at this point
return sql
def adapt_datetimefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")
return str(value)
def adapt_timefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
raise ValueError("SQLite backend does not support timezone-aware times.")
return str(value)
def get_db_converters(self, expression):
converters = super().get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type == 'DateTimeField':
converters.append(self.convert_datetimefield_value)
elif internal_type == 'DateField':
converters.append(self.convert_datefield_value)
elif internal_type == 'TimeField':
converters.append(self.convert_timefield_value)
elif internal_type == 'DecimalField':
converters.append(self.convert_decimalfield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
elif internal_type in ('NullBooleanField', 'BooleanField'):
converters.append(self.convert_booleanfield_value)
return converters
def convert_datetimefield_value(self, value, expression, connection, context):
if value is not None:
if not isinstance(value, datetime.datetime):
value = parse_datetime(value)
if settings.USE_TZ and not timezone.is_aware(value):
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_datefield_value(self, value, expression, connection, context):
if value is not None:
if not isinstance(value, datetime.date):
value = parse_date(value)
return value
def convert_timefield_value(self, value, expression, connection, context):
if value is not None:
if not isinstance(value, datetime.time):
value = parse_time(value)
return value
def convert_decimalfield_value(self, value, expression, connection, context):
if value is not None:
value = expression.output_field.format_number(value)
value = backend_utils.typecast_decimal(value)
return value
def convert_uuidfield_value(self, value, expression, connection, context):
if value is not None:
value = uuid.UUID(value)
return value
def convert_booleanfield_value(self, value, expression, connection, context):
return bool(value) if value in (1, 0) else value
def bulk_insert_sql(self, fields, placeholder_rows):
return " UNION ALL ".join(
"SELECT %s" % ", ".join(row)
for row in placeholder_rows
)
def combine_expression(self, connector, sub_expressions):
# SQLite doesn't have a power function, so we fake it with a
# user-defined function django_power that's registered in connect().
if connector == '^':
return 'django_power(%s)' % ','.join(sub_expressions)
return super().combine_expression(connector, sub_expressions)
def combine_duration_expression(self, connector, sub_expressions):
if connector not in ['+', '-']:
raise utils.DatabaseError('Invalid connector for timedelta: %s.' % connector)
fn_params = ["'%s'" % connector] + sub_expressions
if len(fn_params) > 3:
raise ValueError('Too many params for timedelta operations.')
return "django_format_dtdelta(%s)" % ', '.join(fn_params)
def integer_field_range(self, internal_type):
# SQLite doesn't enforce any integer constraints
return (None, None)
def subtract_temporals(self, internal_type, lhs, rhs):
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
if internal_type == 'TimeField':
return "django_time_diff(%s, %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
return "django_timestamp_diff(%s, %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
|
demonchild2112/travis-test
|
refs/heads/master
|
grr/server/grr_response_server/flows/__init__.py
|
240
|
#!/usr/bin/env python
|
drxaero/calibre
|
refs/heads/master
|
src/calibre/ebooks/lrf/pylrs/elements.py
|
24
|
""" elements.py -- replacements and helpers for ElementTree """
class ElementWriter(object):
def __init__(self, e, header=False, sourceEncoding="ascii",
spaceBeforeClose=True, outputEncodingName="UTF-16"):
self.header = header
self.e = e
self.sourceEncoding=sourceEncoding
self.spaceBeforeClose = spaceBeforeClose
self.outputEncodingName = outputEncodingName
def _encodeCdata(self, rawText):
if type(rawText) is str:
rawText = rawText.decode(self.sourceEncoding)
text = rawText.replace("&", "&")
text = text.replace("<", "<")
text = text.replace(">", ">")
return text
def _writeAttribute(self, f, name, value):
f.write(u' %s="' % unicode(name))
if not isinstance(value, basestring):
value = unicode(value)
value = self._encodeCdata(value)
value = value.replace('"', '"')
f.write(value)
f.write(u'"')
def _writeText(self, f, rawText):
text = self._encodeCdata(rawText)
f.write(text)
def _write(self, f, e):
f.write(u'<' + unicode(e.tag))
attributes = e.items()
attributes.sort()
for name, value in attributes:
self._writeAttribute(f, name, value)
if e.text is not None or len(e) > 0:
f.write(u'>')
if e.text:
self._writeText(f, e.text)
for e2 in e:
self._write(f, e2)
f.write(u'</%s>' % e.tag)
else:
if self.spaceBeforeClose:
f.write(' ')
f.write(u'/>')
if e.tail is not None:
self._writeText(f, e.tail)
def toString(self):
class x:
pass
buffer = []
x.write = buffer.append
self.write(x)
return u''.join(buffer)
def write(self, f):
if self.header:
f.write(u'<?xml version="1.0" encoding="%s"?>\n' % self.outputEncodingName)
self._write(f, self.e)
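

# Illustrative usage only (not part of the original calibre source); assumes a
# Python 2 environment, since the class above relies on unicode/basestring.
if __name__ == '__main__':
    from xml.etree.ElementTree import Element, SubElement

    root = Element('Book', {'id': '1'})
    SubElement(root, 'Title').text = 'Ampersands & angle brackets < >'
    # toString() collects the escaped markup into a single unicode string.
    print(ElementWriter(root, header=True).toString())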
|
Fierydemise/ShadowCraft-Engine
|
refs/heads/legion
|
shadowcraft/objects/artifact_data.py
|
1
|
traits = {
('rogue', 'assassination'): (
'kingsbane',
'assassins_blades',
'toxic_blades',
'poison_knives',
'urge_to_kill',
'balanced_blades',
'surge_of_toxins',
'shadow_walker',
'master_assassin',
'shadow_swiftness',
'serrated_edge',
'bag_of_tricks',
'master_alchemist',
'gushing_wounds',
'fade_into_shadows',
'from_the_shadows',
'blood_of_the_assassinated',
'slayers_precision',
'silence_of_the_uncrowned',
'strangler',
'dense_concoction',
'sinister_circulation',
'concordance_of_the_legionfall',
),
('rogue', 'outlaw'): (
'curse_of_the_dreadblades',
'cursed_edges',
'fates_thirst',
'blade_dancer',
'fatebringer',
'gunslinger',
'hidden_blade',
'fortune_strikes',
'ghostly_shell',
'deception',
'black_powder',
'greed',
'blurred_time',
'fortunes_boon',
'fortunes_strike',
'blademaster',
'blunderbuss',
'cursed_steel',
'bravado_of_the_uncrowned',
'sabermetrics',
'dreadblades_vigor',
'loaded_dice',
'concordance_of_the_legionfall',
),
('rogue', 'subtlety'): (
'goremaws_bite',
'shadow_fangs',
'gutripper',
'fortunes_bite',
'catlike_reflexes',
'embrace_of_darkness',
'ghost_armor',
'precision_strike',
'energetic_stabbing',
'flickering_shadows',
'second_shuriken',
'demons_kiss',
'finality',
'the_quiet_knife',
'akarris_soul',
'soul_shadows',
'shadow_nova',
'legionblade',
'shadows_of_the_uncrowned',
'weak_point',
'shadows_whisper',
'feeding_frenzy',
'concordance_of_the_legionfall',
),
('all','netherlight'): (
'chaotic_darkness',
'dark_sorrows',
'infusion_of_light',
'light_speed',
'lights_embrace',
'master_of_shadows',
'murderous_intent',
'refractive_shell',
'secure_in_the_light',
'shadowbind',
'shocklight',
'torment_the_weak',
)
}
#Single Rank Traits for each spec
#Used for binary trait ranking
single_rank = {
('rogue', 'assassination'): (
'kingsbane',
'assassins_blades',
'urge_to_kill',
'surge_of_toxins',
'shadow_swiftness',
'bag_of_tricks',
'from_the_shadows',
'blood_of_the_assassinated',
'slayers_precision',
'silence_of_the_uncrowned',
'dense_concoction',
'sinister_circulation',
),
('rogue', 'outlaw'): (
'curse_of_the_dreadblades',
'cursed_edges',
'hidden_blade',
'deception',
'greed',
'blurred_time',
'blademaster',
'blunderbuss',
'cursed_steel',
'bravado_of_the_uncrowned',
'dreadblades_vigor',
'loaded_dice',
),
('rogue', 'subtlety'): (
'goremaws_bite',
'shadow_fangs',
'embrace_of_darkness',
'flickering_shadows',
'second_shuriken',
'finality',
'akarris_soul',
'shadow_nova',
'legionblade',
'shadows_of_the_uncrowned',
'shadows_whisper',
'feeding_frenzy',
),
}
|
melund/python-prompt-toolkit
|
refs/heads/master
|
examples/get-password.py
|
3
|
#!/usr/bin/env python
from __future__ import unicode_literals
from prompt_toolkit import prompt
if __name__ == '__main__':
password = prompt('Password: ', is_password=True)
print('You said: %s' % password)
|
chrismeyersfsu/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/digital_ocean/digital_ocean_sshkey.py
|
50
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: digital_ocean_sshkey
short_description: Create/delete an SSH key in DigitalOcean
description:
- Create/delete an SSH key.
version_added: "1.6"
author: "Michael Gregson (@mgregson)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
client_id:
description:
- DigitalOcean manager id.
api_key:
description:
- DigitalOcean api key.
id:
description:
- Numeric, the SSH key id you want to operate on.
name:
description:
- String, this is the name of an SSH key to create or destroy.
ssh_pub_key:
description:
- The public SSH key you want to add to your account.
notes:
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
- Version 1 of DigitalOcean API is used.
requirements:
- "python >= 2.6"
- dopy
'''
EXAMPLES = '''
# Ensure a SSH key is present
# If a key matches this name, will return the ssh key id and changed = False
# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = True
- digital_ocean_sshkey:
state: present
name: my_ssh_key
ssh_pub_key: 'ssh-rsa AAAA...'
client_id: XXX
api_key: XXX
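
# Ensure a SSH key is absent (illustrative addition, not part of the original
# module documentation); removes the key created above if it exists
- digital_ocean_sshkey:
    state: absent
    name: my_ssh_key
    client_id: XXX
    api_key: XXX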
'''
import os
import traceback
try:
from dopy.manager import DoError, DoManager
HAS_DOPY = True
except ImportError:
HAS_DOPY = False
from ansible.module_utils.basic import AnsibleModule
class JsonfyMixIn(object):
def to_json(self):
return self.__dict__
class SSH(JsonfyMixIn):
manager = None
def __init__(self, ssh_key_json):
self.__dict__.update(ssh_key_json)
update_attr = __init__
def destroy(self):
self.manager.destroy_ssh_key(self.id)
return True
@classmethod
def setup(cls, client_id, api_key):
cls.manager = DoManager(client_id, api_key)
@classmethod
def find(cls, name):
if not name:
return False
keys = cls.list_all()
for key in keys:
if key.name == name:
return key
return False
@classmethod
def list_all(cls):
json = cls.manager.all_ssh_keys()
return map(cls, json)
@classmethod
def add(cls, name, key_pub):
json = cls.manager.new_ssh_key(name, key_pub)
return cls(json)
def core(module):
def getkeyordie(k):
v = module.params[k]
if v is None:
module.fail_json(msg='Unable to load %s' % k)
return v
try:
# params['client_id'] will be None even if client_id is not passed in
client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
api_key = module.params['api_key'] or os.environ['DO_API_KEY']
except KeyError as e:
module.fail_json(msg='Unable to load %s' % e.message)
state = module.params['state']
SSH.setup(client_id, api_key)
name = getkeyordie('name')
    if state == 'present':
key = SSH.find(name)
if key:
module.exit_json(changed=False, ssh_key=key.to_json())
key = SSH.add(name, getkeyordie('ssh_pub_key'))
module.exit_json(changed=True, ssh_key=key.to_json())
    elif state == 'absent':
key = SSH.find(name)
if not key:
module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name)
key.destroy()
module.exit_json(changed=True)
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(choices=['present', 'absent'], default='present'),
client_id = dict(aliases=['CLIENT_ID'], no_log=True),
api_key = dict(aliases=['API_KEY'], no_log=True),
name = dict(type='str'),
id = dict(aliases=['droplet_id'], type='int'),
ssh_pub_key = dict(type='str'),
),
required_one_of = (
['id', 'name'],
),
)
if not HAS_DOPY:
module.fail_json(msg='dopy required for this module')
try:
core(module)
except (DoError, Exception) as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
qwertyjune/BethSaidaBible
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/contrib/gis/admin/options.py
|
66
|
from django.contrib.admin import ModelAdmin
from django.contrib.gis.admin.widgets import OpenLayersWidget
from django.contrib.gis.gdal import OGRGeomType
from django.contrib.gis.db import models
class GeoModelAdmin(ModelAdmin):
"""
The administration options class for Geographic models. Map settings
may be overloaded from their defaults to create custom maps.
"""
# The default map settings that may be overloaded -- still subject
# to API changes.
default_lon = 0
default_lat = 0
default_zoom = 4
display_wkt = False
display_srid = False
extra_js = []
num_zoom = 18
max_zoom = False
min_zoom = False
units = False
max_resolution = False
max_extent = False
modifiable = True
mouse_position = True
scale_text = True
layerswitcher = True
scrollable = True
map_width = 600
map_height = 400
map_srid = 4326
map_template = 'gis/admin/openlayers.html'
openlayers_url = 'http://openlayers.org/api/2.13/OpenLayers.js'
point_zoom = num_zoom - 6
wms_url = 'http://vmap0.tiles.osgeo.org/wms/vmap0'
wms_layer = 'basic'
wms_name = 'OpenLayers WMS'
wms_options = {'format': 'image/jpeg'}
debug = False
widget = OpenLayersWidget
@property
def media(self):
"Injects OpenLayers JavaScript into the admin."
media = super(GeoModelAdmin, self).media
media.add_js([self.openlayers_url])
media.add_js(self.extra_js)
return media
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Overloaded from ModelAdmin so that an OpenLayersWidget is used
for viewing/editing 2D GeometryFields (OpenLayers 2 does not support
3D editing).
"""
if isinstance(db_field, models.GeometryField) and db_field.dim < 3:
kwargs.pop('request', None)
# Setting the widget with the newly defined widget.
kwargs['widget'] = self.get_map_widget(db_field)
return db_field.formfield(**kwargs)
else:
return super(GeoModelAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def get_map_widget(self, db_field):
"""
Returns a subclass of the OpenLayersWidget (or whatever was specified
in the `widget` attribute) using the settings from the attributes set
in this class.
"""
is_collection = db_field.geom_type in ('MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION')
if is_collection:
if db_field.geom_type == 'GEOMETRYCOLLECTION':
collection_type = 'Any'
else:
collection_type = OGRGeomType(db_field.geom_type.replace('MULTI', ''))
else:
collection_type = 'None'
class OLMap(self.widget):
template = self.map_template
geom_type = db_field.geom_type
wms_options = ''
if self.wms_options:
wms_options = ["%s: '%s'" % pair for pair in self.wms_options.items()]
wms_options = ', %s' % ', '.join(wms_options)
params = {'default_lon': self.default_lon,
'default_lat': self.default_lat,
'default_zoom': self.default_zoom,
'display_wkt': self.debug or self.display_wkt,
'geom_type': OGRGeomType(db_field.geom_type),
'field_name': db_field.name,
'is_collection': is_collection,
'scrollable': self.scrollable,
'layerswitcher': self.layerswitcher,
'collection_type': collection_type,
'is_generic': db_field.geom_type == 'GEOMETRY',
'is_linestring': db_field.geom_type in ('LINESTRING', 'MULTILINESTRING'),
'is_polygon': db_field.geom_type in ('POLYGON', 'MULTIPOLYGON'),
'is_point': db_field.geom_type in ('POINT', 'MULTIPOINT'),
'num_zoom': self.num_zoom,
'max_zoom': self.max_zoom,
'min_zoom': self.min_zoom,
'units': self.units, # likely should get from object
'max_resolution': self.max_resolution,
'max_extent': self.max_extent,
'modifiable': self.modifiable,
'mouse_position': self.mouse_position,
'scale_text': self.scale_text,
'map_width': self.map_width,
'map_height': self.map_height,
'point_zoom': self.point_zoom,
'srid': self.map_srid,
'display_srid': self.display_srid,
'wms_url': self.wms_url,
'wms_layer': self.wms_layer,
'wms_name': self.wms_name,
'wms_options': wms_options,
'debug': self.debug,
}
return OLMap
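
# Illustrative sketch (not part of the original Django source): a hypothetical
# admin subclass overriding a few of the map defaults documented above. The
# model name and attribute values are made up for the example.
#
#   class CityAdmin(GeoModelAdmin):
#       default_zoom = 6
#       map_width = 800
#       map_height = 500
#       display_wkt = True
#
#   admin.site.register(City, CityAdmin)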
from django.contrib.gis import gdal
if gdal.HAS_GDAL:
# Use the official spherical mercator projection SRID on versions
# of GDAL that support it; otherwise, fallback to 900913.
if gdal.GDAL_VERSION >= (1, 7):
spherical_mercator_srid = 3857
else:
spherical_mercator_srid = 900913
class OSMGeoAdmin(GeoModelAdmin):
map_template = 'gis/admin/osm.html'
num_zoom = 20
map_srid = spherical_mercator_srid
max_extent = '-20037508,-20037508,20037508,20037508'
max_resolution = '156543.0339'
point_zoom = num_zoom - 6
units = 'm'
|
kaday/cylc
|
refs/heads/master
|
lib/cylc/task_pool.py
|
1
|
#!/usr/bin/env python
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2015 NIWA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Manage the task pool of a suite.
All new task proxies (including spawned ones) are added first to the runahead
pool, which does not participate in dependency matching and is not visible in
the GUI. Tasks are then released to the task pool if not beyond the current
runahead limit.
check_auto_shutdown() and remove_spent_tasks() have to consider tasks in the
runahead pool too.
TODO - spawn-on-submit means only one waiting instance of each task exists
in the pool, so if a new stop cycle is set we just need to check waiting pool
tasks against the new stop cycle.
restart: runahead tasks are all in the 'waiting' state and will be reloaded
as such, on restart, into the runahead pool.
"""
from logging import ERROR, DEBUG, INFO, WARNING
import os
from Pyro.errors import NamingError
import shlex
import sys
from tempfile import NamedTemporaryFile
from time import time
import traceback
from cylc.batch_sys_manager import BATCH_SYS_MANAGER
from cylc.broker import broker
from cylc.cfgspec.globalcfg import GLOBAL_CFG
from cylc.config import SuiteConfig
from cylc.cycling.loader import (
get_interval, get_interval_cls, ISO8601_CYCLING_TYPE)
from cylc.CylcError import SchedulerError, TaskNotFoundError
import cylc.flags
from cylc.get_task_proxy import get_task_proxy
from cylc.mp_pool import SuiteProcPool, SuiteProcContext
from cylc.network.ext_trigger import ExtTriggerServer
from cylc.network.suite_broadcast import BroadcastServer
from cylc.owner import is_remote_user
from cylc.suite_host import is_remote_host
from cylc.task_proxy import TaskProxy
from cylc.task_state import task_state
class TaskPool(object):
"""Task pool of a suite."""
JOBS_KILL = "jobs-kill"
JOBS_POLL = "jobs-poll"
JOBS_SUBMIT = "jobs-submit"
def __init__(self, suite, pri_dao, pub_dao, stop_point, pyro, log,
run_mode):
self.suite_name = suite
self.pyro = pyro
self.run_mode = run_mode
self.log = log
self.stop_point = stop_point
self.reconfiguring = False
self.pri_dao = pri_dao
self.pub_dao = pub_dao
config = SuiteConfig.get_inst()
self.custom_runahead_limit = config.get_custom_runahead_limit()
self.max_future_offset = None
self._prev_runahead_base_point = None
self.max_num_active_cycle_points = (
config.get_max_num_active_cycle_points())
self._prev_runahead_base_point = None
self._prev_runahead_sequence_points = None
self.reload_warned = False
self.pool = {}
self.runahead_pool = {}
self.myq = {}
self.queues = {}
self.assign_queues()
self.pool_list = []
self.rhpool_list = []
self.pool_changed = []
self.rhpool_changed = []
self.is_held = False
self.hold_point = None
self.held_future_tasks = []
self.broker = broker()
self.orphans = []
self.task_name_list = config.get_task_name_list()
def assign_queues(self):
"""self.myq[taskname] = qfoo"""
config = SuiteConfig.get_inst()
qconfig = config.cfg['scheduling']['queues']
self.myq = {}
for queue in qconfig:
for taskname in qconfig[queue]['members']:
self.myq[taskname] = queue
def add_to_runahead_pool(self, itask):
"""Add a new task to the runahead pool if possible.
Tasks whose recurrences allow them to spawn beyond the suite
stop point are added to the pool in the held state, ready to be
released if the suite stop point is changed.
"""
# do not add if a task with the same ID already exists
# e.g. an inserted task caught up with an existing one
if self.id_exists(itask.identity):
self.log.warning(
itask.identity +
' cannot be added to pool: task ID already exists')
del itask
return False
# do not add if an inserted task is beyond its own stop point
# (note this is not the same as recurrence bounds)
if itask.stop_point and itask.point > itask.stop_point:
self.log.info(
itask.identity + ' not adding to pool: beyond task stop cycle')
del itask
return False
# add in held state if beyond the suite stop point
if self.stop_point and itask.point > self.stop_point:
itask.log(
INFO,
"holding (beyond suite stop point) " + str(self.stop_point))
itask.reset_state_held()
# add in held state if beyond the suite hold point
elif self.hold_point and itask.point > self.hold_point:
itask.log(
INFO,
"holding (beyond suite hold point) " + str(self.hold_point))
itask.reset_state_held()
# add in held state if a future trigger goes beyond the suite stop
# point (note this only applies to tasks below the suite stop point
# themselves)
elif self.task_has_future_trigger_overrun(itask):
itask.log(INFO, "holding (future trigger beyond stop point)")
self.held_future_tasks.append(itask.identity)
itask.reset_state_held()
elif self.is_held and itask.state.is_currently("waiting"):
# Hold newly-spawned tasks in a held suite (e.g. due to manual
# triggering of a held task).
itask.reset_state_held()
# add to the runahead pool
self.runahead_pool.setdefault(itask.point, {})
self.runahead_pool[itask.point][itask.identity] = itask
self.rhpool_changed = True
return True
def release_runahead_tasks(self):
"""Release tasks from the runahead pool to the main pool."""
if not self.runahead_pool:
return
# Any finished tasks can be released immediately (this can happen at
# restart when all tasks are initially loaded into the runahead pool).
for itask_id_maps in self.runahead_pool.values():
for itask in itask_id_maps.values():
if itask.state.is_currently('failed', 'succeeded', 'expired'):
self.release_runahead_task(itask)
self.rhpool_changed = True
limit = self.max_num_active_cycle_points
points = []
for point, itasks in sorted(
self.get_tasks_by_point(incl_runahead=True).items()):
has_unfinished_itasks = False
for itask in itasks:
if not itask.state.is_currently(
'failed', 'succeeded', 'expired'):
has_unfinished_itasks = True
break
if not points and not has_unfinished_itasks:
# We need to begin with an unfinished cycle point.
continue
points.append(point)
if not points:
return
# Get the earliest point with unfinished tasks.
runahead_base_point = min(points)
# Get all cycling points possible after the runahead base point.
if (self._prev_runahead_base_point is not None and
runahead_base_point == self._prev_runahead_base_point):
# Cache for speed.
sequence_points = self._prev_runahead_sequence_points
else:
sequence_points = []
config = SuiteConfig.get_inst()
for sequence in config.sequences:
point = runahead_base_point
for _ in range(limit):
point = sequence.get_next_point(point)
if point is None:
break
sequence_points.append(point)
sequence_points = set(sequence_points)
self._prev_runahead_sequence_points = sequence_points
self._prev_runahead_base_point = runahead_base_point
points = set(points).union(sequence_points)
if self.custom_runahead_limit is None:
# Calculate which tasks to release based on a maximum number of
# active cycle points (active meaning non-finished tasks).
latest_allowed_point = sorted(points)[:limit][-1]
if self.max_future_offset is not None:
# For the first N points, release their future trigger tasks.
latest_allowed_point += self.max_future_offset
else:
# Calculate which tasks to release based on a maximum duration
# measured from the oldest non-finished task.
latest_allowed_point = (
runahead_base_point + self.custom_runahead_limit)
if (self._prev_runahead_base_point is None or
self._prev_runahead_base_point != runahead_base_point):
if self.custom_runahead_limit < self.max_future_offset:
self.log.warning(
('custom runahead limit of %s is less than ' +
'future triggering offset %s: suite may stall.') % (
self.custom_runahead_limit,
self.max_future_offset
)
)
self._prev_runahead_base_point = runahead_base_point
for point, itask_id_map in self.runahead_pool.items():
if point <= latest_allowed_point:
for itask in itask_id_map.values():
self.release_runahead_task(itask)
def release_runahead_task(self, itask):
"""Release itask to the appropriate queue in the active pool."""
queue = self.myq[itask.tdef.name]
if queue not in self.queues:
self.queues[queue] = {}
self.queues[queue][itask.identity] = itask
self.pool.setdefault(itask.point, {})
self.pool[itask.point][itask.identity] = itask
self.pool_changed = True
cylc.flags.pflag = True
itask.log(DEBUG, "released to the task pool")
del self.runahead_pool[itask.point][itask.identity]
if not self.runahead_pool[itask.point]:
del self.runahead_pool[itask.point]
self.rhpool_changed = True
try:
self.pyro.connect(itask.message_queue, itask.identity)
except Exception, exc:
if cylc.flags.debug:
raise
print >> sys.stderr, exc
self.log.warning(
'%s cannot be added (use --debug and see stderr)' %
itask.identity)
return False
if itask.tdef.max_future_prereq_offset is not None:
self.set_max_future_offset()
def remove(self, itask, reason=None):
"""Remove a task proxy from the pool."""
try:
del self.runahead_pool[itask.point][itask.identity]
except KeyError:
pass
else:
if not self.runahead_pool[itask.point]:
del self.runahead_pool[itask.point]
self.rhpool_changed = True
return
try:
self.pyro.disconnect(itask.message_queue)
except NamingError, exc:
print >> sys.stderr, exc
self.log.critical(
itask.identity + ' cannot be removed (task not found)')
return
except Exception, exc:
print >> sys.stderr, exc
self.log.critical(
itask.identity + ' cannot be removed (unknown error)')
return
# remove from queue
if itask.tdef.name in self.myq: # A reload can remove a task
del self.queues[self.myq[itask.tdef.name]][itask.identity]
del self.pool[itask.point][itask.identity]
if not self.pool[itask.point]:
del self.pool[itask.point]
self.pool_changed = True
msg = "task proxy removed"
if reason:
msg += " (" + reason + ")"
itask.log(DEBUG, msg)
if itask.tdef.max_future_prereq_offset is not None:
self.set_max_future_offset()
del itask
def update_pool_list(self):
"""Regenerate the task list if the pool has changed."""
if self.pool_changed:
self.pool_changed = False
self.pool_list = []
for queue in self.queues:
for itask in self.queues[queue].values():
self.pool_list.append(itask)
def update_rhpool_list(self):
"""Regenerate the runahead task list if the runhead pool has
changed."""
if self.rhpool_changed:
self.rhpool_changed = False
self.rhpool_list = []
for itask_id_maps in self.runahead_pool.values():
self.rhpool_list.extend(itask_id_maps.values())
def get_all_tasks(self):
"""Return a list of all task proxies."""
self.update_pool_list()
self.update_rhpool_list()
return self.rhpool_list + self.pool_list
def get_tasks(self):
"""Return a list of task proxies in the main task pool."""
self.update_pool_list()
return self.pool_list
def get_rh_tasks(self):
"""Return a list of task proxies in the runahead pool."""
self.update_rhpool_list()
return self.rhpool_list
def get_tasks_by_point(self, incl_runahead):
"""Return a map of task proxies by cycle point."""
point_itasks = {}
for point, itask_id_map in self.pool.items():
point_itasks[point] = itask_id_map.values()
if not incl_runahead:
return point_itasks
for point, itask_id_map in self.runahead_pool.items():
point_itasks.setdefault(point, [])
point_itasks[point].extend(itask_id_map.values())
return point_itasks
def id_exists(self, id_):
"""Check if task id is in the runahead_pool or pool"""
for itask_ids in self.runahead_pool.values():
if id_ in itask_ids:
return True
for queue in self.queues:
if id_ in self.queues[queue]:
return True
return False
def submit_tasks(self):
"""
1) queue tasks that are ready to run (prerequisites satisfied,
clock-trigger time up) or if their manual trigger flag is set.
2) then submit queued tasks if their queue limit has not been
reached or their manual trigger flag is set.
The "queued" task state says the task will submit as soon as its
internal queue allows (or immediately if manually triggered first).
Use of "cylc trigger" sets a task's manual trigger flag. Then,
below, an unqueued task will be queued whether or not it is
ready to run; and a queued task will be submitted whether or not
its queue limit has been reached. The flag is immediately unset
after use so that two manual trigger ops are required to submit
an initially unqueued task that is queue-limited.
"""
# 1) queue unqueued tasks that are ready to run or manually forced
for itask in self.get_tasks():
if not itask.state.is_currently('queued'):
# only need to check that unqueued tasks are ready
if itask.manual_trigger or itask.ready_to_run():
# queue the task
itask.set_status('queued')
itask.reset_manual_trigger()
# 2) submit queued tasks if manually forced or not queue-limited
ready_tasks = []
config = SuiteConfig.get_inst()
qconfig = config.cfg['scheduling']['queues']
for queue in self.queues:
# 2.1) count active tasks and compare to queue limit
n_active = 0
n_release = 0
n_limit = qconfig[queue]['limit']
tasks = self.queues[queue].values()
if n_limit:
for itask in tasks:
if itask.state.is_currently(
'ready', 'submitted', 'running'):
n_active += 1
n_release = n_limit - n_active
# 2.2) release queued tasks if not limited or if manually forced
for itask in tasks:
if not itask.state.is_currently('queued'):
# (Note this excludes tasks remaining 'ready' because job
# submission has been stopped by use of 'cylc shutdown').
continue
if itask.manual_trigger or not n_limit or n_release > 0:
# manual release, or no limit, or not currently limited
n_release -= 1
ready_tasks.append(itask)
itask.reset_manual_trigger()
# else leave it queued
self.log.debug('%d task(s) de-queued' % len(ready_tasks))
self.submit_task_jobs(ready_tasks)
def submit_task_jobs(self, ready_tasks):
"""Prepare and submit task jobs."""
if not ready_tasks:
return
# Prepare tasks for job submission
config = SuiteConfig.get_inst()
bcast = BroadcastServer.get_inst()
prepared_tasks = []
for itask in ready_tasks:
if (config.cfg['cylc']['log resolved dependencies'] and
not itask.job_file_written):
itask.log(
INFO,
'triggered off %s' % itask.get_resolved_dependencies())
overrides = bcast.get(itask.identity)
if self.run_mode == 'simulation':
itask.job_submission_succeeded()
elif itask.prep_submit(overrides=overrides) is not None:
prepared_tasks.append(itask)
if not prepared_tasks:
return
# Submit task jobs
auth_itasks = {}
for itask in prepared_tasks:
# The job file is now (about to be) used: reset the file write flag
# so that subsequent manual retrigger will generate a new job file.
itask.job_file_written = False
itask.set_status('ready')
if (itask.task_host, itask.task_owner) not in auth_itasks:
auth_itasks[(itask.task_host, itask.task_owner)] = []
auth_itasks[(itask.task_host, itask.task_owner)].append(itask)
for auth, itasks in sorted(auth_itasks.items()):
cmd = ["cylc", self.JOBS_SUBMIT]
if cylc.flags.debug:
cmd.append("--debug")
host, owner = auth
remote_mode = False
for key, value, test_func in [
('host', host, is_remote_host),
('user', owner, is_remote_user)]:
if test_func(value):
cmd.append('--%s=%s' % (key, value))
remote_mode = True
if remote_mode:
cmd.append('--remote-mode')
cmd.append("--")
cmd.append(GLOBAL_CFG.get_derived_host_item(
self.suite_name, 'suite job log directory', host, owner))
stdin_file_paths = []
job_log_dirs = []
for itask in sorted(itasks, key=lambda itask: itask.identity):
if remote_mode:
stdin_file_paths.append(
itask.job_conf['local job file path'])
job_log_dirs.append(itask.get_job_log_dir(
itask.tdef.name, itask.point, itask.submit_num))
cmd += job_log_dirs
SuiteProcPool.get_inst().put_command(
SuiteProcContext(
self.JOBS_SUBMIT,
cmd,
stdin_file_paths=stdin_file_paths,
job_log_dirs=job_log_dirs,
),
self.submit_task_jobs_callback)
def submit_task_jobs_callback(self, ctx):
"""Callback when submit task jobs command exits."""
self._manip_task_jobs_callback(
ctx,
lambda itask, line: itask.job_submit_callback(line),
{
BATCH_SYS_MANAGER.OUT_PREFIX_COMMAND:
lambda itask, line: itask.job_cmd_out_callback(line),
},
)
def task_has_future_trigger_overrun(self, itask):
"""Check for future triggers extending beyond the final cycle."""
if not self.stop_point:
return False
for pct in set(itask.prerequisites_get_target_points()):
if pct > self.stop_point:
return True
return False
def set_runahead(self, interval=None):
"""Set the runahead."""
if isinstance(interval, int) or isinstance(interval, basestring):
# The unit is assumed to be hours (backwards compatibility).
interval = str(interval)
interval_cls = get_interval_cls()
if interval_cls.TYPE == ISO8601_CYCLING_TYPE:
interval = get_interval("PT%sH" % interval)
else:
interval = get_interval(interval)
if interval is None:
# No limit
self.log.warning("setting NO custom runahead limit")
self.custom_runahead_limit = None
else:
self.log.info("setting custom runahead limit to %s" % interval)
self.custom_runahead_limit = interval
self.release_runahead_tasks()
def get_min_point(self):
"""Return the minimum cycle point currently in the pool."""
cycles = self.pool.keys()
minc = None
if cycles:
minc = min(cycles)
return minc
def get_max_point(self):
"""Return the maximum cycle point currently in the pool."""
cycles = self.pool.keys()
maxc = None
if cycles:
maxc = max(cycles)
return maxc
def get_max_point_runahead(self):
"""Return the maximum cycle point currently in the runahead pool."""
cycles = self.runahead_pool.keys()
maxc = None
if cycles:
maxc = max(cycles)
return maxc
def set_max_future_offset(self):
"""Calculate the latest required future trigger offset."""
max_offset = None
for itask in self.get_tasks():
if (itask.tdef.max_future_prereq_offset is not None and
(max_offset is None or
itask.tdef.max_future_prereq_offset > max_offset)):
max_offset = itask.tdef.max_future_prereq_offset
self.max_future_offset = max_offset
def reconfigure(self, stop_point):
"""Set the task pool to reload mode."""
self.reconfiguring = True
config = SuiteConfig.get_inst()
self.custom_runahead_limit = config.get_custom_runahead_limit()
self.max_num_active_cycle_points = (
config.get_max_num_active_cycle_points())
self.stop_point = stop_point
# reassign live tasks from the old queues to the new.
# self.queues[queue][id_] = task
self.assign_queues()
new_queues = {}
for queue in self.queues:
for id_, itask in self.queues[queue].items():
if itask.tdef.name not in self.myq:
continue
key = self.myq[itask.tdef.name]
if key not in new_queues:
new_queues[key] = {}
new_queues[key][id_] = itask
self.queues = new_queues
for itask in self.get_all_tasks():
itask.reconfigure_me = True
# find any old tasks that have been removed from the suite
old_task_name_list = self.task_name_list
self.task_name_list = config.get_task_name_list()
for name in old_task_name_list:
if name not in self.task_name_list:
self.orphans.append(name)
# adjust the new suite config to handle the orphans
config.adopt_orphans(self.orphans)
def reload_taskdefs(self):
"""Reload task definitions."""
found = False
config = SuiteConfig.get_inst()
for itask in self.get_all_tasks():
if itask.state.is_currently('ready', 'submitted', 'running'):
# do not reload active tasks as it would be possible to
# get a task proxy incompatible with the running task
if itask.reconfigure_me:
found = True
continue
if itask.reconfigure_me:
itask.reconfigure_me = False
if itask.tdef.name in self.orphans:
# orphaned task
if itask.state.is_currently(
'waiting', 'queued', 'submit-retrying',
'retrying'):
# if not started running yet, remove it.
self.remove(itask, '(task orphaned by suite reload)')
else:
# set spawned already so it won't carry on into the
# future
itask.state.set_spawned()
self.log.warning(
'orphaned task will not continue: ' +
itask.identity)
else:
self.log.info(
'RELOADING TASK DEFINITION FOR ' + itask.identity)
new_task = get_task_proxy(
itask.tdef.name,
itask.point,
itask.state.get_status(),
stop_point=itask.stop_point,
submit_num=itask.submit_num,
is_reload=True
)
# set reloaded task's spawn status
if itask.state.has_spawned():
new_task.state.set_spawned()
else:
new_task.state.set_unspawned()
# succeeded tasks need their outputs set completed:
if itask.state.is_currently('succeeded'):
new_task.reset_state_succeeded()
# carry some task proxy state over to the new instance
new_task.summary = itask.summary
new_task.started_time = itask.started_time
new_task.submitted_time = itask.submitted_time
new_task.finished_time = itask.finished_time
# if currently retrying, retain the old retry delay
# list, to avoid extra retries (the next instance
# of the task will still be as newly configured)
new_task.run_try_state = itask.run_try_state
new_task.sub_try_state = itask.sub_try_state
new_task.submit_num = itask.submit_num
new_task.db_inserts_map = itask.db_inserts_map
new_task.db_updates_map = itask.db_updates_map
self.remove(itask, '(suite definition reload)')
self.add_to_runahead_pool(new_task)
if found:
if not self.reload_warned:
self.log.warning(
"Reload will complete once active tasks have finished.")
self.reload_warned = True
else:
self.log.info("Reload completed.")
self.reload_warned = False
self.reconfiguring = found
def set_stop_point(self, stop_point):
"""Set the global suite stop point."""
self.stop_point = stop_point
for itask in self.get_tasks():
# check cycle stop or hold conditions
if (self.stop_point and itask.point > self.stop_point and
itask.state.is_currently('waiting', 'queued')):
itask.log(WARNING,
"not running (beyond suite stop cycle) " +
str(self.stop_point))
itask.reset_state_held()
def no_active_tasks(self):
"""Return True if no more active tasks."""
for itask in self.get_tasks():
if itask.is_active() or itask.event_handler_try_states:
return False
return True
def has_unkillable_tasks_only(self):
"""Used to identify if a task pool contains unkillable tasks.
Return True if all running and submitted tasks in the pool have had
kill operations fail, False otherwise.
"""
for itask in self.get_tasks():
if itask.state.is_currently('running', 'submitted'):
if not itask.kill_failed:
return False
return True
def poll_task_jobs(self, ids=None):
"""Poll jobs of active tasks.
If ids is specified, poll active tasks matching given IDs.
"""
if self.run_mode == 'simulation':
return
itasks = []
for itask in self.get_all_tasks():
if ids and itask.identity not in ids:
continue
if itask.is_active():
if itask.job_conf is None:
try:
itask.prep_manip()
except Exception as exc:
# Note: Exception is most likely some kind of IOError
# or OSError. Need to catch Exception here because it
# can also be an Exception raised by
# cylc.suite_host.is_remote_host
itask.command_log(SuiteProcContext(
itask.JOB_POLL, '(prepare job poll)', err=exc,
ret_code=1))
continue
itasks.append(itask)
elif ids and itask.identity in ids: # and not is_active
self.log.warning(
'%s: skip poll, state not ["submitted", "running"]' % (
itask.identity))
if not itasks:
return
self._run_job_cmd(self.JOBS_POLL, itasks, self.poll_task_jobs_callback)
def poll_task_jobs_callback(self, ctx):
"""Callback when poll tasks command exits."""
self._manip_task_jobs_callback(
ctx,
lambda itask, line: itask.job_poll_callback(line),
{
BATCH_SYS_MANAGER.OUT_PREFIX_MESSAGE:
lambda itask, line: itask.job_poll_message_callback(line),
},
)
def kill_task_jobs(self, ids=None):
"""Kill jobs of active tasks.
If ids is specified, kill active tasks matching given IDs.
"""
itasks = []
for itask in self.get_all_tasks():
if ids and itask.identity not in ids:
continue
is_active = itask.is_active()
if is_active and self.run_mode == 'simulation':
itask.reset_state_failed()
elif is_active and itask.tdef.rtconfig['manual completion']:
self.log(
WARNING,
"%s: skip kill, detaching task (job ID unknown)" % (
itask.identity))
elif is_active:
if itask.job_conf is None:
try:
itask.prep_manip()
except Exception as exc:
# Note: Exception is most likely some kind of IOError
# or OSError. Need to catch Exception here because it
# can also be an Exception raised by
# cylc.suite_host.is_remote_host
itask.command_log(SuiteProcContext(
itask.JOB_KILL, '(prepare job kill)', err=exc,
ret_code=1))
continue
itask.reset_state_held()
itasks.append(itask)
elif ids and itask.identity in ids: # and not is_active
self.log.warning(
'%s: skip kill, state not ["submitted", "running"]' % (
itask.identity))
if not itasks:
return
self._run_job_cmd(self.JOBS_KILL, itasks, self.kill_task_jobs_callback)
def kill_task_jobs_callback(self, ctx):
"""Callback when kill tasks command exits."""
self._manip_task_jobs_callback(
ctx,
lambda itask, line: itask.job_kill_callback(line),
{
BATCH_SYS_MANAGER.OUT_PREFIX_COMMAND:
lambda itask, line: itask.job_cmd_out_callback(line),
},
)
def _manip_task_jobs_callback(
self, ctx, summary_callback, more_callbacks=None):
"""Callback when poll/kill tasks command exits."""
if ctx.ret_code:
self.log.error(ctx)
else:
self.log.debug(ctx)
tasks = {}
# Note for "kill": It is possible for a job to trigger its trap and
# report back to the suite before this logic is called. If so, the task
# will no longer be in the "submitted" or "running" state, and its
# output line will be ignored here.
for itask in self.get_tasks():
if itask.point is not None and itask.submit_num:
submit_num = "%02d" % (itask.submit_num)
tasks[(str(itask.point), itask.tdef.name, submit_num)] = itask
handlers = [(BATCH_SYS_MANAGER.OUT_PREFIX_SUMMARY, summary_callback)]
if more_callbacks:
for prefix, callback in more_callbacks.items():
handlers.append((prefix, callback))
if not ctx.out:
# Something is very wrong here
# Fallback to use "job_log_dirs" list to report the problem
job_log_dirs = ctx.cmd_kwargs.get("job_log_dirs", [])
for job_log_dir in job_log_dirs:
point, name, submit_num = job_log_dir.split(os.sep, 2)
itask = tasks[(point, name, submit_num)]
callback(itask, "|".join([ctx.timestamp, job_log_dir, "1"]))
return
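# Descriptive note (inferred from the parsing below, not original text): each
# relevant output line is expected to look like
#   <PREFIX><timestamp>|<point>/<name>/<submit_num>[/...]|<status>
# which is why handlers split first on "|" and then on os.sep.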
for line in ctx.out.splitlines(True):
for prefix, callback in handlers:
if line.startswith(prefix):
line = line[len(prefix):].strip()
try:
path = line.split("|", 2)[1] # timestamp, path, status
point, name, submit_num = path.split(os.sep, 2)
itask = tasks[(point, name, submit_num)]
callback(itask, line)
except (KeyError, ValueError) as exc:
if cylc.flags.debug:
self.log.warning(
'Unhandled %s output: %s' % (
ctx.cmd_key, line))
traceback.print_exc()
def get_hold_point(self):
"""Return the point after which tasks must be held."""
return self.hold_point
def set_hold_point(self, point):
"""Set the point after which tasks must be held."""
self.hold_point = point
if point is not None:
for itask in self.get_all_tasks():
if itask.point > point:
itask.reset_state_held()
def hold_tasks(self, ids):
"""Hold tasks with IDs matching any item in "ids"."""
for itask in self.get_all_tasks():
if itask.identity in ids:
itask.reset_state_held()
def release_tasks(self, ids):
"""Release held tasks with IDs matching any item in "ids"."""
for itask in self.get_all_tasks():
if itask.identity in ids:
itask.reset_state_unheld()
def hold_all_tasks(self):
"""Hold all tasks."""
self.log.info("Holding all waiting or queued tasks now")
self.is_held = True
for itask in self.get_all_tasks():
itask.reset_state_held()
def release_all_tasks(self):
"""Release all held tasks."""
self.is_held = False
for itask in self.get_all_tasks():
itask.reset_state_unheld()
def get_failed_tasks(self):
failed = []
for itask in self.get_tasks():
if itask.state.is_currently('failed', 'submit-failed'):
failed.append(itask)
return failed
def any_task_failed(self):
for itask in self.get_tasks():
if itask.state.is_currently('failed', 'submit-failed'):
return True
return False
def match_dependencies(self):
"""Run time dependency negotiation.
Tasks attempt to get their prerequisites satisfied by other tasks'
outputs. BROKERED NEGOTIATION is O(n) in number of tasks.
"""
self.broker.reset()
self.broker.register(self.get_tasks())
for itask in self.get_tasks():
# try to satisfy itask if not already satisfied.
if itask.not_fully_satisfied():
self.broker.negotiate(itask)
def process_queued_task_messages(self):
"""Handle incoming task messages for each task proxy."""
for itask in self.get_tasks():
itask.process_incoming_messages()
def process_queued_task_event_handlers(self):
"""Process task event handlers."""
ctx_groups = {}
env = None
for itask in self.get_tasks():
for key, try_state in itask.event_handler_try_states.items():
# This should not happen, ignore for now.
if try_state.ctx is None:
del itask.event_handler_try_states[key]
continue
if try_state.is_waiting:
continue
# Set timer if timeout is None.
if not try_state.is_timeout_set():
if try_state.next() is None:
itask.log(ERROR, "%s failed" % str(key))
del itask.event_handler_try_states[key]
continue
# Report 1st and retries
if try_state.num == 1:
level = INFO
tmpl = "%s will run after %s (after %s)"
else:
level = WARNING
tmpl = "%s failed, retrying in %s (after %s)"
itask.log(level, tmpl % (
str(key),
try_state.delay_as_seconds(),
try_state.timeout_as_str()))
# Ready to run?
if not try_state.is_delay_done():
continue
try_state.set_waiting()
if try_state.ctx.ctx_type == TaskProxy.CUSTOM_EVENT_HANDLER:
# Run custom event handlers on their own
if env is None:
env = dict(os.environ)
if TaskProxy.event_handler_env:
env.update(TaskProxy.event_handler_env)
SuiteProcPool.get_inst().put_command(
SuiteProcContext(
key, try_state.ctx.cmd, env=env, shell=True,
),
itask.custom_event_handler_callback)
else:
# Group together built-in event handlers, where possible
if try_state.ctx not in ctx_groups:
ctx_groups[try_state.ctx] = []
# "itask.submit_num" may have moved on at this point
key1, submit_num = key
ctx_groups[try_state.ctx].append(
(key1, str(itask.point), itask.tdef.name, submit_num))
for ctx, id_keys in ctx_groups.items():
if ctx.ctx_type == TaskProxy.EVENT_MAIL:
self._process_task_event_email(ctx, id_keys)
elif ctx.ctx_type == TaskProxy.JOB_LOGS_REGISTER:
self._process_task_job_logs_register(ctx, id_keys)
elif ctx.ctx_type == TaskProxy.JOB_LOGS_RETRIEVE:
self._process_task_job_logs_retrieval(ctx, id_keys)
def _process_task_event_email(self, ctx, id_keys):
"""Process event notification, by email."""
subject = "[%(n_tasks)d task(s) %(event)s] %(suite_name)s" % {
"suite_name": self.suite_name,
"n_tasks": len(id_keys),
"event": ctx.event}
cmd = ["mail", "-s", subject]
# From: and To:
cmd.append("-r")
cmd.append(ctx.mail_from)
cmd.append(ctx.mail_to)
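# At this point the assembled command has the shape (descriptive comment
# only): mail -s "[<n> task(s) <event>] <suite>" -r <mail_from> <mail_to>,
# with one "<point>/<name>/<submit_num>: <event>" line per task on stdin.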
# Tasks
stdin_str = ""
for _, point, name, submit_num in id_keys:
stdin_str += "%s/%s/%02d: %s\n" % (
point, name, submit_num, ctx.event)
# SMTP server
env = dict(os.environ)
mail_smtp = ctx.mail_smtp
if mail_smtp:
env["smtp"] = mail_smtp
SuiteProcPool.get_inst().put_command(
SuiteProcContext(
ctx, cmd, env=env, stdin_str=stdin_str, id_keys=id_keys,
),
self._task_event_email_callback)
def _task_event_email_callback(self, ctx):
"""Call back when email notification command exits."""
tasks = {}
for itask in self.get_tasks():
if itask.point is not None and itask.submit_num:
tasks[(str(itask.point), itask.tdef.name)] = itask
for id_key in ctx.cmd_kwargs["id_keys"]:
key1, point, name, submit_num = id_key
try:
itask = tasks[(point, name)]
try_states = itask.event_handler_try_states
if ctx.ret_code == 0:
del try_states[(key1, submit_num)]
log_ctx = SuiteProcContext((key1, submit_num), None)
log_ctx.ret_code = 0
itask.command_log(log_ctx)
else:
try_states[(key1, submit_num)].unset_waiting()
except KeyError:
if cylc.flags.debug:
traceback.print_exc()
def _process_task_job_logs_register(self, ctx, id_keys):
"""Register task job logs."""
tasks = {}
for itask in self.get_tasks():
if itask.point is not None and itask.submit_num:
tasks[(str(itask.point), itask.tdef.name)] = itask
for id_key in id_keys:
key1, point, name, submit_num = id_key
try:
itask = tasks[(point, name)]
try_states = itask.event_handler_try_states
filenames = itask.register_job_logs(submit_num)
if "job.out" in filenames and "job.err" in filenames:
log_ctx = SuiteProcContext((key1, submit_num), None)
log_ctx.ret_code = 0
itask.command_log(log_ctx)
del try_states[(key1, submit_num)]
else:
try_states[(key1, submit_num)].unset_waiting()
except KeyError:
if cylc.flags.debug:
traceback.print_exc()
def _process_task_job_logs_retrieval(self, ctx, id_keys):
"""Process retrieval of task job logs from remote user@host."""
if ctx.user_at_host and "@" in ctx.user_at_host:
s_user, s_host = ctx.user_at_host.split("@", 1)
else:
s_user, s_host = (None, ctx.user_at_host)
ssh_tmpl = str(GLOBAL_CFG.get_host_item(
"remote shell template", s_host, s_user)).replace(" %s", "")
rsync_str = str(GLOBAL_CFG.get_host_item(
"retrieve job logs command", s_host, s_user))
cmd = shlex.split(rsync_str) + ["--rsh=" + ssh_tmpl]
if cylc.flags.debug:
cmd.append("-v")
if ctx.max_size:
cmd.append("--max-size=%s" % (ctx.max_size,))
# Includes and excludes
includes = set()
for _, point, name, submit_num in id_keys:
# Include relevant directories, all levels needed
includes.add("/%s" % (point))
includes.add("/%s/%s" % (point, name))
includes.add("/%s/%s/%02d" % (point, name, submit_num))
includes.add("/%s/%s/%02d/**" % (point, name, submit_num))
cmd += ["--include=%s" % (include) for include in sorted(includes)]
cmd.append("--exclude=/**") # exclude everything else
# Remote source
cmd.append(ctx.user_at_host + ":" + GLOBAL_CFG.get_derived_host_item(
self.suite_name, "suite job log directory", s_host, s_user) + "/")
# Local target
cmd.append(GLOBAL_CFG.get_derived_host_item(
self.suite_name, "suite job log directory") + "/")
SuiteProcPool.get_inst().put_command(
SuiteProcContext(ctx, cmd, env=dict(os.environ), id_keys=id_keys),
self._task_job_logs_retrieval_callback)
def _task_job_logs_retrieval_callback(self, ctx):
"""Call back when log job retrieval completes."""
tasks = {}
for itask in self.get_tasks():
if itask.point is not None and itask.submit_num:
tasks[(str(itask.point), itask.tdef.name)] = itask
for id_key in ctx.cmd_kwargs["id_keys"]:
key1, point, name, submit_num = id_key
try:
itask = tasks[(point, name)]
try_states = itask.event_handler_try_states
filenames = []
if ctx.ret_code == 0:
filenames = itask.register_job_logs(submit_num)
if "job.out" in filenames and "job.err" in filenames:
log_ctx = SuiteProcContext((key1, submit_num), None)
log_ctx.ret_code = 0
itask.command_log(log_ctx)
del try_states[(key1, submit_num)]
else:
try_states[(key1, submit_num)].unset_waiting()
except KeyError:
if cylc.flags.debug:
traceback.print_exc()
def process_queued_db_ops(self):
"""Handle queued db operations for each task proxy."""
for itask in self.get_all_tasks():
# (runahead pool tasks too, to get new state recorders).
for table_name, db_inserts in sorted(itask.db_inserts_map.items()):
while db_inserts:
db_insert = db_inserts.pop(0)
db_insert.update({
"name": itask.tdef.name,
"cycle": str(itask.point),
})
if "submit_num" not in db_insert:
db_insert["submit_num"] = itask.submit_num
self.pri_dao.add_insert_item(table_name, db_insert)
self.pub_dao.add_insert_item(table_name, db_insert)
for table_name, db_updates in sorted(itask.db_updates_map.items()):
while db_updates:
set_args = db_updates.pop(0)
where_args = {
"cycle": str(itask.point), "name": itask.tdef.name}
if "submit_num" not in set_args:
where_args["submit_num"] = itask.submit_num
self.pri_dao.add_update_item(
table_name, set_args, where_args)
self.pub_dao.add_update_item(
table_name, set_args, where_args)
# record any broadcast settings to be dumped out
bcast = BroadcastServer.get_inst()
for table_name, db_inserts in sorted(bcast.db_inserts_map.items()):
while db_inserts:
db_insert = db_inserts.pop(0)
self.pri_dao.add_insert_item(table_name, db_insert)
self.pub_dao.add_insert_item(table_name, db_insert)
for table_name, db_deletes in sorted(bcast.db_deletes_map.items()):
while db_deletes:
where_args = db_deletes.pop(0)
self.pri_dao.add_delete_item(table_name, where_args)
self.pub_dao.add_delete_item(table_name, where_args)
# Previously, we used a separate thread for database writes. This has
# now been removed. For the private database, there is no real
# advantage in using a separate thread, because we want it to be like
# the state dump - always in sync with what is current. For the public
# database, which does not need to be fully in sync, there is some
# advantage of using a separate thread/process, if writing to it
# becomes a bottleneck. At the moment, there is no evidence that this
# is a bottleneck, so it is better to keep the logic simple.
self.pri_dao.execute_queued_items()
self.pub_dao.execute_queued_items()
def force_spawn(self, itask):
"""Spawn successor of itask."""
if itask.state.has_spawned():
return None
itask.state.set_spawned()
itask.log(DEBUG, 'forced spawning')
new_task = itask.spawn('waiting')
if new_task and self.add_to_runahead_pool(new_task):
return new_task
else:
return None
def spawn_tasks(self):
"""Spawn successors of tasks in pool."""
for itask in self.get_tasks():
if itask.ready_to_spawn():
self.force_spawn(itask)
def remove_suiciding_tasks(self):
"""Remove any tasks that have suicide-triggered."""
for itask in self.get_tasks():
if itask.suicide_prerequisites:
if itask.suicide_prerequisites_are_all_satisfied():
if itask.state.is_currently(
'ready', 'submitted', 'running'):
itask.log(WARNING, 'suiciding while active')
else:
itask.log(INFO, 'suiciding')
self.force_spawn(itask)
self.remove(itask, 'suicide')
def _get_earliest_unsatisfied_point(self):
"""Get earliest unsatisfied cycle point."""
cutoff = None
for itask in self.get_all_tasks():
# this has to consider tasks in the runahead pool too, e.g.
# ones that have just spawned and not been released yet.
if itask.state.is_currently('waiting', 'held'):
if cutoff is None or itask.point < cutoff:
cutoff = itask.point
elif not itask.state.has_spawned():
# (e.g. 'ready')
nxt = itask.next_point()
if nxt is not None and (cutoff is None or nxt < cutoff):
cutoff = nxt
return cutoff
def remove_spent_tasks(self):
"""Remove cycling tasks that are no longer needed.
Remove cycling tasks that are no longer needed to satisfy others'
prerequisites. Each task proxy knows its "cleanup cutoff" from the
graph. For example:
graph = 'foo[T-6]=>bar \n foo[T-12]=>baz'
implies foo's cutoff is T+12: if foo has succeeded (or expired) and
spawned, it can be removed if no unsatisfied task proxy exists with
T<=T+12. Note this only uses information about the cycle point of
downstream dependents (if we used specific IDs instead, spent
tasks could be identified and removed even earlier).
"""
# first find the cycle point of the earliest unsatisfied task
cutoff = self._get_earliest_unsatisfied_point()
if not cutoff:
return
# now check each succeeded task against the cutoff
spent = []
for itask in self.get_tasks():
if (itask.state.is_currently('succeeded', 'expired') and
itask.state.has_spawned() and
not itask.event_handler_try_states and
itask.cleanup_cutoff is not None and
cutoff > itask.cleanup_cutoff):
spent.append(itask)
for itask in spent:
self.remove(itask)
def reset_task_states(self, ids, state):
"""Reset task states.
We only allow resetting to a subset of available task states
"""
if state not in task_state.legal_for_reset:
raise SchedulerError('Illegal reset state: ' + state)
tasks = []
for itask in self.get_tasks():
if itask.identity in ids:
tasks.append(itask)
for itask in tasks:
if itask.state.is_currently('ready'):
# Currently can't reset a 'ready' task in the job submission
# thread!
self.log.warning(
"A 'ready' task cannot be reset: " + itask.identity)
itask.log(INFO, "resetting to " + state + " state")
if state == 'ready':
itask.reset_state_ready()
elif state == 'waiting':
itask.reset_state_waiting()
elif state == 'succeeded':
itask.reset_state_succeeded()
elif state == 'failed':
itask.reset_state_failed()
elif state == 'held':
itask.reset_state_held()
elif state == 'spawn':
self.force_spawn(itask)
def remove_entire_cycle(self, point, spawn):
for itask in self.get_tasks():
if itask.point == point:
if spawn:
self.force_spawn(itask)
self.remove(itask, 'by request')
def remove_tasks(self, ids, spawn):
for itask in self.get_tasks():
if itask.identity in ids:
if spawn:
self.force_spawn(itask)
self.remove(itask, 'by request')
def trigger_tasks(self, ids):
for itask in self.get_tasks():
if itask.identity in ids:
itask.manual_trigger = True
if not itask.state.is_currently('queued'):
itask.reset_state_ready()
def dry_run_task(self, id_):
"""Create job file for "cylc trigger --edit"."""
bcast = BroadcastServer.get_inst()
for itask in self.get_tasks():
if itask.identity == id_:
itask.prep_submit(
overrides=bcast.get(itask.identity), dry_run=True)
def check_task_timers(self):
"""Check submission and execution timeout timers for current tasks.
Not called in simulation mode.
"""
now = time()
poll_task_ids = set()
for itask in self.get_tasks():
if itask.state.is_currently('submitted'):
if (itask.submission_timer_timeout is not None and
now > itask.submission_timer_timeout):
itask.handle_submission_timeout()
itask.submission_timer_timeout = None
poll_task_ids.add(itask.identity)
if (itask.submission_poll_timer and
itask.submission_poll_timer.get()):
itask.submission_poll_timer.set_timer()
poll_task_ids.add(itask.identity)
elif itask.state.is_currently('running'):
if (itask.execution_timer_timeout is not None and
now > itask.execution_timer_timeout):
itask.handle_execution_timeout()
itask.execution_timer_timeout = None
poll_task_ids.add(itask.identity)
if (itask.execution_poll_timer and
itask.execution_poll_timer.get()):
itask.execution_poll_timer.set_timer()
poll_task_ids.add(itask.identity)
if poll_task_ids:
self.poll_task_jobs(poll_task_ids)
def check_auto_shutdown(self):
"""Check if we should do a normal automatic shutdown."""
shutdown = True
for itask in self.get_all_tasks():
if self.stop_point is None:
# Don't if any unsucceeded task exists.
if (not itask.state.is_currently('succeeded', 'expired') or
itask.event_handler_try_states):
shutdown = False
break
elif (itask.point <= self.stop_point and
not itask.state.is_currently('succeeded', 'expired')):
# Don't if any unsucceeded task exists < stop point...
if itask.identity not in self.held_future_tasks:
# ...unless it has a future trigger extending > stop point.
shutdown = False
break
return shutdown
def sim_time_check(self):
sim_task_succeeded = False
for itask in self.get_tasks():
if itask.state.is_currently('running'):
# set sim-mode tasks to "succeeded" after their alotted run
# time
if itask.sim_time_check():
sim_task_succeeded = True
return sim_task_succeeded
def shutdown(self):
if not self.no_active_tasks():
self.log.warning("some active tasks will be orphaned")
for itask in self.get_tasks():
try:
self.pyro.disconnect(itask.message_queue)
except KeyError:
# Wasn't connected yet.
pass
def waiting_tasks_ready(self):
"""Waiting tasks can become ready for internal reasons.
Namely clock-triggers or retry-delay timers
"""
result = False
for itask in self.get_tasks():
if itask.ready_to_run():
result = True
break
return result
def task_succeeded(self, id_):
res = False
for itask in self.get_tasks():
if itask.identity == id_ and itask.state.is_currently('succeeded'):
res = True
break
return res
def ping_task(self, id_, exists_only=False):
found = False
running = False
for itask in self.get_tasks():
if itask.identity == id_:
found = True
if itask.state.is_currently('running'):
running = True
break
if not found:
return False, "task not found"
else:
if exists_only:
return True, "task found"
else:
if running:
return True, " running"
else:
return False, "task not running"
def get_task_jobfile_path(self, id_):
"""Return a task job log dir, sans submit number.
TODO - this method name (and same in scheduler.py) should be changed.
"""
found = False
for itask in self.get_tasks():
if itask.identity == id_:
found = True
job_parent_dir = os.path.dirname(itask.get_job_log_dir(
itask.tdef.name, itask.point, suite=self.suite_name))
break
if not found:
return False, "task not found"
else:
return True, job_parent_dir
def get_task_requisites(self, taskid):
info = {}
found = False
for itask in self.get_tasks():
id_ = itask.identity
if id_ == taskid:
found = True
extra_info = {}
if itask.tdef.clocktrigger_offset is not None:
extra_info['Clock trigger time reached'] = (
itask.start_time_reached())
extra_info['Triggers at'] = itask.delayed_start_str
for trig, satisfied in itask.external_triggers.items():
if satisfied:
state = 'satisfied'
else:
state = 'NOT satisfied'
extra_info['External trigger "%s"' % trig] = state
info[id_] = [
itask.prerequisites_dump(),
itask.outputs.dump(),
extra_info,
]
if not found:
self.log.warning('task state info request: task(s) not found')
return info
def match_ext_triggers(self):
"""See if any queued external event messages can trigger tasks."""
ets = ExtTriggerServer.get_inst()
for itask in self.get_tasks():
if itask.external_triggers:
ets.retrieve(itask)
def _run_job_cmd(self, cmd_key, itasks, callback, **kwargs):
"""Run job commands, e.g. poll, kill, etc.
Group itasks with their user@host.
Put a job command for each user@host to the multiprocess pool.
"""
if not itasks:
return
auth_itasks = {}
for itask in itasks:
if (itask.task_host, itask.task_owner) not in auth_itasks:
auth_itasks[(itask.task_host, itask.task_owner)] = []
auth_itasks[(itask.task_host, itask.task_owner)].append(itask)
for auth, itasks in sorted(auth_itasks.items()):
cmd = ["cylc", cmd_key]
if cylc.flags.debug:
cmd.append("--debug")
host, owner = auth
for key, value, test_func in [
('host', host, is_remote_host),
('user', owner, is_remote_user)]:
if test_func(value):
cmd.append('--%s=%s' % (key, value))
cmd.append("--")
cmd.append(GLOBAL_CFG.get_derived_host_item(
self.suite_name, 'suite job log directory', host, owner))
job_log_dirs = []
for itask in sorted(itasks, key=lambda itask: itask.identity):
job_log_dirs.append(itask.get_job_log_dir(
itask.tdef.name, itask.point, itask.submit_num))
cmd += job_log_dirs
kwargs["job_log_dirs"] = job_log_dirs
SuiteProcPool.get_inst().put_command(
SuiteProcContext(cmd_key, cmd, **kwargs), callback)
|
morenopc/edx-platform
|
refs/heads/ok-merge-from-master
|
lms/djangoapps/instructor_task/migrations/0002_add_subtask_field.py
|
60
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'InstructorTask.subtasks'
db.add_column('instructor_task_instructortask', 'subtasks',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'InstructorTask.subtasks'
db.delete_column('instructor_task_instructortask', 'subtasks')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'instructor_task.instructortask': {
'Meta': {'object_name': 'InstructorTask'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'subtasks': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'task_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'task_input': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'task_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'task_output': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'task_state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'}),
'task_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['instructor_task']
|
Statoil/libres
|
refs/heads/master
|
test-data/local/snake_oil_structure/snake_oil/jobs/snake_oil_npv.py
|
9
|
#!/usr/bin/env python
from ecl.summary import EclSum
OIL_PRICES = {"2010-01-01": 78.33,
"2010-02-01": 76.39,
"2010-03-01": 81.20,
"2010-04-01": 84.29,
"2010-05-01": 73.74,
"2010-06-01": 75.34,
"2010-07-01": 76.32,
"2010-08-01": 76.60,
"2010-09-01": 75.24,
"2010-10-01": 81.89,
"2010-11-01": 84.25,
"2010-12-01": 89.15,
"2011-01-01": 89.17,
"2011-02-01": 88.58,
"2011-03-01": 102.86,
"2011-04-01": 109.53,
"2011-05-01": 100.90,
"2011-06-01": 96.26,
"2011-07-01": 97.30,
"2011-08-01": 86.33,
"2011-09-01": 85.52,
"2011-10-01": 86.32,
"2011-11-01": 97.16,
"2011-12-01": 98.56,
"2012-01-01": 100.27,
"2012-02-01": 102.20,
"2012-03-01": 106.16,
"2012-04-01": 103.32,
"2012-05-01": 94.65,
"2012-06-01": 82.30,
"2012-07-01": 87.90,
"2012-08-01": 94.13,
"2012-09-01": 94.51,
"2012-10-01": 89.49,
"2012-11-01": 86.53,
"2012-12-01": 87.86,
"2013-01-01": 94.76,
"2013-02-01": 95.31,
"2013-03-01": 92.94,
"2013-04-01": 92.02,
"2013-05-01": 94.51,
"2013-06-01": 95.77,
"2013-07-01": 104.67,
"2013-08-01": 106.57,
"2013-09-01": 106.29,
"2013-10-01": 100.54,
"2013-11-01": 93.86,
"2013-12-01": 97.63,
"2014-01-01": 94.62,
"2014-02-01": 100.82,
"2014-03-01": 100.80,
"2014-04-01": 102.07,
"2014-05-01": 102.18,
"2014-06-01": 105.79,
"2014-07-01": 103.59,
"2014-08-01": 96.54,
"2014-09-01": 93.21,
"2014-10-01": 84.40,
"2014-11-01": 75.79,
"2014-12-01": 59.29,
"2015-01-01": 47.22,
"2015-02-01": 50.58,
"2015-03-01": 47.82,
"2015-04-01": 54.45,
"2015-05-01": 59.27,
"2015-06-01": 59.82,
"2015-07-01": 50.90,
"2015-08-01": 42.87,
"2015-09-01": 45.48}
if __name__ == '__main__':
ecl_sum = EclSum("SNAKE_OIL_FIELD")
start_time = ecl_sum.getStartTime()
date_ranges = ecl_sum.timeRange(start_time, interval="1M")
production_sums = ecl_sum.blockedProduction("FOPT", date_ranges)
npv = 0.0
for index in range(0, len(date_ranges) - 1):
date = date_ranges[index + 1] # end of period
production_sum = production_sums[index]
oil_price = OIL_PRICES[date.date().strftime("%Y-%m-%d")]
production_value = oil_price * production_sum
npv += production_value
with open("snake_oil_npv.txt", "w") as output_file:
output_file.write("NPV %s\n" % npv)
if npv < 80000:
rating = "POOR"
elif 80000 <= npv < 100000:
rating = "AVERAGE"
elif 100000 <= npv < 120000:
rating = "GOOD"
else:
rating = "EXCELLENT"
output_file.write("RATING %s\n" % rating)
|
cnoviello/micropython
|
refs/heads/master
|
tests/bytecode/mp-tests/import4.py
|
22
|
import a as y
import a.b as y
import a.b.c as y
|
mvesper/invenio
|
refs/heads/master
|
modules/miscutil/lib/upgrades/invenio_2015_03_03_tag_value.py
|
3
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Modifies column `tag.value`."""
from invenio.dbquery import run_sql
depends_on = ['invenio_2012_11_15_hstRECORD_marcxml_longblob']
def info():
"""Return upgrade recipe information."""
return "Modifies column tag.value"
def do_upgrade():
"""Carry out the upgrade."""
create_statement = run_sql('SHOW CREATE TABLE tag')[0][1]
if 'affected_fields' not in create_statement:
run_sql("ALTER TABLE tag MODIFY COLUMN value VARCHAR(6) default ''")
def estimate():
"""Estimate running time of upgrade in seconds (optional)."""
return 1
def pre_upgrade():
"""Pre-upgrade checks."""
pass
def post_upgrade():
"""Post-upgrade checks."""
pass
|
w1ll1am23/home-assistant
|
refs/heads/dev
|
homeassistant/util/network.py
|
5
|
"""Network utilities."""
from __future__ import annotations
from ipaddress import IPv4Address, IPv6Address, ip_address, ip_network
import yarl
# RFC6890 - IP addresses of loopback interfaces
LOOPBACK_NETWORKS = (
ip_network("127.0.0.0/8"),
ip_network("::1/128"),
ip_network("::ffff:127.0.0.0/104"),
)
# RFC6890 - Address allocation for Private Internets
PRIVATE_NETWORKS = (
ip_network("fd00::/8"),
ip_network("10.0.0.0/8"),
ip_network("172.16.0.0/12"),
ip_network("192.168.0.0/16"),
)
# RFC6890 - Link local ranges
LINK_LOCAL_NETWORK = ip_network("169.254.0.0/16")
def is_loopback(address: IPv4Address | IPv6Address) -> bool:
"""Check if an address is a loopback address."""
return any(address in network for network in LOOPBACK_NETWORKS)
def is_private(address: IPv4Address | IPv6Address) -> bool:
"""Check if an address is a private address."""
return any(address in network for network in PRIVATE_NETWORKS)
def is_link_local(address: IPv4Address | IPv6Address) -> bool:
"""Check if an address is link local."""
return address in LINK_LOCAL_NETWORK
def is_local(address: IPv4Address | IPv6Address) -> bool:
"""Check if an address is loopback or private."""
return is_loopback(address) or is_private(address)
def is_invalid(address: IPv4Address | IPv6Address) -> bool:
"""Check if an address is invalid."""
return bool(address == ip_address("0.0.0.0"))
def is_ip_address(address: str) -> bool:
"""Check if a given string is an IP address."""
try:
ip_address(address)
except ValueError:
return False
return True
def normalize_url(address: str) -> str:
"""Normalize a given URL."""
url = yarl.URL(address.rstrip("/"))
if url.is_default_port():
return str(url.with_port(None))
return str(url)
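# Minimal usage sketch (illustrative addition, not part of the original
# module); the addresses and URL below are arbitrary examples.
if __name__ == "__main__":
    assert is_loopback(ip_address("127.0.0.1"))
    assert is_private(ip_address("192.168.1.10"))
    assert is_link_local(ip_address("169.254.10.20"))
    assert not is_local(ip_address("8.8.8.8"))
    assert normalize_url("http://example.local:80/") == "http://example.local"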
|
atsao72/sympy
|
refs/heads/master
|
sympy/physics/quantum/tests/test_grover.py
|
48
|
from sympy import sqrt
from sympy.physics.quantum.qapply import qapply
from sympy.physics.quantum.qubit import IntQubit
from sympy.physics.quantum.grover import (apply_grover, superposition_basis,
OracleGate, grover_iteration, WGate)
def return_one_on_two(qubits):
return qubits == IntQubit(2, qubits.nqubits)
def return_one_on_one(qubits):
return qubits == IntQubit(1, qubits.nqubits)
def test_superposition_basis():
nbits = 2
first_half_state = IntQubit(0, nbits)/2 + IntQubit(1, nbits)/2
second_half_state = IntQubit(2, nbits)/2 + IntQubit(3, nbits)/2
assert first_half_state + second_half_state == superposition_basis(nbits)
nbits = 3
firstq = (1/sqrt(8))*IntQubit(0, nbits) + (1/sqrt(8))*IntQubit(1, nbits)
secondq = (1/sqrt(8))*IntQubit(2, nbits) + (1/sqrt(8))*IntQubit(3, nbits)
thirdq = (1/sqrt(8))*IntQubit(4, nbits) + (1/sqrt(8))*IntQubit(5, nbits)
fourthq = (1/sqrt(8))*IntQubit(6, nbits) + (1/sqrt(8))*IntQubit(7, nbits)
assert firstq + secondq + thirdq + fourthq == superposition_basis(nbits)
def test_OracleGate():
v = OracleGate(1, lambda qubits: qubits == IntQubit(0))
assert qapply(v*IntQubit(0)) == -IntQubit(0)
assert qapply(v*IntQubit(1)) == IntQubit(1)
nbits = 2
v = OracleGate(2, return_one_on_two)
assert qapply(v*IntQubit(0, nbits)) == IntQubit(0, nbits)
assert qapply(v*IntQubit(1, nbits)) == IntQubit(1, nbits)
assert qapply(v*IntQubit(2, nbits)) == -IntQubit(2, nbits)
assert qapply(v*IntQubit(3, nbits)) == IntQubit(3, nbits)
def test_WGate():
nqubits = 2
basis_states = superposition_basis(nqubits)
assert qapply(WGate(nqubits)*basis_states) == basis_states
expected = ((2/sqrt(pow(2, nqubits)))*basis_states) - IntQubit(1, nqubits)
assert qapply(WGate(nqubits)*IntQubit(1, nqubits)) == expected
def test_grover_iteration_1():
numqubits = 2
basis_states = superposition_basis(numqubits)
v = OracleGate(numqubits, return_one_on_one)
expected = IntQubit(1, numqubits)
assert qapply(grover_iteration(basis_states, v)) == expected
def test_grover_iteration_2():
numqubits = 4
basis_states = superposition_basis(numqubits)
v = OracleGate(numqubits, return_one_on_two)
    # After about (pi/4)*sqrt(pow(2, n)) iterations, IntQubit(2) should have the highest probability
    # For numqubits = 4 that is roughly pi iterations, i.e. 3 or 4
iterated = grover_iteration(basis_states, v)
iterated = qapply(iterated)
iterated = grover_iteration(iterated, v)
iterated = qapply(iterated)
iterated = grover_iteration(iterated, v)
iterated = qapply(iterated)
# In this case, probability was highest after 3 iterations
    # Probability of Qubit('0010') was 251/256 after 3 iterations vs 781/1024 after 4
# Ask about measurement
expected = (-13*basis_states)/64 + 264*IntQubit(2, numqubits)/256
assert qapply(expected) == iterated
def test_grover():
nqubits = 2
assert apply_grover(return_one_on_one, nqubits) == IntQubit(1, nqubits)
nqubits = 4
basis_states = superposition_basis(nqubits)
expected = (-13*basis_states)/64 + 264*IntQubit(2, nqubits)/256
assert apply_grover(return_one_on_two, 4) == qapply(expected)
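# Hedged illustration (not part of the original SymPy test suite): the comments
# above use the standard Grover estimate of about (pi/4)*sqrt(2**n) iterations
# for a single marked state; for numqubits = 4 that is (pi/4)*4 = pi ~ 3.14,
# which is why three iterations are applied before comparing against four.
def _optimal_grover_iterations(nqubits):
    """Rough optimal iteration count for one marked item (illustrative helper only)."""
    from math import floor, pi, sqrt
    return int(floor((pi / 4) * sqrt(2 ** nqubits)))
# _optimal_grover_iterations(2) == 1 and _optimal_grover_iterations(4) == 3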
|
ThoughtWorksInc/treadmill
|
refs/heads/master
|
treadmill/plugins.ref/api/authz.py
|
3
|
"""Authorization plugin."""
# Disable E0611: No 'name' in module
from treadmill import authz # pylint: disable=E0611
class _Authorizer(object):
"""Authorizer."""
def __init__(self, user_clbk):
pass
def authorize(self, resource, action, args, _kwargs):
"""Authorize user/resource/action."""
del resource
del action
del args
authorized = True
if not authorized:
raise authz.AuthorizationError('some reason.')
def init(user_clbk):
"""Initialize the authorizer."""
return _Authorizer(user_clbk)
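# Hedged usage sketch (not part of the plugin): a host application would obtain
# an authorizer via init() and call authorize() for each request. The resource
# and action values below are hypothetical.
if __name__ == '__main__':
    _authorizer = init(user_clbk=None)
    try:
        _authorizer.authorize('app', 'create', ['proid.myapp'], {})
        print('authorized')
    except authz.AuthorizationError as err:
        print('denied: %s' % err)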
|
qrkourier/ansible
|
refs/heads/devel
|
test/units/modules/network/junos/test_junos_config.py
|
36
|
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.junos import junos_config
from .junos_module import TestJunosModule, load_fixture, set_module_args
class TestJunosConfigModule(TestJunosModule):
module = junos_config
def setUp(self):
self.mock_get_config = patch('ansible.modules.network.junos.junos_config.get_configuration')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.junos.junos_config.load_config')
self.load_config = self.mock_load_config.start()
self.mock_lock_configuration = patch('ansible.module_utils.junos.lock_configuration')
self.lock_configuration = self.mock_lock_configuration.start()
self.mock_unlock_configuration = patch('ansible.module_utils.junos.unlock_configuration')
self.unlock_configuration = self.mock_unlock_configuration.start()
self.mock_commit_configuration = patch('ansible.modules.network.junos.junos_config.commit_configuration')
self.commit_configuration = self.mock_commit_configuration.start()
self.mock_get_diff = patch('ansible.modules.network.junos.junos_config.get_diff')
self.get_diff = self.mock_get_diff.start()
self.mock_send_request = patch('ansible.modules.network.junos.junos_config.send_request')
self.send_request = self.mock_send_request.start()
def tearDown(self):
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_lock_configuration.stop()
self.mock_unlock_configuration.stop()
self.mock_commit_configuration.stop()
self.mock_get_diff.stop()
self.mock_send_request.stop()
def load_fixtures(self, commands=None, format='text', changed=False):
self.get_config.return_value = load_fixture('get_configuration_rpc_reply.txt')
if changed:
self.load_config.return_value = load_fixture('get_configuration_rpc_reply_diff.txt')
else:
self.load_config.return_value = None
def test_junos_config_unchanged(self):
src = load_fixture('junos_config.set', content='str')
set_module_args(dict(src=src))
self.execute_module()
def test_junos_config_src_set(self):
src = load_fixture('junos_config.set', content='str')
set_module_args(dict(src=src))
self.execute_module(changed=True)
args, kwargs = self.load_config.call_args
self.assertEqual(kwargs['action'], 'set')
self.assertEqual(kwargs['format'], 'text')
def test_junos_config_backup(self):
set_module_args(dict(backup=True))
result = self.execute_module()
self.assertIn('__backup__', result)
def test_junos_config_lines(self):
set_module_args(dict(lines=['delete interfaces ae11', 'set interfaces ae11 unit 0 description Test']))
self.execute_module(changed=True)
args, kwargs = self.load_config.call_args
self.assertEqual(args[1][0], 'set interfaces ae11 unit 0 description Test')
self.assertEqual(kwargs['action'], 'set')
self.assertEqual(kwargs['format'], 'text')
def test_junos_config_confirm(self):
src = load_fixture('junos_config.set', content='str')
set_module_args(dict(src=src, confirm=40))
self.execute_module(changed=True)
args, kwargs = self.commit_configuration.call_args
self.assertEqual(kwargs['confirm_timeout'], 40)
def test_junos_config_rollback(self):
set_module_args(dict(rollback=10))
self.execute_module(changed=True)
self.assertEqual(self.get_diff.call_count, 1)
def test_junos_config_src_text(self):
src = load_fixture('junos_config.text', content='str')
set_module_args(dict(src=src))
self.execute_module(changed=True)
args, kwargs = self.load_config.call_args
self.assertEqual(kwargs['action'], 'merge')
self.assertEqual(kwargs['format'], 'text')
def test_junos_config_src_xml(self):
src = load_fixture('junos_config.xml', content='str')
set_module_args(dict(src=src))
self.execute_module(changed=True)
args, kwargs = self.load_config.call_args
self.assertEqual(kwargs['action'], 'merge')
self.assertEqual(kwargs['format'], 'xml')
def test_junos_config_src_json(self):
src = load_fixture('junos_config.json', content='str')
set_module_args(dict(src=src))
self.execute_module(changed=True)
args, kwargs = self.load_config.call_args
self.assertEqual(kwargs['action'], 'merge')
self.assertEqual(kwargs['format'], 'json')
def test_junos_config_update_override(self):
src = load_fixture('junos_config.xml', content='str')
set_module_args(dict(src=src, update='override'))
self.execute_module()
args, kwargs = self.load_config.call_args
self.assertEqual(kwargs['action'], 'override')
self.assertEqual(kwargs['format'], 'xml')
def test_junos_config_update_replace(self):
src = load_fixture('junos_config.json', content='str')
set_module_args(dict(src=src, update='replace'))
self.execute_module()
args, kwargs = self.load_config.call_args
self.assertEqual(kwargs['action'], 'replace')
self.assertEqual(kwargs['format'], 'json')
def test_junos_config_zeroize(self):
src = load_fixture('junos_config.json', content='str')
set_module_args(dict(zeroize='yes'))
self.execute_module(changed=True)
self.assertEqual(self.send_request.call_count, 1)
def test_junos_config_src_format_xml(self):
src = load_fixture('junos_config.json', content='str')
set_module_args(dict(src=src, src_format='xml'))
self.execute_module()
args, kwargs = self.load_config.call_args
self.assertEqual(kwargs['format'], 'xml')
def test_junos_config_confirm_commit(self):
set_module_args(dict(confirm_commit=True))
self.execute_module(changed=True)
self.assertEqual(self.commit_configuration.call_count, 1)
|
dcosentino/edx-platform
|
refs/heads/master
|
cms/djangoapps/contentstore/views/tests/test_checklists.py
|
42
|
""" Unit tests for checklist methods in views.py. """
from contentstore.utils import reverse_course_url
from contentstore.views.checklist import expand_checklist_action_url
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.django import modulestore
import json
from contentstore.tests.utils import CourseTestCase
class ChecklistTestCase(CourseTestCase):
""" Test for checklist get and put methods. """
def setUp(self):
""" Creates the test course. """
super(ChecklistTestCase, self).setUp()
self.course = CourseFactory.create(org='mitX', number='333', display_name='Checklists Course')
self.checklists_url = self.get_url()
def get_url(self, checklist_index=None):
url_args = {'checklist_index': checklist_index} if checklist_index else None
return reverse_course_url('checklists_handler', self.course.id, kwargs=url_args)
def get_persisted_checklists(self):
""" Returns the checklists as persisted in the modulestore. """
return modulestore().get_item(self.course.location).checklists
def compare_checklists(self, persisted, request):
"""
Handles url expansion as possible difference and descends into guts
"""
self.assertEqual(persisted['short_description'], request['short_description'])
expanded_checklist = expand_checklist_action_url(self.course, persisted)
for pers, req in zip(expanded_checklist['items'], request['items']):
self.assertEqual(pers['short_description'], req['short_description'])
self.assertEqual(pers['long_description'], req['long_description'])
self.assertEqual(pers['is_checked'], req['is_checked'])
self.assertEqual(pers['action_url'], req['action_url'])
self.assertEqual(pers['action_text'], req['action_text'])
self.assertEqual(pers['action_external'], req['action_external'])
def test_get_checklists(self):
""" Tests the get checklists method and URL expansion. """
response = self.client.get(self.checklists_url)
self.assertContains(response, "Getting Started With Studio")
# Verify expansion of action URL happened.
self.assertContains(response, 'course_team/mitX/333/Checklists_Course')
# Verify persisted checklist does NOT have expanded URL.
checklist_0 = self.get_persisted_checklists()[0]
self.assertEqual('ManageUsers', get_action_url(checklist_0, 0))
payload = response.content
# Now delete the checklists from the course and verify they get repopulated (for courses
# created before checklists were introduced).
self.course.checklists = None
# Save the changed `checklists` to the underlying KeyValueStore before updating the modulestore
self.course.save()
modulestore().update_item(self.course, self.user.id)
self.assertEqual(self.get_persisted_checklists(), None)
response = self.client.get(self.checklists_url)
self.assertEqual(payload, response.content)
def test_get_checklists_html(self):
""" Tests getting the HTML template for the checklists page). """
response = self.client.get(self.checklists_url, HTTP_ACCEPT='text/html')
self.assertContains(response, "Getting Started With Studio")
# The HTML generated will define the handler URL (for use by the Backbone model).
self.assertContains(response, self.checklists_url)
def test_update_checklists_no_index(self):
""" No checklist index, should return all of them. """
returned_checklists = json.loads(self.client.get(self.checklists_url).content)
# Verify that persisted checklists do not have expanded action URLs.
# compare_checklists will verify that returned_checklists DO have expanded action URLs.
pers = self.get_persisted_checklists()
self.assertEqual('CourseOutline', get_first_item(pers[1]).get('action_url'))
for pay, resp in zip(pers, returned_checklists):
self.compare_checklists(pay, resp)
def test_update_checklists_index_ignored_on_get(self):
""" Checklist index ignored on get. """
update_url = self.get_url(1)
returned_checklists = json.loads(self.client.get(update_url).content)
for pay, resp in zip(self.get_persisted_checklists(), returned_checklists):
self.compare_checklists(pay, resp)
def test_update_checklists_post_no_index(self):
""" No checklist index, will error on post. """
response = self.client.post(self.checklists_url)
self.assertContains(response, 'Could not save checklist', status_code=400)
def test_update_checklists_index_out_of_range(self):
""" Checklist index out of range, will error on post. """
update_url = self.get_url(100)
response = self.client.post(update_url)
self.assertContains(response, 'Could not save checklist', status_code=400)
def test_update_checklists_index(self):
""" Check that an update of a particular checklist works. """
update_url = self.get_url(1)
payload = self.course.checklists[1]
self.assertFalse(get_first_item(payload).get('is_checked'))
self.assertEqual('CourseOutline', get_first_item(payload).get('action_url'))
get_first_item(payload)['is_checked'] = True
returned_checklist = json.loads(self.client.ajax_post(update_url, payload).content)
self.assertTrue(get_first_item(returned_checklist).get('is_checked'))
persisted_checklist = self.get_persisted_checklists()[1]
# Verify that persisted checklist does not have expanded action URLs.
# compare_checklists will verify that returned_checklist DOES have expanded action URLs.
self.assertEqual('CourseOutline', get_first_item(persisted_checklist).get('action_url'))
self.compare_checklists(persisted_checklist, returned_checklist)
def test_update_checklists_delete_unsupported(self):
""" Delete operation is not supported. """
update_url = self.get_url(100)
response = self.client.delete(update_url)
self.assertEqual(response.status_code, 405)
def test_expand_checklist_action_url(self):
"""
Tests the method to expand checklist action url.
"""
def test_expansion(checklist, index, stored, expanded):
"""
Tests that the expected expanded value is returned for the item at the given index.
Also verifies that the original checklist is not modified.
"""
self.assertEqual(get_action_url(checklist, index), stored)
expanded_checklist = expand_checklist_action_url(self.course, checklist)
self.assertEqual(get_action_url(expanded_checklist, index), expanded)
# Verify no side effect in the original list.
self.assertEqual(get_action_url(checklist, index), stored)
test_expansion(self.course.checklists[0], 0, 'ManageUsers', '/course_team/mitX/333/Checklists_Course/')
test_expansion(self.course.checklists[1], 1, 'CourseOutline', '/course/mitX/333/Checklists_Course')
test_expansion(self.course.checklists[2], 0, 'http://help.edge.edx.org/', 'http://help.edge.edx.org/')
def get_first_item(checklist):
""" Returns the first item from the checklist. """
return checklist['items'][0]
def get_action_url(checklist, index):
"""
Returns the action_url for the item at the specified index in the given checklist.
"""
return checklist['items'][index]['action_url']
|
dreamsxin/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/idlelib/configHelpSourceEdit.py
|
82
|
"Dialog to specify or edit the parameters for a user configured help source."
import os
import sys
from tkinter import *
import tkinter.messagebox as tkMessageBox
import tkinter.filedialog as tkFileDialog
class GetHelpSourceDialog(Toplevel):
def __init__(self, parent, title, menuItem='', filePath='', _htest=False):
"""Get menu entry and url/ local file location for Additional Help
User selects a name for the Help resource and provides a web url
or a local file as its source. The user can enter a url or browse
for the file.
_htest - bool, change box location when running htest
"""
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
self.resizable(height=FALSE, width=FALSE)
self.title(title)
self.transient(parent)
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.Cancel)
self.parent = parent
self.result = None
self.CreateWidgets()
self.menu.set(menuItem)
self.path.set(filePath)
self.withdraw() #hide while setting geometry
#needs to be done here so that the winfo_reqwidth is valid
self.update_idletasks()
#centre dialog over parent. below parent if running htest.
self.geometry(
"+%d+%d" % (
parent.winfo_rootx() +
(parent.winfo_width()/2 - self.winfo_reqwidth()/2),
parent.winfo_rooty() +
((parent.winfo_height()/2 - self.winfo_reqheight()/2)
if not _htest else 150)))
self.deiconify() #geometry set, unhide
self.bind('<Return>', self.Ok)
self.wait_window()
def CreateWidgets(self):
self.menu = StringVar(self)
self.path = StringVar(self)
self.fontSize = StringVar(self)
self.frameMain = Frame(self, borderwidth=2, relief=GROOVE)
self.frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
labelMenu = Label(self.frameMain, anchor=W, justify=LEFT,
text='Menu Item:')
self.entryMenu = Entry(self.frameMain, textvariable=self.menu,
width=30)
self.entryMenu.focus_set()
labelPath = Label(self.frameMain, anchor=W, justify=LEFT,
text='Help File Path: Enter URL or browse for file')
self.entryPath = Entry(self.frameMain, textvariable=self.path,
width=40)
self.entryMenu.focus_set()
labelMenu.pack(anchor=W, padx=5, pady=3)
self.entryMenu.pack(anchor=W, padx=5, pady=3)
labelPath.pack(anchor=W, padx=5, pady=3)
self.entryPath.pack(anchor=W, padx=5, pady=3)
browseButton = Button(self.frameMain, text='Browse', width=8,
command=self.browseFile)
browseButton.pack(pady=3)
frameButtons = Frame(self)
frameButtons.pack(side=BOTTOM, fill=X)
self.buttonOk = Button(frameButtons, text='OK',
width=8, default=ACTIVE, command=self.Ok)
self.buttonOk.grid(row=0, column=0, padx=5,pady=5)
self.buttonCancel = Button(frameButtons, text='Cancel',
width=8, command=self.Cancel)
self.buttonCancel.grid(row=0, column=1, padx=5, pady=5)
def browseFile(self):
filetypes = [
("HTML Files", "*.htm *.html", "TEXT"),
("PDF Files", "*.pdf", "TEXT"),
("Windows Help Files", "*.chm"),
("Text Files", "*.txt", "TEXT"),
("All Files", "*")]
path = self.path.get()
if path:
dir, base = os.path.split(path)
else:
base = None
if sys.platform[:3] == 'win':
dir = os.path.join(os.path.dirname(sys.executable), 'Doc')
if not os.path.isdir(dir):
dir = os.getcwd()
else:
dir = os.getcwd()
opendialog = tkFileDialog.Open(parent=self, filetypes=filetypes)
file = opendialog.show(initialdir=dir, initialfile=base)
if file:
self.path.set(file)
def MenuOk(self):
"Simple validity check for a sensible menu item name"
menuOk = True
menu = self.menu.get()
        menu = menu.strip()
if not menu:
tkMessageBox.showerror(title='Menu Item Error',
message='No menu item specified',
parent=self)
self.entryMenu.focus_set()
menuOk = False
elif len(menu) > 30:
tkMessageBox.showerror(title='Menu Item Error',
message='Menu item too long:'
'\nLimit 30 characters.',
parent=self)
self.entryMenu.focus_set()
menuOk = False
return menuOk
def PathOk(self):
"Simple validity check for menu file path"
pathOk = True
path = self.path.get()
        path = path.strip()
if not path: #no path specified
tkMessageBox.showerror(title='File Path Error',
message='No help file path specified.',
parent=self)
self.entryPath.focus_set()
pathOk = False
elif path.startswith(('www.', 'http')):
pass
else:
if path[:5] == 'file:':
path = path[5:]
if not os.path.exists(path):
tkMessageBox.showerror(title='File Path Error',
message='Help file path does not exist.',
parent=self)
self.entryPath.focus_set()
pathOk = False
return pathOk
def Ok(self, event=None):
if self.MenuOk() and self.PathOk():
self.result = (self.menu.get().strip(),
self.path.get().strip())
if sys.platform == 'darwin':
path = self.result[1]
if path.startswith(('www', 'file:', 'http:')):
pass
else:
# Mac Safari insists on using the URI form for local files
self.result = list(self.result)
self.result[1] = "file://" + path
self.destroy()
def Cancel(self, event=None):
self.result = None
self.destroy()
if __name__ == '__main__':
from idlelib.idle_test.htest import run
run(GetHelpSourceDialog)
|
GreenRecycleBin/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/pywebsocket/src/example/abort_wsh.py
|
465
|
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import handshake
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
raise handshake.AbortedByUserException(
"Aborted in web_socket_transfer_data")
# vi:sts=4 sw=4 et
|
PredictiveScienceLab/py-orthpol
|
refs/heads/master
|
demos/demo7.py
|
2
|
"""
Generate the Legendre polynomials using a scipy.stats uniform random variable.
This demo demonstrates how to:
+ Construct a set of orthogonal univariate polynomials given a scipy.stats
random variable.
+ Examine certain properties of a univariate polynomial.
+ Evaluate the polynomials at one or more points.
+ Evaluate the derivatives of the polynomials at one or more points.
Author:
Ilias Bilionis
Date:
3/18/2014
"""
import orthpol
import math
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
# The desired degree
degree = 4
# The first way of doing it is to write down the random variable:
rv = scipy.stats.uniform()
# Construct it:
p = orthpol.OrthogonalPolynomial(degree, rv=rv)
# An orthogonal polynomial is thought of as a function.
# Here is how to get the number of inputs and outputs of that function
print 'Number of inputs:', p.num_input
print 'Number of outputs:', p.num_output
# Test if the polynomials are normalized (i.e., their norm is 1.):
print 'Is normalized:', p.is_normalized
# Get the degree of the polynomial:
print 'Polynomial degree:', p.degree
# Get the alpha-beta recursion coefficients:
print 'Alpha:', p.alpha
print 'Beta:', p.beta
# The following should print a description of the polynomial
print str(p)
# Now you can evaluate the polynomial at any points you want:
X = np.linspace(0., 1., 100)
# Here is the actual evaluation
phi = p(X)
# Phi should be a 100x5 matrix (degree + 1 columns): phi[i, j] = p_j(X[i])
# Let's plot them
plt.plot(X, phi)
plt.title('Legendre Polynomials', fontsize=16)
plt.xlabel('$x$', fontsize=16)
plt.ylabel('$p_i(x)$', fontsize=16)
plt.legend(['$p_{%d}(x)$' % i for i in range(p.num_output)], loc='best')
print 'Close the window to continue...'
plt.show()
# You may also compute the derivatives of the polynomials:
dphi = p.d(X)
# Let's plot them also
plt.plot(X, dphi)
plt.title('Derivatives of Legendre Polynomials', fontsize=16)
plt.xlabel('$x$', fontsize=16)
plt.ylabel(r'$\frac{dp_i(x)}{dx}$', fontsize=16)
plt.legend([r'$\frac{dp_{%d}(x)}{dx}$' % i for i in range(p.num_output)], loc='best')
print 'Close the window to end demo...'
plt.show()
|
huihoo/reader
|
refs/heads/master
|
apps/rss_feeds/migrations/0043_favicon_color.py
|
18
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Feed.favicon_color'
db.add_column('feeds', 'favicon_color', self.gf('django.db.models.fields.CharField')(max_length=6, null=True, blank=True), keep_default=False)
# Adding field 'Feed.favicon_not_found'
db.add_column('feeds', 'favicon_not_found', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting field 'Feed.favicon_color'
db.delete_column('feeds', 'favicon_color')
# Deleting field 'Feed.favicon_not_found'
db.delete_column('feeds', 'favicon_not_found')
models = {
'rss_feeds.duplicatefeed': {
'Meta': {'object_name': 'DuplicateFeed'},
'duplicate_address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'duplicate_feed_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'duplicate_addresses'", 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.feed': {
'Meta': {'ordering': "['feed_title']", 'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'active_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'db_index': 'True'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'exception_code': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'favicon_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'favicon_not_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "'[Untitled]'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_feed_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'has_page_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'premium_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'queued_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'rss_feeds.feeddata': {
'Meta': {'object_name': 'FeedData'},
'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'data'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'feed_classifier_counts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'story_count_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedicon': {
'Meta': {'object_name': 'FeedIcon'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'icon'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['rss_feeds.Feed']"}),
'icon_url': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'not_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'rss_feeds.feedloadtime': {
'Meta': {'object_name': 'FeedLoadtime'},
'date_accessed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loadtime': ('django.db.models.fields.FloatField', [], {})
},
'rss_feeds.feedupdatehistory': {
'Meta': {'object_name': 'FeedUpdateHistory'},
'average_per_feed': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number_of_feeds': ('django.db.models.fields.IntegerField', [], {}),
'seconds_taken': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['rss_feeds']
|