code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# service type constants:
CORE = "CORE"
DUMMY = "DUMMY"
LOADBALANCER = "LOADBALANCER"

# Service types for which plugins may be loaded.
# TODO(salvatore-orlando): Move these (or derive them) from conf file
ALLOWED_SERVICES = [CORE, DUMMY, LOADBALANCER]

# URL prefix under which each service's API resources are exposed;
# the core service lives at the API root (empty prefix).
COMMON_PREFIXES = {
    CORE: "",
    DUMMY: "/dummy_svc",
    LOADBALANCER: "/lb",
}

# Service operation status constants
ACTIVE = "ACTIVE"
PENDING_CREATE = "PENDING_CREATE"
PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
INACTIVE = "INACTIVE"
ERROR = "ERROR"
| wallnerryan/quantum_migrate | quantum/plugins/common/constants.py | Python | apache-2.0 | 1,169 |
from genty import genty, genty_dataset
import sys
from app.master.atomizer import Atomizer
from app.master.job_config import ConfigValidationError
from app.master.cluster_runner_config import ClusterRunnerConfig, ConfigParseError
from test.framework.base_unit_test_case import BaseUnitTestCase
@genty
class TestClusterRunnerConfig(BaseUnitTestCase):
    """Unit tests for ClusterRunnerConfig's YAML parsing and validation."""

    # Exercises every supported section: max_executors, setup_build,
    # commands and atomizers.
    _COMPLETE_VALID_CONFIG = """
Best Job Ever:
    max_executors: 21
    setup_build:
        - echo "This is setup! Woo!" # no semicolons in this section
        - sleep 1
    commands:
        - echo "Now I'm doing $THE_THING!"; # semicolons in this section
        - echo "Semicolons are fun." > /tmp/my_hard_work.txt;
    atomizers:
        - THE_THING: printf 'something with a number %d\\n' {1..50}
"""
    _MULTI_JOB_CONFIG = """
First Job:
    commands:
        - echo "go"
    atomizers:
        - ENV_VAR: echo "atom"
Second Job:
    commands:
        - echo "go"
    atomizers:
        - ENV_VAR: echo "atom"
"""
    # Atomizer given as a bare string rather than a single-entry mapping;
    # validation is expected to reject this shape.
    _FREEFORM_ATOMIZER = """
PHPUnit:
    commands:
        - echo "go"
    atomizers:
        - "export VARNAME='asdf'"
"""
    _MINIMAL_CONFIG = """
PHPUnit:
    commands:
        - echo "go"
    atomizers:
        - ENV_VAR: find . -name "*.php"
"""
    _EMPTY_CONFIG = """
PHPUnit:
"""
    # Blank and comment-only command list entries should be skipped.
    _VALID_CONFIG_WITH_EMPTY_COMMANDS = """
PHPUnit:
    commands:
        - echo "first"
        -
        - # some YAML comment
        -
        - echo "last"
    atomizers:
        - ENV_VAR: echo "atom"
"""
    # Missing the mandatory 'commands' section entirely.
    _NO_COMMAND_INVALID_CONFIG = """
PHPUnit:
    max_executors: 5
    setup_build:
        - echo "I don't know what I'm doing."
    atomizers:
        - VARNAME: sleep 123
"""
    # 'commands' present but containing only an empty entry.
    _INVALID_CONFIG_WITH_EMPTY_COMMANDS = """
PHPUnit:
    commands:
        -
    atomizers:
        - ENV_VAR: echo "atom"
"""
    # A trailing '&' marks a background task and must not get a '&&' joiner.
    _BACKGROUND_TASK_CONFIG = """
PHPUnit:
    max_executors: 5
    setup_build:
        - echo "in the background" &
        - echo "in the foreground" ;
        - echo "another thing"
    atomizers:
        - VARNAME: sleep1
    commands:
        - echo "go"
"""

    @genty_dataset(
        complete_valid_config=(_COMPLETE_VALID_CONFIG, {
            'name': 'Best Job Ever',
            'max_executors': 21,
            'setup_build': 'echo "This is setup! Woo!" && sleep 1',
            'command': 'echo "Now I\'m doing $THE_THING!" && echo "Semicolons are fun." > /tmp/my_hard_work.txt',
            'atomizer': [{'THE_THING': 'printf \'something with a number %d\\n\' {1..50}'}],
        }),
        valid_config_with_empty_command=(_VALID_CONFIG_WITH_EMPTY_COMMANDS, {
            'command': 'echo "first" && echo "last"',
            'atomizer': [{'ENV_VAR': 'echo "atom"'}],
        }),
    )
    def test_valid_conf_properties_are_correctly_parsed(self, config_string, expected_loaded_config):
        """Each parsed JobConfig attribute should match its expected value."""
        config = ClusterRunnerConfig(config_string)
        job_config = config.get_job_config()
        for method_name, expected_value in expected_loaded_config.items():
            actual_value = getattr(job_config, method_name)
            if isinstance(actual_value, Atomizer):
                actual_value = actual_value._atomizer_dicts  # special case comparison for atomizer
            self.assertEqual(actual_value, expected_value,
                             'The output of {}() should match the expected value.'.format(method_name))

    @genty_dataset(
        ('max_executors', sys.maxsize),
        ('setup_build', None),
    )
    def test_undefined_conf_properties_return_default_values(self, conf_method_name, expected_value):
        """Options omitted from the config should fall back to defaults."""
        config = ClusterRunnerConfig(self._MINIMAL_CONFIG)
        job_config = config.get_job_config()
        actual_value = getattr(job_config, conf_method_name)
        self.assertEqual(actual_value, expected_value,
                         'The default output of {}() should match the expected value.'.format(conf_method_name))

    @genty_dataset(
        valid_config=(_COMPLETE_VALID_CONFIG, True),
        empty_config=(_EMPTY_CONFIG, False),
        invalid_config=(_NO_COMMAND_INVALID_CONFIG, False),
        invalid_config_with_empty_commands=(_INVALID_CONFIG_WITH_EMPTY_COMMANDS, False),
    )
    def test_valid_configs_are_detected(self, config_contents, is_expected_valid):
        """Parsing should raise for invalid configs, and only for those."""
        config = ClusterRunnerConfig(config_contents)
        try:
            config.get_job_config()
        except (ConfigParseError, ConfigValidationError) as e:
            self.assertFalse(is_expected_valid, 'Config is valid, but threw {}'.format(type(e)))
            return
        self.assertTrue(is_expected_valid, 'Config is not valid, but parsed without error')

    @genty_dataset(
        freeform_atomizer=(_FREEFORM_ATOMIZER,),
    )
    def test_incorrect_atomizer_type_raises_exception(self, config_contents):
        """A non-mapping atomizer entry should fail validation."""
        config = ClusterRunnerConfig(config_contents)
        with self.assertRaises(ConfigValidationError):
            config.get_job_config()

    def test_get_specific_job_config(self):
        """get_job_config(name) should return the named job of a multi-job file."""
        config = ClusterRunnerConfig(self._MULTI_JOB_CONFIG)
        job_config = config.get_job_config('Second Job')
        self.assertEqual('Second Job', job_config.name, '')
        job_config = config.get_job_config('First Job')
        self.assertEqual('First Job', job_config.name, '')

    def test_config_with_background_task(self):
        """A '&'-terminated setup command keeps its single '&' separator."""
        config = ClusterRunnerConfig(self._BACKGROUND_TASK_CONFIG)
        job_config = config.get_job_config()
        self.assertEqual(job_config.setup_build,
                         'echo "in the background" & echo "in the foreground" && echo "another thing"')
| nickzuber/ClusterRunner | test/unit/master/test_cluster_runner_config.py | Python | apache-2.0 | 5,849 |
#!/usr/bin/env python
import sys
# Module-level alias used by LZWDecoder.run()'s debug output.
stderr = sys.stderr
## LZWDecoder
##
class LZWDecoder(object):
    """Decoder for LZW-compressed streams as used by the PDF LZWDecode filter.

    Reads variable-width codes (9-12 bits) from a binary file-like object
    and yields decoded string chunks via run().  This version replaces the
    Python-2-only ``print >>stderr`` statement and ``xrange`` with forms
    that are valid on both Python 2 and Python 3.
    """

    debug = 0  # set truthy to trace every decoded code on stderr

    def __init__(self, fp):
        self.fp = fp          # binary file-like object supplying code bytes
        self.buff = 0         # byte currently being consumed bit by bit
        self.bpos = 8         # next bit position within buff (8 == exhausted)
        self.nbits = 9        # current code width in bits
        self.table = None     # code -> string table, built on clear code 256
        self.prevbuf = None   # previously emitted string
        return

    def readbits(self, bits):
        """Return the next `bits` bits from the stream as an integer.

        Raises EOFError when the underlying stream is exhausted.
        """
        v = 0
        while 1:
            # the number of remaining bits we can get from the current buffer.
            r = 8 - self.bpos
            if bits <= r:
                # |-----8-bits-----|
                # |-bpos-|-bits-|  |
                # |      |----r----|
                v = (v << bits) | ((self.buff >> (r - bits)) & ((1 << bits) - 1))
                self.bpos += bits
                break
            else:
                # |-----8-bits-----|
                # |-bpos-|---bits----...
                # |      |----r----|
                v = (v << r) | (self.buff & ((1 << r) - 1))
                bits -= r
                x = self.fp.read(1)
                if not x:
                    raise EOFError
                self.buff = ord(x)
                self.bpos = 0
        return v

    def feed(self, code):
        """Feed one LZW code; return the decoded string (possibly empty)."""
        x = ''
        if code == 256:
            # Clear-table marker: reset the dictionary and code width.
            # range() replaces the py2-only xrange(); 256 entries is cheap.
            self.table = [chr(c) for c in range(256)]  # 0-255
            self.table.append(None)  # 256
            self.table.append(None)  # 257
            self.prevbuf = ''
            self.nbits = 9
        elif code == 257:
            # End-of-data marker: nothing to emit.
            pass
        elif not self.prevbuf:
            # First real code after a table clear.
            x = self.prevbuf = self.table[code]
        else:
            if code < len(self.table):
                x = self.table[code]
                self.table.append(self.prevbuf + x[0])
            else:
                # Special LZW case: code not in table yet (KwKwK pattern).
                self.table.append(self.prevbuf + self.prevbuf[0])
                x = self.table[code]
            l = len(self.table)
            # Widen the code size one entry before each power-of-two boundary.
            if l == 511:
                self.nbits = 10
            elif l == 1023:
                self.nbits = 11
            elif l == 2047:
                self.nbits = 12
            self.prevbuf = x
        return x

    def run(self):
        """Generator: decode the whole stream, yielding string chunks."""
        while 1:
            try:
                code = self.readbits(self.nbits)
            except EOFError:
                break
            x = self.feed(code)
            yield x
            if self.debug:
                # stderr.write() instead of the py2-only `print >>stderr`
                # statement, so the module parses on Python 3 as well.
                sys.stderr.write('nbits=%d, code=%d, output=%r, table=%r\n' %
                                 (self.nbits, code, x, self.table[258:]))
        return
def main(argv):
    """Self-test: decode a known LZW sample and report whether it matches.

    Uses io.BytesIO (available on Python 2 and 3) instead of the
    py2-only StringIO module, and function-call print forms that behave
    identically under both interpreters.  Returns 0 always; the pass/fail
    verdict is printed, matching the original behavior.
    """
    import io
    # 'data' instead of the original 'input', which shadowed the builtin.
    data = b'\x80\x0b\x60\x50\x22\x0c\x0c\x85\x01'
    fp = io.BytesIO(data)
    expected = '\x2d\x2d\x2d\x2d\x2d\x41\x2d\x2d\x2d\x42'
    LZWDecoder.debug = 1
    output = ''.join(LZWDecoder(fp).run())
    print((data, expected, output))
    print(output == expected)
    return 0

if __name__ == '__main__': sys.exit(main(sys.argv))
| onfire73/pypeskg | ui/pypesvds/lib/extras/pdflib/lzw.py | Python | apache-2.0 | 2,356 |
# Copyright (c) 2015 by Tegile Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver Test for Tegile storage.
"""
import mock
from cinder import context
from cinder.exception import TegileAPIException
from cinder import test
from cinder.volume.drivers import tegile
# Shorthand aliases for the driver classes under test.
BASE_DRIVER = tegile.TegileIntelliFlashVolumeDriver
ISCSI_DRIVER = tegile.TegileISCSIDriver
FC_DRIVER = tegile.TegileFCDriver

# Fake SAN configuration consumed by every driver instance in these tests.
test_config = mock.Mock()
test_config.san_ip = 'some-ip'
test_config.san_login = 'some-user'
test_config.san_password = 'some-password'
test_config.san_is_local = True
test_config.tegile_default_pool = 'random-pool'
test_config.tegile_default_project = 'random-project'
test_config.volume_backend_name = "unittest"

# Minimal volume record; the pool name is encoded after '#' in 'host'.
test_volume = {'host': 'node#testPool',
               'name': 'testvol',
               'id': 'a24c2ee8-525a-4406-8ccd-8d38688f8e9e',
               '_name_id': 'testvol',
               'metadata': {'project': 'testProj'},
               'provider_location': None,
               'size': 10}

# Minimal snapshot record referencing its parent volume.
test_snapshot = {'name': 'testSnap',
                 'id': '07ae9978-5445-405e-8881-28f2adfee732',
                 'volume': {'host': 'node#testPool',
                            'size': 1,
                            '_name_id': 'testvol'
                            }
                 }

# Canned payload returned by the fake backend's 'getArrayStats' call.
array_stats = {'total_capacity_gb': 4569.199686084874,
               'free_capacity_gb': 4565.381390112452,
               'pools': [{'total_capacity_gb': 913.5,
                          'QoS_support': False,
                          'free_capacity_gb': 911.812650680542,
                          'reserved_percentage': 0,
                          'pool_name': 'pyramid'
                          },
                         {'total_capacity_gb': 2742.1996604874,
                          'QoS_support': False,
                          'free_capacity_gb': 2740.148867149747,
                          'reserved_percentage': 0,
                          'pool_name': 'cobalt'
                          },
                         {'total_capacity_gb': 913.5,
                          'QoS_support': False,
                          'free_capacity_gb': 913.4198722839355,
                          'reserved_percentage': 0,
                          'pool_name': 'test'
                          }]
               }
class FakeTegileService(object):
    """Stub of the Tegile API executor that returns canned responses.

    The original compared ``method`` with ``is``, which only works by
    accident of CPython small-string interning (and emits a SyntaxWarning
    on Python 3.8+); equality comparison is the correct form and behaves
    identically for these literal method names.
    """

    # API methods the fake simply acknowledges with an empty response.
    _ACK_METHODS = ('createVolume', 'deleteVolume',
                    'createVolumeSnapshot', 'deleteVolumeSnapshot',
                    'cloneVolumeSnapshot', 'listPools', 'resizeVolume')

    @staticmethod
    def send_api_request(method, params=None,
                         request_type='post',
                         api_service='v2',
                         fine_logging=False):
        """Return the canned response for *method* (None when unknown)."""
        if method in FakeTegileService._ACK_METHODS:
            return ''
        elif method == 'getVolumeSizeinGB':
            return 25
        elif method == 'getISCSIMappingForVolume':
            return {'target_lun': '27',
                    'target_iqn': 'iqn.2012-02.com.tegile:openstack-cobalt',
                    'target_portal': '10.68.103.106:3260'
                    }
        elif method == 'getFCPortsForVolume':
            return {'target_lun': '12',
                    'initiator_target_map':
                        '{"21000024ff59bb6e":["21000024ff578701",],'
                        '"21000024ff59bb6f":["21000024ff578700",],}',
                    'target_wwn': '["21000024ff578700","21000024ff578701",]'}
        elif method == 'getArrayStats':
            return array_stats
        return None  # unknown method: the original implicitly returned None


fake_tegile_backend = FakeTegileService()
class FakeTegileServiceFail(object):
    """Stub API executor whose every call fails with TegileAPIException."""

    @staticmethod
    def send_api_request(method, params=None,
                         request_type='post',
                         api_service='v2',
                         fine_logging=False):
        # All arguments are ignored; the exception class itself is raised
        # (Python instantiates it automatically).
        raise TegileAPIException


fake_tegile_backend_fail = FakeTegileServiceFail()
class TegileIntelliFlashVolumeDriverTestCase(test.TestCase):
    """Tests for the protocol-agnostic base driver operations.

    Every test patches the driver's _api_executor with either
    fake_tegile_backend (canned success responses) or
    fake_tegile_backend_fail (always raises TegileAPIException), so no
    real array is ever contacted.
    """

    def setUp(self):
        self.ctxt = context.get_admin_context()
        self.configuration = test_config
        super(TegileIntelliFlashVolumeDriverTestCase, self).setUp()

    def test_create_volume(self):
        tegile_driver = self.get_object(self.configuration)
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend):
            # Pool comes from test_volume's host; project from the config.
            self.assertEqual({
                'metadata': {'pool': 'testPool',
                             'project': test_config.tegile_default_project
                             }
            }, tegile_driver.create_volume(test_volume))

    def test_create_volume_fail(self):
        tegile_driver = self.get_object(self.configuration)
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend_fail):
            self.assertRaises(TegileAPIException,
                              tegile_driver.create_volume,
                              test_volume)

    def test_delete_volume(self):
        tegile_driver = self.get_object(self.configuration)
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend):
            tegile_driver.delete_volume(test_volume)

    def test_delete_volume_fail(self):
        tegile_driver = self.get_object(self.configuration)
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend_fail):
            self.assertRaises(TegileAPIException,
                              tegile_driver.delete_volume,
                              test_volume)

    def test_create_snapshot(self):
        tegile_driver = self.get_object(self.configuration)
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend):
            tegile_driver.create_snapshot(test_snapshot)

    def test_create_snapshot_fail(self):
        tegile_driver = self.get_object(self.configuration)
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend_fail):
            self.assertRaises(TegileAPIException,
                              tegile_driver.create_snapshot,
                              test_snapshot)

    def test_delete_snapshot(self):
        tegile_driver = self.get_object(self.configuration)
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend):
            tegile_driver.delete_snapshot(test_snapshot)

    def test_delete_snapshot_fail(self):
        tegile_driver = self.get_object(self.configuration)
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend_fail):
            self.assertRaises(TegileAPIException,
                              tegile_driver.delete_snapshot,
                              test_snapshot)

    def test_create_volume_from_snapshot(self):
        tegile_driver = self.get_object(self.configuration)
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend):
            self.assertEqual({
                'metadata': {'pool': 'testPool',
                             'project': test_config.tegile_default_project
                             }
            }, tegile_driver.create_volume_from_snapshot(test_volume,
                                                         test_snapshot))

    def test_create_volume_from_snapshot_fail(self):
        tegile_driver = self.get_object(self.configuration)
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend_fail):
            self.assertRaises(TegileAPIException,
                              tegile_driver.create_volume_from_snapshot,
                              test_volume, test_snapshot)

    def test_create_cloned_volume(self):
        tegile_driver = self.get_object(self.configuration)
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend):
            # Clones keep the source volume's project from its metadata.
            self.assertEqual({'metadata': {'project': 'testProj',
                                           'pool': 'testPool'}},
                             tegile_driver.create_cloned_volume(test_volume,
                                                                test_volume))

    def test_create_cloned_volume_fail(self):
        tegile_driver = self.get_object(self.configuration)
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend_fail):
            self.assertRaises(TegileAPIException,
                              tegile_driver.create_cloned_volume,
                              test_volume, test_volume)

    def test_get_volume_stats(self):
        tegile_driver = self.get_object(self.configuration)
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend):
            # Expected values mirror the module-level array_stats fixture.
            self.assertEqual({'driver_version': '1.0.0',
                              'free_capacity_gb': 4565.381390112452,
                              'pools': [{'QoS_support': False,
                                         'allocated_capacity_gb': 0.0,
                                         'free_capacity_gb': 911.812650680542,
                                         'pool_name': 'pyramid',
                                         'reserved_percentage': 0,
                                         'total_capacity_gb': 913.5},
                                        {'QoS_support': False,
                                         'allocated_capacity_gb': 0.0,
                                         'free_capacity_gb': 2740.148867149747,
                                         'pool_name': 'cobalt',
                                         'reserved_percentage': 0,
                                         'total_capacity_gb': 2742.1996604874},
                                        {'QoS_support': False,
                                         'allocated_capacity_gb': 0.0,
                                         'free_capacity_gb': 913.4198722839355,
                                         'pool_name': 'test',
                                         'reserved_percentage': 0,
                                         'total_capacity_gb': 913.5}],
                              'storage_protocol': 'iSCSI',
                              'total_capacity_gb': 4569.199686084874,
                              'vendor_name': 'Tegile Systems Inc.',
                              'volume_backend_name': 'unittest'},
                             tegile_driver.get_volume_stats(True))

    def test_get_pool(self):
        tegile_driver = self.get_object(self.configuration)
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend):
            self.assertEqual('testPool', tegile_driver.get_pool(test_volume))

    def test_extend_volume(self):
        tegile_driver = self.get_object(self.configuration)
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend):
            tegile_driver.extend_volume(test_volume, 12)

    def test_extend_volume_fail(self):
        tegile_driver = self.get_object(self.configuration)
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend_fail):
            self.assertRaises(TegileAPIException,
                              tegile_driver.extend_volume,
                              test_volume, 30)

    def test_manage_existing(self):
        tegile_driver = self.get_object(self.configuration)
        existing_ref = {'name': 'existingvol'}
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend):
            self.assertEqual({'metadata': {'pool': 'testPool',
                                           'project': 'testProj'
                                           },
                              '_name_id': ('existingvol',)
                              }, tegile_driver.manage_existing(test_volume,
                                                               existing_ref))

    def test_manage_existing_get_size(self):
        tegile_driver = self.get_object(self.configuration)
        existing_ref = {'name': 'existingvol'}
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend):
            # 25 is the fake backend's canned 'getVolumeSizeinGB' answer.
            self.assertEqual(25,
                             tegile_driver.manage_existing_get_size(
                                 test_volume,
                                 existing_ref))

    def test_manage_existing_get_size_fail(self):
        tegile_driver = self.get_object(self.configuration)
        existing_ref = {'name': 'existingvol'}
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend_fail):
            self.assertRaises(TegileAPIException,
                              tegile_driver.manage_existing_get_size,
                              test_volume, existing_ref)

    def get_object(self, configuration):
        """Build a concrete driver with the abstract connection hooks stubbed.

        NOTE(review): the passed-in 'configuration' argument is ignored;
        self.configuration is used instead.
        """
        class TegileBaseDriver(BASE_DRIVER):
            def initialize_connection(self, volume, connector, **kwargs):
                pass

            def terminate_connection(self, volume, connector,
                                     force=False, **kwargs):
                pass
        return TegileBaseDriver(configuration=self.configuration)
class TegileISCSIDriverTestCase(test.TestCase):
    """Tests for the iSCSI-specific connection setup (CHAP credentials)."""

    def setUp(self):
        super(TegileISCSIDriverTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        self.configuration = test_config
        self.configuration.chap_username = 'fake'
        self.configuration.chap_password = "test"

    def test_initialize_connection(self):
        tegile_driver = self.get_object(self.configuration)
        connector = {'initiator': 'iqn.1993-08.org.debian:01:d0bb9a834f8'}
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend):
            # Target details come from the fake 'getISCSIMappingForVolume'
            # response; CHAP credentials from the configuration above.
            self.assertEqual(
                {'data': {'auth_method': 'CHAP',
                          'discard': False,
                          'target_discovered': (False,),
                          'auth_password': 'test',
                          'auth_username': 'fake',
                          'target_iqn': 'iqn.2012-02.'
                                        'com.tegile:openstack-cobalt',
                          'target_lun': '27',
                          'target_portal': '10.68.103.106:3260',
                          'volume_id': (
                              'a24c2ee8-525a-4406-8ccd-8d38688f8e9e',)},
                 'driver_volume_type': 'iscsi'},
                tegile_driver.initialize_connection(test_volume,
                                                    connector))

    def get_object(self, configuration):
        """Return a plain iSCSI driver instance for the given configuration."""
        return ISCSI_DRIVER(configuration=configuration)
class TegileFCDriverTestCase(test.TestCase):
    """Tests for the Fibre Channel-specific connection setup."""

    def setUp(self):
        super(TegileFCDriverTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        self.configuration = test_config

    def test_initialize_connection(self):
        tegile_driver = self.get_object(self.configuration)
        connector = {'wwpns': ['500110a0001a3990']}
        with mock.patch.object(tegile_driver,
                               '_api_executor',
                               fake_tegile_backend):
            # Expected map/wwns are parsed from the fake
            # 'getFCPortsForVolume' response strings.
            self.assertEqual({'data': {'encrypted': False,
                                       'initiator_target_map': {
                                           '21000024ff59bb6e':
                                               ['21000024ff578701'],
                                           '21000024ff59bb6f':
                                               ['21000024ff578700']
                                       },
                                       'target_discovered': False,
                                       'target_lun': '12',
                                       'target_wwn':
                                           ['21000024ff578700',
                                            '21000024ff578701']},
                              'driver_volume_type': 'fibre_channel'},
                             tegile_driver.initialize_connection(
                                 test_volume,
                                 connector))

    def get_object(self, configuration):
        """Return a plain FC driver instance for the given configuration."""
        return FC_DRIVER(configuration=configuration)
| Nexenta/cinder | cinder/tests/unit/volume/drivers/test_tegile.py | Python | apache-2.0 | 18,078 |
#-*- coding: utf-8 -*-
# stino/compiler.py
import os
import re
import threading
import subprocess
import sublime
from . import fileutil
from . import textutil
from . import constant
from . import serial
from . import base
from . import preprocess
from . import sketch
from . import console
# Maximum SRAM per MCU (bytes, kept as strings to match the values parsed
# from boards.txt).  Used as the 'upload.maximum_ram_size' fallback so the
# size report can warn when a sketch approaches the chip's RAM limit.
ram_size_dict = {
    'attiny44': '256',
    'attiny45': '256',
    'attiny84': '512',
    'attiny85': '512',
    'atmega8': '1024',
    'atmega168': '1024',
    'atmega328p': '2048',
    'atmega644': '4096',
    'atmega644p': '4096',
    'atmega1284': '16384',
    'atmega1284p': '16384',
    'atmega1280': '4096',
    # The ATmega2560 has 8 KB = 8192 bytes of SRAM (was mistyped '8196').
    'atmega2560': '8192',
    'atmega32u4': '2560',
    'at90usb162': '512',
    'at90usb646': '4096',
    'at90usb1286': '8192',
    'cortex-m3': '98304',
    'cortex-m4': '16384',
}
class Args:
    """Resolves the full compile-argument dictionary once and caches it."""

    def __init__(self, cur_project, arduino_info):
        # Resolve everything eagerly; getArgs() just hands back the cache.
        self.args = getFullArgs(cur_project, arduino_info)

    def getArgs(self):
        """Return the cached argument dictionary."""
        return self.args
class Command:
    """One shell command of a build, plus its console output bookkeeping."""

    def __init__(self, command):
        self.in_file = ''        # input file this command consumes (informational)
        self.out_file = ''       # file this command produces; announced before running
        self.command = command   # the shell command line to execute
        self.calc_size = False   # True when this is the firmware-size step
        self.stdout = ''         # captured stdout of the last run()
        self.out_text = ''       # text printed to the console before running

    def run(self, output_console):
        """Execute the command through the shell and return its exit code.

        Captured stdout/stderr are decoded with the system encoding; they
        are echoed to output_console only when the user enabled the
        'show_compilation_output' setting.
        """
        output_console.printText(self.out_text)
        if self.out_file:
            message = 'Creating %s...\n' % self.out_file
            output_console.printText(message)
        cur_command = formatCommand(self.command)
        compile_proc = subprocess.Popen(cur_command, stdout = subprocess.PIPE,
            stderr = subprocess.PIPE, shell = True)
        result = compile_proc.communicate()
        return_code = compile_proc.returncode
        # Strip carriage returns so Windows output matches Unix handling.
        stdout = result[0].decode(constant.sys_encoding).replace('\r', '')
        stderr = result[1].decode(constant.sys_encoding).replace('\r', '')
        self.stdout = stdout
        show_compilation_output = constant.sketch_settings.get('show_compilation_output', False)
        if show_compilation_output:
            output_console.printText(self.command)
            output_console.printText('\n')
            output_console.printText(stdout)
            output_console.printText(stderr)
        return return_code

    def isSizeCommand(self):
        return self.calc_size

    def setSizeCommand(self):
        self.calc_size = True

    def getOutFile(self):
        return self.out_file

    def getCommand(self):
        return self.command

    def getStdout(self):
        return self.stdout

    def setInFile(self, in_file):
        self.in_file = in_file

    def setOutFile(self, out_file):
        self.out_file = out_file

    def setCommand(self, command):
        self.command = command

    def setOutputText(self, text):
        self.out_text = text
class Compiler:
    """Runs a project's command list on a worker thread, reporting to a console."""

    def __init__(self, arduino_info, cur_project, args):
        self.arduino_info = arduino_info
        self.cur_project = cur_project
        self.args = args.getArgs()
        self.output_console = console.Console(cur_project.getName())
        self.no_error = True       # cleared on the first failing command
        self.is_finished = False   # set once compile() (or an early abort) ends
        self.prepare()

    def getOutputConsole(self):
        return self.output_console

    def isFinished(self):
        return self.is_finished

    def noError(self):
        return self.no_error

    def prepare(self):
        # Empty args (e.g. no Arduino folder chosen) yields no commands.
        self.command_list = []
        if self.args:
            self.command_list = genCommandList(self.args, self.cur_project, self.arduino_info)

    def run(self):
        """Start compilation on a background thread, or fail fast if unprepared."""
        if self.command_list:
            compilation_thread = threading.Thread(target=self.compile)
            compilation_thread.start()
        else:
            self.no_error = False
            self.is_finished = True
            # (sic) 'Ardunio' typo preserved: user-facing runtime string.
            self.output_console.printText('Please choose the Ardunio Application Folder.')

    def compile(self):
        """Run each command in order, stopping at the first non-zero exit code."""
        self.output_console.printText('Compiling %s...\n' % self.cur_project.getName())
        for cur_command in self.command_list:
            return_code = cur_command.run(self.output_console)
            if return_code > 0:
                self.output_console.printText('[Stino - Error %d]\n' % return_code)
                self.no_error = False
                break
            else:
                if cur_command.isSizeCommand():
                    stdout = cur_command.getStdout()
                    printSizeInfo(self.output_console, stdout, self.args)
        if self.no_error:
            self.output_console.printText('[Stino - Done compiling.]\n')
        self.is_finished = True
def getChosenArgs(arduino_info):
    """Collect the build arguments implied by the user's current choices.

    Reads the selected platform, board, board options and programmer from
    the sketch settings (normalising out-of-range indexes and persisting
    the normalised choice), merges their argument dicts and post-processes
    derived keys.  Returns an empty dict when no extra platform or no
    board is available.
    """
    args = {}
    platform_list = arduino_info.getPlatformList()
    if len(platform_list) > 1:
        platform_id = constant.sketch_settings.get('platform', -1)
        if not ((platform_id > 0) and (platform_id < len(platform_list))):
            platform_id = 1
        cur_platform = platform_list[platform_id]
        platform_name = cur_platform.getName()
        constant.sketch_settings.set('platform', platform_id)
        constant.sketch_settings.set('platform_name', platform_name)
        selected_platform = platform_list[platform_id]
        board_list = selected_platform.getBoardList()
        board_id = constant.sketch_settings.get('board', -1)
        if board_list:
            serial_port = getSelectedSerialPort()
            args['serial.port'] = serial_port
            # Bug fix: this range check used 'or', which is true for every
            # integer, so an out-of-range saved board index was never reset
            # and caused an IndexError below.  'and' matches the platform
            # and serial-port checks above.
            if not (board_id > -1 and board_id < len(board_list)):
                board_id = 0
            constant.sketch_settings.set('board', board_id)
            selected_board = board_list[board_id]
            args.update(selected_board.getArgs())
            board_option_list = selected_board.getOptionList()
            if board_option_list:
                # Per-board option choices are stored under 'platform.board'.
                board_option_key = '%d.%d' % (platform_id, board_id)
                board_option_dict = constant.sketch_settings.get('board_option', {})
                if board_option_key in board_option_dict:
                    option_item_id_list = board_option_dict[board_option_key]
                    if len(option_item_id_list) < len(board_option_list):
                        option_item_id_list = []
                else:
                    option_item_id_list = []
                if not option_item_id_list:
                    # Default every option to its first item.
                    for board_option in board_option_list:
                        option_item_id_list.append(0)
                for board_option in board_option_list:
                    index = board_option_list.index(board_option)
                    option_item_id = option_item_id_list[index]
                    option_item_list = board_option.getItemList()
                    option_item = option_item_list[option_item_id]
                    option_item_args = option_item.getArgs()
                    args.update(option_item_args)
            if 'build.vid' in args:
                if not 'build.extra_flags' in args:
                    args['build.extra_flags'] = '-DUSB_VID={build.vid} -DUSB_PID={build.pid}'
            if 'bootloader.path' in args:
                bootloader_path = args['bootloader.path']
                if 'bootloader.file' in args:
                    bootloader_file = args['bootloader.file']
                    bootloader_file = bootloader_path + '/' + bootloader_file
                    args['bootloader.file'] = bootloader_file
            programmer_list = selected_platform.getProgrammerList()
            if programmer_list:
                platform_programmer_dict = constant.sketch_settings.get('programmer', {})
                if str(platform_id) in platform_programmer_dict:
                    programmer_id = platform_programmer_dict[str(platform_id)]
                else:
                    programmer_id = 0
                programmer = programmer_list[programmer_id]
                programmer_args = programmer.getArgs()
                args.update(programmer_args)
            platform_file = getPlatformFile(arduino_info)
            args = addBuildUsbValue(args, platform_file)
            args = replaceAllDictValue(args)
            if not 'upload.maximum_ram_size' in args:
                args['upload.maximum_ram_size'] = '0'
                if 'build.mcu' in args:
                    build_mcu = args['build.mcu']
                    if build_mcu in ram_size_dict:
                        args['upload.maximum_ram_size'] = ram_size_dict[build_mcu]
            if 'build.elide_constructors' in args:
                # boards.txt booleans become real compiler switches.
                if args['build.elide_constructors'] == 'true':
                    args['build.elide_constructors'] = '-felide-constructors'
                else:
                    args['build.elide_constructors'] = ''
            if 'build.cpu' in args:
                # ARM cores define build.cpu instead of build.mcu.
                args['build.mcu'] = args['build.cpu']
            if 'build.gnu0x' in args:
                if args['build.gnu0x'] == 'true':
                    args['build.gnu0x'] = '-std=gnu++0x'
                else:
                    args['build.gnu0x'] = ''
            if 'build.cpp0x' in args:
                if args['build.cpp0x'] == 'true':
                    args['build.cpp0x'] = '-std=c++0x'
                else:
                    args['build.cpp0x'] = ''
    return args
def getSelectedSerialPort():
    """Return the serial port chosen in the settings.

    Normalises an out-of-range saved index to the first detected port
    (persisting the choice) and falls back to the sentinel string
    'no_serial_port' when nothing is connected.
    """
    port_list = serial.getSerialPortList()
    if not port_list:
        return 'no_serial_port'
    port_id = constant.sketch_settings.get('serial_port', -1)
    if not (-1 < port_id < len(port_list)):
        port_id = 0
    constant.sketch_settings.set('serial_port', port_id)
    return port_list[port_id]
def getReplaceTextList(text):
    """Return every '{key}' placeholder occurring in *text*, in order."""
    return re.findall(r'\{\S+?}', text)
def replaceValueText(value_text, args_dict):
    """Expand each '{key}' placeholder in *value_text* from *args_dict*.

    Unknown keys expand to the empty string, mirroring Arduino's own
    platform.txt substitution behavior.
    """
    for placeholder in getReplaceTextList(value_text):
        replacement = args_dict.get(placeholder[1:-1], '')
        value_text = value_text.replace(placeholder, replacement)
    return value_text
def replaceAllDictValue(args_dict):
    """Expand placeholders in every value, mutating args_dict in place.

    Values processed earlier are visible to later expansions, exactly as
    in the original sequential loop.  Returns the same dict for chaining.
    """
    for key, raw_value in args_dict.items():
        args_dict[key] = replaceValueText(raw_value, args_dict)
    return args_dict
def addBuildUsbValue(args, platform_file):
    """Merge 'build.*' USB-related values from platform_file into args.

    Skips extra_flags entries, defaults an empty usb_manufacturer to
    'unknown', expands {placeholders} against the current args, and
    re-quotes values for the Windows shell.
    """
    lines = fileutil.readFileLines(platform_file)
    for line in lines:
        line = line.strip()
        if line and not '#' in line:  # skip blanks and comment lines
            (key, value) = textutil.getKeyValue(line)
            if 'extra_flags' in key:
                continue
            if 'build.' in key:
                if 'usb_manufacturer' in key:
                    if not value:
                        value = 'unknown'
                value = replaceValueText(value, args)
                if constant.sys_platform == 'windows':
                    # Escape double quotes and promote single quotes so the
                    # value survives cmd.exe quoting.
                    value = value.replace('"', '\\"')
                    value = value.replace('\'', '"')
                args[key] = value
    return args
def getDefaultArgs(cur_project, arduino_info):
    """Build the base argument dict shared by every board/platform.

    Returns the placeholder and environment values that board-specific
    arguments are later merged over.
    """
    # Kept for its side effect of normalising the 'platform'/'platform_name'
    # settings; its return value (and the 'system' subfolder the original
    # derived from it) was never used, so those dead locals are gone.
    getCoreFolder(arduino_info)
    arduino_folder = base.getArduinoFolder()
    args = {}
    args['runtime.ide.path'] = arduino_folder
    args['ide.path'] = os.path.join(arduino_folder, 'hardware')
    args['build.project_name'] = cur_project.getName()
    args['serial.port.file'] = getSelectedSerialPort()
    args['archive_file'] = 'core.a'
    args['software'] = 'ARDUINO'
    args['runtime.ide.version'] = '%d' % arduino_info.getVersion()
    # These placeholders are deliberately passed through unexpanded; they
    # are substituted per compilation unit later.
    args['source_file'] = '{source_file}'
    args['object_file'] = '{object_file}'
    args['object_files'] = '{object_files}'
    args['includes'] = '{includes}'
    args['build.path'] = getBuildFolder(cur_project)
    return args
def getBuildFolder(cur_project):
    """Return (and ensure the existence of) the project's build folder.

    Uses the 'build_folder' setting when it points at a real directory;
    otherwise falls back to <documents>/Arduino_Build.  The project name
    is appended either way.
    """
    root = constant.sketch_settings.get('build_folder', '')
    if not (root and os.path.isdir(root)):
        root = os.path.join(fileutil.getDocumentFolder(), 'Arduino_Build')
    build_folder = os.path.join(root, cur_project.getName())
    checkBuildFolder(build_folder)
    return build_folder
def checkBuildFolder(build_folder):
    """Ensure build_folder exists as a directory and purge stale '.d' files.

    A plain file squatting on the path is removed first; dependency files
    left over from a previous build are deleted so they get regenerated.
    """
    if os.path.isfile(build_folder):
        os.remove(build_folder)
    if not os.path.exists(build_folder):
        os.makedirs(build_folder)
    for file_name in fileutil.listDir(build_folder, with_dirs = False):
        if os.path.splitext(file_name)[1] == '.d':
            os.remove(os.path.join(build_folder, file_name))
def getDefaultPlatformFile(arduino_info):
    """Pick the bundled platform description matching the chosen platform.

    Used when a core ships no platform.txt of its own; AVR is the default,
    with ARM/Teensy/Zpuino variants selected by platform (and, for Teensy,
    board version >= 3.0 meaning an ARM core).
    """
    platform_list = arduino_info.getPlatformList()
    platform = platform_list[constant.sketch_settings.get('platform', 1)]
    platform_name = platform.getName()
    file_name = 'arduino_avr.txt'
    if 'Arduino ARM' in platform_name:
        file_name = 'arduino_arm.txt'
    elif 'Teensy' in platform_name:
        board_list = platform.getBoardList()
        board = board_list[constant.sketch_settings.get('board', 0)]
        board_version = float(board.getName().split()[1])
        file_name = 'teensy_arm.txt' if board_version >= 3.0 else 'teensy_avr.txt'
    elif 'Zpuino' in platform_name:
        file_name = 'zpuino.txt'
    return os.path.join(constant.compile_root, file_name)
def getCoreFolder(arduino_info):
    """Return the chosen platform's first core folder holding a platform.txt.

    Normalises an out-of-range platform index to 1 and persists the
    normalised platform id/name in the settings (same side effect as the
    original).  Returns '' when no core folder has a platform.txt.
    """
    platform_list = arduino_info.getPlatformList()
    platform_id = constant.sketch_settings.get('platform', -1)
    if not (0 < platform_id < len(platform_list)):
        platform_id = 1
    cur_platform = platform_list[platform_id]
    platform_name = cur_platform.getName()
    constant.sketch_settings.set('platform', platform_id)
    constant.sketch_settings.set('platform_name', platform_name)
    for candidate in cur_platform.getCoreFolderList():
        if os.path.isfile(os.path.join(candidate, 'platform.txt')):
            return candidate
    return ''
def getPlatformFile(arduino_info):
    """Return the platform.txt for the chosen core, or the bundled default."""
    core_folder = getCoreFolder(arduino_info)
    if not core_folder:
        return getDefaultPlatformFile(arduino_info)
    return os.path.join(core_folder, 'platform.txt')
def splitPlatformFile(platform_file):
    """Split a platform.txt's text into (header, recipes) at 'recipe.'.

    The original used str.index(), which raises ValueError when a platform
    file contains no recipe entries; str.find() lets us fall back to
    treating the whole file as header text instead of crashing.
    """
    text = fileutil.readFile(platform_file)
    index = text.find('recipe.')
    if index < 0:  # no recipe section present
        return (text, '')
    return (text[:index], text[index:])
def getPlatformArgs(platform_text, args):
    """Parse 'key=value' lines from a platform.txt fragment into *args*.

    Values are expanded against the already-known args via replaceValueText.
    Tool prefixes (avrdude/bossac/teensy, 'params.') are stripped so upload
    settings end up under uniform key names; '.linux' / '.quiet' / '.verbose'
    key suffixes are folded in according to the host OS and user settings.
    Mutates and returns *args*.
    """
    lines = platform_text.split('\n')
    for line in lines:
        line = line.strip()
        if line and not '#' in line:  # skip blanks and lines containing '#'
            (key, value) = textutil.getKeyValue(line)
            value = replaceValueText(value, args)
            if 'tools.avrdude.' in key:
                key = key.replace('tools.avrdude.', '')
            if 'tools.bossac.' in key:
                key = key.replace('tools.bossac.', '')
            if 'tools.teensy.' in key:
                key = key.replace('tools.teensy.', '')
            if 'params.' in key:
                key = key.replace('params.', '')
            if constant.sys_platform == 'linux':
                # prefer the Linux-specific variant of a key on Linux hosts
                if '.linux' in key:
                    key = key.replace('.linux', '')
            show_upload_output = constant.sketch_settings.get('show_upload_output', False)
            if not show_upload_output:
                # map the quiet upload command onto the verbose slot so the
                # rest of the code only ever reads the '.verbose' key
                if '.quiet' in key:
                    key = key.replace('.quiet', '.verbose')
            if '.verbose' in key:
                verify_code = constant.sketch_settings.get('verify_code', False)
                if verify_code:
                    value += ' -V'  # avrdude: verify flash after upload
            if key == 'build.extra_flags':
                if key in args:
                    # a board-level build.extra_flags wins over platform.txt
                    continue
            args[key] = value
    return args
def getFullArgs(cur_project, arduino_info):
    """Assemble the complete build-variable dict for the chosen board.

    Merges (in increasing priority) the default args, the board args and the
    platform.txt definitions, then derives core/variant/compiler paths and
    folds the user's extra flags in.  Returns an empty dict when no board
    args are available (no usable board selection).
    """
    args = {}
    board_args = getChosenArgs(arduino_info)
    if board_args:
        default_args = getDefaultArgs(cur_project, arduino_info)
        args.update(default_args)
        args.update(board_args)
        platform_file = getPlatformFile(arduino_info)
        (platform_text_header, platform_text_body) = splitPlatformFile(platform_file)
        # expand the header (tool settings) first so recipes in the body can
        # reference the resolved values
        args = getPlatformArgs(platform_text_header, args)
        variant_folder = args['build.variants_folder']
        cores_folder = args['build.cores_folder']
        build_core = args['build.core']
        build_core_folder = os.path.join(cores_folder, build_core)
        args['build.core_folder'] = build_core_folder
        if 'build.variant' in args:
            build_variant = args['build.variant']
            build_variant_folder = os.path.join(variant_folder, build_variant)
            args['build.variant.path'] = build_variant_folder
        else:
            # boards without a variant reuse the core folder for pins etc.
            args['build.variant.path'] = build_core_folder
        if 'compiler.path' in args:
            compiler_path = args['compiler.path']
        else:
            runtime_ide_path = args['runtime.ide.path']
            compiler_path = runtime_ide_path + '/hardware/tools/avr/bin/'
        compiler_c_cmd = args['compiler.c.cmd']
        if constant.sys_platform == 'windows':
            compiler_c_cmd += '.exe'
        compiler_c_cmd_file = os.path.join(compiler_path, compiler_c_cmd)
        # if the C compiler binary is not found, blank the path so the
        # command is looked up on the system PATH instead
        if os.path.isfile(compiler_c_cmd_file):
            args['compiler.path'] = compiler_path
        else:
            args['compiler.path'] = ''
        extra_flags = constant.sketch_settings.get('extra_flag', '')
        if 'build.extra_flags' in args:
            build_extra_flags = args['build.extra_flags']
        else:
            build_extra_flags = ''
        if extra_flags:
            build_extra_flags += ' '
            build_extra_flags += extra_flags
        args['build.extra_flags'] = build_extra_flags
        # finally expand the build recipes against everything resolved above
        args = getPlatformArgs(platform_text_body, args)
    return args
def getLibFolderListFromProject(cur_project, arduino_info):
    """Return the library folders needed by the project's #include lines.

    Headers found in the project's .ino/C sources are looked up first in the
    selected platform's header->library map, then in the general platform's
    map; duplicates are kept out while preserving first-seen order.
    """
    lib_folder_list = []
    platform_list = arduino_info.getPlatformList()
    platform_id = constant.sketch_settings.get('platform', 1)
    general_platform = platform_list[0]
    selected_platform = platform_list[platform_id]
    general_h_lib_dict = general_platform.getHLibDict()
    selected_h_lib_dict = selected_platform.getHLibDict()
    ino_src_file_list = cur_project.getInoSrcFileList()
    c_src_file_list = cur_project.getCSrcFileList()
    h_list = preprocess.getHListFromSrcList(ino_src_file_list + c_src_file_list)
    for h in h_list:
        lib_folder = ''
        if h in selected_h_lib_dict:
            lib_folder = selected_h_lib_dict[h]
        elif h in general_h_lib_dict:
            lib_folder = general_h_lib_dict[h]
        if lib_folder:
            if not lib_folder in lib_folder_list:
                lib_folder_list.append(lib_folder)
    return lib_folder_list
def genBuildCppFile(build_folder, cur_project, arduino_info):
    """Generate <project>.ino.cpp in the build folder from all .ino sources
    and return its path.

    The Arduino-style preprocessing (prototype generation etc.) is skipped
    when the user enabled the 'set_bare_gcc_only' setting.
    """
    project_name = cur_project.getName()
    cpp_file_name = project_name + '.ino.cpp'
    cpp_file = os.path.join(build_folder, cpp_file_name)
    ino_src_file_list = cur_project.getInoSrcFileList()
    arduino_version = arduino_info.getVersion()
    doMunge = not constant.sketch_settings.get('set_bare_gcc_only', False)
    preprocess.genCppFileFromInoFileList(cpp_file, ino_src_file_list, arduino_version, preprocess=doMunge)
    return cpp_file
def genIncludesPara(build_folder, project_folder, core_folder_list, compiler_include_folder):
    """Build the quoted '-I' include-path fragment for the compiler command
    line, covering the build/project folders, the compiler includes and all
    (sub)folders of the given core folders."""
    sub_folders = sketch.getFolderListFromFolderList(core_folder_list)
    search_paths = [build_folder, project_folder, compiler_include_folder] + sub_folders
    return ''.join('"-I%s" ' % path for path in search_paths)
def getCompileCommand(c_file, args, includes_para):
    """Build the compile Command for one source file.

    The recipe is selected by extension (.S assembler, .c C, anything else
    C++); '{includes}', '{source_file}' and '{object_file}' placeholders are
    substituted, and the Command records its in/out files (presumably for
    up-to-date checks — confirm against the Command class).
    """
    build_folder = args['build.path']
    file_name = os.path.split(c_file)[1]
    file_ext = os.path.splitext(c_file)[1]
    obj_file_name = file_name + '.o'
    obj_file = os.path.join(build_folder, obj_file_name)
    if file_ext in ['.S']:
        command = args['recipe.S.o.pattern']
    elif file_ext in ['.c']:
        command = args['recipe.c.o.pattern']
    else:
        command = args['recipe.cpp.o.pattern']
    command = command.replace('{includes}', includes_para)
    command = command.replace('{source_file}', c_file)
    command = command.replace('{object_file}', obj_file)
    cur_command = Command(command)
    cur_command.setInFile(c_file)
    cur_command.setOutFile(obj_file)
    return cur_command
def getCompileCommandList(c_file_list, args, includes_para):
    """Create one compile Command per source file, in input order."""
    return [getCompileCommand(c_file, args, includes_para)
            for c_file in c_file_list]
def getArCommand(args, core_command_list):
    """Build the archiver Command that packs every core object file into
    the core archive named by args['archive_file']."""
    build_folder = args['build.path']
    archive_file = os.path.join(build_folder, args['archive_file'])
    quoted_objs = ' '.join('"%s"' % cmd.getOutFile() for cmd in core_command_list)
    command_text = args['recipe.ar.pattern'].replace('"{object_file}"', quoted_objs)
    ar_command = Command(command_text)
    ar_command.setOutFile(archive_file)
    return ar_command
def getElfCommand(args, project_command_list):
    """Build the linker Command producing <project>.elf from the project's
    object files."""
    build_folder = args['build.path']
    elf_file = os.path.join(build_folder, args['build.project_name'] + '.elf')
    quoted_objs = ' '.join('"%s"' % cmd.getOutFile() for cmd in project_command_list)
    command_text = args['recipe.c.combine.pattern'].replace('{object_files}', quoted_objs)
    elf_command = Command(command_text)
    elf_command.setOutFile(elf_file)
    return elf_command
def getEepCommand(args):
    """Build the objcopy Command that extracts the EEPROM section to
    <project>.eep."""
    eep_file = os.path.join(args['build.path'], args['build.project_name'] + '.eep')
    eep_command = Command(args['recipe.objcopy.eep.pattern'])
    eep_command.setOutFile(eep_file)
    return eep_command
def getHexCommand(args):
    """Build the objcopy Command producing the final firmware image.

    The output extension (e.g. '.hex' or '.bin') is taken as the four
    characters just before the recipe's trailing quote — this assumes the
    recipe ends with a quoted output file name.
    """
    command_text = args['recipe.objcopy.hex.pattern']
    hex_command = Command(command_text)
    ext = command_text[-5:-1]
    out_name = args['build.project_name'] + ext
    hex_command.setOutFile(os.path.join(args['build.path'], out_name))
    return hex_command
def getSizeCommand(args):
    """Build the size-report Command: '-A' is dropped (summary format) and
    '.hex' swapped for '.elf' so the size tool inspects the ELF output."""
    command_text = args['recipe.size.pattern'].replace('-A', '').replace('.hex', '.elf')
    size_command = Command(command_text)
    size_command.setSizeCommand()
    return size_command
def genCommandList(args, cur_project, arduino_info):
    """Expand the resolved build args into the ordered list of Commands.

    Order: compile project sources, (optionally) compile core sources and
    archive them, link the ELF, extract EEPROM (if the recipe exists),
    produce the firmware image, report sizes.  Core compilation is skipped
    unless full compilation is requested or the core archive is missing.
    """
    build_folder = args['build.path']
    project_folder = cur_project.getFolder()
    build_cpp_file = genBuildCppFile(build_folder, cur_project, arduino_info)
    build_core_folder = args['build.core_folder']
    build_variant_folder = args['build.variant.path']
    lib_folder_list = getLibFolderListFromProject(cur_project, arduino_info)
    core_folder_list = [build_core_folder, build_variant_folder] + lib_folder_list
    # derive <toolchain>/<triple>/include from the compiler bin folder
    compiler_bin_folder = args['compiler.path']
    compiler_folder = os.path.split(compiler_bin_folder)[0]
    compiler_folder = os.path.split(compiler_folder)[0]
    compiler_name = os.path.split(compiler_folder)[1]
    compiler_folder = os.path.join(compiler_folder, compiler_name)
    compiler_include_folder = os.path.join(compiler_folder, 'include')
    compiler_include_folder = compiler_include_folder.replace('/', os.path.sep)
    # core_folder_list.append(compiler_include_folder)
    includes_para = genIncludesPara(build_folder, project_folder, core_folder_list, compiler_include_folder)
    project_C_file_list = [build_cpp_file] + cur_project.getCSrcFileList() + cur_project.getAsmSrcFileList()
    core_C_file_list = sketch.getCSrcFileListFromFolderList(core_folder_list) + sketch.getAsmSrcFileListFromFolderList(core_folder_list)
    project_command_list = getCompileCommandList(project_C_file_list, args, includes_para)
    core_command_list = getCompileCommandList(core_C_file_list, args, includes_para)
    ar_command = getArCommand(args, core_command_list)
    elf_command = getElfCommand(args, project_command_list)
    eep_command = getEepCommand(args)
    hex_command = getHexCommand(args)
    size_command = getSizeCommand(args)
    full_compilation = constant.sketch_settings.get('full_compilation', True)
    archive_file_name = args['archive_file']
    archive_file = os.path.join(build_folder, archive_file_name)
    if not os.path.isfile(archive_file):
        # no cached core archive: force the core to be rebuilt
        full_compilation = True
    command_list = []
    command_list += project_command_list
    if full_compilation:
        if os.path.isfile(archive_file):
            os.remove(archive_file)
        command_list += core_command_list
        command_list.append(ar_command)
    command_list.append(elf_command)
    if args['recipe.objcopy.eep.pattern']:
        command_list.append(eep_command)
    command_list.append(hex_command)
    command_list.append(size_command)
    return command_list
def getCommandList(cur_project, arduino_info):
    """Top-level entry point: resolve the full build args and expand them
    into the ordered list of build Commands (empty when args could not be
    resolved)."""
    args = getFullArgs(cur_project, arduino_info)
    if not args:
        return []
    return genCommandList(args, cur_project, arduino_info)
def printSizeInfo(output_console, stdout, args):
    """Parse the size tool's stdout and print flash/RAM usage percentages.

    Assumes the second-to-last line of *stdout* holds the Berkeley-format
    size columns (text, data, bss) — TODO confirm against the size recipe.
    Flash use = text + data; RAM use = data + bss.
    """
    flash_size_key = 'upload.maximum_size'
    ram_size_key = 'upload.maximum_ram_size'
    max_flash_size = int(args[flash_size_key])
    max_ram_size = int(args[ram_size_key])
    size_line = stdout.split('\n')[-2].strip()
    info_list = re.findall(r'\S+', size_line)
    text_size = int(info_list[0])
    data_size = int(info_list[1])
    bss_size = int(info_list[2])
    flash_size = text_size + data_size
    ram_size = data_size + bss_size
    flash_percent = float(flash_size) / max_flash_size * 100
    text = 'Binary sketch size: %d bytes (of a %d byte maximum, %.2f percent).\n' % (flash_size, max_flash_size, flash_percent)
    if max_ram_size > 0:
        ram_percent = float(ram_size) / max_ram_size * 100
        text += 'Estimated memory use: %d bytes (of a %d byte maximum, %.2f percent).\n' % (ram_size, max_ram_size, ram_percent)
    output_console.printText(text)
def formatCommand(command):
    """Normalize a command string for the host shell.

    Both branches only apply under Python 2 (constant.sys_version < 3):
    on Windows the forward slashes are flipped and the whole command is
    quoted, and unicode commands are encoded to the system encoding
    (the `unicode` name does not exist under Python 3).
    """
    if constant.sys_version < 3:
        if constant.sys_platform == 'windows':
            command = command.replace('/"', '"')
            command = command.replace('/', os.path.sep)
            command = '"' + command + '"'
    if constant.sys_version < 3:
        if isinstance(command, unicode):
            command = command.encode(constant.sys_encoding)
    return command
| kierangraham/dotfiles | Sublime/Packages/Arduino-like IDE/app/compiler.py | Python | bsd-2-clause | 24,678 |
# testParser.py
# -------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to
# http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import re
import sys
class TestParser(object):
    """Parser for autograder test-case files.

    A test file is a sequence of properties, either one-line
    (key: "value") or multi-line (key: triple-quoted block), with
    '#' comments stripped.  parse() returns a dict of the properties plus
    bookkeeping keys ('__raw_lines__', 'path', '__emit__') that allow the
    file to be regenerated by emitTestDict().
    """

    def __init__(self, path):
        # save the path to the test file
        self.path = path

    def removeComments(self, rawlines):
        """Remove any portion of each line following a '#' symbol and
        rejoin the lines with newlines."""
        fixed_lines = []
        for l in rawlines:
            idx = l.find('#')
            if idx == -1:
                fixed_lines.append(l)
            else:
                fixed_lines.append(l[0:idx])
        return '\n'.join(fixed_lines)

    def parse(self):
        """Read the test case, remove comments and return its property
        dict; exits the program (status 1) on a malformed line."""
        test = {}
        with open(self.path) as handle:
            raw_lines = handle.read().split('\n')
        test_text = self.removeComments(raw_lines)
        test['__raw_lines__'] = raw_lines
        test['path'] = self.path
        test['__emit__'] = []
        lines = test_text.split('\n')
        i = 0
        # read a property in each loop cycle
        # (regexes are raw strings: '\A'/'\Z' are invalid escape sequences
        # in ordinary literals and warn on modern Python)
        while(i < len(lines)):
            # skip blank lines
            if re.match(r'\A\s*\Z', lines[i]):
                test['__emit__'].append(("raw", raw_lines[i]))
                i += 1
                continue
            m = re.match(r'\A([^"]*?):\s*"([^"]*)"\s*\Z', lines[i])
            if m:
                test[m.group(1)] = m.group(2)
                test['__emit__'].append(("oneline", m.group(1)))
                i += 1
                continue
            m = re.match(r'\A([^"]*?):\s*"""\s*\Z', lines[i])
            if m:
                msg = []
                i += 1
                # accumulate until the closing triple quote; assumes
                # well-formed input (IndexError if the file ends first)
                while(not re.match(r'\A\s*"""\s*\Z', lines[i])):
                    msg.append(raw_lines[i])
                    i += 1
                test[m.group(1)] = '\n'.join(msg)
                test['__emit__'].append(("multiline", m.group(1)))
                i += 1
                continue
            # parenthesized print call so the module also runs under
            # Python 3 (the old print statement is a py3 syntax error)
            print('error parsing test file: %s' % self.path)
            sys.exit(1)
        return test
def emitTestDict(testDict, handle):
    """Write a parsed test dict back out in test-file syntax, following the
    ordering recorded under '__emit__' by TestParser.parse()."""
    formatters = {
        "raw": lambda name: name + "\n",
        "oneline": lambda name: '%s: "%s"\n' % (name, testDict[name]),
        "multiline": lambda name: '%s: """\n%s\n"""\n' % (name, testDict[name]),
    }
    for kind, data in testDict['__emit__']:
        if kind not in formatters:
            raise Exception("Bad __emit__")
        handle.write(formatters[kind](data))
| naderm/cs188 | p5/classification/testParser.py | Python | bsd-3-clause | 3,041 |
from models import Marker
from static.pymapcluster import calculate_clusters
import time
import logging
import concurrent.futures
import multiprocessing
def retrieve_clusters(**kwargs):
    """Compute marker clusters for a map viewport, one worker per CPU core.

    The viewport (ne_lat/ne_lng/sw_lat/sw_lng in kwargs) is split into
    latitude strips; markers for each strip are queried and clustering of
    each strip is submitted to a thread pool.  Returns the concatenated
    cluster list.  Note: the DB query itself runs on the calling thread,
    only calculate_clusters() is parallelized; kwargs is mutated in place
    by each box update (local copy, so callers are unaffected).
    """
    marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])
    result_futures = []
    logging.info('number of cores: ' + str(multiprocessing.cpu_count()))
    with concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:
        for marker_box in marker_boxes:
            kwargs.update(marker_box)
            markers_in_box = Marker.bounding_box_query(**kwargs).all()
            result_futures.append(executor.submit(calculate_clusters, markers_in_box, kwargs['zoom']))
    completed_futures = concurrent.futures.wait(result_futures)
    result = []
    for future in completed_futures.done:
        result.extend(future.result())
    return result
def divide_to_boxes(ne_lat, ne_lng, sw_lat, sw_lng):
    """Split a bounding box into cpu_count horizontal strips.

    Each strip spans the full longitude range and 1/cpu_count of the
    latitude range, so marker queries can be farmed out to one worker
    per core.  Returns a list of dicts with ne_lat/ne_lng/sw_lat/sw_lng.
    """
    cpu_count = multiprocessing.cpu_count()
    lat_box_size = (ne_lat - sw_lat) / cpu_count
    boxes = []
    for i in range(cpu_count):
        # Strip i covers [sw_lat + i*size, sw_lat + (i+1)*size].  The
        # previous code added the offset to ne_lat instead of sw_lat,
        # producing strips that overlapped each other and extended past
        # the northern edge of the requested box.
        boxes.append({'ne_lat': sw_lat + (i + 1) * lat_box_size, 'ne_lng': ne_lng,
                      'sw_lat': sw_lat + i * lat_box_size, 'sw_lng': sw_lng})
    return boxes
| HamutalCohen3/anyway | clusters_calculator.py | Python | bsd-3-clause | 1,569 |
# coding: utf-8
import string
import pandas as pd
import pandas.util.testing as tm
import pandas.util._test_decorators as td
import pytest
from numpy.random import randn
import pandas.tools.plotting as plotting
from pandas.tests.plotting.common import TestPlotBase
"""
Test cases for plot functions imported from deprecated
pandas.tools.plotting
"""
@td.skip_if_no_mpl
class TestDeprecatedNameSpace(TestPlotBase):
    """Check that the deprecated pandas.tools.plotting entry points (and the
    pd.* top-level aliases) still work but emit FutureWarning."""
    @pytest.mark.slow
    @td.skip_if_no_scipy
    def test_scatter_plot_legacy(self):
        # both the old module path and the pd-level alias must warn
        df = pd.DataFrame(randn(100, 2))
        with tm.assert_produces_warning(FutureWarning):
            plotting.scatter_matrix(df)
        with tm.assert_produces_warning(FutureWarning):
            pd.scatter_matrix(df)
    @pytest.mark.slow
    def test_boxplot_deprecated(self):
        df = pd.DataFrame(randn(6, 4),
                          index=list(string.ascii_letters[:6]),
                          columns=['one', 'two', 'three', 'four'])
        df['indic'] = ['foo', 'bar'] * 3
        with tm.assert_produces_warning(FutureWarning):
            plotting.boxplot(df, column=['one', 'two'],
                             by='indic')
    @pytest.mark.slow
    def test_radviz_deprecated(self, iris):
        # `iris` is a pytest fixture supplied by the plotting test suite
        with tm.assert_produces_warning(FutureWarning):
            plotting.radviz(frame=iris, class_column='Name')
    @pytest.mark.slow
    def test_plot_params(self):
        with tm.assert_produces_warning(FutureWarning):
            pd.plot_params['xaxis.compat'] = True
| cython-testbed/pandas | pandas/tests/plotting/test_deprecated.py | Python | bsd-3-clause | 1,513 |
#!/usr/bin/env python
r"""
Example of solving Laplace's equation on a block domain refined with level 1
hanging nodes.
The domain is progressively refined towards the edge/face of the block, where
Dirichlet boundary conditions are prescribed by an oscillating function.
Find :math:`u` such that:
.. math::
\int_{\Omega} \nabla v \cdot \nabla u = 0
\;, \quad \forall s \;.
Notes
-----
The implementation of the mesh refinement with level 1 hanging nodes is a
proof-of-concept code with many unresolved issues. The main problem is the fact
that a user needs to input the cells to refine at each level, while taking care
of the following constraints:
- the level 1 hanging nodes constraint: a cell that has a less-refined
neighbour cannot be refined;
- the implementation constraint: a cell with a refined neighbour cannot be
refined.
The hanging nodes are treated by a basis transformation/DOF substitution, which
has to be applied explicitly by the user:
- call ``field.substitute_dofs(subs)`` before assembling and solving;
- then call ``field.restore_dofs()`` before saving results.
Usage Examples
--------------
Default options, 2D, storing results in 'output' directory::
$ python examples/diffusion/laplace_refine_interactive.py output
$ python postproc.py output/hanging.vtk --wireframe -b -d'u,plot_warp_scalar'
Default options, 3D, storing results in 'output' directory::
$ python examples/diffusion/laplace_refine_interactive.py -3 output
$ python postproc.py output/hanging.vtk --wireframe -b --3d
Finer initial domain, 2D, storing results in 'output' directory::
$ python examples/diffusion/laplace_refine_interactive.py --shape=11,11 output
$ python postproc.py output/hanging.vtk --wireframe -b -d'u,plot_warp_scalar'
Bi-quadratic approximation, 2D, storing results in 'output' directory::
$ python examples/diffusion/laplace_refine_interactive.py --order=2 output
# View solution with higher order DOFs removed.
$ python postproc.py output/hanging.vtk --wireframe -b -d'u,plot_warp_scalar'
# View full solution on a mesh adapted for visualization.
$ python postproc.py output/hanging_u.vtk --wireframe -b -d'u,plot_warp_scalar'
"""
from __future__ import absolute_import
from argparse import RawDescriptionHelpFormatter, ArgumentParser
import os
import sys
sys.path.append('.')
import numpy as nm
from sfepy.base.base import output, Struct
from sfepy.base.ioutils import ensure_path
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.discrete import (FieldVariable, Integral, Equation, Equations,
Function, Problem)
from sfepy.discrete.fem import FEDomain, Field
from sfepy.discrete.conditions import (Conditions, EssentialBC)
import sfepy.discrete.fem.refine_hanging as rh
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.terms import Term
def refine_towards_facet(domain0, grading, axis):
    """Progressively refine *domain0* towards a facet.

    For each coordinate in *grading*, the cells whose vertices satisfy
    '<axis> <coor>' (e.g. 'x < 0.25') are refined with level-1 hanging
    nodes; *subs* accumulates the DOF substitutions across levels.

    Returns the refined domain and the final substitution array (None when
    *grading* is empty).
    """
    subs = None
    domain = domain0
    for level, coor in enumerate(grading):
        refine = nm.zeros(domain.mesh.n_el, dtype=nm.uint8)
        region = domain.create_region('aux',
                                      'vertices in (%s %.10f)' % (axis, coor),
                                      add_to_regions=False)
        refine[region.cells] = 1
        domain, subs = rh.refine(domain, refine, subs=subs)
    return domain, subs
# Help strings for the command-line options defined in main().
helps = {
    'output_dir' :
    'output directory',
    'dims' :
    'dimensions of the block [default: %(default)s]',
    'shape' :
    'shape (counts of nodes in x, y[, z]) of the block [default: %(default)s]',
    'centre' :
    'centre of the block [default: %(default)s]',
    '3d' :
    'generate a 3D block',
    'order' :
    'field approximation order',
}
def main():
    """Solve Laplace's equation on a block refined towards its x-min facet.

    Steps: parse CLI options, generate the block mesh, refine towards the
    left facet with level-1 hanging nodes, set up the Laplace problem with
    an oscillating Dirichlet BC, solve, and save results to output_dir.
    """
    parser = ArgumentParser(description=__doc__.rstrip(),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('output_dir', help=helps['output_dir'])
    parser.add_argument('--dims', metavar='dims',
                        action='store', dest='dims',
                        default='1.0,1.0,1.0', help=helps['dims'])
    parser.add_argument('--shape', metavar='shape',
                        action='store', dest='shape',
                        default='7,7,7', help=helps['shape'])
    parser.add_argument('--centre', metavar='centre',
                        action='store', dest='centre',
                        default='0.0,0.0,0.0', help=helps['centre'])
    parser.add_argument('-3', '--3d',
                        action='store_true', dest='is_3d',
                        default=False, help=helps['3d'])
    parser.add_argument('--order', metavar='int', type=int,
                        action='store', dest='order',
                        default=1, help=helps['order'])
    options = parser.parse_args()
    dim = 3 if options.is_3d else 2
    # NOTE: eval() of the option strings is trusted CLI input only
    dims = nm.array(eval(options.dims), dtype=nm.float64)[:dim]
    shape = nm.array(eval(options.shape), dtype=nm.int32)[:dim]
    centre = nm.array(eval(options.centre), dtype=nm.float64)[:dim]
    output('dimensions:', dims)
    output('shape: ', shape)
    output('centre: ', centre)
    mesh0 = gen_block_mesh(dims, shape, centre, name='block-fem',
                           verbose=True)
    domain0 = FEDomain('d', mesh0)
    bbox = domain0.get_mesh_bounding_box()
    min_x, max_x = bbox[:, 0]
    eps = 1e-8 * (max_x - min_x)
    # geometric grading of refinement thresholds towards the x-min facet
    cnt = (shape[0] - 1) // 2
    g0 = 0.5 * dims[0]
    grading = nm.array([g0 / 2**ii for ii in range(cnt)]) + eps + centre[0] - g0
    domain, subs = refine_towards_facet(domain0, grading, 'x <')
    omega = domain.create_region('Omega', 'all')
    gamma1 = domain.create_region('Gamma1',
                                  'vertices in (x < %.10f)' % (min_x + eps),
                                  'facet')
    gamma2 = domain.create_region('Gamma2',
                                  'vertices in (x > %.10f)' % (max_x - eps),
                                  'facet')
    field = Field.from_args('fu', nm.float64, 1, omega,
                            approx_order=options.order)
    if subs is not None:
        # apply the hanging-node constraints before assembling
        field.substitute_dofs(subs)
    u = FieldVariable('u', 'unknown', field)
    v = FieldVariable('v', 'test', field, primary_var_name='u')
    integral = Integral('i', order=2*options.order)
    t1 = Term.new('dw_laplace(v, u)',
                  integral, omega, v=v, u=u)
    eq = Equation('eq', t1)
    eqs = Equations([eq])
    def u_fun(ts, coors, bc=None, problem=None):
        """
        Define a displacement depending on the y coordinate.
        """
        if coors.shape[1] == 2:
            min_y, max_y = bbox[:, 1]
            y = (coors[:, 1] - min_y) / (max_y - min_y)
            val = (max_y - min_y) * nm.cos(3 * nm.pi * y)
        else:
            min_y, max_y = bbox[:, 1]
            min_z, max_z = bbox[:, 2]
            y = (coors[:, 1] - min_y) / (max_y - min_y)
            z = (coors[:, 2] - min_z) / (max_z - min_z)
            val = ((max_y - min_y) * (max_z - min_z)
                   * nm.cos(3 * nm.pi * y) * (1.0 + 3.0 * (z - 0.5)**2))
        return val
    bc_fun = Function('u_fun', u_fun)
    fix1 = EssentialBC('shift_u', gamma1, {'u.0' : bc_fun})
    fix2 = EssentialBC('fix2', gamma2, {'u.all' : 0.0})
    ls = ScipyDirect({})
    nls = Newton({}, lin_solver=ls)
    pb = Problem('heat', equations=eqs)
    pb.set_bcs(ebcs=Conditions([fix1, fix2]))
    pb.set_solver(nls)
    state = pb.solve(save_results=False)
    if subs is not None:
        # undo the DOF substitution before saving
        field.restore_dofs()
    filename = os.path.join(options.output_dir, 'hanging.vtk')
    ensure_path(filename)
    pb.save_state(filename, state)
    if options.order > 1:
        # for higher orders also save an adaptively linearized solution
        pb.save_state(filename, state, linearization=Struct(kind='adaptive',
                                                            min_level=0,
                                                            max_level=8,
                                                            eps=1e-3))
if __name__ == '__main__':
    main()
| sfepy/sfepy | examples/diffusion/laplace_refine_interactive.py | Python | bsd-3-clause | 8,056 |
import re
from django import template
from django.core.exceptions import ImproperlyConfigured
from django.urls import NoReverseMatch
from django.utils.functional import cached_property
from wagtail.images.models import Filter
from wagtail.images.shortcuts import get_rendition_or_not_found
from wagtail.images.views.serve import generate_image_url
register = template.Library()
allowed_filter_pattern = re.compile(r"^[A-Za-z0-9_\-\.]+$")
@register.tag(name="image")
def image(parser, token):
    """Compile the {% image %} template tag.

    Accepted forms:
      {% image expr filter-spec [more-specs] [attr="value" ...] %}
      {% image expr filter-spec as var %}
    Bits containing '=' become HTML attributes, other bits are filter specs.
    NOTE(review): bits[0] is read before any emptiness check, so a bare
    {% image %} raises IndexError rather than the friendly error below —
    confirm whether that path is reachable in practice.
    """
    bits = token.split_contents()[1:]
    image_expr = parser.compile_filter(bits[0])
    bits = bits[1:]
    filter_specs = []
    attrs = {}
    output_var_name = None
    as_context = False # if True, the next bit to be read is the output variable name
    is_valid = True
    for bit in bits:
        if bit == 'as':
            # token is of the form {% image self.photo max-320x200 as img %}
            as_context = True
        elif as_context:
            if output_var_name is None:
                output_var_name = bit
            else:
                # more than one item exists after 'as' - reject as invalid
                is_valid = False
        else:
            try:
                name, value = bit.split('=')
                attrs[name] = parser.compile_filter(value) # setup to resolve context variables as value
            except ValueError:
                # no '=': treat the bit as a filter spec instead
                if allowed_filter_pattern.match(bit):
                    filter_specs.append(bit)
                else:
                    raise template.TemplateSyntaxError(
                        "filter specs in 'image' tag may only contain A-Z, a-z, 0-9, dots, hyphens and underscores. "
                        "(given filter: {})".format(bit)
                    )
    if as_context and output_var_name is None:
        # context was introduced but no variable given ...
        is_valid = False
    if output_var_name and attrs:
        # attributes are not valid when using the 'as img' form of the tag
        is_valid = False
    if len(filter_specs) == 0:
        # there must always be at least one filter spec provided
        is_valid = False
    if len(bits) == 0:
        # no resize rule provided eg. {% image page.image %}
        raise template.TemplateSyntaxError(
            "no resize rule provided. "
            "'image' tag should be of the form {% image self.photo max-320x200 [ custom-attr=\"value\" ... ] %} "
            "or {% image self.photo max-320x200 as img %}"
        )
    if is_valid:
        return ImageNode(image_expr, '|'.join(filter_specs), attrs=attrs, output_var_name=output_var_name)
    else:
        raise template.TemplateSyntaxError(
            "'image' tag should be of the form {% image self.photo max-320x200 [ custom-attr=\"value\" ... ] %} "
            "or {% image self.photo max-320x200 as img %}"
        )
class ImageNode(template.Node):
    """Compiled {% image %} tag node.

    Resolves the image expression at render time, produces a rendition for
    the (pre-parsed) filter spec, and either renders an <img> tag with the
    given attributes or stores the rendition in a context variable.
    """

    def __init__(self, image_expr, filter_spec, output_var_name=None, attrs=None):
        self.image_expr = image_expr
        self.output_var_name = output_var_name
        # The previous default was a mutable `attrs={}`: a single dict
        # shared by every node constructed without explicit attrs, so one
        # node's attributes could leak into another.  Use a None sentinel
        # and create a fresh dict instead (backward compatible).
        self.attrs = {} if attrs is None else attrs
        self.filter_spec = filter_spec

    @cached_property
    def filter(self):
        # Built lazily and cached so template parsing stays cheap.
        return Filter(spec=self.filter_spec)

    def render(self, context):
        try:
            image = self.image_expr.resolve(context)
        except template.VariableDoesNotExist:
            return ''
        if not image:
            if self.output_var_name:
                # {% image ... as img %} with a missing image: expose None
                context[self.output_var_name] = None
            return ''
        if not hasattr(image, 'get_rendition'):
            raise ValueError("image tag expected an Image object, got %r" % image)
        rendition = get_rendition_or_not_found(image, self.filter)
        if self.output_var_name:
            # return the rendition object in the given variable
            context[self.output_var_name] = rendition
            return ''
        else:
            # render the rendition's image tag now
            resolved_attrs = {}
            for key in self.attrs:
                resolved_attrs[key] = self.attrs[key].resolve(context)
            return rendition.img_tag(resolved_attrs)
@register.simple_tag()
def image_url(image, filter_spec, viewname='wagtailimages_serve'):
    """Template tag returning a dynamic-serve URL for *image* with the given
    filter spec; raises ImproperlyConfigured when the serve view whose name
    is *viewname* has no URL route configured."""
    try:
        return generate_image_url(image, filter_spec, viewname)
    except NoReverseMatch:
        raise ImproperlyConfigured(
            "'image_url' tag requires the " + viewname + " view to be configured. Please see "
            "https://docs.wagtail.org/en/stable/advanced_topics/images/image_serve_view.html#setup for instructions."
        )
| jnns/wagtail | wagtail/images/templatetags/wagtailimages_tags.py | Python | bsd-3-clause | 4,585 |
import string
# Manages Local "database" for ZBWarDrive:
# This keeps track of current ZBWarDrive and Sniffing Device State.
# It is different from the online logging database.
class ZBScanDB:
    """
    In-memory state store for the zbscanning program.

    Tracks the capture devices (indexed by device id) and the most recently
    observed network on each ZigBee channel (11-26).  This is separate from
    the online logging database.
    """
    def __init__(self):
        # channel -> (key, spanid, source, packet) for the most recently
        # observed network, or None if nothing was seen on that channel yet
        self.channels = dict.fromkeys(range(11, 27))
        # devid -> (device string, serial, status, channel); status is
        # 'Free' or 'Capture', channel is only meaningful while capturing
        self.devices = {}
    def close(self):
        """No-op; kept for API symmetry with a real database backend."""
        pass
    def store_devices(self, devid, devstr, devserial):
        """Register a new device, initially marked Free."""
        self.devices[devid] = (devstr, devserial, 'Free', None)
    def get_devices_nextFree(self):
        """Return the devid of a device marked 'Free', or None if there
        are no Free devices in the DB."""
        for devid, dev in self.devices.items():
            if dev[2] == 'Free':
                return devid
        return None
    def update_devices_status(self, devid, newstatus):
        """Set the status of device *devid*; returns None when unknown."""
        if devid not in self.devices:
            return None
        (devstr, devserial, _, chan) = self.devices[devid]
        self.devices[devid] = (devstr, devserial, newstatus, chan)
    def update_devices_start_capture(self, devid, channel):
        """Mark device *devid* as capturing on *channel*."""
        if devid not in self.devices:
            return None
        (devstr, devserial, _, _) = self.devices[devid]
        self.devices[devid] = (devstr, devserial, "Capture", channel)
    def store_networks(self, key, spanid, source, channel, packet):
        """Record an observed network; ignored for invalid channels."""
        if channel not in self.channels:
            return None
        # NOTE: only the most recent network per channel is retained
        self.channels[channel] = (key, spanid, source, packet)
    def get_networks_channel(self, key):
        """Return the channel of the network identified by *key*, or None
        if it doesn't exist in the DB.

        Fixed: the previous implementation iterated the channels dict
        directly (yielding keys only) and unpacked each key into two names,
        raising TypeError on first use; it also did not skip channels with
        no observation (None entries).
        """
        for chan, data in self.channels.items():
            if data is not None and data[0] == key:
                return chan
        return None
    def channel_status_logging(self, chan):
        '''
        Returns False if we have not seen the network or are not currently
        logging it's channel, and returns True if we are currently logging it.
        @return boolean
        '''
        if chan is None: raise Exception("None given for channel number")
        elif chan not in self.channels: raise Exception("Invalid channel")
        for dev in self.devices.values():
            if dev[3] == chan and dev[2] == 'Capture':
                return True
        return False
# end of ZBScanDB class
def toHex(bin):
    """Return the lowercase hex representation of a byte string,
    e.g. '\\x01\\xff' -> '01ff'."""
    return ''.join('%02x' % ord(ch) for ch in bin)
| JonathonReinhart/killerbee | killerbee/zbwardrive/db.py | Python | bsd-3-clause | 2,845 |
"""
count number of reads mapping to features of transcripts
"""
import os
import sys
import itertools
import pandas as pd
import gffutils
from bcbio.utils import file_exists
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio import bam
import bcbio.pipeline.datadict as dd
def combine_count_files(files, out_file=None, ext=".fpkm"):
    """
    combine a set of count files into a single combined file

    Each input is a two-column (id, count) TSV; columns in the output are
    named after the input basenames with *ext* removed.  If *out_file* is
    not given it defaults to combined.counts next to the first input, and
    an existing output is returned as-is without recomputation.
    """
    # the loop below re-checks each file with a per-file message; this
    # all() pass gives the combined message first
    assert all([file_exists(x) for x in files]), \
        "Some count files in %s do not exist." % files
    for f in files:
        assert file_exists(f), "%s does not exist or is empty." % f
    # NOTE(review): str.replace removes *ext* anywhere in the path, not
    # just as a suffix — confirm input names never contain it elsewhere
    col_names = [os.path.basename(x.replace(ext, "")) for x in files]
    if not out_file:
        out_dir = os.path.join(os.path.dirname(files[0]))
        out_file = os.path.join(out_dir, "combined.counts")
    if file_exists(out_file):
        return out_file
    # join each file's single column onto the first file's frame, aligned
    # on the id index
    for i, f in enumerate(files):
        if i == 0:
            df = pd.io.parsers.read_table(f, sep="\t", index_col=0, header=None,
                                          names=[col_names[0]])
        else:
            df = df.join(pd.io.parsers.read_table(f, sep="\t", index_col=0,
                                                  header=None,
                                                  names=[col_names[i]]))
    df.to_csv(out_file, sep="\t", index_label="id")
    return out_file
def annotate_combined_count_file(count_file, gtf_file, out_file=None):
    """Add a gene-symbol column to a combined count file.

    Requires a pre-built gffutils database (<gtf_file>.db); returns the
    annotated file path, or None when the database is missing or the GTF
    lacks gene_id/gene_name attributes.
    """
    dbfn = gtf_file + ".db"
    if not file_exists(dbfn):
        return None
    # vestigial guard: gffutils is imported at module level, so it is
    # always truthy here
    if not gffutils:
        return None
    db = gffutils.FeatureDB(dbfn, keep_order=True)
    if not out_file:
        out_dir = os.path.dirname(count_file)
        out_file = os.path.join(out_dir, "annotated_combined.counts")
    # if the genes don't have a gene_id or gene_name set, bail out
    try:
        symbol_lookup = {f['gene_id'][0]: f['gene_name'][0] for f in
                         db.features_of_type('exon')}
    except KeyError:
        return None
    df = pd.io.parsers.read_table(count_file, sep="\t", index_col=0, header=0)
    # ids with no symbol get an empty string
    df['symbol'] = df.apply(lambda x: symbol_lookup.get(x.name, ""), axis=1)
    df.to_csv(out_file, sep="\t", index_label="id")
    return out_file
| Cyberbio-Lab/bcbio-nextgen | bcbio/rnaseq/count.py | Python | mit | 2,271 |
import braintree
from braintree.resource import Resource
class UnknownPaymentMethod(Resource):
    """Payment method of a type this library does not recognize."""

    # Generic placeholder logo served from Braintree's asset host.
    _IMAGE_URL = "https://assets.braintreegateway.com/payment_method_logo/unknown.png"

    def image_url(self):
        """Return the URL of the generic 'unknown' payment method logo."""
        return self._IMAGE_URL
| DiptoDas8/Biponi | lib/python2.7/site-packages/braintree/unknown_payment_method.py | Python | mit | 206 |
from glad.lang.common.loader import BaseLoader
from glad.lang.d.loader import LOAD_OPENGL_DLL
_OPENGL_LOADER = \
LOAD_OPENGL_DLL % {'pre':'private', 'init':'open_gl',
'proc':'get_proc', 'terminate':'close_gl'} + '''
bool gladLoadGL() {
bool status = false;
if(open_gl()) {
status = gladLoadGL(x => get_proc(x));
close_gl();
}
return status;
}
'''
_OPENGL_HAS_EXT = '''
static struct GLVersion { static int major = 0; static int minor = 0; }
private extern(C) char* strstr(const(char)*, const(char)*) @nogc;
private extern(C) int strcmp(const(char)*, const(char)*) @nogc;
private extern(C) int strncmp(const(char)*, const(char)*, size_t) @nogc;
private extern(C) size_t strlen(const(char)*) @nogc;
private bool has_ext(const(char)* ext) @nogc {
if(GLVersion.major < 3) {
const(char)* extensions = cast(const(char)*)glGetString(GL_EXTENSIONS);
const(char)* loc;
const(char)* terminator;
if(extensions is null || ext is null) {
return false;
}
while(1) {
loc = strstr(extensions, ext);
if(loc is null) {
return false;
}
terminator = loc + strlen(ext);
if((loc is extensions || *(loc - 1) == ' ') &&
(*terminator == ' ' || *terminator == '\\0')) {
return true;
}
extensions = terminator;
}
} else {
int num;
glGetIntegerv(GL_NUM_EXTENSIONS, &num);
for(uint i=0; i < cast(uint)num; i++) {
if(strcmp(cast(const(char)*)glGetStringi(GL_EXTENSIONS, i), ext) == 0) {
return true;
}
}
}
return false;
}
'''
_FIND_VERSION = '''
// Thank you @elmindreda
// https://github.com/elmindreda/greg/blob/master/templates/greg.c.in#L176
// https://github.com/glfw/glfw/blob/master/src/context.c#L36
int i;
const(char)* glversion;
const(char)*[] prefixes = [
"OpenGL ES-CM ".ptr,
"OpenGL ES-CL ".ptr,
"OpenGL ES ".ptr,
];
glversion = cast(const(char)*)glGetString(GL_VERSION);
if (glversion is null) return;
foreach(prefix; prefixes) {
size_t length = strlen(prefix);
if (strncmp(glversion, prefix, length) == 0) {
glversion += length;
break;
}
}
int major = glversion[0] - \'0\';
int minor = glversion[2] - \'0\';
GLVersion.major = major; GLVersion.minor = minor;
'''
class OpenGLDLoader(BaseLoader):
    """Writes the D-language OpenGL loader boilerplate into the output file."""

    def write_header_end(self, fobj):
        # D output needs no separate header section.
        pass

    def write_header(self, fobj):
        # D output needs no separate header section.
        pass

    def write(self, fobj):
        fobj.write('alias Loader = void* delegate(const(char)*);\n')
        if not self.disabled and 'gl' in self.apis:
            fobj.write(_OPENGL_LOADER)

    def write_begin_load(self, fobj):
        # Bootstrap: glGetString must resolve and answer before anything else.
        for chunk in (
            '\tglGetString = cast(typeof(glGetString))load("glGetString");\n',
            '\tif(glGetString is null) { return false; }\n',
            '\tif(glGetString(GL_VERSION) is null) { return false; }\n\n',
        ):
            fobj.write(chunk)

    def write_end_load(self, fobj):
        fobj.write('\treturn GLVersion.major != 0 || GLVersion.minor != 0;\n')

    def write_find_core(self, fobj):
        fobj.write(_FIND_VERSION)

    def write_has_ext(self, fobj):
        fobj.write(_OPENGL_HAS_EXT)
| bsmr-opengl/glad | glad/lang/d/loader/gl.py | Python | mit | 3,394 |
# Side effect on import: announces that this module was loaded.
print('bar-package grok!')
| tuanavu/python-cookbook-3rd | src/10/making_separate_directories_import_under_a_common_namespace/bar-package/spam/grok.py | Python | mit | 27 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class IntegrationAccountMapPaged(Paged):
    """
    A paging container for iterating over a list of IntegrationAccountMap object
    """

    # msrest deserialization map: 'nextLink' carries the continuation URL,
    # 'value' carries the current page of IntegrationAccountMap items.
    _attribute_map = {
        'current_page': {'key': 'value', 'type': '[IntegrationAccountMap]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *args, **kwargs):
        super(IntegrationAccountMapPaged, self).__init__(*args, **kwargs)
| lmazuel/azure-sdk-for-python | azure-mgmt-logic/azure/mgmt/logic/models/integration_account_map_paged.py | Python | mit | 926 |
from __future__ import absolute_import
from __future__ import print_function
import theano
import theano.tensor as T
import numpy as np
import time, json, warnings
from collections import deque
from .utils.generic_utils import Progbar
class CallbackList(object):
    '''Dispatches training events to a list of callbacks.

    Also times how long the on_batch_begin/on_batch_end hooks take relative
    to the batch computation itself (median over the last `queue_length`
    batches) and warns when the hooks dominate the batch time.
    '''
    def __init__(self, callbacks=None, queue_length=10):
        # `callbacks=None` replaces the original mutable default `[]`
        # (shared-mutable-default pitfall); the list is still copied
        # defensively so later appends don't mutate the caller's list.
        self.callbacks = [c for c in (callbacks if callbacks is not None else [])]
        self.queue_length = queue_length

    def append(self, callback):
        self.callbacks.append(callback)

    def _set_params(self, params):
        for callback in self.callbacks:
            callback._set_params(params)

    def _set_model(self, model):
        for callback in self.callbacks:
            callback._set_model(model)

    def on_epoch_begin(self, epoch, logs=None):
        logs = {} if logs is None else logs
        for callback in self.callbacks:
            callback.on_epoch_begin(epoch, logs)
        # reset the per-epoch hook-timing state
        self._delta_t_batch = 0.
        self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
        self._delta_ts_batch_end = deque([], maxlen=self.queue_length)

    def on_epoch_end(self, epoch, logs=None):
        logs = {} if logs is None else logs
        for callback in self.callbacks:
            callback.on_epoch_end(epoch, logs)

    def on_batch_begin(self, batch, logs=None):
        logs = {} if logs is None else logs
        t_before_callbacks = time.time()
        for callback in self.callbacks:
            callback.on_batch_begin(batch, logs)
        self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)
        delta_t_median = np.median(self._delta_ts_batch_begin)
        # warn when the hooks take almost as long as the batch update itself
        if self._delta_t_batch > 0. and delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1:
            warnings.warn('Method on_batch_begin() is slow compared '
                          'to the batch update (%f). Check your callbacks.' % delta_t_median)
        self._t_enter_batch = time.time()

    def on_batch_end(self, batch, logs=None):
        logs = {} if logs is None else logs
        self._delta_t_batch = time.time() - self._t_enter_batch
        t_before_callbacks = time.time()
        for callback in self.callbacks:
            callback.on_batch_end(batch, logs)
        self._delta_ts_batch_end.append(time.time() - t_before_callbacks)
        delta_t_median = np.median(self._delta_ts_batch_end)
        if self._delta_t_batch > 0. and delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1:
            warnings.warn('Method on_batch_end() is slow compared '
                          'to the batch update (%f). Check your callbacks.' % delta_t_median)

    def on_train_begin(self, logs=None):
        logs = {} if logs is None else logs
        for callback in self.callbacks:
            callback.on_train_begin(logs)

    def on_train_end(self, logs=None):
        logs = {} if logs is None else logs
        for callback in self.callbacks:
            callback.on_train_end(logs)
class Callback(object):
    '''No-op base class for training callbacks.

    Subclasses override any subset of the on_* hooks; each hook receives
    a `logs` dictionary for that event.  `_set_params`/`_set_model` are
    invoked by the training loop to attach context before training starts.
    '''
    def __init__(self):
        pass

    def _set_params(self, params):
        self.params = params

    def _set_model(self, model):
        self.model = model

    def on_epoch_begin(self, epoch, logs={}):
        pass

    def on_epoch_end(self, epoch, logs={}):
        pass

    def on_batch_begin(self, batch, logs={}):
        pass

    def on_batch_end(self, batch, logs={}):
        pass

    def on_train_begin(self, logs={}):
        pass

    def on_train_end(self, logs={}):
        pass
class BaseLogger(Callback):
    '''Prints epoch progress via a Progbar and averages batch metrics.

    Reads `verbose`, `nb_sample` and `metrics` from `self.params` (set by
    the training loop through `_set_params`).
    '''
    def on_train_begin(self, logs={}):
        self.verbose = self.params['verbose']
    def on_epoch_begin(self, epoch, logs={}):
        if self.verbose:
            print('Epoch %d' % epoch)
        # fresh progress bar plus accumulators for this epoch
        self.progbar = Progbar(target=self.params['nb_sample'],
                               verbose=self.verbose)
        self.seen = 0
        self.totals = {}
    def on_batch_begin(self, batch, logs={}):
        # start a new set of display values unless the epoch is already full
        if self.seen < self.params['nb_sample']:
            self.log_values = []
    def on_batch_end(self, batch, logs={}):
        batch_size = logs.get('size', 0)
        self.seen += batch_size
        # accumulate metric totals weighted by batch size (averaged at epoch end)
        for k, v in logs.items():
            if k in self.totals:
                self.totals[k] += v * batch_size
            else:
                self.totals[k] = v * batch_size
        for k in self.params['metrics']:
            if k in logs:
                self.log_values.append((k, logs[k]))
        # skip progbar update for the last batch; will be handled by on_epoch_end
        if self.verbose and self.seen < self.params['nb_sample']:
            self.progbar.update(self.seen, self.log_values)
    def on_epoch_end(self, epoch, logs={}):
        # report batch-size-weighted averages plus any epoch-level metrics
        for k in self.params['metrics']:
            if k in self.totals:
                self.log_values.append((k, self.totals[k] / self.seen))
            if k in logs:
                self.log_values.append((k, logs[k]))
        if self.verbose:
            self.progbar.update(self.seen, self.log_values)
class History(Callback):
    '''Records training history.

    `self.history` maps each metric name to a list with one entry per
    epoch: batch metrics are averaged (weighted by batch size) and
    epoch-level metrics from `logs` are appended as-is.
    '''
    def on_train_begin(self, logs={}):
        self.epoch = []
        self.history = {}

    def on_epoch_begin(self, epoch, logs={}):
        self.seen = 0
        self.totals = {}

    def on_batch_end(self, batch, logs={}):
        batch_size = logs.get('size', 0)
        self.seen += batch_size
        for key, value in logs.items():
            self.totals[key] = self.totals.get(key, 0) + value * batch_size

    def on_epoch_end(self, epoch, logs={}):
        self.epoch.append(epoch)
        for key, total in self.totals.items():
            self.history.setdefault(key, []).append(total / self.seen)
        for key, value in logs.items():
            self.history.setdefault(key, []).append(value)
class ModelCheckpoint(Callback):
    '''Saves the model weights to `filepath` after every epoch.

    When `save_best_only` is set, weights are written only when the
    monitored quantity decreases below the best value seen so far.
    '''
    def __init__(self, filepath, monitor='val_loss', verbose=0, save_best_only=False):
        # BUG FIX: was `super(Callback, self).__init__()`, which starts the
        # MRO search *after* Callback and silently skips Callback.__init__.
        super(ModelCheckpoint, self).__init__()
        self.monitor = monitor
        self.verbose = verbose
        self.filepath = filepath
        self.save_best_only = save_best_only
        self.best = np.Inf  # lower monitored value == better

    def on_epoch_end(self, epoch, logs={}):
        if self.save_best_only:
            current = logs.get(self.monitor)
            if current is None:
                warnings.warn("Can save best model only with %s available, skipping." % (self.monitor), RuntimeWarning)
            else:
                if current < self.best:
                    if self.verbose > 0:
                        print("Epoch %05d: %s improved from %0.5f to %0.5f, saving model to %s"
                              % (epoch, self.monitor, self.best, current, self.filepath))
                    self.best = current
                    self.model.save_weights(self.filepath, overwrite=True)
                else:
                    if self.verbose > 0:
                        print("Epoch %05d: %s did not improve" % (epoch, self.monitor))
        else:
            # unconditional save every epoch
            if self.verbose > 0:
                print("Epoch %05d: saving model to %s" % (epoch, self.filepath))
            self.model.save_weights(self.filepath, overwrite=True)
class EarlyStopping(Callback):
    '''Stops training when the monitored quantity stops decreasing.

    Training is stopped after `patience` consecutive epochs without
    improvement (sets `self.model.stop_training`).
    '''
    def __init__(self, monitor='val_loss', patience=0, verbose=0):
        # BUG FIX: was `super(Callback, self).__init__()`, which starts the
        # MRO search *after* Callback and silently skips Callback.__init__.
        super(EarlyStopping, self).__init__()
        self.monitor = monitor
        self.patience = patience
        self.verbose = verbose
        self.best = np.Inf
        self.wait = 0

    def on_epoch_end(self, epoch, logs={}):
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn("Early stopping requires %s available!" % (self.monitor), RuntimeWarning)
            # BUG FIX: previously fell through and evaluated
            # `None < self.best`, which raises TypeError on Python 3;
            # skip the improvement bookkeeping for this epoch instead.
            return
        if current < self.best:
            self.best = current
            self.wait = 0
        else:
            if self.wait >= self.patience:
                if self.verbose > 0:
                    print("Epoch %05d: early stopping" % (epoch))
                self.model.stop_training = True
            self.wait += 1
class RemoteMonitor(Callback):
    '''Streams per-epoch averaged batch metrics to a server via HTTP POST.

    Posts a JSON payload to `<root>/publish/epoch/end/` at the end of
    each epoch, containing the epoch index, batch-size-weighted metric
    averages, and any epoch-level entries from `logs`.
    '''
    def __init__(self, root='http://localhost:9000'):
        self.root = root

    def on_epoch_begin(self, epoch, logs={}):
        self.seen = 0
        self.totals = {}

    def on_batch_end(self, batch, logs={}):
        batch_size = logs.get('size', 0)
        self.seen += batch_size
        for k, v in logs.items():
            if k in self.totals:
                self.totals[k] += v * batch_size
            else:
                self.totals[k] = v * batch_size

    def on_epoch_end(self, epoch, logs={}):
        # local import keeps `requests` an optional dependency
        import requests
        send = {}
        send['epoch'] = epoch
        for k, v in self.totals.items():
            send[k] = v / self.seen
        # BUG FIX: was `for k, v in self.logs:` -- `self.logs` is never
        # assigned (AttributeError at runtime), and iterating a dict would
        # not yield pairs anyway; use the epoch-level `logs` argument.
        for k, v in logs.items():
            send[k] = v
        r = requests.post(self.root + '/publish/epoch/end/', {'data': json.dumps(send)})
| zhangxujinsh/keras | keras/callbacks.py | Python | mit | 8,643 |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class WikispiderPipeline(object):
    """Pass-through Scrapy item pipeline: every item is returned unchanged."""

    def process_item(self, item, spider):
        # No filtering or transformation is applied.
        return item
| xu6148152/Binea_Python_Project | wikiSpider/wikiSpider/pipelines.py | Python | mit | 290 |
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
def register_types(module):
    """Register top-level ns-3 dot11s types and nested C++ namespaces on the pybindgen module (machine-generated registration code)."""
    root_module = module.get_root()
    ## simple-ref-count.h: ns3::SimpleRefCount<ns3::dot11s::IeBeaconTimingUnit, ns3::empty, ns3::DefaultDeleter<ns3::dot11s::IeBeaconTimingUnit> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::dot11s::IeBeaconTimingUnit', 'ns3::empty', 'ns3::DefaultDeleter<ns3::dot11s::IeBeaconTimingUnit>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## Register a nested module for the namespace Config
    nested_module = module.add_cpp_namespace('Config')
    register_types_ns3_Config(nested_module)
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    ## Register a nested module for the namespace addressUtils
    nested_module = module.add_cpp_namespace('addressUtils')
    register_types_ns3_addressUtils(nested_module)
    ## Register a nested module for the namespace aodv
    nested_module = module.add_cpp_namespace('aodv')
    register_types_ns3_aodv(nested_module)
    ## Register a nested module for the namespace dot11s
    nested_module = module.add_cpp_namespace('dot11s')
    register_types_ns3_dot11s(nested_module)
    ## Register a nested module for the namespace flame
    nested_module = module.add_cpp_namespace('flame')
    register_types_ns3_flame(nested_module)
    ## Register a nested module for the namespace internal
    nested_module = module.add_cpp_namespace('internal')
    register_types_ns3_internal(nested_module)
    ## Register a nested module for the namespace olsr
    nested_module = module.add_cpp_namespace('olsr')
    register_types_ns3_olsr(nested_module)
def register_types_ns3_Config(module):
    """Register types for the ns3::Config namespace (no registrations needed; generated placeholder)."""
    root_module = module.get_root()
def register_types_ns3_FatalImpl(module):
    """Register types for the ns3::FatalImpl namespace (no registrations needed; generated placeholder)."""
    root_module = module.get_root()
def register_types_ns3_addressUtils(module):
    """Register types for the ns3::addressUtils namespace (no registrations needed; generated placeholder)."""
    root_module = module.get_root()
def register_types_ns3_aodv(module):
    """Register types for the ns3::aodv namespace (no registrations needed; generated placeholder)."""
    root_module = module.get_root()
def register_types_ns3_dot11s(module):
    """Register the ns3::dot11s enums and classes on the nested pybindgen module (machine-generated registration code)."""
    root_module = module.get_root()
    ## ie-dot11s-configuration.h: ns3::dot11s::dot11sCongestionControlMode [enumeration]
    module.add_enum('dot11sCongestionControlMode', ['CONGESTION_SIGNALING', 'CONGESTION_NULL'])
    ## ie-dot11s-configuration.h: ns3::dot11s::dot11sAuthenticationProtocol [enumeration]
    module.add_enum('dot11sAuthenticationProtocol', ['AUTH_NULL', 'AUTH_SAE'])
    ## ie-dot11s-configuration.h: ns3::dot11s::dot11sPathSelectionProtocol [enumeration]
    module.add_enum('dot11sPathSelectionProtocol', ['PROTOCOL_HWMP'])
    ## ie-dot11s-configuration.h: ns3::dot11s::dot11sSynchronizationProtocolIdentifier [enumeration]
    module.add_enum('dot11sSynchronizationProtocolIdentifier', ['SYNC_NEIGHBOUR_OFFSET', 'SYNC_NULL'])
    ## ie-dot11s-peer-management.h: ns3::dot11s::PmpReasonCode [enumeration]
    module.add_enum('PmpReasonCode', ['REASON11S_PEERING_CANCELLED', 'REASON11S_MESH_MAX_PEERS', 'REASON11S_MESH_CAPABILITY_POLICY_VIOLATION', 'REASON11S_MESH_CLOSE_RCVD', 'REASON11S_MESH_MAX_RETRIES', 'REASON11S_MESH_CONFIRM_TIMEOUT', 'REASON11S_MESH_INVALID_GTK', 'REASON11S_MESH_INCONSISTENT_PARAMETERS', 'REASON11S_MESH_INVALID_SECURITY_CAPABILITY', 'REASON11S_RESERVED'])
    ## ie-dot11s-configuration.h: ns3::dot11s::dot11sPathSelectionMetric [enumeration]
    module.add_enum('dot11sPathSelectionMetric', ['METRIC_AIRTIME'])
    ## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability [class]
    module.add_class('Dot11sMeshCapability')
    ## hwmp-protocol.h: ns3::dot11s::HwmpProtocol [class]
    module.add_class('HwmpProtocol', parent=root_module['ns3::MeshL2RoutingProtocol'])
    ## hwmp-protocol.h: ns3::dot11s::HwmpProtocol::FailedDestination [struct]
    module.add_class('FailedDestination', outer_class=root_module['ns3::dot11s::HwmpProtocol'])
    ## ie-dot11s-beacon-timing.h: ns3::dot11s::IeBeaconTiming [class]
    module.add_class('IeBeaconTiming', parent=root_module['ns3::WifiInformationElement'])
    ## ie-dot11s-beacon-timing.h: ns3::dot11s::IeBeaconTimingUnit [class]
    module.add_class('IeBeaconTimingUnit', parent=root_module['ns3::SimpleRefCount< ns3::dot11s::IeBeaconTimingUnit, ns3::empty, ns3::DefaultDeleter<ns3::dot11s::IeBeaconTimingUnit> >'])
    ## ie-dot11s-configuration.h: ns3::dot11s::IeConfiguration [class]
    module.add_class('IeConfiguration', parent=root_module['ns3::WifiInformationElement'])
    ## ie-dot11s-id.h: ns3::dot11s::IeMeshId [class]
    module.add_class('IeMeshId', parent=root_module['ns3::WifiInformationElement'])
    ## ie-dot11s-id.h: ns3::dot11s::IeMeshIdChecker [class]
    module.add_class('IeMeshIdChecker', parent=root_module['ns3::AttributeChecker'])
    ## ie-dot11s-id.h: ns3::dot11s::IeMeshIdValue [class]
    module.add_class('IeMeshIdValue', parent=root_module['ns3::AttributeValue'])
    ## ie-dot11s-peer-management.h: ns3::dot11s::IePeerManagement [class]
    module.add_class('IePeerManagement', parent=root_module['ns3::WifiInformationElement'])
    ## ie-dot11s-peer-management.h: ns3::dot11s::IePeerManagement::Subtype [enumeration]
    module.add_enum('Subtype', ['PEER_OPEN', 'PEER_CONFIRM', 'PEER_CLOSE'], outer_class=root_module['ns3::dot11s::IePeerManagement'])
    ## peer-link.h: ns3::dot11s::PeerLink [class]
    module.add_class('PeerLink', parent=root_module['ns3::Object'])
    ## peer-link.h: ns3::dot11s::PeerLink::PeerState [enumeration]
    module.add_enum('PeerState', ['IDLE', 'OPN_SNT', 'CNF_RCVD', 'OPN_RCVD', 'ESTAB', 'HOLDING'], outer_class=root_module['ns3::dot11s::PeerLink'])
    ## peer-management-protocol.h: ns3::dot11s::PeerManagementProtocol [class]
    module.add_class('PeerManagementProtocol', parent=root_module['ns3::Object'])
def register_types_ns3_flame(module):
    """Register types for the ns3::flame namespace (no registrations needed; generated placeholder)."""
    root_module = module.get_root()
def register_types_ns3_internal(module):
    """Register types for the ns3::internal namespace (no registrations needed; generated placeholder)."""
    root_module = module.get_root()
def register_types_ns3_olsr(module):
    """Register types for the ns3::olsr namespace (no registrations needed; generated placeholder)."""
    root_module = module.get_root()
def register_methods(root_module):
    """Invoke the per-class method-registration helpers for every registered ns3::dot11s type (machine-generated registration code)."""
    register_Ns3Dot11sDot11sMeshCapability_methods(root_module, root_module['ns3::dot11s::Dot11sMeshCapability'])
    register_Ns3Dot11sHwmpProtocol_methods(root_module, root_module['ns3::dot11s::HwmpProtocol'])
    register_Ns3Dot11sHwmpProtocolFailedDestination_methods(root_module, root_module['ns3::dot11s::HwmpProtocol::FailedDestination'])
    register_Ns3Dot11sIeBeaconTiming_methods(root_module, root_module['ns3::dot11s::IeBeaconTiming'])
    register_Ns3Dot11sIeBeaconTimingUnit_methods(root_module, root_module['ns3::dot11s::IeBeaconTimingUnit'])
    register_Ns3Dot11sIeConfiguration_methods(root_module, root_module['ns3::dot11s::IeConfiguration'])
    register_Ns3Dot11sIeMeshId_methods(root_module, root_module['ns3::dot11s::IeMeshId'])
    register_Ns3Dot11sIeMeshIdChecker_methods(root_module, root_module['ns3::dot11s::IeMeshIdChecker'])
    register_Ns3Dot11sIeMeshIdValue_methods(root_module, root_module['ns3::dot11s::IeMeshIdValue'])
    register_Ns3Dot11sIePeerManagement_methods(root_module, root_module['ns3::dot11s::IePeerManagement'])
    register_Ns3Dot11sPeerLink_methods(root_module, root_module['ns3::dot11s::PeerLink'])
    register_Ns3Dot11sPeerManagementProtocol_methods(root_module, root_module['ns3::dot11s::PeerManagementProtocol'])
    return
def register_Ns3Dot11sDot11sMeshCapability_methods(root_module, cls):
    """Register constructors, methods, operators and attributes of ns3::dot11s::Dot11sMeshCapability (machine-generated registration code)."""
    cls.add_binary_comparison_operator('==')
    ## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::Dot11sMeshCapability(ns3::dot11s::Dot11sMeshCapability const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::dot11s::Dot11sMeshCapability const &', 'arg0')])
    ## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::Dot11sMeshCapability() [constructor]
    cls.add_constructor([])
    ## ie-dot11s-configuration.h: ns3::Buffer::Iterator ns3::dot11s::Dot11sMeshCapability::Deserialize(ns3::Buffer::Iterator i) [member function]
    cls.add_method('Deserialize',
                   'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'i')])
    ## ie-dot11s-configuration.h: uint8_t ns3::dot11s::Dot11sMeshCapability::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ie-dot11s-configuration.h: uint16_t ns3::dot11s::Dot11sMeshCapability::GetUint16() const [member function]
    cls.add_method('GetUint16',
                   'uint16_t',
                   [],
                   is_const=True)
    ## ie-dot11s-configuration.h: bool ns3::dot11s::Dot11sMeshCapability::Is(uint16_t cap, uint8_t n) const [member function]
    cls.add_method('Is',
                   'bool',
                   [param('uint16_t', 'cap'), param('uint8_t', 'n')],
                   is_const=True)
    ## ie-dot11s-configuration.h: ns3::Buffer::Iterator ns3::dot11s::Dot11sMeshCapability::Serialize(ns3::Buffer::Iterator i) const [member function]
    cls.add_method('Serialize',
                   'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'i')],
                   is_const=True)
    ## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::MCCAEnabled [variable]
    cls.add_instance_attribute('MCCAEnabled', 'bool', is_const=False)
    ## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::MCCASupported [variable]
    cls.add_instance_attribute('MCCASupported', 'bool', is_const=False)
    ## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::TBTTAdjustment [variable]
    cls.add_instance_attribute('TBTTAdjustment', 'bool', is_const=False)
    ## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::acceptPeerLinks [variable]
    cls.add_instance_attribute('acceptPeerLinks', 'bool', is_const=False)
    ## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::beaconTimingReport [variable]
    cls.add_instance_attribute('beaconTimingReport', 'bool', is_const=False)
    ## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::forwarding [variable]
    cls.add_instance_attribute('forwarding', 'bool', is_const=False)
    ## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::powerSaveLevel [variable]
    cls.add_instance_attribute('powerSaveLevel', 'bool', is_const=False)
    return
def register_Ns3Dot11sHwmpProtocol_methods(root_module, cls):
    """Register constructors and methods of ns3::dot11s::HwmpProtocol (machine-generated registration code)."""
    ## hwmp-protocol.h: ns3::dot11s::HwmpProtocol::HwmpProtocol() [constructor]
    cls.add_constructor([])
    ## hwmp-protocol.h: void ns3::dot11s::HwmpProtocol::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   is_virtual=True)
    ## hwmp-protocol.h: static ns3::TypeId ns3::dot11s::HwmpProtocol::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## hwmp-protocol.h: bool ns3::dot11s::HwmpProtocol::Install(ns3::Ptr<ns3::MeshPointDevice> arg0) [member function]
    cls.add_method('Install',
                   'bool',
                   [param('ns3::Ptr< ns3::MeshPointDevice >', 'arg0')])
    ## hwmp-protocol.h: void ns3::dot11s::HwmpProtocol::PeerLinkStatus(ns3::Mac48Address meshPontAddress, ns3::Mac48Address peerAddress, uint32_t interface, bool status) [member function]
    cls.add_method('PeerLinkStatus',
                   'void',
                   [param('ns3::Mac48Address', 'meshPontAddress'), param('ns3::Mac48Address', 'peerAddress'), param('uint32_t', 'interface'), param('bool', 'status')])
    ## hwmp-protocol.h: bool ns3::dot11s::HwmpProtocol::RemoveRoutingStuff(uint32_t fromIface, ns3::Mac48Address const source, ns3::Mac48Address const destination, ns3::Ptr<ns3::Packet> packet, uint16_t & protocolType) [member function]
    cls.add_method('RemoveRoutingStuff',
                   'bool',
                   [param('uint32_t', 'fromIface'), param('ns3::Mac48Address const', 'source'), param('ns3::Mac48Address const', 'destination'), param('ns3::Ptr< ns3::Packet >', 'packet'), param('uint16_t &', 'protocolType')],
                   is_virtual=True)
    ## hwmp-protocol.h: void ns3::dot11s::HwmpProtocol::Report(std::ostream & arg0) const [member function]
    cls.add_method('Report',
                   'void',
                   [param('std::ostream &', 'arg0')],
                   is_const=True)
    ## hwmp-protocol.h: bool ns3::dot11s::HwmpProtocol::RequestRoute(uint32_t sourceIface, ns3::Mac48Address const source, ns3::Mac48Address const destination, ns3::Ptr<ns3::Packet const> packet, uint16_t protocolType, ns3::Callback<void, bool, ns3::Ptr<ns3::Packet>, ns3::Mac48Address, ns3::Mac48Address, unsigned short, unsigned int, ns3::empty, ns3::empty, ns3::empty> routeReply) [member function]
    cls.add_method('RequestRoute',
                   'bool',
                   [param('uint32_t', 'sourceIface'), param('ns3::Mac48Address const', 'source'), param('ns3::Mac48Address const', 'destination'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocolType'), param('ns3::Callback< void, bool, ns3::Ptr< ns3::Packet >, ns3::Mac48Address, ns3::Mac48Address, unsigned short, unsigned int, ns3::empty, ns3::empty, ns3::empty >', 'routeReply')],
                   is_virtual=True)
    ## hwmp-protocol.h: void ns3::dot11s::HwmpProtocol::ResetStats() [member function]
    cls.add_method('ResetStats',
                   'void',
                   [])
    ## hwmp-protocol.h: void ns3::dot11s::HwmpProtocol::SetNeighboursCallback(ns3::Callback<std::vector<ns3::Mac48Address, std::allocator<ns3::Mac48Address> >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetNeighboursCallback',
                   'void',
                   [param('ns3::Callback< std::vector< ns3::Mac48Address >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')])
    ## hwmp-protocol.h: void ns3::dot11s::HwmpProtocol::SetRoot() [member function]
    cls.add_method('SetRoot',
                   'void',
                   [])
    ## hwmp-protocol.h: void ns3::dot11s::HwmpProtocol::UnsetRoot() [member function]
    cls.add_method('UnsetRoot',
                   'void',
                   [])
    return
def register_Ns3Dot11sHwmpProtocolFailedDestination_methods(root_module, cls):
    """Register constructors and attributes of ns3::dot11s::HwmpProtocol::FailedDestination (machine-generated registration code)."""
    ## hwmp-protocol.h: ns3::dot11s::HwmpProtocol::FailedDestination::FailedDestination() [constructor]
    cls.add_constructor([])
    ## hwmp-protocol.h: ns3::dot11s::HwmpProtocol::FailedDestination::FailedDestination(ns3::dot11s::HwmpProtocol::FailedDestination const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::dot11s::HwmpProtocol::FailedDestination const &', 'arg0')])
    ## hwmp-protocol.h: ns3::dot11s::HwmpProtocol::FailedDestination::destination [variable]
    cls.add_instance_attribute('destination', 'ns3::Mac48Address', is_const=False)
    ## hwmp-protocol.h: ns3::dot11s::HwmpProtocol::FailedDestination::seqnum [variable]
    cls.add_instance_attribute('seqnum', 'uint32_t', is_const=False)
    return
def register_Ns3Dot11sIeBeaconTiming_methods(root_module, cls):
    """Register constructors, methods and operators of ns3::dot11s::IeBeaconTiming (machine-generated registration code)."""
    cls.add_output_stream_operator()
    ## ie-dot11s-beacon-timing.h: ns3::dot11s::IeBeaconTiming::IeBeaconTiming(ns3::dot11s::IeBeaconTiming const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::dot11s::IeBeaconTiming const &', 'arg0')])
    ## ie-dot11s-beacon-timing.h: ns3::dot11s::IeBeaconTiming::IeBeaconTiming() [constructor]
    cls.add_constructor([])
    ## ie-dot11s-beacon-timing.h: void ns3::dot11s::IeBeaconTiming::AddNeighboursTimingElementUnit(uint16_t aid, ns3::Time last_beacon, ns3::Time beacon_interval) [member function]
    cls.add_method('AddNeighboursTimingElementUnit',
                   'void',
                   [param('uint16_t', 'aid'), param('ns3::Time', 'last_beacon'), param('ns3::Time', 'beacon_interval')])
    ## ie-dot11s-beacon-timing.h: void ns3::dot11s::IeBeaconTiming::ClearTimingElement() [member function]
    cls.add_method('ClearTimingElement',
                   'void',
                   [])
    ## ie-dot11s-beacon-timing.h: void ns3::dot11s::IeBeaconTiming::DelNeighboursTimingElementUnit(uint16_t aid, ns3::Time last_beacon, ns3::Time beacon_interval) [member function]
    cls.add_method('DelNeighboursTimingElementUnit',
                   'void',
                   [param('uint16_t', 'aid'), param('ns3::Time', 'last_beacon'), param('ns3::Time', 'beacon_interval')])
    ## ie-dot11s-beacon-timing.h: uint8_t ns3::dot11s::IeBeaconTiming::DeserializeInformationField(ns3::Buffer::Iterator i, uint8_t length) [member function]
    cls.add_method('DeserializeInformationField',
                   'uint8_t',
                   [param('ns3::Buffer::Iterator', 'i'), param('uint8_t', 'length')],
                   is_virtual=True)
    ## ie-dot11s-beacon-timing.h: ns3::WifiInformationElementId ns3::dot11s::IeBeaconTiming::ElementId() const [member function]
    cls.add_method('ElementId',
                   'ns3::WifiInformationElementId',
                   [],
                   is_const=True, is_virtual=True)
    ## ie-dot11s-beacon-timing.h: uint8_t ns3::dot11s::IeBeaconTiming::GetInformationFieldSize() const [member function]
    cls.add_method('GetInformationFieldSize',
                   'uint8_t',
                   [],
                   is_const=True, is_virtual=True)
    ## ie-dot11s-beacon-timing.h: std::vector<ns3::Ptr<ns3::dot11s::IeBeaconTimingUnit>, std::allocator<ns3::Ptr<ns3::dot11s::IeBeaconTimingUnit> > > ns3::dot11s::IeBeaconTiming::GetNeighboursTimingElementsList() [member function]
    cls.add_method('GetNeighboursTimingElementsList',
                   'std::vector< ns3::Ptr< ns3::dot11s::IeBeaconTimingUnit > >',
                   [])
    ## ie-dot11s-beacon-timing.h: void ns3::dot11s::IeBeaconTiming::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## ie-dot11s-beacon-timing.h: void ns3::dot11s::IeBeaconTiming::SerializeInformationField(ns3::Buffer::Iterator i) const [member function]
    cls.add_method('SerializeInformationField',
                   'void',
                   [param('ns3::Buffer::Iterator', 'i')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3Dot11sIeBeaconTimingUnit_methods(root_module, cls):
    """Register constructors, accessors and operators of ns3::dot11s::IeBeaconTimingUnit (machine-generated registration code)."""
    cls.add_binary_comparison_operator('==')
    ## ie-dot11s-beacon-timing.h: ns3::dot11s::IeBeaconTimingUnit::IeBeaconTimingUnit(ns3::dot11s::IeBeaconTimingUnit const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::dot11s::IeBeaconTimingUnit const &', 'arg0')])
    ## ie-dot11s-beacon-timing.h: ns3::dot11s::IeBeaconTimingUnit::IeBeaconTimingUnit() [constructor]
    cls.add_constructor([])
    ## ie-dot11s-beacon-timing.h: uint8_t ns3::dot11s::IeBeaconTimingUnit::GetAid() const [member function]
    cls.add_method('GetAid',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ie-dot11s-beacon-timing.h: uint16_t ns3::dot11s::IeBeaconTimingUnit::GetBeaconInterval() const [member function]
    cls.add_method('GetBeaconInterval',
                   'uint16_t',
                   [],
                   is_const=True)
    ## ie-dot11s-beacon-timing.h: uint16_t ns3::dot11s::IeBeaconTimingUnit::GetLastBeacon() const [member function]
    cls.add_method('GetLastBeacon',
                   'uint16_t',
                   [],
                   is_const=True)
    ## ie-dot11s-beacon-timing.h: void ns3::dot11s::IeBeaconTimingUnit::SetAid(uint8_t aid) [member function]
    cls.add_method('SetAid',
                   'void',
                   [param('uint8_t', 'aid')])
    ## ie-dot11s-beacon-timing.h: void ns3::dot11s::IeBeaconTimingUnit::SetBeaconInterval(uint16_t beaconInterval) [member function]
    cls.add_method('SetBeaconInterval',
                   'void',
                   [param('uint16_t', 'beaconInterval')])
    ## ie-dot11s-beacon-timing.h: void ns3::dot11s::IeBeaconTimingUnit::SetLastBeacon(uint16_t lastBeacon) [member function]
    cls.add_method('SetLastBeacon',
                   'void',
                   [param('uint16_t', 'lastBeacon')])
    return
def register_Ns3Dot11sIeConfiguration_methods(root_module, cls):
    """Register constructors, methods and operators of ns3::dot11s::IeConfiguration (machine-generated registration code)."""
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ie-dot11s-configuration.h: ns3::dot11s::IeConfiguration::IeConfiguration(ns3::dot11s::IeConfiguration const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::dot11s::IeConfiguration const &', 'arg0')])
    ## ie-dot11s-configuration.h: ns3::dot11s::IeConfiguration::IeConfiguration() [constructor]
    cls.add_constructor([])
    ## ie-dot11s-configuration.h: uint8_t ns3::dot11s::IeConfiguration::DeserializeInformationField(ns3::Buffer::Iterator i, uint8_t length) [member function]
    cls.add_method('DeserializeInformationField',
                   'uint8_t',
                   [param('ns3::Buffer::Iterator', 'i'), param('uint8_t', 'length')],
                   is_virtual=True)
    ## ie-dot11s-configuration.h: ns3::WifiInformationElementId ns3::dot11s::IeConfiguration::ElementId() const [member function]
    cls.add_method('ElementId',
                   'ns3::WifiInformationElementId',
                   [],
                   is_const=True, is_virtual=True)
    ## ie-dot11s-configuration.h: uint8_t ns3::dot11s::IeConfiguration::GetInformationFieldSize() const [member function]
    cls.add_method('GetInformationFieldSize',
                   'uint8_t',
                   [],
                   is_const=True, is_virtual=True)
    ## ie-dot11s-configuration.h: uint8_t ns3::dot11s::IeConfiguration::GetNeighborCount() [member function]
    cls.add_method('GetNeighborCount',
                   'uint8_t',
                   [])
    ## ie-dot11s-configuration.h: bool ns3::dot11s::IeConfiguration::IsAirtime() [member function]
    cls.add_method('IsAirtime',
                   'bool',
                   [])
    ## ie-dot11s-configuration.h: bool ns3::dot11s::IeConfiguration::IsHWMP() [member function]
    cls.add_method('IsHWMP',
                   'bool',
                   [])
    ## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability const & ns3::dot11s::IeConfiguration::MeshCapability() [member function]
    cls.add_method('MeshCapability',
                   'ns3::dot11s::Dot11sMeshCapability const &',
                   [])
    ## ie-dot11s-configuration.h: void ns3::dot11s::IeConfiguration::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## ie-dot11s-configuration.h: void ns3::dot11s::IeConfiguration::SerializeInformationField(ns3::Buffer::Iterator i) const [member function]
    cls.add_method('SerializeInformationField',
                   'void',
                   [param('ns3::Buffer::Iterator', 'i')],
                   is_const=True, is_virtual=True)
    ## ie-dot11s-configuration.h: void ns3::dot11s::IeConfiguration::SetMetric(ns3::dot11s::dot11sPathSelectionMetric metricId) [member function]
    cls.add_method('SetMetric',
                   'void',
                   [param('ns3::dot11s::dot11sPathSelectionMetric', 'metricId')])
    ## ie-dot11s-configuration.h: void ns3::dot11s::IeConfiguration::SetNeighborCount(uint8_t neighbors) [member function]
    cls.add_method('SetNeighborCount',
                   'void',
                   [param('uint8_t', 'neighbors')])
    ## ie-dot11s-configuration.h: void ns3::dot11s::IeConfiguration::SetRouting(ns3::dot11s::dot11sPathSelectionProtocol routingId) [member function]
    cls.add_method('SetRouting',
                   'void',
                   [param('ns3::dot11s::dot11sPathSelectionProtocol', 'routingId')])
    return
def register_Ns3Dot11sIeMeshId_methods(root_module, cls):
    """Register pybindgen bindings for ns3::dot11s::IeMeshId (mesh ID information element)."""
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ie-dot11s-id.h: ns3::dot11s::IeMeshId::IeMeshId(ns3::dot11s::IeMeshId const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::dot11s::IeMeshId const &', 'arg0')])
    ## ie-dot11s-id.h: ns3::dot11s::IeMeshId::IeMeshId() [constructor]
    cls.add_constructor([])
    ## ie-dot11s-id.h: ns3::dot11s::IeMeshId::IeMeshId(std::string s) [constructor]
    cls.add_constructor([param('std::string', 's')])
    ## ie-dot11s-id.h: uint8_t ns3::dot11s::IeMeshId::DeserializeInformationField(ns3::Buffer::Iterator start, uint8_t length) [member function]
    cls.add_method('DeserializeInformationField',
                   'uint8_t',
                   [param('ns3::Buffer::Iterator', 'start'), param('uint8_t', 'length')],
                   is_virtual=True)
    ## ie-dot11s-id.h: ns3::WifiInformationElementId ns3::dot11s::IeMeshId::ElementId() const [member function]
    cls.add_method('ElementId',
                   'ns3::WifiInformationElementId',
                   [],
                   is_const=True, is_virtual=True)
    ## ie-dot11s-id.h: uint8_t ns3::dot11s::IeMeshId::GetInformationFieldSize() const [member function]
    cls.add_method('GetInformationFieldSize',
                   'uint8_t',
                   [],
                   is_const=True, is_virtual=True)
    ## ie-dot11s-id.h: bool ns3::dot11s::IeMeshId::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast',
                   'bool',
                   [],
                   is_const=True)
    ## ie-dot11s-id.h: bool ns3::dot11s::IeMeshId::IsEqual(ns3::dot11s::IeMeshId const & o) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::dot11s::IeMeshId const &', 'o')],
                   is_const=True)
    ## ie-dot11s-id.h: char * ns3::dot11s::IeMeshId::PeekString() const [member function]
    cls.add_method('PeekString',
                   'char *',
                   [],
                   is_const=True)
    ## ie-dot11s-id.h: void ns3::dot11s::IeMeshId::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## ie-dot11s-id.h: void ns3::dot11s::IeMeshId::SerializeInformationField(ns3::Buffer::Iterator i) const [member function]
    cls.add_method('SerializeInformationField',
                   'void',
                   [param('ns3::Buffer::Iterator', 'i')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3Dot11sIeMeshIdChecker_methods(root_module, cls):
    """Register pybindgen bindings for ns3::dot11s::IeMeshIdChecker (attribute checker)."""
    ## ie-dot11s-id.h: ns3::dot11s::IeMeshIdChecker::IeMeshIdChecker() [constructor]
    cls.add_constructor([])
    ## ie-dot11s-id.h: ns3::dot11s::IeMeshIdChecker::IeMeshIdChecker(ns3::dot11s::IeMeshIdChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::dot11s::IeMeshIdChecker const &', 'arg0')])
    return
def register_Ns3Dot11sIeMeshIdValue_methods(root_module, cls):
    """Register pybindgen bindings for ns3::dot11s::IeMeshIdValue (attribute value wrapper)."""
    ## ie-dot11s-id.h: ns3::dot11s::IeMeshIdValue::IeMeshIdValue() [constructor]
    cls.add_constructor([])
    ## ie-dot11s-id.h: ns3::dot11s::IeMeshIdValue::IeMeshIdValue(ns3::dot11s::IeMeshIdValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::dot11s::IeMeshIdValue const &', 'arg0')])
    ## ie-dot11s-id.h: ns3::dot11s::IeMeshIdValue::IeMeshIdValue(ns3::dot11s::IeMeshId const & value) [constructor]
    cls.add_constructor([param('ns3::dot11s::IeMeshId const &', 'value')])
    ## ie-dot11s-id.h: ns3::Ptr<ns3::AttributeValue> ns3::dot11s::IeMeshIdValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## ie-dot11s-id.h: bool ns3::dot11s::IeMeshIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## ie-dot11s-id.h: ns3::dot11s::IeMeshId ns3::dot11s::IeMeshIdValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::dot11s::IeMeshId',
                   [],
                   is_const=True)
    ## ie-dot11s-id.h: std::string ns3::dot11s::IeMeshIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## ie-dot11s-id.h: void ns3::dot11s::IeMeshIdValue::Set(ns3::dot11s::IeMeshId const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::dot11s::IeMeshId const &', 'value')])
    return
def register_Ns3Dot11sIePeerManagement_methods(root_module, cls):
    """Register pybindgen bindings for ns3::dot11s::IePeerManagement (peer management information element)."""
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ie-dot11s-peer-management.h: ns3::dot11s::IePeerManagement::IePeerManagement(ns3::dot11s::IePeerManagement const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::dot11s::IePeerManagement const &', 'arg0')])
    ## ie-dot11s-peer-management.h: ns3::dot11s::IePeerManagement::IePeerManagement() [constructor]
    cls.add_constructor([])
    ## ie-dot11s-peer-management.h: uint8_t ns3::dot11s::IePeerManagement::DeserializeInformationField(ns3::Buffer::Iterator i, uint8_t length) [member function]
    cls.add_method('DeserializeInformationField',
                   'uint8_t',
                   [param('ns3::Buffer::Iterator', 'i'), param('uint8_t', 'length')],
                   is_virtual=True)
    ## ie-dot11s-peer-management.h: ns3::WifiInformationElementId ns3::dot11s::IePeerManagement::ElementId() const [member function]
    cls.add_method('ElementId',
                   'ns3::WifiInformationElementId',
                   [],
                   is_const=True, is_virtual=True)
    ## ie-dot11s-peer-management.h: uint8_t ns3::dot11s::IePeerManagement::GetInformationFieldSize() const [member function]
    cls.add_method('GetInformationFieldSize',
                   'uint8_t',
                   [],
                   is_const=True, is_virtual=True)
    ## ie-dot11s-peer-management.h: uint16_t ns3::dot11s::IePeerManagement::GetLocalLinkId() const [member function]
    cls.add_method('GetLocalLinkId',
                   'uint16_t',
                   [],
                   is_const=True)
    ## ie-dot11s-peer-management.h: uint16_t ns3::dot11s::IePeerManagement::GetPeerLinkId() const [member function]
    cls.add_method('GetPeerLinkId',
                   'uint16_t',
                   [],
                   is_const=True)
    ## ie-dot11s-peer-management.h: ns3::dot11s::PmpReasonCode ns3::dot11s::IePeerManagement::GetReasonCode() const [member function]
    cls.add_method('GetReasonCode',
                   'ns3::dot11s::PmpReasonCode',
                   [],
                   is_const=True)
    ## ie-dot11s-peer-management.h: uint8_t ns3::dot11s::IePeerManagement::GetSubtype() const [member function]
    cls.add_method('GetSubtype',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ie-dot11s-peer-management.h: void ns3::dot11s::IePeerManagement::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## ie-dot11s-peer-management.h: void ns3::dot11s::IePeerManagement::SerializeInformationField(ns3::Buffer::Iterator i) const [member function]
    cls.add_method('SerializeInformationField',
                   'void',
                   [param('ns3::Buffer::Iterator', 'i')],
                   is_const=True, is_virtual=True)
    ## ie-dot11s-peer-management.h: void ns3::dot11s::IePeerManagement::SetPeerClose(uint16_t localLinkID, uint16_t peerLinkId, ns3::dot11s::PmpReasonCode reasonCode) [member function]
    cls.add_method('SetPeerClose',
                   'void',
                   [param('uint16_t', 'localLinkID'), param('uint16_t', 'peerLinkId'), param('ns3::dot11s::PmpReasonCode', 'reasonCode')])
    ## ie-dot11s-peer-management.h: void ns3::dot11s::IePeerManagement::SetPeerConfirm(uint16_t localLinkID, uint16_t peerLinkId) [member function]
    cls.add_method('SetPeerConfirm',
                   'void',
                   [param('uint16_t', 'localLinkID'), param('uint16_t', 'peerLinkId')])
    ## ie-dot11s-peer-management.h: void ns3::dot11s::IePeerManagement::SetPeerOpen(uint16_t localLinkId) [member function]
    cls.add_method('SetPeerOpen',
                   'void',
                   [param('uint16_t', 'localLinkId')])
    ## ie-dot11s-peer-management.h: bool ns3::dot11s::IePeerManagement::SubtypeIsClose() const [member function]
    cls.add_method('SubtypeIsClose',
                   'bool',
                   [],
                   is_const=True)
    ## ie-dot11s-peer-management.h: bool ns3::dot11s::IePeerManagement::SubtypeIsConfirm() const [member function]
    cls.add_method('SubtypeIsConfirm',
                   'bool',
                   [],
                   is_const=True)
    ## ie-dot11s-peer-management.h: bool ns3::dot11s::IePeerManagement::SubtypeIsOpen() const [member function]
    cls.add_method('SubtypeIsOpen',
                   'bool',
                   [],
                   is_const=True)
    return
def register_Ns3Dot11sPeerLink_methods(root_module, cls):
    """Register pybindgen bindings for ns3::dot11s::PeerLink (single mesh peer link state machine)."""
    ## peer-link.h: static ns3::TypeId ns3::dot11s::PeerLink::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## peer-link.h: ns3::dot11s::PeerLink::PeerLink() [constructor]
    cls.add_constructor([])
    ## peer-link.h: void ns3::dot11s::PeerLink::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   is_virtual=True)
    ## peer-link.h: void ns3::dot11s::PeerLink::SetBeaconInformation(ns3::Time lastBeacon, ns3::Time BeaconInterval) [member function]
    cls.add_method('SetBeaconInformation',
                   'void',
                   [param('ns3::Time', 'lastBeacon'), param('ns3::Time', 'BeaconInterval')])
    ## peer-link.h: void ns3::dot11s::PeerLink::SetLinkStatusCallback(ns3::Callback<void,unsigned int,ns3::Mac48Address,bool,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetLinkStatusCallback',
                   'void',
                   [param('ns3::Callback< void, unsigned int, ns3::Mac48Address, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')])
    ## peer-link.h: void ns3::dot11s::PeerLink::SetPeerAddress(ns3::Mac48Address macaddr) [member function]
    cls.add_method('SetPeerAddress',
                   'void',
                   [param('ns3::Mac48Address', 'macaddr')])
    ## peer-link.h: void ns3::dot11s::PeerLink::SetPeerMeshPointAddress(ns3::Mac48Address macaddr) [member function]
    cls.add_method('SetPeerMeshPointAddress',
                   'void',
                   [param('ns3::Mac48Address', 'macaddr')])
    ## peer-link.h: void ns3::dot11s::PeerLink::SetInterface(uint32_t interface) [member function]
    cls.add_method('SetInterface',
                   'void',
                   [param('uint32_t', 'interface')])
    ## peer-link.h: void ns3::dot11s::PeerLink::SetLocalLinkId(uint16_t id) [member function]
    cls.add_method('SetLocalLinkId',
                   'void',
                   [param('uint16_t', 'id')])
    ## peer-link.h: void ns3::dot11s::PeerLink::SetLocalAid(uint16_t aid) [member function]
    cls.add_method('SetLocalAid',
                   'void',
                   [param('uint16_t', 'aid')])
    ## peer-link.h: uint16_t ns3::dot11s::PeerLink::GetPeerAid() const [member function]
    cls.add_method('GetPeerAid',
                   'uint16_t',
                   [],
                   is_const=True)
    ## peer-link.h: void ns3::dot11s::PeerLink::SetBeaconTimingElement(ns3::dot11s::IeBeaconTiming beaconTiming) [member function]
    cls.add_method('SetBeaconTimingElement',
                   'void',
                   [param('ns3::dot11s::IeBeaconTiming', 'beaconTiming')])
    ## peer-link.h: ns3::Mac48Address ns3::dot11s::PeerLink::GetPeerAddress() const [member function]
    cls.add_method('GetPeerAddress',
                   'ns3::Mac48Address',
                   [],
                   is_const=True)
    ## peer-link.h: uint16_t ns3::dot11s::PeerLink::GetLocalAid() const [member function]
    cls.add_method('GetLocalAid',
                   'uint16_t',
                   [],
                   is_const=True)
    ## peer-link.h: ns3::Time ns3::dot11s::PeerLink::GetLastBeacon() const [member function]
    cls.add_method('GetLastBeacon',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## peer-link.h: ns3::Time ns3::dot11s::PeerLink::GetBeaconInterval() const [member function]
    cls.add_method('GetBeaconInterval',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## peer-link.h: ns3::dot11s::IeBeaconTiming ns3::dot11s::PeerLink::GetBeaconTimingElement() const [member function]
    cls.add_method('GetBeaconTimingElement',
                   'ns3::dot11s::IeBeaconTiming',
                   [],
                   is_const=True)
    ## peer-link.h: void ns3::dot11s::PeerLink::MLMECancelPeerLink(ns3::dot11s::PmpReasonCode reason) [member function]
    cls.add_method('MLMECancelPeerLink',
                   'void',
                   [param('ns3::dot11s::PmpReasonCode', 'reason')])
    ## peer-link.h: void ns3::dot11s::PeerLink::MLMEActivePeerLinkOpen() [member function]
    cls.add_method('MLMEActivePeerLinkOpen',
                   'void',
                   [])
    ## peer-link.h: void ns3::dot11s::PeerLink::MLMEPeeringRequestReject() [member function]
    cls.add_method('MLMEPeeringRequestReject',
                   'void',
                   [])
    ## peer-link.h: void ns3::dot11s::PeerLink::MLMESetSignalStatusCallback(ns3::Callback<void, unsigned int, ns3::Mac48Address, ns3::Mac48Address, ns3::dot11s::PeerLink::PeerState, ns3::dot11s::PeerLink::PeerState, ns3::empty, ns3::empty, ns3::empty, ns3::empty> arg0) [member function]
    cls.add_method('MLMESetSignalStatusCallback',
                   'void',
                   [param('ns3::Callback< void, unsigned int, ns3::Mac48Address, ns3::Mac48Address, ns3::dot11s::PeerLink::PeerState, ns3::dot11s::PeerLink::PeerState, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'arg0')])
    ## peer-link.h: void ns3::dot11s::PeerLink::TransmissionSuccess() [member function]
    cls.add_method('TransmissionSuccess',
                   'void',
                   [])
    ## peer-link.h: void ns3::dot11s::PeerLink::TransmissionFailure() [member function]
    cls.add_method('TransmissionFailure',
                   'void',
                   [])
    ## peer-link.h: void ns3::dot11s::PeerLink::Report(std::ostream & os) const [member function]
    cls.add_method('Report',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    return
def register_Ns3Dot11sPeerManagementProtocol_methods(root_module, cls):
    """Register pybindgen bindings for ns3::dot11s::PeerManagementProtocol (802.11s peer management)."""
    ## peer-management-protocol.h: ns3::dot11s::PeerManagementProtocol::PeerManagementProtocol() [constructor]
    cls.add_constructor([])
    ## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::ConfigurationMismatch(uint32_t interface, ns3::Mac48Address peerAddress) [member function]
    cls.add_method('ConfigurationMismatch',
                   'void',
                   [param('uint32_t', 'interface'), param('ns3::Mac48Address', 'peerAddress')])
    ## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   is_virtual=True)
    ## peer-management-protocol.h: ns3::Ptr<ns3::dot11s::PeerLink> ns3::dot11s::PeerManagementProtocol::FindPeerLink(uint32_t interface, ns3::Mac48Address peerAddress) [member function]
    cls.add_method('FindPeerLink',
                   'ns3::Ptr< ns3::dot11s::PeerLink >',
                   [param('uint32_t', 'interface'), param('ns3::Mac48Address', 'peerAddress')])
    ## peer-management-protocol.h: ns3::Mac48Address ns3::dot11s::PeerManagementProtocol::GetAddress() [member function]
    cls.add_method('GetAddress',
                   'ns3::Mac48Address',
                   [])
    ## peer-management-protocol.h: bool ns3::dot11s::PeerManagementProtocol::GetBeaconCollisionAvoidance() const [member function]
    cls.add_method('GetBeaconCollisionAvoidance',
                   'bool',
                   [],
                   is_const=True)
    ## peer-management-protocol.h: ns3::Ptr<ns3::dot11s::IeBeaconTiming> ns3::dot11s::PeerManagementProtocol::GetBeaconTimingElement(uint32_t interface) [member function]
    cls.add_method('GetBeaconTimingElement',
                   'ns3::Ptr< ns3::dot11s::IeBeaconTiming >',
                   [param('uint32_t', 'interface')])
    ## peer-management-protocol.h: ns3::Ptr<ns3::dot11s::IeMeshId> ns3::dot11s::PeerManagementProtocol::GetMeshId() const [member function]
    cls.add_method('GetMeshId',
                   'ns3::Ptr< ns3::dot11s::IeMeshId >',
                   [],
                   is_const=True)
    ## peer-management-protocol.h: uint8_t ns3::dot11s::PeerManagementProtocol::GetNumberOfLinks() [member function]
    cls.add_method('GetNumberOfLinks',
                   'uint8_t',
                   [])
    ## peer-management-protocol.h: std::vector<ns3::Ptr<ns3::dot11s::PeerLink>,std::allocator<ns3::Ptr<ns3::dot11s::PeerLink> > > ns3::dot11s::PeerManagementProtocol::GetPeerLinks() const [member function]
    cls.add_method('GetPeerLinks',
                   'std::vector< ns3::Ptr< ns3::dot11s::PeerLink > >',
                   [],
                   is_const=True)
    ## peer-management-protocol.h: std::vector<ns3::Mac48Address,std::allocator<ns3::Mac48Address> > ns3::dot11s::PeerManagementProtocol::GetPeers(uint32_t interface) const [member function]
    cls.add_method('GetPeers',
                   'std::vector< ns3::Mac48Address >',
                   [param('uint32_t', 'interface')],
                   is_const=True)
    ## peer-management-protocol.h: static ns3::TypeId ns3::dot11s::PeerManagementProtocol::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## peer-management-protocol.h: bool ns3::dot11s::PeerManagementProtocol::Install(ns3::Ptr<ns3::MeshPointDevice> arg0) [member function]
    cls.add_method('Install',
                   'bool',
                   [param('ns3::Ptr< ns3::MeshPointDevice >', 'arg0')])
    ## peer-management-protocol.h: bool ns3::dot11s::PeerManagementProtocol::IsActiveLink(uint32_t interface, ns3::Mac48Address peerAddress) [member function]
    cls.add_method('IsActiveLink',
                   'bool',
                   [param('uint32_t', 'interface'), param('ns3::Mac48Address', 'peerAddress')])
    ## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::NotifyBeaconSent(uint32_t interface, ns3::Time beaconInterval) [member function]
    cls.add_method('NotifyBeaconSent',
                   'void',
                   [param('uint32_t', 'interface'), param('ns3::Time', 'beaconInterval')])
    ## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::ReceiveBeacon(uint32_t interface, ns3::Mac48Address peerAddress, ns3::Time beaconInterval, ns3::Ptr<ns3::dot11s::IeBeaconTiming> beaconTiming) [member function]
    cls.add_method('ReceiveBeacon',
                   'void',
                   [param('uint32_t', 'interface'), param('ns3::Mac48Address', 'peerAddress'), param('ns3::Time', 'beaconInterval'), param('ns3::Ptr< ns3::dot11s::IeBeaconTiming >', 'beaconTiming')])
    ## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::ReceivePeerLinkFrame(uint32_t interface, ns3::Mac48Address peerAddress, ns3::Mac48Address peerMeshPointAddress, uint16_t aid, ns3::dot11s::IePeerManagement peerManagementElement, ns3::dot11s::IeConfiguration meshConfig) [member function]
    cls.add_method('ReceivePeerLinkFrame',
                   'void',
                   [param('uint32_t', 'interface'), param('ns3::Mac48Address', 'peerAddress'), param('ns3::Mac48Address', 'peerMeshPointAddress'), param('uint16_t', 'aid'), param('ns3::dot11s::IePeerManagement', 'peerManagementElement'), param('ns3::dot11s::IeConfiguration', 'meshConfig')])
    ## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::Report(std::ostream & arg0) const [member function]
    cls.add_method('Report',
                   'void',
                   [param('std::ostream &', 'arg0')],
                   is_const=True)
    ## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::ResetStats() [member function]
    cls.add_method('ResetStats',
                   'void',
                   [])
    ## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::SetBeaconCollisionAvoidance(bool enable) [member function]
    cls.add_method('SetBeaconCollisionAvoidance',
                   'void',
                   [param('bool', 'enable')])
    ## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::SetMeshId(std::string s) [member function]
    cls.add_method('SetMeshId',
                   'void',
                   [param('std::string', 's')])
    ## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::SetPeerLinkStatusCallback(ns3::Callback<void, ns3::Mac48Address, ns3::Mac48Address, unsigned int, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetPeerLinkStatusCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::Mac48Address, ns3::Mac48Address, unsigned int, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')])
    ## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::TransmissionFailure(uint32_t interface, ns3::Mac48Address const peerAddress) [member function]
    cls.add_method('TransmissionFailure',
                   'void',
                   [param('uint32_t', 'interface'), param('ns3::Mac48Address const', 'peerAddress')])
    ## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::TransmissionSuccess(uint32_t interface, ns3::Mac48Address const peerAddress) [member function]
    cls.add_method('TransmissionSuccess',
                   'void',
                   [param('uint32_t', 'interface'), param('ns3::Mac48Address const', 'peerAddress')])
    return
def register_functions(root_module):
    """Register free functions for every namespace of this module by delegating to per-namespace helpers."""
    module = root_module
    register_functions_ns3_Config(module.get_submodule('Config'), root_module)
    register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
    register_functions_ns3_addressUtils(module.get_submodule('addressUtils'), root_module)
    register_functions_ns3_aodv(module.get_submodule('aodv'), root_module)
    register_functions_ns3_dot11s(module.get_submodule('dot11s'), root_module)
    register_functions_ns3_flame(module.get_submodule('flame'), root_module)
    register_functions_ns3_internal(module.get_submodule('internal'), root_module)
    register_functions_ns3_olsr(module.get_submodule('olsr'), root_module)
    return
def register_functions_ns3_Config(module, root_module):
    """No free functions to bind in the ns3::Config namespace for this module."""
    return
def register_functions_ns3_FatalImpl(module, root_module):
    """No free functions to bind in the ns3::FatalImpl namespace for this module."""
    return
def register_functions_ns3_addressUtils(module, root_module):
    """No free functions to bind in the ns3::addressUtils namespace for this module."""
    return
def register_functions_ns3_aodv(module, root_module):
    """No free functions to bind in the ns3::aodv namespace for this module."""
    return
def register_functions_ns3_dot11s(module, root_module):
    """Bind the free functions of the ns3::dot11s namespace (currently only MakeIeMeshIdChecker)."""
    ## ie-dot11s-id.h: extern ns3::Ptr<ns3::AttributeChecker const> ns3::dot11s::MakeIeMeshIdChecker() [free function]
    module.add_function('MakeIeMeshIdChecker',
                        'ns3::Ptr< ns3::AttributeChecker const >',
                        [])
    return
def register_functions_ns3_flame(module, root_module):
    """No free functions to bind in the ns3::flame namespace for this module."""
    return
def register_functions_ns3_internal(module, root_module):
    """No free functions to bind in the ns3::internal namespace for this module."""
    return
def register_functions_ns3_olsr(module, root_module):
    """No free functions to bind in the ns3::olsr namespace for this module."""
    return
| ciprian-radu/unimap.ns-3noc | bindings/python/apidefs/gcc-ILP32/ns3_module_dot11s.py | Python | gpl-2.0 | 49,784 |
# shamelessly copied from pliExpertInfo (Vali, Mirakels, Littlesat)
from enigma import iServiceInformation, iPlayableService
from Components.Converter.Converter import Converter
from Components.Element import cached
from Components.config import config
from Tools.Transponder import ConvertToHumanReadable, getChannelNumber
from Tools.GetEcmInfo import GetEcmInfo
from Poll import Poll
def addspace(text):
	"""Return *text* with a single trailing space appended when it is non-empty.

	Falsy values (empty string, None) are returned unchanged, so callers can
	concatenate optional fields without producing stray separators.
	"""
	return text + " " if text else text
class PliExtraInfo(Poll, Converter, object):
def __init__(self, type):
Converter.__init__(self, type)
Poll.__init__(self)
self.type = type
self.poll_interval = 1000
self.poll_enabled = True
self.caid_data = (
( "0x100", "0x1ff", "Seca", "S", True ),
( "0x500", "0x5ff", "Via", "V", True ),
( "0x600", "0x6ff", "Irdeto", "I", True ),
( "0x900", "0x9ff", "NDS", "Nd", True ),
( "0xb00", "0xbff", "Conax", "Co", True ),
( "0xd00", "0xdff", "CryptoW", "Cw", True ),
( "0xe00", "0xeff", "PowerVU", "P", False ),
("0x1000", "0x10FF", "Tandberg", "TB", False ),
("0x1700", "0x17ff", "Beta", "B", True ),
("0x1800", "0x18ff", "Nagra", "N", True ),
("0x2600", "0x2600", "Biss", "Bi", False ),
("0x4ae0", "0x4ae1", "Dre", "D", False ),
("0x4aee", "0x4aee", "BulCrypt", "B1", False ),
("0x5581", "0x5581", "BulCrypt", "B2", False )
)
self.ca_table = (
("CryptoCaidSecaAvailable", "S", False),
("CryptoCaidViaAvailable", "V", False),
("CryptoCaidIrdetoAvailable", "I", False),
("CryptoCaidNDSAvailable", "Nd", False),
("CryptoCaidConaxAvailable", "Co", False),
("CryptoCaidCryptoWAvailable", "Cw", False),
("CryptoCaidPowerVUAvailable", "P", False),
("CryptoCaidBetaAvailable", "B", False),
("CryptoCaidNagraAvailable", "N", False),
("CryptoCaidBissAvailable", "Bi", False),
("CryptoCaidDreAvailable", "D", False),
("CryptoCaidBulCrypt1Available","B1", False),
("CryptoCaidBulCrypt2Available","B2", False),
("CryptoCaidTandbergAvailable", "TB", False),
("CryptoCaidSecaSelected", "S", True),
("CryptoCaidViaSelected", "V", True),
("CryptoCaidIrdetoSelected", "I", True),
("CryptoCaidNDSSelected", "Nd", True),
("CryptoCaidConaxSelected", "Co", True),
("CryptoCaidCryptoWSelected", "Cw", True),
("CryptoCaidPowerVUSelected", "P", True),
("CryptoCaidBetaSelected", "B", True),
("CryptoCaidNagraSelected", "N", True),
("CryptoCaidBissSelected", "Bi", True),
("CryptoCaidDreSelected", "D", True),
("CryptoCaidBulCrypt1Selected", "B1", True),
("CryptoCaidBulCrypt2Selected", "B2", True),
("CryptoCaidTandbergSelected", "TB", True),
)
self.ecmdata = GetEcmInfo()
self.feraw = self.fedata = self.updateFEdata = None
def getCryptoInfo(self, info):
if info.getInfo(iServiceInformation.sIsCrypted) == 1:
data = self.ecmdata.getEcmData()
self.current_source = data[0]
self.current_caid = data[1]
self.current_provid = data[2]
self.current_ecmpid = data[3]
else:
self.current_source = ""
self.current_caid = "0"
self.current_provid = "0"
self.current_ecmpid = "0"
def createCryptoBar(self, info):
res = ""
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
for caid_entry in self.caid_data:
if int(caid_entry[0], 16) <= int(self.current_caid, 16) <= int(caid_entry[1], 16):
color="\c0000??00"
else:
color = "\c007?7?7?"
try:
for caid in available_caids:
if int(caid_entry[0], 16) <= caid <= int(caid_entry[1], 16):
color="\c00????00"
except:
pass
if color != "\c007?7?7?" or caid_entry[4]:
if res: res += " "
res += color + caid_entry[3]
res += "\c00??????"
return res
def createCryptoSpecial(self, info):
caid_name = "FTA"
try:
for caid_entry in self.caid_data:
if int(caid_entry[0], 16) <= int(self.current_caid, 16) <= int(caid_entry[1], 16):
caid_name = caid_entry[2]
break
return caid_name + ":%04x:%04x:%04x:%04x" % (int(self.current_caid,16), int(self.current_provid,16), info.getInfo(iServiceInformation.sSID), int(self.current_ecmpid,16))
except:
pass
return ""
def createResolution(self, info):
xres = info.getInfo(iServiceInformation.sVideoWidth)
if xres == -1:
return ""
yres = info.getInfo(iServiceInformation.sVideoHeight)
mode = ("i", "p", " ")[info.getInfo(iServiceInformation.sProgressive)]
fps = str((info.getInfo(iServiceInformation.sFrameRate) + 500) / 1000)
return str(xres) + "x" + str(yres) + mode + fps
def createVideoCodec(self, info):
return ("MPEG2", "AVC", "MPEG1", "MPEG4-VC", "VC1", "VC1-SM", "HEVC", "")[info.getInfo(iServiceInformation.sVideoType)]
def createPIDInfo(self, info):
vpid = info.getInfo(iServiceInformation.sVideoPID)
apid = info.getInfo(iServiceInformation.sAudioPID)
pcrpid = info.getInfo(iServiceInformation.sPCRPID)
sidpid = info.getInfo(iServiceInformation.sSID)
tsid = info.getInfo(iServiceInformation.sTSID)
onid = info.getInfo(iServiceInformation.sONID)
if vpid < 0 : vpid = 0
if apid < 0 : apid = 0
if pcrpid < 0 : pcrpid = 0
if sidpid < 0 : sidpid = 0
if tsid < 0 : tsid = 0
if onid < 0 : onid = 0
return "%d-%d:%05d:%04d:%04d:%04d" % (onid, tsid, sidpid, vpid, apid, pcrpid)
def createTransponderInfo(self, fedata, feraw):
if not feraw:
return ""
elif "DVB-T" in feraw.get("tuner_type"):
tmp = addspace(self.createChannelNumber(fedata, feraw)) + addspace(self.createFrequency(feraw)) + addspace(self.createPolarization(fedata))
else:
tmp = addspace(self.createFrequency(feraw)) + addspace(self.createPolarization(fedata))
return addspace(self.createTunerSystem(fedata)) + tmp + addspace(self.createSymbolRate(fedata, feraw)) + addspace(self.createFEC(fedata, feraw)) \
+ addspace(self.createModulation(fedata)) + addspace(self.createOrbPos(feraw))
def createFrequency(self, feraw):
frequency = feraw.get("frequency")
if frequency:
if "DVB-T" in feraw.get("tuner_type"):
return str(int(frequency / 1000000. + 0.5))
else:
return str(int(frequency / 1000 + 0.5))
return ""
def createChannelNumber(self, fedata, feraw):
return "DVB-T" in feraw.get("tuner_type") and fedata.get("channel") or ""
def createSymbolRate(self, fedata, feraw):
if "DVB-T" in feraw.get("tuner_type"):
bandwidth = fedata.get("bandwidth")
if bandwidth:
return bandwidth
else:
symbolrate = fedata.get("symbol_rate")
if symbolrate:
return str(symbolrate / 1000)
return ""
def createPolarization(self, fedata):
return fedata.get("polarization_abbreviation") or ""
def createFEC(self, fedata, feraw):
if "DVB-T" in feraw.get("tuner_type"):
code_rate_lp = fedata.get("code_rate_lp")
code_rate_hp = fedata.get("code_rate_hp")
if code_rate_lp and code_rate_hp:
return code_rate_lp + "-" + code_rate_hp
else:
fec = fedata.get("fec_inner")
if fec:
return fec
return ""
def createModulation(self, fedata):
if fedata.get("tuner_type") == _("Terrestrial"):
constellation = fedata.get("constellation")
if constellation:
return constellation
else:
modulation = fedata.get("modulation")
if modulation:
return modulation
return ""
def createTunerType(self, feraw):
return feraw.get("tuner_type") or ""
def createTunerSystem(self, fedata):
return fedata.get("system") or ""
def createOrbPos(self, feraw):
orbpos = feraw.get("orbital_position")
if orbpos > 1800:
return str((float(3600 - orbpos)) / 10.0) + "\xc2\xb0 W"
elif orbpos > 0:
return str((float(orbpos)) / 10.0) + "\xc2\xb0 E"
return ""
def createOrbPosOrTunerSystem(self, fedata,feraw):
orbpos = self.createOrbPos(feraw)
if orbpos is not "":
return orbpos
return self.createTunerSystem(fedata)
def createProviderName(self, info):
return info.getInfoString(iServiceInformation.sProvider)
@cached
def getText(self):
service = self.source.service
if service is None:
return ""
info = service and service.info()
if not info:
return ""
if self.type == "CryptoInfo":
self.getCryptoInfo(info)
if config.usage.show_cryptoinfo.value:
return addspace(self.createCryptoBar(info)) + self.createCryptoSpecial(info)
else:
return addspace(self.createCryptoBar(info)) + addspace(self.current_source) + self.createCryptoSpecial(info)
if self.type == "CryptoBar":
self.getCryptoInfo(info)
return self.createCryptoBar(info)
if self.type == "CryptoSpecial":
self.getCryptoInfo(info)
return self.createCryptoSpecial(info)
if self.type == "ResolutionString":
return self.createResolution(info)
if self.type == "VideoCodec":
return self.createVideoCodec(info)
if self.updateFEdata:
feinfo = service.frontendInfo()
if feinfo:
self.feraw = feinfo.getAll(config.usage.infobar_frontend_source.value == "settings")
if self.feraw:
self.fedata = ConvertToHumanReadable(self.feraw)
feraw = self.feraw
if not feraw:
feraw = info.getInfoObject(iServiceInformation.sTransponderData)
fedata = ConvertToHumanReadable(feraw)
else:
fedata = self.fedata
if self.type == "All":
self.getCryptoInfo(info)
if config.usage.show_cryptoinfo.value:
return addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata, feraw) + "\n" \
+ addspace(self.createCryptoBar(info)) + addspace(self.createCryptoSpecial(info)) + "\n" \
+ addspace(self.createPIDInfo(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info)
else:
return addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata, feraw) + "\n" \
+ addspace(self.createCryptoBar(info)) + self.current_source + "\n" \
+ addspace(self.createCryptoSpecial(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info)
if self.type == "PIDInfo":
return self.createPIDInfo(info)
if not feraw:
return ""
if self.type == "ServiceInfo":
return addspace(self.createProviderName(info)) + addspace(self.createTunerSystem(fedata)) + addspace(self.createFrequency(feraw)) + addspace(self.createPolarization(fedata)) \
+ addspace(self.createSymbolRate(fedata, feraw)) + addspace(self.createFEC(fedata, feraw)) + addspace(self.createModulation(fedata)) + addspace(self.createOrbPos(feraw)) \
+ addspace(self.createVideoCodec(info)) + self.createResolution(info)
if self.type == "TransponderInfo":
return self.createTransponderInfo(fedata, feraw)
if self.type == "TransponderFrequency":
return self.createFrequency(feraw)
if self.type == "TransponderSymbolRate":
return self.createSymbolRate(fedata, feraw)
if self.type == "TransponderPolarization":
return self.createPolarization(fedata)
if self.type == "TransponderFEC":
return self.createFEC(fedata, feraw)
if self.type == "TransponderModulation":
return self.createModulation(fedata)
if self.type == "OrbitalPosition":
return self.createOrbPos(feraw)
if self.type == "TunerType":
return self.createTunerType(feraw)
if self.type == "TunerSystem":
return self.createTunerSystem(fedata)
if self.type == "OrbitalPositionOrTunerSystem":
return self.createOrbPosOrTunerSystem(fedata,feraw)
if self.type == "TerrestrialChannelNumber":
return self.createChannelNumber(fedata, feraw)
return _("invalid type")
text = property(getText)
@cached
def getBool(self):
service = self.source.service
info = service and service.info()
if not info:
return False
request_caid = None
for x in self.ca_table:
if x[0] == self.type:
request_caid = x[1]
request_selected = x[2]
break
if request_caid is None:
return False
if info.getInfo(iServiceInformation.sIsCrypted) != 1:
return False
data = self.ecmdata.getEcmData()
if data is None:
return False
current_caid = data[1]
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
for caid_entry in self.caid_data:
if caid_entry[3] == request_caid:
if request_selected:
if int(caid_entry[0], 16) <= int(current_caid, 16) <= int(caid_entry[1], 16):
return True
else: # request available
try:
for caid in available_caids:
if int(caid_entry[0], 16) <= caid <= int(caid_entry[1], 16):
return True
except:
pass
return False
boolean = property(getBool)
 def changed(self, what):
  # Track whether fresh frontend data should be fetched:
  #  - a specific evNewProgramInfo event flags updateFEdata = True;
  #  - evEnd drops the cached frontend data;
  #  - poll events only propagate once a specific event has occurred
  #    at least once (updateFEdata is not None).
  if what[0] == self.CHANGED_SPECIFIC:
   self.updateFEdata = False
   if what[1] == iPlayableService.evNewProgramInfo:
    self.updateFEdata = True
   if what[1] == iPlayableService.evEnd:
    self.feraw = self.fedata = None
   Converter.changed(self, what)
  elif what[0] == self.CHANGED_POLL and self.updateFEdata is not None:
   self.updateFEdata = False
   Converter.changed(self, what)
| Taapat/enigma2-openpli-vuplus | lib/python/Components/Converter/PliExtraInfo.py | Python | gpl-2.0 | 12,757 |
# Copyright (c) 2015-2016 Western Digital Corporation or its affiliates.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# Author: Chaitanya Kulkarni <chaitanya.kulkarni@hgst.com>
#
"""
NVMe Compare Command Testcase:-
1. Create a data file 1 with pattern 1515 to write.
2. Create a data file 2 with pattern 2525 to compare with.
3. Write a block of data pattern using data file1.
4. Compare written block to data file 2's pattern; shall fail.
5. Compare written block to data file1's pattern; shall pass.
"""
from nose.tools import assert_equal, assert_not_equal
from nvme_test_io import TestNVMeIO
class TestNVMeCompareCmd(TestNVMeIO):
    """
    NVMe compare command testcase; extends TestNVMeIO.
        - Attributes:
              - data_size : number of bytes transferred per IO.
              - start_block : LBA at which the IO starts.
              - compare_file : reference data file used by nvme compare.
              - test_log_dir : directory holding logs and temp files.
    """
    def __init__(self):
        """ Pre Section for TestNVMeCompareCmd """
        TestNVMeIO.__init__(self)
        self.data_size = 1024
        self.start_block = 1023
        self.setup_log_dir(self.__class__.__name__)
        self.compare_file = "{0}/compare_file.txt".format(self.test_log_dir)
        self.write_file = "{0}/{1}".format(self.test_log_dir, self.write_file)
        self.create_data_file(self.write_file, self.data_size, "15")
        self.create_data_file(self.compare_file, self.data_size, "25")
    def __del__(self):
        """ Post Section for TestNVMeCompareCmd """
        TestNVMeIO.__del__(self)
    def nvme_compare(self, cmp_file):
        """ Wrapper for nvme compare command.
            - Args:
                - cmp_file : data file used in nvme compare command.
            - Returns:
                - return code of the nvme compare command.
        """
        compare_cmd = ("nvme compare {0} --start-block={1} "
                       "--block-count={2} --data-size={3} "
                       "--data={4}").format(self.ns1,
                                            str(self.start_block),
                                            str(self.block_count),
                                            str(self.data_size),
                                            cmp_file)
        return self.exec_cmd(compare_cmd)
    def test_nvme_compare(self):
        """ Testcase main """
        # write pattern "15", then comparing against pattern "25" must
        # fail and comparing against the written pattern must succeed
        assert_equal(self.nvme_write(), 0)
        assert_not_equal(self.nvme_compare(self.compare_file), 0)
        assert_equal(self.nvme_compare(self.write_file), 0)
| samiWaheed/nvme-cli | tests/nvme_compare_test.py | Python | gpl-2.0 | 3,106 |
#!/usr/bin/python -t
__requires__ = 'TurboGears'
import pkg_resources
pkg_resources.require('CherryPy >= 2.0, < 3.0alpha')
import logging
logging.basicConfig()
import os
import sys
import getopt
import xmlrpclib
import smtplib
from email.Message import Message
import warnings
# Ignore DeprecationWarnings. This allows us to stop getting email
# from the cron job. We'll see the same warnings from the server starting up
warnings.simplefilter('ignore', DeprecationWarning)
import turbogears
import bugzilla
from turbogears import config
cfgfile = '/etc/export-bugzilla.cfg'
if os.access('./export-bugzilla.cfg', os.R_OK):
cfgfile = './export-bugzilla.cfg'
turbogears.update_config(configfile=cfgfile)
from turbogears.database import session
from fas.model import BugzillaQueue
BZSERVER = config.get('bugzilla.url', 'https://bugdev.devel.redhat.com/bugzilla-cvs/xmlrpc.cgi')
BZUSER = config.get('bugzilla.username')
BZPASS = config.get('bugzilla.password')
MAILSERVER = config.get('mail.server', 'localhost')
ADMINEMAIL = config.get('mail.admin_email', 'admin@fedoraproject.org')
NOTIFYEMAIL = config.get('mail.notify_email', ['admin@fedoraproject.org'])
if __name__ == '__main__':
opts, args = getopt.getopt(sys.argv[1:], '', ('usage', 'help'))
if len(args) != 2 or ('--usage','') in opts or ('--help','') in opts:
print """
Usage: export-bugzilla.py GROUP BUGZILLA_GROUP
"""
sys.exit(1)
ourGroup = args[0]
bzGroup = args[1]
server = bugzilla.Bugzilla(url=BZSERVER, user=BZUSER, password=BZPASS,
cookiefile=None, tokenfile=None)
bugzilla_queue = BugzillaQueue.query.join('group').filter_by(
name=ourGroup)
no_bz_account = []
for entry in bugzilla_queue:
# Make sure we have a record for this user in bugzilla
if entry.action == 'r':
# Remove the user's bugzilla group
try:
server.updateperms(entry.email, 'rem', bzGroup)
except xmlrpclib.Fault, e:
if e.faultCode == 51:
# It's okay, not having this user is equivalent to setting
# them to not have this group.
pass
else:
raise
elif entry.action == 'a':
# Make sure the user exists
try:
server.getuser(entry.email)
except xmlrpclib.Fault, e:
if e.faultCode == 51:
# This user doesn't have a bugzilla account yet
# add them to a list and we'll let them know.
no_bz_account.append(entry)
continue
else:
print 'Error:', e, entry.email, entry.person.human_name
raise
server.updateperms(entry.email, 'add', bzGroup)
else:
print 'Unrecognized action code: %s %s %s %s %s' % (entry.action,
entry.email, entry.person.human_name, entry.person.username, entry.group.name)
continue
# Remove them from the queue
session.delete(entry)
session.flush()
# Mail the people without bugzilla accounts
if '$USER' in NOTIFYEMAIL:
for person in no_bz_account:
smtplib.SMTP(MAILSERVER)
msg = Message()
message = '''Hello %(name)s,
As a Fedora packager, we grant you permissions to make changes to bugs in
bugzilla to all Fedora bugs. This lets you work together with other Fedora
developers in an easier fashion. However, to enable this functionality, we
need to have your bugzilla email address stored in the Fedora Account System.
At the moment you have:
%(email)s
which bugzilla is telling us is not an account in bugzilla. If you could
please set up an account in bugzilla with this address or change your email
address on your Fedora Account to match an existing bugzilla account this would
let us go forward.
Note: this message is being generated by an automated script. You'll continue
getting this message until the problem is resolved. Sorry for the
inconvenience.
Thank you,
The Fedora Account System
%(admin_email)s
''' % {'name': person.person.human_name, 'email': person.email,
'admin_email': ADMINEMAIL}
msg.add_header('To', person.email)
msg.add_header('From', ADMINEMAIL)
msg.add_header('Subject', 'Fedora Account System and Bugzilla Mismatch')
msg.set_payload(message)
smtp = smtplib.SMTP(MAILSERVER)
smtp.sendmail(ADMINEMAIL, [person.email], msg.as_string())
smtp.quit()
recipients = [e for e in NOTIFYEMAIL if e != '$USER']
if recipients and no_bz_account:
smtplib.SMTP(MAILSERVER)
msg = Message()
people = []
for person in no_bz_account:
if person.person.status == 'Active':
people.append(' %(user)s -- %(name)s -- %(email)s' %
{'name': person.person.human_name, 'email': person.email,
'user': person.person.username})
if people:
people = '\n'.join(people)
message = '''
The following people are in the packager group but do not have email addresses
that are valid in bugzilla:
%s
''' % people
msg.add_header('From', ADMINEMAIL)
msg.add_header('To', ', '.join(recipients))
msg.add_header('Subject', 'Fedora Account System and Bugzilla Mismatch')
msg.set_payload(message)
smtp = smtplib.SMTP(MAILSERVER)
smtp.sendmail(ADMINEMAIL, recipients, msg.as_string())
smtp.quit()
| ampotty/fas | scripts/export-bugzilla.py | Python | gpl-2.0 | 5,752 |
# -*- coding: utf-8 -*-
# Copyright (C) 2010 Holoscópio Tecnologia
# Author: Luciana Fujii Pontello <luciana@holoscopio.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import gobject
import gtk
from sltv.settings import UI_DIR
from core import InputUI
class AutoAudioInputUI(InputUI):
    """UI adapter for the automatic audio input source.

    Provides only the plugin's name and description; it exposes no
    configuration widget.
    """
    def __init__(self):
        InputUI.__init__(self)
    def get_widget(self):
        # No configuration UI is needed for the auto-detected audio source.
        return None
    def get_name(self):
        return "AutoAudio"
    def get_description(self):
        return "Auto Audio Source"
| Geheimorganisation/sltv | sltv/ui/input/autoaudioinput.py | Python | gpl-2.0 | 1,172 |
# If your project uses a database, you can set up database tests
# similar to what you see below. Be sure to set the db_uri to
# an appropriate uri for your testing database. sqlite is a good
# choice for testing, because you can use an in-memory database
# which is very fast.
from turbogears import testutil, database
# from funcweb.model import YourDataClass, User
# database.set_db_uri("sqlite:///:memory:")
# class TestUser(testutil.DBTest):
# def get_model(self):
# return User
# def test_creation(self):
# "Object creation should set the name"
# obj = User(user_name = "creosote",
# email_address = "spam@python.not",
# display_name = "Mr Creosote",
# password = "Wafer-thin Mint")
# assert obj.display_name == "Mr Creosote"
| dockerera/func | funcweb/funcweb/tests/test_model.py | Python | gpl-2.0 | 841 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Attitude indicator widget.
"""
import sys
from PyQt4 import QtGui, QtCore
__author__ = 'Bitcraze AB'
__all__ = ['AttitudeIndicator']
class AttitudeIndicator(QtGui.QWidget):
    """Widget for showing attitude (artificial horizon) plus an optional
    hover-mode altitude readout (current ASL vs. target ASL)."""
    def __init__(self):
        super(AttitudeIndicator, self).__init__()
        self.roll = 0
        self.pitch = 0
        self.hover = False
        self.hoverASL = 0.0
        self.hoverTargetASL = 0.0
        self.setMinimumSize(30, 30)
        # self.setMaximumSize(240,240)
    def setRoll(self, roll):
        """Set the displayed roll angle (degrees) and repaint."""
        self.roll = roll
        self.repaint()
    def setPitch(self, pitch):
        """Set the displayed pitch angle and repaint."""
        self.pitch = pitch
        self.repaint()
    def setHover(self, target):
        """Set the hover target ASL; hover display is on when target > 0."""
        self.hoverTargetASL = target
        self.hover = target > 0
        self.repaint()
    def setBaro(self, asl):
        """Set the current barometric ASL reading and repaint."""
        self.hoverASL = asl
        self.repaint()
    def setRollPitch(self, roll, pitch):
        """Set roll and pitch together with a single repaint."""
        self.roll = roll
        self.pitch = pitch
        self.repaint()
    def paintEvent(self, e):
        qp = QtGui.QPainter()
        qp.begin(self)
        self.drawWidget(qp)
        qp.end()
    def drawWidget(self, qp):
        size = self.size()
        w = size.width()
        h = size.height()
        # Rotate/translate the painter so the horizon reflects roll/pitch.
        qp.translate(w / 2, h / 2)
        qp.rotate(self.roll)
        qp.translate(0, (self.pitch * h) / 50)
        qp.translate(-w / 2, -h / 2)
        qp.setRenderHint(qp.Antialiasing)
        font = QtGui.QFont('Serif', 7, QtGui.QFont.Light)
        qp.setFont(font)
        # Draw the blue (sky)
        qp.setPen(QtGui.QColor(0, 61, 144))
        qp.setBrush(QtGui.QColor(0, 61, 144))
        qp.drawRect(-w, h / 2, 3 * w, -3 * h)
        # Draw the marron (ground)
        qp.setPen(QtGui.QColor(59, 41, 39))
        qp.setBrush(QtGui.QColor(59, 41, 39))
        qp.drawRect(-w, h / 2, 3 * w, 3 * h)
        pen = QtGui.QPen(QtGui.QColor(255, 255, 255), 1.5,
                         QtCore.Qt.SolidLine)
        qp.setPen(pen)
        qp.drawLine(-w, h / 2, 3 * w, h / 2)
        # Drawing pitch lines
        for ofset in [-180, 0, 180]:
            for i in range(-900, 900, 25):
                pos = (((i / 10.0) + 25 + ofset) * h / 50.0)
                if i % 100 == 0:
                    length = 0.35 * w
                    if i != 0:
                        if ofset == 0:
                            qp.drawText((w / 2) + (length / 2) + (w * 0.06),
                                        pos, "{}".format(-i / 10))
                            qp.drawText((w / 2) - (length / 2) - (w * 0.08),
                                        pos, "{}".format(-i / 10))
                        else:
                            qp.drawText((w / 2) + (length / 2) + (w * 0.06),
                                        pos, "{}".format(i / 10))
                            qp.drawText((w / 2) - (length / 2) - (w * 0.08),
                                        pos, "{}".format(i / 10))
                elif i % 50 == 0:
                    length = 0.2 * w
                else:
                    length = 0.1 * w
                qp.drawLine((w / 2) - (length / 2), pos,
                            (w / 2) + (length / 2), pos)
        qp.setWorldMatrixEnabled(False)
        pen = QtGui.QPen(QtGui.QColor(0, 0, 0), 2,
                         QtCore.Qt.SolidLine)
        qp.setBrush(QtGui.QColor(0, 0, 0))
        qp.setPen(pen)
        qp.drawLine(0, h / 2, w, h / 2)
        # Draw Hover vs Target
        qp.setWorldMatrixEnabled(False)
        pen = QtGui.QPen(QtGui.QColor(255, 255, 255), 2,
                         QtCore.Qt.SolidLine)
        qp.setBrush(QtGui.QColor(255, 255, 255))
        qp.setPen(pen)
        fh = max(7, h / 50)
        font = QtGui.QFont('Sans', fh, QtGui.QFont.Light)
        qp.setFont(font)
        qp.resetTransform()
        qp.translate(0, h / 2)
        if not self.hover:
            # asl
            qp.drawText(w - fh * 10, fh / 2, str(round(self.hoverASL, 2)))
        if self.hover:
            # target asl (center)
            qp.drawText(
                w - fh * 10, fh / 2, str(round(self.hoverTargetASL, 2)))
            diff = round(self.hoverASL - self.hoverTargetASL, 2)
            # cap to +- 2.8m (a redundant dead pre-assignment of pos_y was
            # removed; the branches below always assign it)
            if diff < -2.8:
                pos_y = -h / 6 * -2.8
            elif diff > 2.8:
                pos_y = -h / 6 * 2.8
            else:
                pos_y = -h / 6 * diff
            # difference from target (moves up and down +- 2.8m)
            qp.drawText(w - fh * 3.8, pos_y + fh / 2, str(diff))
            # vertical line
            qp.drawLine(w - fh * 4.5, 0, w - fh * 4.5, pos_y)
            # left horizontal line
            qp.drawLine(w - fh * 4.7, 0, w - fh * 4.5, 0)
            # right horizontal line
            qp.drawLine(w - fh * 4.2, pos_y, w - fh * 4.5, pos_y)
if __name__ == "__main__":
    # Stand-alone interactive demo: sliders drive the AttitudeIndicator.
    class Example(QtGui.QWidget):
        """Demo harness exposing sliders for roll, pitch, ASL and target."""
        def __init__(self):
            super(Example, self).__init__()
            self.initUI()
        def updatePitch(self, pitch):
            # slider range 0..180 mapped to pitch -90..90
            self.wid.setPitch(pitch - 90)
        def updateRoll(self, roll):
            # slider range 0..3600 mapped to roll -180.0..180.0
            self.wid.setRoll((roll / 10.0) - 180.0)
        def updateTarget(self, target):
            self.wid.setHover(500 + target / 10.)
        def updateBaro(self, asl):
            self.wid.setBaro(500 + asl / 10.)
        def initUI(self):
            vbox = QtGui.QVBoxLayout()
            sld = QtGui.QSlider(QtCore.Qt.Horizontal, self)
            sld.setFocusPolicy(QtCore.Qt.NoFocus)
            sld.setRange(0, 3600)
            sld.setValue(1800)
            vbox.addWidget(sld)
            self.wid = AttitudeIndicator()
            sld.valueChanged[int].connect(self.updateRoll)
            vbox.addWidget(self.wid)
            hbox = QtGui.QHBoxLayout()
            hbox.addLayout(vbox)
            sldPitch = QtGui.QSlider(QtCore.Qt.Vertical, self)
            sldPitch.setFocusPolicy(QtCore.Qt.NoFocus)
            sldPitch.setRange(0, 180)
            sldPitch.setValue(90)
            sldPitch.valueChanged[int].connect(self.updatePitch)
            hbox.addWidget(sldPitch)
            sldASL = QtGui.QSlider(QtCore.Qt.Vertical, self)
            sldASL.setFocusPolicy(QtCore.Qt.NoFocus)
            sldASL.setRange(-200, 200)
            sldASL.setValue(0)
            sldASL.valueChanged[int].connect(self.updateBaro)
            sldT = QtGui.QSlider(QtCore.Qt.Vertical, self)
            sldT.setFocusPolicy(QtCore.Qt.NoFocus)
            sldT.setRange(-200, 200)
            sldT.setValue(0)
            sldT.valueChanged[int].connect(self.updateTarget)
            hbox.addWidget(sldT)
            hbox.addWidget(sldASL)
            self.setLayout(hbox)
            self.setGeometry(50, 50, 510, 510)
            self.setWindowTitle('Attitude Indicator')
            self.show()
        def changeValue(self, value):
            # NOTE(review): self.c is never assigned anywhere in this class,
            # so calling this method would raise AttributeError; it looks
            # like unused leftover demo code -- confirm before relying on it.
            self.c.updateBW.emit(value)
            self.wid.repaint()
    def main():
        app = QtGui.QApplication(sys.argv)
        ex = Example()
        sys.exit(app.exec_())
    # NOTE(review): this inner guard is redundant (we are already inside
    # the outer __main__ guard), but harmless.
    if __name__ == '__main__':
        main()
| manojngb/Crazyfly_simple_lift | src/cfclient/ui/widgets/ai.py | Python | gpl-2.0 | 8,202 |
#
# Copyright (c) 2007, 2008 Agostino Russo
# Python port of wubi/disckimage/main.c by Hampus Wessman
#
# Written by Agostino Russo <agostino.russo@gmail.com>
#
# win32.ui is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# win32.ui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''
Allocates disk space for the virtual disk
'''
import ctypes
from ctypes import c_long, byref
from winui import defs
import sys
import logging
log = logging.getLogger('Virtualdisk')
def create_virtual_disk(path, size_mb):
    '''
    Fast allocation of disk space
    This is done by using the windows API
    The initial and final block are zeroed

    path: file name of the virtual disk image to create (overwritten
          if it already exists, via CREATE_ALWAYS).
    size_mb: desired size in megabytes; values < 1 are ignored.
    '''
    log.debug("  Creating virtual disk %s of %sMB" % (path, size_mb))
    # Number of bytes wiped at the start and at the end of the file.
    clear_bytes = 1000000
    if not size_mb or size_mb < 1:
        return
    # Get Permission (SE_MANAGE_VOLUME_NAME, needed by SetFileValidData)
    grant_privileges()
    # Create file
    file_handle = defs.CreateFileW(
        unicode(path),
        defs.GENERIC_READ | defs.GENERIC_WRITE,
        0,
        defs.NULL,
        defs.CREATE_ALWAYS,
        defs.FILE_ATTRIBUTE_NORMAL,
        defs.NULL)
    if file_handle == defs.INVALID_HANDLE_VALUE:
        log.exception("Failed to create file %s" % path)
    # Set pointer to end of file */
    file_pos = defs.LARGE_INTEGER()
    file_pos.QuadPart = size_mb*1024*1024
    if not defs.SetFilePointerEx(file_handle, file_pos, 0, defs.FILE_BEGIN):
        log.exception("Failed to set file pointer to end of file")
    # Set end of file (this is what actually allocates the space)
    if not defs.SetEndOfFile(file_handle):
        log.exception("Failed to extend file. Not enough free space?")
    # Set valid data (if possible), ignore errors; this skips the slow
    # zero-fill Windows would otherwise perform on first access.
    call_SetFileValidData(file_handle, file_pos)
    # Set pointer to beginning of file
    file_pos.QuadPart = 0
    result = defs.SetFilePointerEx(
        file_handle,
        file_pos,
        defs.NULL,
        defs.FILE_BEGIN)
    if not result:
        log.exception("Failed to set file pointer to beginning of file")
    # Zero chunk of file
    zero_file(file_handle, clear_bytes)
    # Set pointer to end - clear_bytes of file
    file_pos.QuadPart = size_mb*1024*1024 - clear_bytes
    result = defs.SetFilePointerEx(
        file_handle,
        file_pos,
        defs.NULL,
        defs.FILE_BEGIN)
    if not result:
        log.exception("Failed to set file pointer to end - clear_bytes of file")
    # Zero file
    zero_file(file_handle, clear_bytes)
    defs.CloseHandle(file_handle)
def grant_privileges():
    '''
    Enable the SE_MANAGE_VOLUME_NAME privilege on the current process
    token, which SetFileValidData() requires. Failures are only logged.
    '''
    # For version < Windows NT, no privileges are involved
    full_version = sys.getwindowsversion()
    major, minor, build, platform, txt = full_version
    if platform < 2:
        log.debug("Skipping grant_privileges, because Windows 95/98/ME was detected")
        return
    # SetFileValidData() requires the SE_MANAGE_VOLUME_NAME privilege, so we must enable it
    # on the process token. We don't attempt to strip the privilege afterward as that would
    # introduce race conditions. */
    handle = ctypes.c_long(0)
    if defs.OpenProcessToken(defs.GetCurrentProcess(), defs.TOKEN_ADJUST_PRIVILEGES|defs.TOKEN_QUERY, byref(handle)):
        luid = defs.LUID()
        if defs.LookupPrivilegeValue(defs.NULL, defs.SE_MANAGE_VOLUME_NAME, byref(luid)):
            tp = defs.TOKEN_PRIVILEGES()
            tp.PrivilegeCount = 1
            tp.Privileges[0].Luid = luid
            tp.Privileges[0].Attributes = defs.SE_PRIVILEGE_ENABLED
            if not defs.AdjustTokenPrivileges(handle, defs.FALSE, byref(tp), 0, defs.NULL, defs.NULL):
                log.debug("grant_privileges: AdjustTokenPrivileges() failed.")
        else:
            log.debug("grant_privileges: LookupPrivilegeValue() failed.")
        defs.CloseHandle(handle)
    else:
        log.debug("grant_privileges: OpenProcessToken() failed.")
def call_SetFileValidData(file_handle, size_bytes):
    '''
    Best-effort call of the Win32 SetFileValidData API on file_handle,
    marking size_bytes of the file as valid so Windows skips the slow
    zero-fill. Silently returns when the API is unavailable.
    '''
    # No need, Windows 95/98/ME do this automatically anyway.
    full_version = sys.getwindowsversion()
    major, minor, build, platform, txt = full_version
    if platform < 2:
        log.debug("Skipping SetFileValidData, because Windows 95/98/ME was detected")
        return
    try:
        SetFileValidData = ctypes.windll.kernel32.SetFileValidData
    except (AttributeError, OSError):
        # AttributeError: the function is not exported (older kernels);
        # OSError/WindowsError: kernel32 could not be loaded. Narrowed
        # from a bare except so unrelated errors are not hidden.
        log.debug("Could not load SetFileValidData.")
        return
    SetFileValidData(file_handle, size_bytes)
def zero_file(file_handle, clear_bytes):
    '''
    Overwrite the next clear_bytes bytes of the file (from the current
    file pointer) with NUL bytes, in buf_size chunks.
    '''
    bytes_cleared = 0
    buf_size = 1000
    n_bytes_written = c_long(0)
    # Bug fix: the buffer must hold NUL bytes (0x00), not the ASCII
    # character "0" (0x30), for the region to actually be zeroed as the
    # function name and callers ("Zero chunk of file") intend.
    write_buf = "\0"*buf_size
    while bytes_cleared < clear_bytes:
        bytes_to_write = buf_size
        if (bytes_to_write > clear_bytes - bytes_cleared):
            bytes_to_write = clear_bytes - bytes_cleared
        result = defs.WriteFile(
            file_handle,
            write_buf,
            bytes_to_write,
            byref(n_bytes_written),
            defs.NULL)
        if not result or not n_bytes_written.value:
            log.exception("WriteFile() failed!")
        bytes_cleared += n_bytes_written.value
| vyrus/wubi | src/wubi/backends/win32/virtualdisk.py | Python | gpl-2.0 | 5,753 |
# -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pipes
import random
import re
from six import iteritems
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.template import Templar
from ansible.utils.boolean import boolean
from ansible.utils.unicode import to_unicode
__all__ = ['PlayContext']
# the magic variable mapping dictionary below is used to translate
# host/inventory variables to fields in the PlayContext
# object. The dictionary values are tuples, to account for aliases
# in variable names.
MAGIC_VARIABLE_MAPPING = dict(
connection = ('ansible_connection',),
remote_addr = ('ansible_ssh_host', 'ansible_host'),
remote_user = ('ansible_ssh_user', 'ansible_user'),
port = ('ansible_ssh_port', 'ansible_port'),
password = ('ansible_ssh_pass', 'ansible_password'),
private_key_file = ('ansible_ssh_private_key_file', 'ansible_private_key_file'),
shell = ('ansible_shell_type',),
become = ('ansible_become',),
become_method = ('ansible_become_method',),
become_user = ('ansible_become_user',),
become_pass = ('ansible_become_password','ansible_become_pass'),
become_exe = ('ansible_become_exe',),
become_flags = ('ansible_become_flags',),
sudo = ('ansible_sudo',),
sudo_user = ('ansible_sudo_user',),
sudo_pass = ('ansible_sudo_password', 'ansible_sudo_pass'),
sudo_exe = ('ansible_sudo_exe',),
sudo_flags = ('ansible_sudo_flags',),
su = ('ansible_su',),
su_user = ('ansible_su_user',),
su_pass = ('ansible_su_password', 'ansible_su_pass'),
su_exe = ('ansible_su_exe',),
su_flags = ('ansible_su_flags',),
)
SU_PROMPT_LOCALIZATIONS = [
'Password',
'암호',
'パスワード',
'Adgangskode',
'Contraseña',
'Contrasenya',
'Hasło',
'Heslo',
'Jelszó',
'Lösenord',
'Mật khẩu',
'Mot de passe',
'Parola',
'Parool',
'Pasahitza',
'Passord',
'Passwort',
'Salasana',
'Sandi',
'Senha',
'Wachtwoord',
'ססמה',
'Лозинка',
'Парола',
'Пароль',
'गुप्तशब्द',
'शब्दकूट',
'సంకేతపదము',
'හස්පදය',
'密码',
'密碼',
]
# task attributes which, when set on a task object, take precedence over
# the play/host-level values while building the per-task PlayContext
# (consumed by PlayContext.set_task_and_variable_override below)
TASK_ATTRIBUTE_OVERRIDES = (
    'become',
    'become_user',
    'become_pass',
    'become_method',
    'connection',
    'delegate_to',
    'no_log',
    'remote_user',
)
class PlayContext(Base):
'''
This class is used to consolidate the connection information for
hosts in a play and child tasks, where the task may override some
connection/authentication information.
'''
# connection fields, some are inherited from Base:
# (connection, port, remote_user, environment, no_log)
_remote_addr = FieldAttribute(isa='string')
_password = FieldAttribute(isa='string')
_private_key_file = FieldAttribute(isa='string', default=C.DEFAULT_PRIVATE_KEY_FILE)
_timeout = FieldAttribute(isa='int', default=C.DEFAULT_TIMEOUT)
_shell = FieldAttribute(isa='string')
_ssh_extra_args = FieldAttribute(isa='string')
_connection_lockfd= FieldAttribute(isa='int')
# privilege escalation fields
_become = FieldAttribute(isa='bool')
_become_method = FieldAttribute(isa='string')
_become_user = FieldAttribute(isa='string')
_become_pass = FieldAttribute(isa='string')
_become_exe = FieldAttribute(isa='string')
_become_flags = FieldAttribute(isa='string')
_prompt = FieldAttribute(isa='string')
# backwards compatibility fields for sudo/su
_sudo_exe = FieldAttribute(isa='string')
_sudo_flags = FieldAttribute(isa='string')
_sudo_pass = FieldAttribute(isa='string')
_su_exe = FieldAttribute(isa='string')
_su_flags = FieldAttribute(isa='string')
_su_pass = FieldAttribute(isa='string')
# general flags
_verbosity = FieldAttribute(isa='int', default=0)
_only_tags = FieldAttribute(isa='set', default=set())
_skip_tags = FieldAttribute(isa='set', default=set())
_check_mode = FieldAttribute(isa='bool', default=False)
_force_handlers = FieldAttribute(isa='bool', default=False)
_start_at_task = FieldAttribute(isa='string')
_step = FieldAttribute(isa='bool', default=False)
_diff = FieldAttribute(isa='bool', default=False)
def __init__(self, play=None, options=None, passwords=None, connection_lockfd=None):
super(PlayContext, self).__init__()
if passwords is None:
passwords = {}
self.password = passwords.get('conn_pass','')
self.become_pass = passwords.get('become_pass','')
# a file descriptor to be used during locking operations
self.connection_lockfd = connection_lockfd
# set options before play to allow play to override them
if options:
self.set_options(options)
if play:
self.set_play(play)
def set_play(self, play):
'''
Configures this connection information instance with data from
the play class.
'''
if play.connection:
self.connection = play.connection
if play.remote_user:
self.remote_user = play.remote_user
if play.port:
self.port = int(play.port)
if play.become is not None:
self.become = play.become
if play.become_method:
self.become_method = play.become_method
if play.become_user:
self.become_user = play.become_user
# non connection related
self.no_log = play.no_log
if play.force_handlers is not None:
self.force_handlers = play.force_handlers
def set_options(self, options):
'''
Configures this connection information instance with data from
options specified by the user on the command line. These have a
lower precedence than those set on the play or host.
'''
if options.connection:
self.connection = options.connection
self.remote_user = options.remote_user
self.private_key_file = options.private_key_file
self.ssh_extra_args = options.ssh_extra_args
# privilege escalation
self.become = options.become
self.become_method = options.become_method
self.become_user = options.become_user
# general flags (should we move out?)
if options.verbosity:
self.verbosity = options.verbosity
#if options.no_log:
# self.no_log = boolean(options.no_log)
if options.check:
self.check_mode = boolean(options.check)
if hasattr(options, 'force_handlers') and options.force_handlers:
self.force_handlers = boolean(options.force_handlers)
if hasattr(options, 'step') and options.step:
self.step = boolean(options.step)
if hasattr(options, 'start_at_task') and options.start_at_task:
self.start_at_task = to_unicode(options.start_at_task)
if hasattr(options, 'diff') and options.diff:
self.diff = boolean(options.diff)
if hasattr(options, 'timeout') and options.timeout:
self.timeout = int(options.timeout)
# get the tag info from options, converting a comma-separated list
# of values into a proper list if need be. We check to see if the
# options have the attribute, as it is not always added via the CLI
if hasattr(options, 'tags'):
if isinstance(options.tags, list):
self.only_tags.update(options.tags)
elif isinstance(options.tags, basestring):
self.only_tags.update(options.tags.split(','))
if len(self.only_tags) == 0:
self.only_tags = set(['all'])
if hasattr(options, 'skip_tags'):
if isinstance(options.skip_tags, list):
self.skip_tags.update(options.skip_tags)
elif isinstance(options.skip_tags, basestring):
self.skip_tags.update(options.skip_tags.split(','))
def set_task_and_variable_override(self, task, variables):
'''
Sets attributes from the task if they are set, which will override
those from the play.
'''
new_info = self.copy()
# loop through a subset of attributes on the task object and set
# connection fields based on their values
for attr in TASK_ATTRIBUTE_OVERRIDES:
if hasattr(task, attr):
attr_val = getattr(task, attr)
if attr_val is not None:
setattr(new_info, attr, attr_val)
# next, use the MAGIC_VARIABLE_MAPPING dictionary to update this
# connection info object with 'magic' variables from the variable list
for (attr, variable_names) in iteritems(MAGIC_VARIABLE_MAPPING):
for variable_name in variable_names:
if variable_name in variables:
setattr(new_info, attr, variables[variable_name])
# make sure we get port defaults if needed
if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
new_info.port = int(C.DEFAULT_REMOTE_PORT)
# become legacy updates
if not new_info.become_pass:
if new_info.become_method == 'sudo' and new_info.sudo_pass:
setattr(new_info, 'become_pass', new_info.sudo_pass)
elif new_info.become_method == 'su' and new_info.su_pass:
setattr(new_info, 'become_pass', new_info.su_pass)
# finally, in the special instance that the task was specified
# as a local action, override the connection in case it was changed
# during some other step in the process
if task._local_action:
setattr(new_info, 'connection', 'local')
return new_info
def make_become_cmd(self, cmd, executable=None):
    """
    Helper function to create privilege escalation commands.

    Wraps *cmd* in the command line required by the configured become
    method (sudo/su/pbrun/pfexec/doas).  Side effects: sets
    ``self.prompt`` (string or callable used to detect a password
    prompt) and ``self.success_key`` (marker echoed on success so the
    caller can detect that escalation worked).

    :param cmd: the shell command to escalate.
    :param executable: shell used to run the wrapped command; defaults
        to ``C.DEFAULT_EXECUTABLE`` when ``None``.
    :returns: the escalated command string, or *cmd* unchanged when
        ``self.become`` is not set.
    :raises AnsibleError: for the unimplemented 'runas' method or an
        unknown become method.
    """
    prompt = None
    success_key = None
    self.prompt = None
    if executable is None:
        executable = C.DEFAULT_EXECUTABLE
    if self.become:
        becomecmd = None
        # random suffix makes the success marker unique per invocation,
        # so stale output from a previous run cannot be mistaken for it
        randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
        success_key = 'BECOME-SUCCESS-%s' % randbits
        # quoted so the compound 'echo marker; cmd' survives being passed
        # as a single argument to the user's shell
        success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd))
        # set executable to use for the privilege escalation method, with various overrides
        exe = self.become_exe or \
              getattr(self, '%s_exe' % self.become_method, None) or \
              C.DEFAULT_BECOME_EXE or \
              getattr(C, 'DEFAULT_%s_EXE' % self.become_method.upper(), None) or \
              self.become_method
        # set flags to use for the privilege escalation method, with various overrides
        flags = self.become_flags or \
                getattr(self, '%s_flags' % self.become_method, None) or \
                C.DEFAULT_BECOME_FLAGS or \
                getattr(C, 'DEFAULT_%s_FLAGS' % self.become_method.upper(), None) or \
                ''
        if self.become_method == 'sudo':
            # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
            # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
            # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
            # string to the user's shell.  We loop reading output until we see the randomly-generated
            # sudo prompt set with the -p option.
            # force quick error if password is required but not supplied, should prevent sudo hangs.
            if self.become_pass:
                prompt = '[sudo via ansible, key=%s] password: ' % randbits
                becomecmd = '%s %s -p "%s" -S -u %s %s -c %s' % (exe, flags, prompt, self.become_user, executable, success_cmd)
            else:
                # -n: non-interactive, fail instead of prompting
                becomecmd = '%s %s -n -S -u %s %s -c %s' % (exe, flags, self.become_user, executable, success_cmd)
        elif self.become_method == 'su':
            # su prompts are localized, so prompt detection is a callable
            # matching any of the known translations rather than a literal
            def detect_su_prompt(data):
                SU_PROMPT_LOCALIZATIONS_RE = re.compile("|".join(['(\w+\'s )?' + x + ' ?: ?' for x in SU_PROMPT_LOCALIZATIONS]), flags=re.IGNORECASE)
                return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data))
            prompt = detect_su_prompt
            becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, success_cmd)
        elif self.become_method == 'pbrun':
            # matches both "password:" and "Password:"
            prompt='assword:'
            becomecmd = '%s -b %s -u %s %s' % (exe, flags, self.become_user, success_cmd)
        elif self.become_method == 'pfexec':
            # No user as it uses it's own exec_attr to figure it out
            becomecmd = '%s %s "%s"' % (exe, flags, success_cmd)
        elif self.become_method == 'runas':
            raise AnsibleError("'runas' is not yet implemented")
            #TODO: figure out prompt
            # this is not for use with winrm plugin but if they ever get ssh native on windoez
            becomecmd = '%s %s /user:%s "%s"' % (exe, flags, self.become_user, success_cmd)
        elif self.become_method == 'doas':
            prompt = 'Password:'
            exe = self.become_exe or 'doas'
            if not self.become_pass:
                # -n: non-interactive; fail rather than hang waiting for a password
                flags += ' -n '
            if self.become_user:
                flags += ' -u %s ' % self.become_user
            becomecmd = '%s %s echo %s && %s %s env ANSIBLE=true %s' % (exe, flags, success_key, exe, flags, cmd)
        else:
            raise AnsibleError("Privilege escalation method not found: %s" % self.become_method)
        if self.become_pass:
            self.prompt = prompt
        self.success_key = success_key
        return ('%s -c %s' % (executable, pipes.quote(becomecmd)))
    return cmd
def update_vars(self, variables):
    '''
    Injects connection-related 'magic' variables into the provided
    variables dictionary so plays can read them (legacy behavior
    carried over from runner).  Existing values are never overwritten.
    '''
    #FIXME: remove password? possibly add become/sudo settings
    special_vars = (
        'ansible_connection',
        'ansible_ssh_host',
        'ansible_ssh_pass',
        'ansible_ssh_port',
        'ansible_ssh_user',
        'ansible_ssh_private_key_file',
    )
    for var_name in special_vars:
        if var_name in variables:
            # user-supplied value wins; do not clobber it
            continue
        for prop, aliases in MAGIC_VARIABLE_MAPPING.items():
            if var_name in aliases:
                variables[var_name] = getattr(self, prop)
| mcsalgado/ansible | lib/ansible/playbook/play_context.py | Python | gpl-3.0 | 16,111 |
from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.variable import mergeDicts, randomString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Library
import copy
import traceback
log = CPLog(__name__)
class MovieResultModifier(Plugin):
    """Normalizes and enriches raw movie search/info results before they
    are handed back to consumers of the 'result.modify.*' events."""

    # Baseline structure merged underneath every info result so callers
    # can rely on all of these keys being present.
    default_info = {
        'tmdb_id': 0,
        'titles': [],
        'original_title': '',
        'year': 0,
        'images': {
            'poster': [],
            'backdrop': [],
            'poster_original': [],
            'backdrop_original': [],
            'actors': {}
        },
        'runtime': 0,
        'plot': '',
        'tagline': '',
        'imdb': '',
        'genres': [],
        'mpaa': None,
        'actors': [],
        'actor_roles': {}
    }

    def __init__(self):
        addEvent('result.modify.info.search', self.returnByType)
        addEvent('result.modify.movie.search', self.combineOnIMDB)
        addEvent('result.modify.movie.info', self.checkLibrary)

    def returnByType(self, results):
        """Bucket results by pluralized media type ('movie' -> 'movies')."""
        grouped = {}
        for result in results:
            bucket = result.get('type', 'movie') + 's'
            grouped.setdefault(bucket, []).append(result)

        # Combine movies, needs a cleaner way..
        if 'movies' in grouped:
            grouped['movies'] = self.combineOnIMDB(grouped['movies'])

        return grouped

    def combineOnIMDB(self, results):
        """Merge result dicts sharing an IMDB id, keeping first-seen order.
        Results without an IMDB id get a random key so they stay separate."""
        merged = {}
        order = []

        for item in results:
            fallback_key = randomString()
            # missing OR empty imdb value falls back to the random key
            imdb = item.get('imdb', fallback_key) or fallback_key

            if not merged.get(imdb):
                merged[imdb] = self.getLibraryTags(imdb)
                order.append(imdb)

            # Merge dicts
            merged[imdb] = mergeDicts(merged[imdb], item)

        return [merged[key] for key in order]

    def getLibraryTags(self, imdb):
        """Return {'in_wanted', 'in_library'} flags for the given IMDB id,
        looked up against the local library database."""
        tags = {
            'in_wanted': False,
            'in_library': False,
        }

        # Add release info from current library
        db = get_session()
        try:
            library = db.query(Library).filter_by(identifier = imdb).first()
            if library:
                # Statuses
                active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True)

                for movie in library.movies:
                    if movie.status_id == active_status['id']:
                        tags['in_wanted'] = fireEvent('media.get', movie.id, single = True)

                    for release in movie.releases:
                        if release.status_id == done_status['id']:
                            tags['in_library'] = fireEvent('media.get', movie.id, single = True)
        except:
            log.error('Tried getting more info on searched movies: %s', traceback.format_exc())

        return tags

    def checkLibrary(self, result):
        """Fill a result with defaults and tag it with library status."""
        filled = mergeDicts(copy.deepcopy(self.default_info), copy.deepcopy(result))

        if filled and filled.get('imdb'):
            return mergeDicts(filled, self.getLibraryTags(filled['imdb']))

        return filled
| tmxdyf/CouchPotatoServer | couchpotato/core/providers/info/_modifier/main.py | Python | gpl-3.0 | 3,402 |
# Name: controls.py
# Purpose: Control components
# Author: Roman Rolinsky <rolinsky@femagsoft.com>
# Created: 31.05.2007
# RCS-ID: $Id: core.py 47823 2007-07-29 19:24:35Z ROL $
from wx.tools.XRCed import component, images, attribute, params
from wx.tools.XRCed.globals import TRACE
import _bitmaps as bitmaps
# Debug trace emitted when this plugin module is loaded.
TRACE('*** creating control components')

# Set panel images
# Register the bitmap shown on the 'Controls' tool-panel tab.
component.Manager.panelImages['Controls'] = images.ToolPanel_Controls.GetImage()
### wxStaticText — static label
c = component.Component('wxStaticText', ['control','tool'],
                        ['pos', 'size', 'label', 'wrap'], defaults={'label': 'LABEL'},
                        image=images.TreeStaticText.GetImage())
c.addStyles('wxALIGN_LEFT', 'wxALIGN_RIGHT', 'wxALIGN_CENTRE', 'wxST_NO_AUTORESIZE')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'label', 'wxStaticText', 10)
component.Manager.setTool(c, 'Controls', pos=(0,0))

### wxStaticLine — horizontal/vertical separator line
c = component.Component('wxStaticLine', ['control','tool'],
                        ['pos', 'size'], image=images.TreeStaticLine.GetImage())
c.addStyles('wxLI_HORIZONTAL', 'wxLI_VERTICAL')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'line', 'wxStaticLine', 20)
component.Manager.setTool(c, 'Controls', pos=(0,3))

### wxStaticBitmap — static image display
c = component.Component('wxStaticBitmap', ['control','tool'],
                        ['pos', 'size', 'bitmap'],
                        image=images.TreeStaticBitmap.GetImage())
# 'bitmap' needs the specialized attribute class that handles XRC bitmap nodes.
c.setSpecial('bitmap', attribute.BitmapAttribute)
component.Manager.register(c)
# Fix: help string previously said 'wxStaticLine' (copy-paste error).
component.Manager.setMenu(c, 'control', 'bitmap', 'wxStaticBitmap', 30)
component.Manager.setTool(c, 'Controls', pos=(1,0))

### wxTextCtrl — single/multi-line text entry
c = component.Component('wxTextCtrl', ['control','tool'],
                        ['pos', 'size', 'value'],
                        image=images.TreeTextCtrl.GetImage())
c.addStyles('wxTE_NO_VSCROLL',
            'wxTE_AUTO_SCROLL',
            'wxTE_PROCESS_ENTER',
            'wxTE_PROCESS_TAB',
            'wxTE_MULTILINE',
            'wxTE_PASSWORD',
            'wxTE_READONLY',
            'wxHSCROLL',
            'wxTE_RICH',
            'wxTE_RICH2',
            'wxTE_AUTO_URL',
            'wxTE_NOHIDESEL',
            'wxTE_LEFT',
            'wxTE_CENTRE',
            'wxTE_RIGHT',
            'wxTE_DONTWRAP',
            'wxTE_LINEWRAP',
            'wxTE_CHARWRAP',
            'wxTE_WORDWRAP')
# multiline editor widget for the 'value' attribute in the property panel
c.setParamClass('value', params.ParamMultilineText)
c.addEvents('EVT_TEXT', 'EVT_TEXT_ENTER', 'EVT_TEXT_URL', 'EVT_TEXT_MAXLEN')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'text ctrl', 'wxTextCtrl', 40)
component.Manager.setTool(c, 'Controls', pos=(0,2))

### wxChoice — drop-down selection list
c = component.Component('wxChoice', ['control','tool'],
                        ['pos', 'size', 'content', 'selection'],
                        image=images.TreeChoice.GetImage())
c.addStyles('wxCB_SORT')
# 'content' holds the list of item strings and needs the content attribute class.
c.setSpecial('content', attribute.ContentAttribute)
c.addEvents('EVT_CHOICE')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'choice', 'wxChoice', 50)
component.Manager.setTool(c, 'Controls', pos=(3,2))
### wxSlider — slider control registration
c = component.Component('wxSlider', ['control','tool'],
                        ['pos', 'size', 'value', 'min', 'max',
                         'tickfreq', 'pagesize', 'linesize', 'thumb', 'tick',
                         'selmin', 'selmax'],
                        image=images.TreeSlider.GetImage())
c.addStyles('wxSL_HORIZONTAL', 'wxSL_VERTICAL', 'wxSL_AUTOTICKS', 'wxSL_LABELS',
            'wxSL_LEFT', 'wxSL_RIGHT', 'wxSL_TOP', 'wxSL_BOTTOM',
            'wxSL_BOTH', 'wxSL_SELRANGE', 'wxSL_INVERSE')
component.Manager.register(c)
c.setParamClass('value', params.ParamInt)
c.setParamClass('tickfreq', params.ParamIntNN)
c.setParamClass('pagesize', params.ParamIntNN)
c.setParamClass('linesize', params.ParamIntNN)
c.setParamClass('thumb', params.ParamUnit)
c.setParamClass('tick', params.ParamInt)
c.setParamClass('selmin', params.ParamInt)
c.setParamClass('selmax', params.ParamInt)
# Fix: the scroll event list was accidentally duplicated (each event
# appeared twice); register each event once.
c.addEvents('EVT_SCROLL', 'EVT_SCROLL_TOP', 'EVT_SCROLL_BOTTOM',
            'EVT_SCROLL_LINEUP', 'EVT_SCROLL_LINEDOWN', 'EVT_SCROLL_PAGEUP',
            'EVT_SCROLL_PAGEDOWN', 'EVT_SCROLL_THUMBTRACK',
            'EVT_SCROLL_THUMBRELEASE', 'EVT_SCROLL_CHANGED')
component.Manager.setMenu(c, 'control', 'slider', 'wxSlider', 60)
component.Manager.setTool(c, 'Controls', pos=(2,3))
### wxGauge — progress-bar control registration
c = component.Component('wxGauge', ['control','tool'],
                        ['pos', 'size', 'range', 'value', 'shadow', 'bezel'],
                        image=images.TreeGauge.GetImage())
c.addStyles('wxGA_HORIZONTAL', 'wxGA_VERTICAL', 'wxGA_PROGRESSBAR', 'wxGA_SMOOTH')
# non-negative ints for range/value; unit-typed editors for the 3D borders
c.setParamClass('range', params.ParamIntNN)
c.setParamClass('value', params.ParamIntNN)
c.setParamClass('shadow', params.ParamUnit)
c.setParamClass('bezel', params.ParamUnit)
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'gauge', 'wxGauge', 70)
component.Manager.setTool(c, 'Controls', pos=(1,3))

### wxSpinCtrl — numeric entry with up/down arrows
c = component.Component('wxSpinCtrl', ['control','tool'],
                        ['pos', 'size', 'value', 'min', 'max'],
                        image=images.TreeSpinCtrl.GetImage())
c.addStyles('wxSP_HORIZONTAL', 'wxSP_VERTICAL', 'wxSP_ARROW_KEYS', 'wxSP_WRAP')
c.setParamClass('value', params.ParamInt)
c.addEvents('EVT_SPINCTRL')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'spin ctrl', 'wxSpinCtrl', 80)
component.Manager.setTool(c, 'Controls', pos=(1,2))
### wxScrollBar — standalone scroll bar registration
c = component.Component('wxScrollBar', ['control'],
                        ['pos', 'size', 'value', 'thumbsize', 'range', 'pagesize'],
                        image=images.TreeScrollBar.GetImage())
c.addStyles('wxSB_HORIZONTAL', 'wxSB_VERTICAL')
c.setParamClass('range', params.ParamIntNN)
c.setParamClass('value', params.ParamIntNN)
c.setParamClass('thumbsize', params.ParamUnit)
c.setParamClass('pagesize', params.ParamUnit)
# Fix: the scroll event list was accidentally duplicated (each event
# appeared twice); register each event once.
c.addEvents('EVT_SCROLL', 'EVT_SCROLL_TOP', 'EVT_SCROLL_BOTTOM',
            'EVT_SCROLL_LINEUP', 'EVT_SCROLL_LINEDOWN', 'EVT_SCROLL_PAGEUP',
            'EVT_SCROLL_PAGEDOWN', 'EVT_SCROLL_THUMBTRACK',
            'EVT_SCROLL_THUMBRELEASE', 'EVT_SCROLL_CHANGED')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'scroll bar', 'wxScrollBar', 90)
component.Manager.setTool(c, 'Controls', pos=(3,3))
### wxListCtrl — multi-column list/report control registration
c = component.Component('wxListCtrl', ['control','tool'], ['pos', 'size'],
                        image=images.TreeListCtrl.GetImage())
c.addStyles('wxLC_LIST', 'wxLC_REPORT', 'wxLC_ICON', 'wxLC_SMALL_ICON',
            'wxLC_ALIGN_TOP', 'wxLC_ALIGN_LEFT', 'wxLC_AUTOARRANGE',
            'wxLC_USER_TEXT', 'wxLC_EDIT_LABELS', 'wxLC_NO_HEADER',
            'wxLC_SINGLE_SEL', 'wxLC_SORT_ASCENDING', 'wxLC_SORT_DESCENDING',
            'wxLC_VIRTUAL', 'wxLC_HRULES', 'wxLC_VRULES', 'wxLC_NO_SORT_HEADER')
c.addEvents('EVT_LIST_BEGIN_DRAG',
            'EVT_LIST_BEGIN_RDRAG',
            'EVT_LIST_BEGIN_LABEL_EDIT',
            'EVT_LIST_END_LABEL_EDIT',
            'EVT_LIST_DELETE_ITEM',
            'EVT_LIST_DELETE_ALL_ITEMS',
            'EVT_LIST_ITEM_SELECTED',
            'EVT_LIST_ITEM_DESELECTED',
            'EVT_LIST_KEY_DOWN',
            'EVT_LIST_INSERT_ITEM',
            'EVT_LIST_COL_CLICK',
            'EVT_LIST_ITEM_RIGHT_CLICK',
            'EVT_LIST_ITEM_MIDDLE_CLICK',
            'EVT_LIST_ITEM_ACTIVATED',
            'EVT_LIST_CACHE_HINT',
            'EVT_LIST_COL_RIGHT_CLICK',
            'EVT_LIST_COL_BEGIN_DRAG',
            'EVT_LIST_COL_DRAGGING',
            'EVT_LIST_COL_END_DRAG',
            'EVT_LIST_ITEM_FOCUSED')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'list ctrl', 'wxListCtrl', 100)
# placed on the 'Panels' tool page rather than 'Controls'
component.Manager.setTool(c, 'Panels', pos=(0,1))

### wxTreeCtrl — hierarchical tree control registration
c = component.Component('wxTreeCtrl', ['control','tool'], ['pos', 'size'],
                        image=images.TreeTreeCtrl.GetImage())
c.addStyles('wxTR_EDIT_LABELS',
            'wxTR_NO_BUTTONS',
            'wxTR_HAS_BUTTONS',
            'wxTR_TWIST_BUTTONS',
            'wxTR_NO_LINES',
            'wxTR_FULL_ROW_HIGHLIGHT',
            'wxTR_LINES_AT_ROOT',
            'wxTR_HIDE_ROOT',
            'wxTR_ROW_LINES',
            'wxTR_HAS_VARIABLE_ROW_HEIGHT',
            'wxTR_SINGLE',
            'wxTR_MULTIPLE',
            'wxTR_EXTENDED',
            'wxTR_DEFAULT_STYLE')
c.addEvents('EVT_TREE_BEGIN_DRAG',
            'EVT_TREE_BEGIN_RDRAG',
            'EVT_TREE_BEGIN_LABEL_EDIT',
            'EVT_TREE_END_LABEL_EDIT',
            'EVT_TREE_DELETE_ITEM',
            'EVT_TREE_GET_INFO',
            'EVT_TREE_SET_INFO',
            'EVT_TREE_ITEM_EXPANDED',
            'EVT_TREE_ITEM_EXPANDING',
            'EVT_TREE_ITEM_COLLAPSED',
            'EVT_TREE_ITEM_COLLAPSING',
            'EVT_TREE_SEL_CHANGED',
            'EVT_TREE_SEL_CHANGING',
            'EVT_TREE_KEY_DOWN',
            'EVT_TREE_ITEM_ACTIVATED',
            'EVT_TREE_ITEM_RIGHT_CLICK',
            'EVT_TREE_ITEM_MIDDLE_CLICK',
            'EVT_TREE_END_DRAG',
            'EVT_TREE_STATE_IMAGE_CLICK',
            'EVT_TREE_ITEM_GETTOOLTIP',
            'EVT_TREE_ITEM_MENU')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'tree ctrl', 'wxTreeCtrl', 110)
component.Manager.setTool(c, 'Panels', pos=(0,2))
### wxHtmlWindow — simple HTML rendering window
c = component.Component('wxHtmlWindow', ['control'],
                        ['pos', 'size', 'borders', 'url', 'htmlcode'])
c.addStyles('wxHW_SCROLLBAR_NEVER', 'wxHW_SCROLLBAR_AUTO', 'wxHW_NO_SELECTION')
# 'url' is a single long line; 'htmlcode' gets a multiline editor
c.setParamClass('url', params.ParamLongText)
c.setParamClass('htmlcode', params.ParamMultilineText)
c.addEvents('EVT_HTML_CELL_CLICKED', 'EVT_HTML_CELL_HOVER',
            'EVT_HTML_LINK_CLICKED')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'HTML window', 'wxHtmlWindow', 120)

### wxCalendarCtrl — month-view calendar control
c = component.Component('wxCalendarCtrl', ['control', 'tool'], ['pos', 'size'])
c.addStyles('wxCAL_SUNDAY_FIRST', 'wxCAL_MONDAY_FIRST', 'wxCAL_SHOW_HOLIDAYS',
            'wxCAL_NO_YEAR_CHANGE', 'wxCAL_NO_MONTH_CHANGE',
            'wxCAL_SEQUENTIAL_MONTH_SELECTION', 'wxCAL_SHOW_SURROUNDING_WEEKS')
c.addEvents('EVT_CALENDAR_SEL_CHANGED', 'EVT_CALENDAR_DAY_CHANGED',
            'EVT_CALENDAR_MONTH_CHANGED', 'EVT_CALENDAR_YEAR_CHANGED',
            'EVT_CALENDAR_DOUBLECLICKED', 'EVT_CALENDAR_WEEKDAY_CLICKED')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'calendar ctrl', 'wxCalendarCtrl', 130)

### wxGenericDirCtrl — directory browser control
c = component.Component('wxGenericDirCtrl', ['control'],
                        ['pos', 'size', 'defaultfolder', 'filter', 'defaultfilter'])
c.addStyles('wxDIRCTRL_DIR_ONLY', 'wxDIRCTRL_3D_INTERNAL', 'wxDIRCTRL_SELECT_FIRST',
            'wxDIRCTRL_SHOW_FILTERS', 'wxDIRCTRL_EDIT_LABELS')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'generic dir ctrl', 'wxGenericDirCtrl', 160)

### wxFilePickerCtrl — file selection picker
c = component.Component('wxFilePickerCtrl', ['control'],
                        ['pos', 'size', 'value', 'message', 'wildcard'])
c.addStyles('wxFLP_OPEN', 'wxFLP_SAVE', 'wxFLP_OVERWRITE_PROMPT',
            'wxFLP_FILE_MUST_EXIST', 'wxFLP_CHANGE_DIR',
            'wxFLP_DEFAULT_STYLE')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'file picker ctrl', 'wxFilePickerCtrl', 170)
component.Manager.setTool(c, 'Controls', pos=(4,2))

### wxDatePickerCtrl — date selection picker
c = component.Component('wxDatePickerCtrl', ['control'], ['pos', 'size', 'borders'])
c.addStyles('wxDP_DEFAULT', 'wxDP_SPIN', 'wxDP_DROPDOWN',
            'wxDP_ALLOWNONE', 'wxDP_SHOWCENTURY')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'date picker ctrl', 'wxDateCtrl', 180)

### wxGrid — spreadsheet-like grid control
c = component.Component('wxGrid', ['control'], ['pos', 'size'])
c.addEvents('EVT_GRID_CELL_LEFT_CLICK',
            'EVT_GRID_CELL_RIGHT_CLICK',
            'EVT_GRID_CELL_LEFT_DCLICK',
            'EVT_GRID_CELL_RIGHT_DCLICK',
            'EVT_GRID_LABEL_LEFT_CLICK',
            'EVT_GRID_LABEL_RIGHT_CLICK',
            'EVT_GRID_LABEL_LEFT_DCLICK',
            'EVT_GRID_LABEL_RIGHT_DCLICK',
            'EVT_GRID_ROW_SIZE',
            'EVT_GRID_COL_SIZE',
            'EVT_GRID_RANGE_SELECT',
            'EVT_GRID_CELL_CHANGE',
            'EVT_GRID_SELECT_CELL',
            'EVT_GRID_EDITOR_SHOWN',
            'EVT_GRID_EDITOR_HIDDEN',
            'EVT_GRID_EDITOR_CREATED',
            'EVT_GRID_CELL_BEGIN_DRAG')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'grid', 'wxGrid', 190)
# grid tool button spans two cells on the 'Panels' page
component.Manager.setTool(c, 'Panels', pos=(2,1), span=(1,2))

### wxHyperlinkCtrl — clickable URL label
c = component.Component('wxHyperlinkCtrl', ['control','tool'],
                        ['pos', 'size', 'label', 'url'],
                        params={'url': params.ParamText},
                        defaults={'url': 'http://'})
c.addStyles('wxHL_CONTEXTMENU', 'wxHL_ALIGN_LEFT', 'wxHL_ALIGN_RIGHT',
            'wxHL_ALIGN_CENTRE', 'wxHL_DEFAULT_STYLE')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'hyperlink', 'wxHyperlinkCtrl', 200)
component.Manager.setTool(c, 'Controls', pos=(3,0))
################################################################################
# Buttons

### wxButton — standard push button
c = component.Component('wxButton', ['control', 'tool', 'stdbtn'],
                        ['pos', 'size', 'label', 'default'],
                        image=images.TreeButton.GetImage())
c.addStyles('wxBU_LEFT', 'wxBU_TOP', 'wxBU_RIGHT', 'wxBU_BOTTOM', 'wxBU_EXACTFIT',
            'wxNO_BORDER')
# 'default' marks the button as the dialog's default button
c.setParamClass('default', params.ParamBool)
c.addEvents('EVT_BUTTON')
component.Manager.register(c)
component.Manager.setMenu(c, 'button', 'button', 'wxButton', 10)
component.Manager.setTool(c, 'Controls', pos=(0,1))

### wxBitmapButton — push button showing a bitmap
c = component.Component('wxBitmapButton', ['control', 'tool'],
                        ['pos', 'size', 'default',
                         'bitmap', 'selected', 'focus', 'disabled', 'hover'],
                        image=images.TreeBitmapButton.GetImage())
c.addStyles('wxBU_AUTODRAW', 'wxBU_LEFT', 'wxBU_RIGHT', 'wxBU_TOP', 'wxBU_BOTTOM',
            'wxBU_EXACTFIT')
c.setParamClass('default', params.ParamBool)
# each visual state has its own bitmap attribute
c.setSpecial('bitmap', attribute.BitmapAttribute)
c.setSpecial('selected', attribute.BitmapAttribute)
c.setParamClass('selected', params.ParamBitmap)
c.setSpecial('focus', attribute.BitmapAttribute)
c.setParamClass('focus', params.ParamBitmap)
c.setSpecial('disabled', attribute.BitmapAttribute)
c.setParamClass('disabled', params.ParamBitmap)
c.setSpecial('hover', attribute.BitmapAttribute)
c.setParamClass('hover', params.ParamBitmap)
c.addEvents('EVT_BUTTON')
component.Manager.register(c)
component.Manager.setMenu(c, 'button', 'bitmap button', 'wxBitmapButton', 20)
component.Manager.setTool(c, 'Controls', pos=(1,1))

### wxRadioButton — exclusive-choice button
c = component.Component('wxRadioButton', ['control', 'tool'],
                        ['pos', 'size', 'label', 'value'],
                        image=images.TreeRadioButton.GetImage())
c.addStyles('wxRB_GROUP', 'wxRB_SINGLE')
c.setParamClass('value', params.ParamBool)
c.addEvents('EVT_RADIOBUTTON')
component.Manager.register(c)
component.Manager.setMenu(c, 'button', 'radio button', 'wxRadioButton', 30)
component.Manager.setTool(c, 'Controls', pos=(3,1))

### wxSpinButton — bare up/down arrow pair
c = component.Component('wxSpinButton', ['control', 'tool'],
                        ['pos', 'size', 'value', 'min', 'max'],
                        image=images.TreeSpinButton.GetImage())
c.addStyles('wxSP_HORIZONTAL', 'wxSP_VERTICAL', 'wxSP_ARROW_KEYS', 'wxSP_WRAP')
c.addEvents('EVT_SPIN', 'EVT_SPIN_UP', 'EVT_SPIN_DOWN')
component.Manager.register(c)
component.Manager.setMenu(c, 'button', 'spin button', 'wxSpinButton', 40)
component.Manager.setTool(c, 'Controls', pos=(2,0))

### wxToggleButton — two-state push button
c = component.Component('wxToggleButton', ['control', 'tool'],
                        ['pos', 'size', 'label', 'checked'],
                        image=images.TreeToggleButton.GetImage())
c.addEvents('EVT_TOGGLEBUTTON')
component.Manager.register(c)
component.Manager.setMenu(c, 'button', 'toggle button', 'wxToggleButton', 50)
component.Manager.setTool(c, 'Controls', pos=(2,1))
################################################################################
# Boxes

### wxCheckBox — on/off check box
c = component.Component('wxCheckBox', ['control','tool'],
                        ['pos', 'size', 'label', 'checked'],
                        image=images.TreeCheckBox.GetImage())
c.addEvents('EVT_CHECKBOX')
component.Manager.register(c)
component.Manager.setMenu(c, 'box', 'check box', 'wxCheckBox', 10)
component.Manager.setTool(c, 'Controls', pos=(4,1))

### wxComboBox — editable drop-down list
c = component.Component('wxComboBox', ['control','tool'],
                        ['pos', 'size', 'content', 'selection', 'value'],
                        image=images.TreeComboBox.GetImage())
c.addStyles('wxCB_SINGLE', 'wxCB_DROPDOWN', 'wxCB_READONLY',
            'wxCB_SORT', 'wxTE_PROCESS_ENTER')
# 'content' holds the item strings and needs the content attribute class
c.setSpecial('content', attribute.ContentAttribute)
c.addEvents('EVT_COMBOBOX', 'EVT_TEXT', 'EVT_TEXT_ENTER')
component.Manager.register(c)
component.Manager.setMenu(c, 'box', 'combo box', 'wxComboBox', 20)
component.Manager.setTool(c, 'Controls', pos=(2,2))
### wxRadioBox — group of radio buttons with a labelled border
# Fix: 'dimension' was accidentally listed twice in the attribute list.
c = component.Component('wxRadioBox', ['control','tool'],
                        ['pos', 'size', 'label', 'dimension',
                         'content', 'selection'])
c.addStyles('wxRA_SPECIFY_ROWS', 'wxRA_SPECIFY_COLS')
c.setSpecial('content', attribute.ContentAttribute)
c.setParamClass('dimension', params.ParamInt)
c.addEvents('EVT_RADIOBOX')
component.Manager.register(c)
component.Manager.setMenu(c, 'box', 'radio box', 'wxRadioBox', 30)
#component.Manager.setTool(c, 'Panels')
### wxListBox — scrollable list of selectable strings
c = component.Component('wxListBox', ['control','tool'],
                        ['pos', 'size', 'content', 'selection'],
                        image=images.TreeListBox.GetImage())
c.addStyles('wxLB_SINGLE', 'wxLB_MULTIPLE', 'wxLB_EXTENDED', 'wxLB_HSCROLL',
            'wxLB_ALWAYS_SB', 'wxLB_NEEDED_SB', 'wxLB_SORT')
c.setSpecial('content', attribute.ContentAttribute)
c.addEvents('EVT_LISTBOX', 'EVT_LISTBOX_DCLICK')
component.Manager.register(c)
component.Manager.setMenu(c, 'box', 'list box', 'wxListBox', 40)
component.Manager.setTool(c, 'Panels', pos=(0,0))

### wxCheckListBox — list box with a check box per item
c = component.Component('wxCheckListBox', ['control','tool'],
                        ['pos', 'size', 'content', 'selection'])
c.addStyles('wxLB_SINGLE', 'wxLB_MULTIPLE', 'wxLB_EXTENDED', 'wxLB_HSCROLL',
            'wxLB_ALWAYS_SB', 'wxLB_NEEDED_SB', 'wxLB_SORT')
# content items carry a checked flag, so they use the check-content classes
c.setSpecial('content', attribute.CheckContentAttribute)
c.setParamClass('content', params.ParamContentCheckList)
c.addEvents('EVT_CHECKLISTBOX')
component.Manager.register(c)
component.Manager.setMenu(c, 'box', 'check list box', 'wxCheckListBox', 50)
#component.Manager.setTool(c, 'Panels', pos=(0,0))

### wxStaticBox — labelled border box (no events)
c = component.Component('wxStaticBox', ['control','tool'],
                        ['pos', 'size', 'label'],
                        image=images.TreeStaticBox.GetImage())
component.Manager.register(c)
component.Manager.setMenu(c, 'box', 'static box', 'wxStaticBox', 60)
component.Manager.setTool(c, 'Panels', pos=(2,0))

### unknown — placeholder for unrecognized XRC classes
c = component.Component('unknown', ['control'], ['pos', 'size'])
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'unknown', 'unknown control')

### wxXXX — template for adding a new control component
#c = component.Component('wxXXX', ['control','tool'],
#                        ['pos', 'size', ...])
#c.addStyles(...)
#component.Manager.register(c)
#component.Manager.setMenu(c, 'control', 'XXX', 'wxXXX', NN)
import os
import sys
import shutil
import unittest
import tempfile
import time
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "ms3"))
from ms3.testing import MS3Server
from s3ffs_server import s3ffsServer, wait_until
class AmazonTestCase(unittest.TestCase):
    """Integration tests exercising an s3ffs mount created per test."""

    def setUp(self):
        # Fresh mountpoint directory for every test.
        self.local = tempfile.mkdtemp()
        self.s3ffs = None
        # In case there occurs an exception during setUp(), unittest
        # doesn't call tearDown(), hence we need to make sure we don't
        # leave any server processes running.
        try:
            self.s3ffs = s3ffsServer("s3ffs-us", mountpoint=self.local).start()
        except Exception:
            self.tearDown()
            raise

    def tearDown(self):
        if self.s3ffs:
            self.s3ffs.stop()
        # ignore_errors=True: the mountpoint may already be gone or busy.
        shutil.rmtree(self.local, True)

    def test_mounted(self):
        """The s3ffs filesystem is actually mounted at the temp dir."""
        self.assertTrue(os.path.ismount(self.local))

    def test_single_file(self):
        """A file written through the mount reads back identically."""
        content = "Hello, world!"
        path = os.path.join(self.local, "file.txt")
        with open(path, "w") as f:
            f.write(content)
        wait_until(os.path.exists, path)
        # Bug fix: the original used assertTrue(open(path).read(), content),
        # which treats `content` as the failure *message* and passes for any
        # non-empty read. Compare the round-tripped data explicitly, and
        # close the file handle via a context manager.
        with open(path) as f:
            self.assertEqual(f.read(), content)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| skoobe/riofs | tests/test_amazon.py | Python | gpl-3.0 | 1,249 |
"""Common utility for testing third party oauth2 features."""
import json
from base64 import b64encode
import httpretty
from onelogin.saml2.utils import OneLogin_Saml2_Utils
from oauth2_provider.models import Application
from social_core.backends.facebook import API_VERSION as FACEBOOK_API_VERSION
from social_core.backends.facebook import FacebookOAuth2
from social_django.models import Partial, UserSocialAuth
from student.tests.factories import UserFactory
from .testutil import ThirdPartyAuthTestMixin
@httpretty.activate
class ThirdPartyOAuthTestMixin(ThirdPartyAuthTestMixin):
    """
    Mixin with tests for third party oauth views. A TestCase that includes
    this must define the following:

    BACKEND: The name of the backend from python-social-auth
    USER_URL: The URL of the endpoint that the backend retrieves user data from
    UID_FIELD: The field in the user data that the backend uses as the user id
    """
    social_uid = "test_social_uid"
    access_token = "test_access_token"
    client_id = "test_client_id"
    CREATE_USER = True

    def setUp(self):
        super(ThirdPartyOAuthTestMixin, self).setUp()
        if self.CREATE_USER:
            self.user = UserFactory()
            UserSocialAuth.objects.create(user=self.user, provider=self.BACKEND, uid=self.social_uid)
        self.oauth_client = self._create_client()
        # Enable the provider configuration matching the backend under test.
        configure_provider = {
            'google-oauth2': self.configure_google_provider,
            'facebook': self.configure_facebook_provider,
        }.get(self.BACKEND)
        if configure_provider is not None:
            configure_provider(enabled=True, visible=True)

    def tearDown(self):
        super(ThirdPartyOAuthTestMixin, self).tearDown()
        # Drop any partial-pipeline state left behind by a test.
        Partial.objects.all().delete()

    def _create_client(self):
        """
        Create and return a public OAuth2 client application for the tests.
        """
        return Application.objects.create(
            client_id=self.client_id,
            client_type=Application.CLIENT_PUBLIC,
        )

    def _setup_provider_response(self, success=False, email=''):
        """
        Register a mock response for the third party user information endpoint;
        success indicates whether the response status code should be 200 or 400
        """
        if not success:
            self._setup_provider_response_with_body(400, json.dumps({}))
            return
        payload = {self.UID_FIELD: self.social_uid}
        if email:
            payload['email'] = email
        self._setup_provider_response_with_body(200, json.dumps(payload))

    def _setup_provider_response_with_body(self, status, body):
        """
        Register a mock response for the third party user information endpoint with given status and body.
        """
        httpretty.register_uri(
            httpretty.GET,
            self.USER_URL,
            status=status,
            body=body,
            content_type="application/json",
        )
class ThirdPartyOAuthTestMixinFacebook(object):
    """Tests oauth with the Facebook backend"""
    # Backend name as registered with python-social-auth.
    BACKEND = "facebook"
    # Graph API user-data endpoint for the pinned API version.
    USER_URL = FacebookOAuth2.USER_DATA_URL.format(version=FACEBOOK_API_VERSION)
    # In facebook responses, the "id" field is used as the user's identifier
    UID_FIELD = "id"
class ThirdPartyOAuthTestMixinGoogle(object):
    """Tests oauth with the Google backend"""
    # Backend name as registered with python-social-auth.
    BACKEND = "google-oauth2"
    # Google OpenID Connect userinfo endpoint.
    USER_URL = "https://www.googleapis.com/oauth2/v3/userinfo"
    # In google-oauth2 responses, the "email" field is used as the user's identifier
    UID_FIELD = "email"
def read_and_pre_process_xml(file_name):
    """
    Read the XML file named by *file_name* and pre-process it for parsing.

    Pre-processing removes newline ("\\n") characters so the document is a
    single line.

    Arguments:
        file_name (str): Path of the XML file.

    Returns:
        str: Pre-processed contents of the file.
    """
    with open(file_name, 'r') as xml_file:
        raw_content = xml_file.read()
    return raw_content.replace('\n', '')
def prepare_saml_response_from_xml(xml, relay_state='testshib'):
    """
    Pre-process XML so it can be posted as a SAML Response from a SAML IdP.

    The XML is (1) base64 encoded, then (2) URL encoded, and returned as a
    'RelayState=...&SAMLResponse=...' form-encoded string.

    Arguments:
        xml (string): XML data
        relay_state (string): Relay State of the SAML Response

    Returns:
        (str): form-encoded RelayState/SAMLResponse pair.
    """
    form_fields = [
        ('RelayState', relay_state),
        ('SAMLResponse', b64encode(xml.encode())),
    ]
    return '&'.join(
        '{0}={1}'.format(field_name, OneLogin_Saml2_Utils.escape_url(field_value))
        for field_name, field_value in form_fields
    )
| edx-solutions/edx-platform | common/djangoapps/third_party_auth/tests/utils.py | Python | agpl-3.0 | 4,741 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    # Changing field 'Project.slug'
    # Allow blank values on the slug column.
    db.alter_column('teams_project', 'slug', self.gf('django.db.models.fields.SlugField')(max_length=50, blank=True))

    # Changing field 'Task.assignee'
    # Re-point the assignee foreign key at auth.CustomUser (nullable).
    db.alter_column('teams_task', 'assignee_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.CustomUser'], null=True, blank=True))
def backwards(self, orm):
# Changing field 'Project.slug'
db.alter_column('teams_project', 'slug', self.gf('django.db.models.fields.SlugField')(max_length=50))
# Changing field 'Task.assignee'
db.alter_column('teams_task', 'assignee_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['teams.TeamMember'], null=True, blank=True))
models = {
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changes_notification': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'follow_new_video': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'teams.application': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'Application'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_applications'", 'to': "orm['auth.CustomUser']"})
},
'teams.invite': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'Invite'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'max_length': '200', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invitations'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_invitations'", 'to': "orm['auth.CustomUser']"})
},
'teams.project': {
'Meta': {'unique_together': "(('team', 'name'), ('team', 'slug'))", 'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'guidelines': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"})
},
'teams.task': {
'Meta': {'object_name': 'Task'},
'assignee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamVideo']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'teams.team': {
'Meta': {'object_name': 'Team'},
'applicants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'applicated_teams'", 'symmetrical': 'False', 'through': "orm['teams.Application']", 'to': "orm['auth.CustomUser']"}),
'application_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'header_html_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'last_notification_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'logo': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'membership_policy': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'page_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'projects_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.TeamMember']", 'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'intro_for_teams'", 'null': 'True', 'to': "orm['videos.Video']"}),
'video_policy': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'through': "orm['teams.TeamVideo']", 'symmetrical': 'False'})
},
'teams.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'changes_notification': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '16'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'teams.teamvideo': {
'Meta': {'unique_together': "(('team', 'video'),)", 'object_name': 'TeamVideo'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'all_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'completed_languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.SubtitleLanguage']", 'symmetrical': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'teams.teamvideolanguage': {
'Meta': {'unique_together': "(('team_video', 'subtitle_language'),)", 'object_name': 'TeamVideoLanguage'},
'forked': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'is_lingua_franca': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'db_index': 'True'}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'subtitle_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'languages'", 'to': "orm['teams.TeamVideo']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'teams.teamvideolanguagepair': {
'Meta': {'object_name': 'TeamVideoLanguagePair'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_0': ('django.db.models.fields.CharField', [], {'max_length': '16', 'db_index': 'True'}),
'language_1': ('django.db.models.fields.CharField', [], {'max_length': '16', 'db_index': 'True'}),
'language_pair': ('django.db.models.fields.CharField', [], {'max_length': '16', 'db_index': 'True'}),
'percent_complete': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'subtitle_language_0': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_video_language_pairs_0'", 'to': "orm['videos.SubtitleLanguage']"}),
'subtitle_language_1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_video_language_pairs_1'", 'null': 'True', 'to': "orm['videos.SubtitleLanguage']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamVideo']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'teams.workflow': {
'Meta': {'unique_together': "(('team', 'project', 'team_video'),)", 'object_name': 'Workflow'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'perm_approve': ('django.db.models.fields.PositiveIntegerField', [], {'default': '60'}),
'perm_review': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40'}),
'perm_subtitle': ('django.db.models.fields.PositiveIntegerField', [], {'default': '20'}),
'perm_translate': ('django.db.models.fields.PositiveIntegerField', [], {'default': '20'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']", 'null': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamVideo']", 'null': 'True', 'blank': 'True'})
},
'videos.subtitlelanguage': {
'Meta': {'unique_together': "(('video', 'language', 'standard_language'),)", 'object_name': 'SubtitleLanguage'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'had_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'has_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'standard_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'subtitle_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'featured': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_videos'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'languages_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'moderating'", 'null': 'True', 'to': "orm['teams.Team']"}),
's3_thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'small_thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
}
}
complete_apps = ['teams']
| ofer43211/unisubs | apps/teams/migrations/0057_auto__chg_field_project_slug__chg_field_task_assignee.py | Python | agpl-3.0 | 23,628 |
"""EAS two-devices turn
Revision ID: 17dc9c049f8b
Revises: ad7b856bcc0
Create Date: 2014-10-21 20:38:14.311747
"""
# revision identifiers, used by Alembic.
revision = '17dc9c049f8b'
down_revision = 'ad7b856bcc0'
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import text
def upgrade():
    """Copy each EAS account's single device record into two new device rows
    (primary/secondary) and repoint folder-sync-status and UID rows at the
    secondary device.  Python 2 code (print statement, dict.iteritems)."""
    from inbox.ignition import main_engine
    engine = main_engine()
    # No-op on deployments without the EAS tables.
    if not engine.has_table('easaccount'):
        return
    from inbox.models.session import session_scope
    # Reflect the current schema rather than importing models, so the
    # migration stays valid as the codebase's models evolve.
    Base = sa.ext.declarative.declarative_base()
    Base.metadata.reflect(engine)
    class EASAccount(Base):
        __table__ = Base.metadata.tables['easaccount']
        primary_device = sa.orm.relationship(
            'EASDevice', primaryjoin='and_(EASAccount.primary_device_id == EASDevice.id, '
            'EASDevice.deleted_at.is_(None))', uselist=False)
        secondary_device = sa.orm.relationship(
            'EASDevice', primaryjoin='and_(EASAccount.secondary_device_id == EASDevice.id, '
            'EASDevice.deleted_at.is_(None))', uselist=False)
    class EASDevice(Base):
        __table__ = Base.metadata.tables['easdevice']
    with session_scope(versioned=False) as \
            db_session:
        accts = db_session.query(EASAccount).all()
        for a in accts:
            # Set both to filtered=False, //needed// for correct deploy.
            primary = EASDevice(created_at=datetime.utcnow(),
                                updated_at=datetime.utcnow(),
                                filtered=False,
                                eas_device_id=a._eas_device_id,
                                eas_device_type=a._eas_device_type,
                                eas_policy_key=a.eas_policy_key,
                                eas_sync_key=a.eas_account_sync_key)
            secondary = EASDevice(created_at=datetime.utcnow(),
                                  updated_at=datetime.utcnow(),
                                  filtered=False,
                                  eas_device_id=a._eas_device_id,
                                  eas_device_type=a._eas_device_type,
                                  eas_policy_key=a.eas_policy_key,
                                  eas_sync_key=a.eas_account_sync_key)
            a.primary_device = primary
            a.secondary_device = secondary
            db_session.add(a)
        db_session.commit()
    conn = op.get_bind()
    # Map account id -> its newly created secondary device id.
    acct_device_map = dict(
        (id_, device_id) for id_, device_id in conn.execute(text(
            """SELECT id, secondary_device_id from easaccount""")))
    print 'acct_device_map: ', acct_device_map
    # Repoint dependent rows at the secondary device, one account at a time.
    for acct_id, device_id in acct_device_map.iteritems():
        conn.execute(text("""
            UPDATE easfoldersyncstatus
            SET device_id=:device_id
            WHERE account_id=:acct_id
            """), device_id=device_id, acct_id=acct_id)
        conn.execute(text("""
            UPDATE easuid
            SET device_id=:device_id
            WHERE easaccount_id=:acct_id
            """), device_id=device_id, acct_id=acct_id)
def downgrade():
    """Refuse to downgrade: the device rows created by ``upgrade`` cannot be
    reliably collapsed back into the single-device schema.

    Raises:
        Exception: always; this migration is irreversible.
    """
    # The original message was just '!'; keep the raise (callers catch
    # Exception) but say why the downgrade is refused.
    raise Exception('Cannot downgrade: the EAS two-devices migration is irreversible.')
| Eagles2F/sync-engine | migrations/versions/115_eas_twodevices_turn.py | Python | agpl-3.0 | 3,146 |
# -*- coding: utf-8 -*-
# Odoo/OpenERP addon manifest for the OFX bank statement importer.
{
    'name': 'Import OFX Bank Statement',
    'category': 'Banking addons',
    'version': '8.0.1.0.1',
    'license': 'AGPL-3',
    # Adjacent string literals are concatenated into one author string.
    'author': 'OpenERP SA,'
              'Odoo Community Association (OCA)',
    'website': 'https://github.com/OCA/bank-statement-import',
    'depends': [
        'account_bank_statement_import'
    ],
    'demo': [
        'demo/demo_data.xml',
    ],
    # The ofxparse Python package must be installed for this addon to load.
    'external_dependencies': {
        'python': ['ofxparse'],
    },
    'auto_install': False,
    'installable': True,
}
| acsone/bank-statement-import | account_bank_statement_import_ofx/__openerp__.py | Python | agpl-3.0 | 534 |
import os, sys, commands
def print_debug(msg, verbose):
    """
    Append ``msg`` to $DATADIR/debug.log when its verbosity is high enough.

    The message is logged only when ``int(verbose)`` is strictly less than
    the ``DEBUGLEVEL`` environment variable.  Usage: add
    ``from debug import *`` to the header and call
    ``print_debug("my message", 5)``.

    Arguments:
        msg: Message to log (converted to str).
        verbose: Verbosity level of this message (int or numeric str).

    Returns:
        int: 1 if the message qualified for logging, 0 otherwise.
    """
    data_dir_root = os.environ.get('DATADIR')
    # An unset or non-numeric DEBUGLEVEL disables logging instead of crashing.
    try:
        debug_level = int(os.environ.get('DEBUGLEVEL', '0'))
    except (TypeError, ValueError):
        debug_level = 0
    if int(verbose) < debug_level:
        # Append directly instead of shelling out via `echo`: preserves the
        # message verbatim (no whitespace collapsing) and removes the shell
        # injection hole when msg contains metacharacters.
        try:
            with open(os.path.join(data_dir_root, 'debug.log'), 'a') as log_file:
                log_file.write(str(msg) + '\n')
        except (TypeError, EnvironmentError):
            # Mirror the old commands.getoutput() behaviour: a failed write
            # (e.g. DATADIR unset/unwritable) is silently ignored.
            pass
        return 1
    return 0
| achamely/omniwallet | api/debug.py | Python | agpl-3.0 | 530 |
# -*- coding: utf-8 -*-
###############################################################################
#
# HTMLUnescape
# Replaces character entity names in the specified text with equivalent HTML markup characters.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class HTMLUnescape(Choreography):
    # Temboo SDK binding for the HTMLUnescape Choreo; boilerplate factory
    # methods wire up the matching InputSet/ResultSet/Execution classes.
    def __init__(self, temboo_session):
        """
        Create a new instance of the HTMLUnescape Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(HTMLUnescape, self).__init__(temboo_session, '/Library/Utilities/Encoding/HTMLUnescape')
    def new_input_set(self):
        """Return an empty input set for this Choreo."""
        return HTMLUnescapeInputSet()
    def _make_result_set(self, result, path):
        """Wrap a raw execution result in this Choreo's ResultSet type."""
        return HTMLUnescapeResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        """Wrap an in-flight execution in this Choreo's Execution type."""
        return HTMLUnescapeChoreographyExecution(session, exec_id, path)
class HTMLUnescapeInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the HTMLUnescape
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_EscapedHTML(self, value):
        """
        Set the value of the EscapedHTML input for this Choreo. ((required, string) The escaped HTML that should be unescaped.)
        """
        # Delegates storage/serialization of the input to the InputSet base class.
        super(HTMLUnescapeInputSet, self)._set_input('EscapedHTML', value)
class HTMLUnescapeResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the HTMLUnescape Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE(review): parameter shadows the builtin `str`; kept for
        # compatibility with the generated Temboo SDK signature.
        return json.loads(str)
    def get_UnescapedHTML(self):
        """
        Retrieve the value for the "UnescapedHTML" output from this Choreo execution. ((string) The unescaped HTML.)
        """
        # Returns None when the output key is absent from the result payload.
        return self._output.get('UnescapedHTML', None)
class HTMLUnescapeChoreographyExecution(ChoreographyExecution):
    # Execution handle for an asynchronously-run HTMLUnescape Choreo.
    def _make_result_set(self, response, path):
        """Wrap the raw response in an HTMLUnescapeResultSet."""
        return HTMLUnescapeResultSet(response, path)
| jordanemedlock/psychtruths | temboo/core/Library/Utilities/Encoding/HTMLUnescape.py | Python | apache-2.0 | 2,975 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Connects all half, float and double tensors to CheckNumericsOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("verify_tensor_all_finite")
def verify_tensor_all_finite(t, msg, name=None):
"""Assert that the tensor does not contain any NaN's or Inf's.
Args:
t: Tensor to check.
msg: Message to log on failure.
name: A name for this operation (optional).
Returns:
Same tensor as `t`.
"""
with ops.name_scope(name, "VerifyFinite", [t]) as name:
t = ops.convert_to_tensor(t, name="t")
with ops.colocate_with(t):
verify_input = array_ops.check_numerics(t, message=msg)
out = control_flow_ops.with_dependencies([verify_input], t)
return out
@tf_export("add_check_numerics_ops")
def add_check_numerics_ops():
"""Connect a `check_numerics` to every floating point tensor.
`check_numerics` operations themselves are added for each `half`, `float`,
or `double` tensor in the graph. For all ops in the graph, the
`check_numerics` op for all of its (`half`, `float`, or `double`) inputs
is guaranteed to run before the `check_numerics` op on any of its outputs.
Note: This API is not compatible with the use of `tf.cond` or
`tf.while_loop`, and will raise a `ValueError` if you attempt to call it
in such a graph.
Returns:
A `group` op depending on all `check_numerics` ops added.
Raises:
ValueError: If the graph contains any numeric operations in a control flow
structure.
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
Not compatible with eager execution. To check for `Inf`s and `NaN`s under
eager execution, call tfe.seterr(inf_or_nan='raise') once before executing
the checked operations.
@enc_compatibility
"""
if context.executing_eagerly():
raise RuntimeError(
"add_check_numerics_ops() is not compatible with eager execution. "
"To check for Inf's and NaN's under eager execution, call "
"tfe.seterr(inf_or_nan='raise') once before executing the "
"checked operations.")
check_op = []
# This code relies on the ordering of ops in get_operations().
# The producer of a tensor always comes before that tensor's consumer in
# this list. This is true because get_operations() returns ops in the order
# added, and an op can only be added after its inputs are added.
for op in ops.get_default_graph().get_operations():
for output in op.outputs:
if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
if op._get_control_flow_context() is not None: # pylint: disable=protected-access
raise ValueError("`tf.add_check_numerics_ops() is not compatible "
"with TensorFlow control flow operations such as "
"`tf.cond()` or `tf.while_loop()`.")
message = op.name + ":" + str(output.value_index)
with ops.control_dependencies(check_op):
check_op = [array_ops.check_numerics(output, message=message)]
return control_flow_ops.group(*check_op)
| kobejean/tensorflow | tensorflow/python/ops/numerics.py | Python | apache-2.0 | 4,096 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
MsSQL to GCS operator.
"""
import decimal
from airflow.providers.google.cloud.operators.sql_to_gcs import BaseSQLToGCSOperator
from airflow.providers.microsoft.mssql.hooks.mssql import MsSqlHook
from airflow.utils.decorators import apply_defaults
class MSSQLToGCSOperator(BaseSQLToGCSOperator):
    """Copy data from Microsoft SQL Server to Google Cloud Storage
    in JSON or CSV format.

    :param mssql_conn_id: Reference to a specific MSSQL hook.
    :type mssql_conn_id: str

    **Example**:
        The following operator will export data from the Customers table
        within the given MSSQL Database and then upload it to the
        'mssql-export' GCS bucket (along with a schema file). ::

            export_customers = MSSQLToGCSOperator(
                task_id='export_customers',
                sql='SELECT * FROM dbo.Customers;',
                bucket='mssql-export',
                filename='data/customers/export.json',
                schema_filename='schemas/export.json',
                mssql_conn_id='mssql_default',
                google_cloud_storage_conn_id='google_cloud_default',
                dag=dag
            )
    """
    ui_color = '#e0a98c'

    # Presumably DB-API type codes reported by the MSSQL driver mapped to
    # BigQuery types — TODO confirm against the hook's cursor description.
    # Any code not listed here falls back to STRING (see field_to_bigquery).
    type_map = {
        3: 'INTEGER',
        4: 'TIMESTAMP',
        5: 'NUMERIC'
    }

    @apply_defaults
    def __init__(self,
                 mssql_conn_id='mssql_default',
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.mssql_conn_id = mssql_conn_id

    def query(self):
        """
        Queries MSSQL and returns a cursor of results.

        :return: mssql cursor
        """
        mssql = MsSqlHook(mssql_conn_id=self.mssql_conn_id)
        conn = mssql.get_conn()
        cursor = conn.cursor()
        cursor.execute(self.sql)
        return cursor

    def field_to_bigquery(self, field):
        """Convert one DB-API cursor-description tuple (name, type_code, ...)
        into a BigQuery schema dict. Spaces in column names are replaced
        with underscores; unknown type codes map to STRING."""
        return {
            'name': field[0].replace(" ", "_"),
            'type': self.type_map.get(field[1], "STRING"),
            'mode': "NULLABLE",
        }

    @classmethod
    def convert_type(cls, value, schema_type):
        """
        Takes a value from MSSQL, and converts it to a value that's safe for
        JSON/Google Cloud Storage/BigQuery.

        Only Decimal is converted (to float); every other value — including
        the ``schema_type`` hint, which is currently unused — passes through
        unchanged.
        """
        if isinstance(value, decimal.Decimal):
            return float(value)
        return value
| mtagle/airflow | airflow/providers/google/cloud/operators/mssql_to_gcs.py | Python | apache-2.0 | 3,144 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Interfaces and abstractions for filesystem access.
We should be agnostic whether we're using a "temporary" file
system, rooted in a local tmp dir, or whether we're using
a true HDFS. This file defines the interface.
Note that PEP 355 (Path - object oriented filesystem paths) did
not pass. Many file system methods are in __builtin__, os, or
os.path, and take strings representing filenames as arguments.
We maintain this usage of paths as arguments.
When possible, the interfaces here have fidelity to the
native python interfaces.
"""
import __builtin__
import errno
import grp
import logging
import os
import posixpath
import pwd
import re
import shutil
import stat
import sys
# SEEK_SET and family is found in posixfile or os, depending on the python version
# (os only grew the SEEK_* constants in Python 2.5; posixfile is the
# pre-2.5 home for them).
if sys.version_info[:2] < (2, 5):
  import posixfile
  _tmp_mod = posixfile
else:
  _tmp_mod = os
SEEK_SET, SEEK_CUR, SEEK_END = _tmp_mod.SEEK_SET, _tmp_mod.SEEK_CUR, _tmp_mod.SEEK_END
del _tmp_mod
# The web (and POSIX) always uses forward slash as a separator.
# Matches exactly two leading separators; used by normpath() below because
# posixpath.normpath deliberately preserves a leading "//".
LEADING_DOUBLE_SEPARATORS = re.compile("^" + posixpath.sep*2)
def normpath(path):
  """Normalize a POSIX path, also collapsing a leading double slash.

  posixpath.normpath cleans up triple-or-more slashes but deliberately
  keeps a leading "//" (POSIX allows it special meaning); for web paths
  we want a single separator, so strip that case down ourselves.
  """
  cleaned = posixpath.normpath(path)
  return re.sub("^" + posixpath.sep * 2, posixpath.sep, cleaned)
class IllegalPathException(Exception):
  """Raised for paths that are not absolute or escape the filesystem root."""
  pass
class LocalSubFileSystem(object):
  """
  Facade around normal python filesystem calls, for a temporary/local
  file system rooted in a root directory. This is intended for testing,
  and is not a secure chroot alternative.

  So far, this doesn't have a notion of current working dir, so all
  paths are "absolute". I dislike the state that having cwd's implies,
  but it may be convenient.

  TODO(philip):
  * chown: want to implement with names, not uids.
  * chmod
  * stat: perhaps implement "stats" which returns a dictionary;
    Hadoop and posix have different stats
  * set_replication: no equivalent
  * file-system level stats

  I think this covers all the functionality in "src/contrib/thriftfs/if/hadoopfs.thrift",
  but there may be some bits missing. The implementation of the file-like object
  for HDFS will be a bit tricky: open(f, "w") is generally the equivalent
  of createFile, but it has to handle the case where f already
  exists (in which case the best we can do is append, if that).
  """
  def __init__(self, root):
    """
    A file system rooted in root.

    Logs a fatal message (but does not raise) when root does not exist.
    """
    self.root = root
    self.name = "file://%s" % self.root
    if not os.path.isdir(root):
      logging.fatal("Root(%s) not found." % root +
        " Perhaps you need to run manage.py create_test_fs")
  def _resolve_path(self, path):
    """
    Returns path to use in native file system.

    Rejects non-absolute paths and any path that, after normalization,
    would escape self.root (checked via commonprefix).
    """
    # Strip leading "/"
    if not path.startswith("/"):
      raise IllegalPathException("Path %s must start with leading /." % path)
    path = path.lstrip("/")
    joined = os.path.join(self.root, path)
    absolute = os.path.abspath(joined)
    normalized = os.path.normpath(absolute)
    prefix = os.path.commonprefix([self.root, normalized])
    if prefix != self.root:
      raise IllegalPathException("Path %s is not valid." % path)
    return joined
  def _unresolve_path(self, path):
    """
    Given an absolute path within the wrapped filesystem,
    return the path that the user of this class sees.
    """
    # Resolve it to make it realy absolute
    assert path.startswith(self.root)
    return path[len(self.root):]
  # NOTE: _wrap is executed at class-definition time (it takes no self);
  # the assignments below turn stdlib functions into methods whose path
  # arguments are resolved against self.root and whose user/group name
  # arguments are converted to uids/gids.
  def _wrap(f, paths=None, users=None, groups=None):
    """
    Wraps an existing function f, and transforms
    path arguments to "resolved paths" and
    user arguments to uids.

    By default transforms the first (zeroth) argument as
    a path, but can be customized.

    This lets us write:
      def open(self, name, mode="r"):
        return open(self._resolve_path(name), mode)
    as
      open = _wrap(__builtin__.open)

    NOTE: No transformation is done on the keyword args;
    they are not accepted. (The alternative would be to
    require the names of the keyword transformations.)
    """
    if users is None:
      users = []
    if groups is None:
      groups = []
    if paths is None and 0 not in users and 0 not in groups:
      paths = [0]
    # complicated way of taking the intersection of three lists:
    # an argument index may be transformed at most one way.
    assert not reduce(set.intersection, map(set, [paths, users, groups]))
    def wrapped(*args):
      # args[0] is the instance; remaining args are transformed by index.
      self = args[0]
      newargs = list(args[1:])
      for i in paths:
        newargs[i] = self._resolve_path(newargs[i])
      for i in users:
        newargs[i] = pwd.getpwnam(newargs[i]).pw_uid
      for i in groups:
        newargs[i] = grp.getgrnam(newargs[i]).gr_gid
      return f(*newargs)
    return wrapped
  # These follow their namesakes.
  open = _wrap(__builtin__.open)
  remove = _wrap(os.remove)
  mkdir = _wrap(os.mkdir)
  rmdir = _wrap(os.rmdir)
  listdir = _wrap(os.listdir)
  rename = _wrap(os.rename, paths=[0,1])
  exists = _wrap(os.path.exists)
  isfile = _wrap(os.path.isfile)
  isdir = _wrap(os.path.isdir)
  chmod = _wrap(os.chmod)
  # This could be provided with an error_handler
  rmtree = _wrap(shutil.rmtree)
  chown = _wrap(os.chown, paths=[0], users=[1], groups=[2])
  @property
  def uri(self):
    # The "file://<root>" identifier computed in __init__.
    return self.name
  def stats(self, path, raise_on_fnf=True):
    """Return a dict of path/size/mtime/mode/user/group for path.

    With raise_on_fnf=False a missing file yields None instead of OSError;
    any other OSError is re-raised. (Python 2 except syntax.)
    """
    path = self._resolve_path(path)
    try:
      statobj = os.stat(path)
    except OSError, ose:
      if ose.errno == errno.ENOENT and not raise_on_fnf:
        return None
      raise
    ret = dict()
    ret["path"] = self._unresolve_path(path)
    ret["size"] = statobj[stat.ST_SIZE]
    ret["mtime"] = statobj[stat.ST_MTIME]
    ret["mode"] = statobj[stat.ST_MODE]
    ret["user"] = pwd.getpwuid(statobj[stat.ST_UID]).pw_name
    ret["group"] = grp.getgrgid(statobj[stat.ST_GID]).gr_name
    return ret
  def setuser(self, user, groups=None):
    # Local filesystem has no notion of an effective user; no-op kept for
    # interface parity with the HDFS client.
    pass
  def status(self):
    # Canned health data; see FakeStatus below.
    return FakeStatus()
  def listdir_stats(self, path):
    """
    This is an equivalent of listdir that, instead of returning file names,
    returns a list of stats instead.
    """
    listdir_files = self.listdir(path)
    paths = [posixpath.join(path, f) for f in listdir_files]
    return [self.stats(path) for path in paths]
  def __repr__(self):
    return "LocalFileSystem(%s)" % repr(self.root)
class FakeStatus(object):
  """
  A fake implementation of HDFS health RPCs.

  Method names follow the thrift conventions, but the return values are
  plain dicts (or lists of dicts) so they can be JSON-encoded directly.
  """
  def get_messages(self):
    """Warnings/lint checks."""
    return [
        dict(type="WARNING", message="All your base belong to us."),
        dict(type="INFO", message="Hamster Dance!")
    ]
  def get_health(self):
    """Canned cluster capacity/usage figures."""
    gigabyte = 1024 * 1024 * 1024
    return {
        "bytesTotal": 5 * gigabyte,
        "bytesUsed": 5 * gigabyte / 2,
        "bytesRemaining": 2 * gigabyte,
        "bytesNonDfs": gigabyte / 2,
        "liveDataNodes": 13,
        "deadDataNodes": 2,
        "upgradeStatus": dict(version=13, percentComplete=100, finalized=True),
    }
  def get_datanode_report(self):
    """Thirteen live datanode descriptions followed by two dead ones."""
    report = []
    for idx in range(0, 13):
      report.append({
          "name": "fake-%d" % idx,
          "storageID": "fake-id-%d" % idx,
          "host": "fake-host-%d" % idx,
          "capacity": 123456789,
          "dfsUsed": 23456779,
          "remaining": 100000010,
          "xceiverCount": 3,
          "state": "NORMAL_STATE",
      })
    for idx in range(0, 2):
      report.append({
          "name": "fake-dead-%d" % idx,
          "storageID": "fake-dead-id-%d" % idx,
          "host": "fake-dead-host-%d" % idx,
          "capacity": 523456789,
          "dfsUsed": 23456779,
          "remaining": 500000010,
          "xceiverCount": 3,
          "state": "DECOMISSION_INPROGRESS",
      })
    return report
| yongshengwang/builthue | desktop/libs/hadoop/src/hadoop/fs/__init__.py | Python | apache-2.0 | 8,724 |
#! /usr/local/bin/stackless2.6
# by pts@fazekas.hu at Fri Jun 17 14:08:07 CEST 2011
"""Demo for hosting a gevent application with Stackless, without Syncless."""
__author__ = 'pts@fazekas.hu (Peter Szabo)'
import sys
# Import best_greenlet before gevent to add greenlet emulation for Stackless
# if necessary.
import syncless.best_greenlet
import gevent
import gevent.hub
import gevent.socket
class Lprng(object):
__slots__ = ['seed']
def __init__(self, seed=0):
self.seed = int(seed) & 0xffffffff
def next(self):
"""Generate a 32-bit unsigned random number."""
# http://en.wikipedia.org/wiki/Linear_congruential_generator
self.seed = (
((1664525 * self.seed) & 0xffffffff) + 1013904223) & 0xffffffff
return self.seed
def __iter__(self):
return self
def Worker(client_socket, addr):
  """Handle one HTTP connection: parse a GET request, write a response.

  Runs inside its own greenlet (spawned by GeventListener). Special URL
  substrings trigger demo behaviors: 'bad' -> assertion failure,
  'sysexit'/'exit' -> stop the hub, 'slow' -> 5s sleep. A numeric path
  component seeds Lprng and the response links to the next value.
  """
  print >>sys.stderr, 'info: connection from %r, handled by %r' % (
      addr, gevent.hub.greenlet.getcurrent())
  f = client_socket.makefile()
  # Read HTTP request.
  line1 = None
  while True:
    line = f.readline().rstrip('\r\n')
    if not line:  # Empty line, end of HTTP request.
      break
    if line1 is None:
      line1 = line
  # Parse HTTP request.
  # Please note that an assertion here doesn't abort the server.
  items = line1.split(' ')
  assert 3 == len(items)
  assert items[2] in ('HTTP/1.0', 'HTTP/1.1')
  assert items[0] == 'GET'
  assert items[1].startswith('/')
  # This is to demonstrate the error reporting and recovery behavior of gevent:
  # We get an error message like this, and the process execution continues:
  #
  # Traceback (most recent call last):
  #   File "/usr/local/lib/python2.6/site-packages/gevent/greenlet.py", line 388, in run
  #     result = self._run(*self.args, **self.kwargs)
  #   File "./s2.py", line 137, in Worker
  #     assert 'bad' not in items[1]
  # AssertionError
  # <Greenlet at 0xb71acbecL: Worker(<socket at 0xb747668cL fileno=10 sock=127.0.0.1:80, ('127.0.0.1', 55196))> failed with AssertionError
  assert 'bad' not in items[1]
  if 'sysexit' in items[1]:
    print >>sys.stderr, 'info: exiting with SystemExit'
    #sys.exit()  # Doesn't work, gevent.core.__event_handler catches it.
    gevent.hub.MAIN.throw(SystemExit)
  if 'exit' in items[1]:
    print >>sys.stderr, 'info: exiting with throw'
    gevent.hub.MAIN.throw()
  # A path like "/123" seeds the PRNG; anything non-numeric means the
  # front page.
  try:
    num = int(items[1][1:])
  except ValueError:
    num = None
  if 'slow' in items[1]:
    gevent.hub.sleep(5)
  # Write HTTP response.
  if num is None:
    f.write('HTTP/1.0 200 OK\r\nContent-Type: text/html\r\n\r\n')
    f.write('<a href="/0">start at 0</a><p>Hello, World!\n')
  else:
    next_num = Lprng(num).next()
    f.write('HTTP/1.0 200 OK\r\nContent-Type: text/html\r\n\r\n')
    f.write('<a href="/%d">continue with %d</a>\n' %
            (next_num, next_num))
  #f.flush()  # Not needed here.
def GeventListener(server_socket):
  """Accept connections forever, spawning one Worker greenlet per client.

  Must itself run in a greenlet started by the gevent hub (see
  gevent.hub.spawn_raw in __main__).
  """
  # Please note that exceptions raised here will be printed and then ignored
  # by the gevent.hub main loop.
  print >>sys.stderr, (
      'info: accepting connections in %r' % gevent.hub.greenlet.getcurrent())
  while True:
    client_socket, addr = server_socket.accept()
    gevent.spawn(Worker, client_socket, addr)
    # Equally good:
    #gevent.hub.spawn_raw(Worker, client_socket, addr)
    client_socket = addr = None  # Save memory.
if __name__ == '__main__':
  # We need this patch so gevent.hub.spawn_raw below will create a greenlet
  # of the correct type.
  server_socket = gevent.socket.socket()
  # Old:
  # gevent.socket.set_reuse_addr(server_socket)
  # server_socket.bind(('127.0.0.1', 8080))
  # server_socket.listen(128)
  gevent.socket.bind_and_listen(server_socket, ('127.0.0.1', 8080), 128,
                                reuse_addr=True)
  print >>sys.stderr, 'listening on %r' % (server_socket.getsockname(),)
  # All non-blocking gevent operations must be initiated from a greenlet
  # invoked by the gevent hub. The easiest way to ensure that is to move these
  # operations to a function (GeventListener), and call this function with
  # gevent.hub.spawn_raw. (As a side effect, if an exception happens in that
  # function, the process will continue running.)
  gevent.hub.spawn_raw(GeventListener, server_socket)
  # Run the gevent main loop indefinitely. This is not a requirement, we
  # could do non-blocking Syncless operations instead right here for a long
  # time.
  syncless.best_greenlet.gevent_hub_main()
  assert 0, 'unreached'
| HanWenfang/syncless | examples/demo_gevent_only.py | Python | apache-2.0 | 4,479 |
from time import time
from collections import namedtuple
import warnings
from scipy import stats
import numpy as np
from ..base import clone
from ..exceptions import ConvergenceWarning
from ..preprocessing import normalize
from ..utils import (check_array, check_random_state, _safe_indexing,
is_scalar_nan)
from ..utils.validation import FLOAT_DTYPES, check_is_fitted
from ..utils._mask import _get_mask
from ._base import _BaseImputer
from ._base import SimpleImputer
from ._base import _check_inputs_dtype
# One record per imputation step: the feature being imputed, the features
# used to predict it, and the estimator fitted for that step. Stored in
# IterativeImputer.imputation_sequence_ and replayed (without refitting)
# during transform.
_ImputerTriplet = namedtuple('_ImputerTriplet', ['feat_idx',
                                                 'neighbor_feat_idx',
                                                 'estimator'])
class IterativeImputer(_BaseImputer):
"""Multivariate imputer that estimates each feature from all the others.
A strategy for imputing missing values by modeling each feature with
missing values as a function of other features in a round-robin fashion.
Read more in the :ref:`User Guide <iterative_imputer>`.
.. versionadded:: 0.21
.. note::
This estimator is still **experimental** for now: the predictions
and the API might change without any deprecation cycle. To use it,
you need to explicitly import ``enable_iterative_imputer``::
>>> # explicitly require this experimental feature
>>> from sklearn.experimental import enable_iterative_imputer # noqa
>>> # now you can import normally from sklearn.impute
>>> from sklearn.impute import IterativeImputer
Parameters
----------
estimator : estimator object, default=BayesianRidge()
The estimator to use at each step of the round-robin imputation.
If ``sample_posterior`` is True, the estimator must support
``return_std`` in its ``predict`` method.
missing_values : int, np.nan, default=np.nan
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For pandas' dataframes with
nullable integer dtypes with missing values, `missing_values`
should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
sample_posterior : boolean, default=False
Whether to sample from the (Gaussian) predictive posterior of the
fitted estimator for each imputation. Estimator must support
``return_std`` in its ``predict`` method if set to ``True``. Set to
``True`` if using ``IterativeImputer`` for multiple imputations.
max_iter : int, default=10
Maximum number of imputation rounds to perform before returning the
imputations computed during the final round. A round is a single
imputation of each feature with missing values. The stopping criterion
is met once `max(abs(X_t - X_{t-1}))/max(abs(X[known_vals]))` < tol,
where `X_t` is `X` at iteration `t. Note that early stopping is only
applied if ``sample_posterior=False``.
tol : float, default=1e-3
Tolerance of the stopping condition.
n_nearest_features : int, default=None
Number of other features to use to estimate the missing values of
each feature column. Nearness between features is measured using
the absolute correlation coefficient between each feature pair (after
initial imputation). To ensure coverage of features throughout the
imputation process, the neighbor features are not necessarily nearest,
but are drawn with probability proportional to correlation for each
imputed target feature. Can provide significant speed-up when the
number of features is huge. If ``None``, all features will be used.
initial_strategy : str, default='mean'
Which strategy to use to initialize the missing values. Same as the
``strategy`` parameter in :class:`~sklearn.impute.SimpleImputer`
Valid values: {"mean", "median", "most_frequent", or "constant"}.
imputation_order : str, default='ascending'
The order in which the features will be imputed. Possible values:
"ascending"
From features with fewest missing values to most.
"descending"
From features with most missing values to fewest.
"roman"
Left to right.
"arabic"
Right to left.
"random"
A random order for each round.
skip_complete : boolean, default=False
If ``True`` then features with missing values during ``transform``
which did not have any missing values during ``fit`` will be imputed
with the initial imputation method only. Set to ``True`` if you have
many features with no missing values at both ``fit`` and ``transform``
time to save compute.
min_value : float or array-like of shape (n_features,), default=-np.inf
Minimum possible imputed value. Broadcast to shape (n_features,) if
scalar. If array-like, expects shape (n_features,), one min value for
each feature. The default is `-np.inf`.
.. versionchanged:: 0.23
Added support for array-like.
max_value : float or array-like of shape (n_features,), default=np.inf
Maximum possible imputed value. Broadcast to shape (n_features,) if
scalar. If array-like, expects shape (n_features,), one max value for
each feature. The default is `np.inf`.
.. versionchanged:: 0.23
Added support for array-like.
verbose : int, default=0
Verbosity flag, controls the debug messages that are issued
as functions are evaluated. The higher, the more verbose. Can be 0, 1,
or 2.
random_state : int, RandomState instance or None, default=None
The seed of the pseudo random number generator to use. Randomizes
selection of estimator features if n_nearest_features is not None, the
``imputation_order`` if ``random``, and the sampling from posterior if
``sample_posterior`` is True. Use an integer for determinism.
See :term:`the Glossary <random_state>`.
add_indicator : boolean, default=False
If True, a :class:`MissingIndicator` transform will stack onto output
of the imputer's transform. This allows a predictive estimator
to account for missingness despite imputation. If a feature has no
missing values at fit/train time, the feature won't appear on
the missing indicator even if there are missing values at
transform/test time.
Attributes
----------
initial_imputer_ : object of type :class:`~sklearn.impute.SimpleImputer`
Imputer used to initialize the missing values.
imputation_sequence_ : list of tuples
Each tuple has ``(feat_idx, neighbor_feat_idx, estimator)``, where
``feat_idx`` is the current feature to be imputed,
``neighbor_feat_idx`` is the array of other features used to impute the
current feature, and ``estimator`` is the trained estimator used for
the imputation. Length is ``self.n_features_with_missing_ *
self.n_iter_``.
n_iter_ : int
Number of iteration rounds that occurred. Will be less than
``self.max_iter`` if early stopping criterion was reached.
n_features_with_missing_ : int
Number of features with missing values.
indicator_ : :class:`~sklearn.impute.MissingIndicator`
Indicator used to add binary indicators for missing values.
``None`` if add_indicator is False.
random_state_ : RandomState instance
RandomState instance that is generated either from a seed, the random
number generator or by `np.random`.
See Also
--------
SimpleImputer : Univariate imputation of missing values.
Examples
--------
>>> import numpy as np
>>> from sklearn.experimental import enable_iterative_imputer
>>> from sklearn.impute import IterativeImputer
>>> imp_mean = IterativeImputer(random_state=0)
>>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
IterativeImputer(random_state=0)
>>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
>>> imp_mean.transform(X)
array([[ 6.9584..., 2. , 3. ],
[ 4. , 2.6000..., 6. ],
[10. , 4.9999..., 9. ]])
Notes
-----
To support imputation in inductive mode we store each feature's estimator
during the ``fit`` phase, and predict without refitting (in order) during
the ``transform`` phase.
Features which contain all missing values at ``fit`` are discarded upon
``transform``.
References
----------
.. [1] `Stef van Buuren, Karin Groothuis-Oudshoorn (2011). "mice:
Multivariate Imputation by Chained Equations in R". Journal of
Statistical Software 45: 1-67.
<https://www.jstatsoft.org/article/view/v045i03>`_
.. [2] `S. F. Buck, (1960). "A Method of Estimation of Missing Values in
Multivariate Data Suitable for use with an Electronic Computer".
Journal of the Royal Statistical Society 22(2): 302-306.
<https://www.jstor.org/stable/2984099>`_
"""
    def __init__(self,
                 estimator=None, *,
                 missing_values=np.nan,
                 sample_posterior=False,
                 max_iter=10,
                 tol=1e-3,
                 n_nearest_features=None,
                 initial_strategy="mean",
                 imputation_order='ascending',
                 skip_complete=False,
                 min_value=-np.inf,
                 max_value=np.inf,
                 verbose=0,
                 random_state=None,
                 add_indicator=False):
        # Parameters are stored unmodified, per scikit-learn convention;
        # all validation happens in fit_transform.
        super().__init__(
            missing_values=missing_values,
            add_indicator=add_indicator
        )
        self.estimator = estimator
        self.sample_posterior = sample_posterior
        self.max_iter = max_iter
        self.tol = tol
        self.n_nearest_features = n_nearest_features
        self.initial_strategy = initial_strategy
        self.imputation_order = imputation_order
        self.skip_complete = skip_complete
        self.min_value = min_value
        self.max_value = max_value
        self.verbose = verbose
        self.random_state = random_state
    def _impute_one_feature(self,
                            X_filled,
                            mask_missing_values,
                            feat_idx,
                            neighbor_feat_idx,
                            estimator=None,
                            fit_mode=True):
        """Impute a single feature from the others provided.

        This function predicts the missing values of one of the features using
        the current estimates of all the other features. The ``estimator`` must
        support ``return_std=True`` in its ``predict`` method for this function
        to work.

        Parameters
        ----------
        X_filled : ndarray
            Input data with the most recent imputations.

        mask_missing_values : ndarray
            Input data's missing indicator matrix.

        feat_idx : int
            Index of the feature currently being imputed.

        neighbor_feat_idx : ndarray
            Indices of the features to be used in imputing ``feat_idx``.

        estimator : object
            The estimator to use at this step of the round-robin imputation.
            If ``sample_posterior`` is True, the estimator must support
            ``return_std`` in its ``predict`` method.
            If None, it will be cloned from self._estimator.

        fit_mode : boolean, default=True
            Whether to fit and predict with the estimator or just predict.

        Returns
        -------
        X_filled : ndarray
            Input data with ``X_filled[missing_row_mask, feat_idx]`` updated.

        estimator : estimator with sklearn API
            The fitted estimator used to impute
            ``X_filled[missing_row_mask, feat_idx]``.
        """
        if estimator is None and fit_mode is False:
            raise ValueError("If fit_mode is False, then an already-fitted "
                             "estimator should be passed in.")
        if estimator is None:
            estimator = clone(self._estimator)
        missing_row_mask = mask_missing_values[:, feat_idx]
        if fit_mode:
            # Train on the rows where this feature is observed.
            X_train = _safe_indexing(X_filled[:, neighbor_feat_idx],
                                     ~missing_row_mask)
            y_train = _safe_indexing(X_filled[:, feat_idx],
                                     ~missing_row_mask)
            estimator.fit(X_train, y_train)
        # if no missing values, don't predict
        if np.sum(missing_row_mask) == 0:
            return X_filled, estimator
        # get posterior samples if there is at least one missing value
        X_test = _safe_indexing(X_filled[:, neighbor_feat_idx],
                                missing_row_mask)
        if self.sample_posterior:
            mus, sigmas = estimator.predict(X_test, return_std=True)
            imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype)
            # two types of problems: (1) non-positive sigmas
            # (2) mus outside legal range of min_value and max_value
            # (results in inf sample)
            positive_sigmas = sigmas > 0
            imputed_values[~positive_sigmas] = mus[~positive_sigmas]
            mus_too_low = mus < self._min_value[feat_idx]
            imputed_values[mus_too_low] = self._min_value[feat_idx]
            mus_too_high = mus > self._max_value[feat_idx]
            imputed_values[mus_too_high] = self._max_value[feat_idx]
            # the rest can be sampled without statistical issues
            inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high
            mus = mus[inrange_mask]
            sigmas = sigmas[inrange_mask]
            # Sample from a normal truncated to [min_value, max_value];
            # a, b are the bounds expressed in standard deviations.
            a = (self._min_value[feat_idx] - mus) / sigmas
            b = (self._max_value[feat_idx] - mus) / sigmas
            truncated_normal = stats.truncnorm(a=a, b=b,
                                               loc=mus, scale=sigmas)
            imputed_values[inrange_mask] = truncated_normal.rvs(
                random_state=self.random_state_)
        else:
            # Deterministic prediction, clipped into the allowed range.
            imputed_values = estimator.predict(X_test)
            imputed_values = np.clip(imputed_values,
                                     self._min_value[feat_idx],
                                     self._max_value[feat_idx])
        # update the feature
        X_filled[missing_row_mask, feat_idx] = imputed_values
        return X_filled, estimator
def _get_neighbor_feat_idx(self,
n_features,
feat_idx,
abs_corr_mat):
"""Get a list of other features to predict ``feat_idx``.
If self.n_nearest_features is less than or equal to the total
number of features, then use a probability proportional to the absolute
correlation between ``feat_idx`` and each other feature to randomly
choose a subsample of the other features (without replacement).
Parameters
----------
n_features : int
Number of features in ``X``.
feat_idx : int
Index of the feature currently being imputed.
abs_corr_mat : ndarray, shape (n_features, n_features)
Absolute correlation matrix of ``X``. The diagonal has been zeroed
out and each feature has been normalized to sum to 1. Can be None.
Returns
-------
neighbor_feat_idx : array-like
The features to use to impute ``feat_idx``.
"""
if (self.n_nearest_features is not None and
self.n_nearest_features < n_features):
p = abs_corr_mat[:, feat_idx]
neighbor_feat_idx = self.random_state_.choice(
np.arange(n_features), self.n_nearest_features, replace=False,
p=p)
else:
inds_left = np.arange(feat_idx)
inds_right = np.arange(feat_idx + 1, n_features)
neighbor_feat_idx = np.concatenate((inds_left, inds_right))
return neighbor_feat_idx
def _get_ordered_idx(self, mask_missing_values):
"""Decide in what order we will update the features.
As a homage to the MICE R package, we will have 4 main options of
how to order the updates, and use a random order if anything else
is specified.
Also, this function skips features which have no missing values.
Parameters
----------
mask_missing_values : array-like, shape (n_samples, n_features)
Input data's missing indicator matrix, where "n_samples" is the
number of samples and "n_features" is the number of features.
Returns
-------
ordered_idx : ndarray, shape (n_features,)
The order in which to impute the features.
"""
frac_of_missing_values = mask_missing_values.mean(axis=0)
if self.skip_complete:
missing_values_idx = np.flatnonzero(frac_of_missing_values)
else:
missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0])
if self.imputation_order == 'roman':
ordered_idx = missing_values_idx
elif self.imputation_order == 'arabic':
ordered_idx = missing_values_idx[::-1]
elif self.imputation_order == 'ascending':
n = len(frac_of_missing_values) - len(missing_values_idx)
ordered_idx = np.argsort(frac_of_missing_values,
kind='mergesort')[n:]
elif self.imputation_order == 'descending':
n = len(frac_of_missing_values) - len(missing_values_idx)
ordered_idx = np.argsort(frac_of_missing_values,
kind='mergesort')[n:][::-1]
elif self.imputation_order == 'random':
ordered_idx = missing_values_idx
self.random_state_.shuffle(ordered_idx)
else:
raise ValueError("Got an invalid imputation order: '{0}'. It must "
"be one of the following: 'roman', 'arabic', "
"'ascending', 'descending', or "
"'random'.".format(self.imputation_order))
return ordered_idx
def _get_abs_corr_mat(self, X_filled, tolerance=1e-6):
"""Get absolute correlation matrix between features.
Parameters
----------
X_filled : ndarray, shape (n_samples, n_features)
Input data with the most recent imputations.
tolerance : float, default=1e-6
``abs_corr_mat`` can have nans, which will be replaced
with ``tolerance``.
Returns
-------
abs_corr_mat : ndarray, shape (n_features, n_features)
Absolute correlation matrix of ``X`` at the beginning of the
current round. The diagonal has been zeroed out and each feature's
absolute correlations with all others have been normalized to sum
to 1.
"""
n_features = X_filled.shape[1]
if (self.n_nearest_features is None or
self.n_nearest_features >= n_features):
return None
with np.errstate(invalid='ignore'):
# if a feature in the neighboorhood has only a single value
# (e.g., categorical feature), the std. dev. will be null and
# np.corrcoef will raise a warning due to a division by zero
abs_corr_mat = np.abs(np.corrcoef(X_filled.T))
# np.corrcoef is not defined for features with zero std
abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance
# ensures exploration, i.e. at least some probability of sampling
np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat)
# features are not their own neighbors
np.fill_diagonal(abs_corr_mat, 0)
# needs to sum to 1 for np.random.choice sampling
abs_corr_mat = normalize(abs_corr_mat, norm='l1', axis=0, copy=False)
return abs_corr_mat
    def _initial_imputation(self, X, in_fit=False):
        """Perform initial imputation for input X.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Input data, where "n_samples" is the number of samples and
            "n_features" is the number of features.

        in_fit : bool, default=False
            Whether function is called in fit.

        Returns
        -------
        Xt : ndarray, shape (n_samples, n_features)
            Input data, where "n_samples" is the number of samples and
            "n_features" is the number of features.

        X_filled : ndarray, shape (n_samples, n_features)
            Input data with the most recent imputations.

        mask_missing_values : ndarray, shape (n_samples, n_features)
            Input data's missing indicator matrix, where "n_samples" is the
            number of samples and "n_features" is the number of features.

        X_missing_mask : ndarray, shape (n_samples, n_features)
            Input data's mask matrix indicating missing datapoints, where
            "n_samples" is the number of samples and "n_features" is the
            number of features.
        """
        # NaN missing markers require allow-nan validation; any other
        # marker means NaNs in X are real errors.
        if is_scalar_nan(self.missing_values):
            force_all_finite = "allow-nan"
        else:
            force_all_finite = True
        X = self._validate_data(X, dtype=FLOAT_DTYPES, order="F", reset=in_fit,
                                force_all_finite=force_all_finite)
        _check_inputs_dtype(X, self.missing_values)
        X_missing_mask = _get_mask(X, self.missing_values)
        mask_missing_values = X_missing_mask.copy()
        # First call (fit) creates and fits the SimpleImputer; later calls
        # (transform) reuse it.
        if self.initial_imputer_ is None:
            self.initial_imputer_ = SimpleImputer(
                missing_values=self.missing_values,
                strategy=self.initial_strategy
            )
            X_filled = self.initial_imputer_.fit_transform(X)
        else:
            X_filled = self.initial_imputer_.transform(X)
        # Features that were entirely missing at fit time have NaN
        # statistics; drop them from Xt and the round-robin mask.
        valid_mask = np.flatnonzero(np.logical_not(
            np.isnan(self.initial_imputer_.statistics_)))
        Xt = X[:, valid_mask]
        mask_missing_values = mask_missing_values[:, valid_mask]
        return Xt, X_filled, mask_missing_values, X_missing_mask
@staticmethod
def _validate_limit(limit, limit_type, n_features):
    """Validate the limits (min/max) of the feature values.

    Scalar (or absent) limits are broadcast to a vector with one entry
    per feature; array-like limits are checked for the right length.

    Parameters
    ----------
    limit: scalar or array-like
        The user-specified limit (i.e, min_value or max_value)
    limit_type: string, "max" or "min"
    n_features: Number of features in the dataset

    Returns
    -------
    limit: ndarray, shape(n_features,)
        Array of limits, one for each feature
    """
    if limit is None:
        # No limit given: unbounded in the appropriate direction.
        limit = np.inf if limit_type == "max" else -np.inf
    if np.isscalar(limit):
        limit = np.full(n_features, limit)
    limit = check_array(
        limit, force_all_finite=False, copy=False, ensure_2d=False
    )
    if limit.shape[0] != n_features:
        raise ValueError(
            f"'{limit_type}_value' should be of "
            f"shape ({n_features},) when an array-like "
            f"is provided. Got {limit.shape}, instead."
        )
    return limit
def fit_transform(self, X, y=None):
    """Fits the imputer on X and return the transformed X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data, where "n_samples" is the number of samples and
        "n_features" is the number of features.

    y : ignored.

    Returns
    -------
    Xt : array-like, shape (n_samples, n_features)
        The imputed input data.
    """
    # Reuse an existing RandomState across repeated fits so the random
    # stream continues instead of restarting.
    self.random_state_ = getattr(self, "random_state_",
                                 check_random_state(self.random_state))

    if self.max_iter < 0:
        raise ValueError(
            "'max_iter' should be a positive integer. Got {} instead."
            .format(self.max_iter))

    if self.tol < 0:
        raise ValueError(
            "'tol' should be a non-negative float. Got {} instead."
            .format(self.tol)
        )

    # Default round-robin estimator is BayesianRidge (imported lazily to
    # avoid a circular import at module load time).
    if self.estimator is None:
        from ..linear_model import BayesianRidge
        self._estimator = BayesianRidge()
    else:
        self._estimator = clone(self.estimator)

    self.imputation_sequence_ = []

    self.initial_imputer_ = None

    # First pass: simple imputation plus the missing-value masks.
    X, Xt, mask_missing_values, complete_mask = (
        self._initial_imputation(X, in_fit=True))

    super()._fit_indicator(complete_mask)
    X_indicator = super()._transform_indicator(complete_mask)

    # Nothing to iterate on: iteration disabled, or every retained entry
    # is missing.
    if self.max_iter == 0 or np.all(mask_missing_values):
        self.n_iter_ = 0
        return super()._concatenate_indicator(Xt, X_indicator)

    # Edge case: a single feature. There are no other features to regress
    # on, so return the initial imputation as-is.
    if Xt.shape[1] == 1:
        self.n_iter_ = 0
        return super()._concatenate_indicator(Xt, X_indicator)

    self._min_value = self._validate_limit(
        self.min_value, "min", X.shape[1])
    self._max_value = self._validate_limit(
        self.max_value, "max", X.shape[1])

    if not np.all(np.greater(self._max_value, self._min_value)):
        raise ValueError(
            "One (or more) features have min_value >= max_value.")

    # order in which to impute
    # note this is probably too slow for large feature data (d > 100000)
    # and a better way would be good.
    # see: https://goo.gl/KyCNwj and subsequent comments
    ordered_idx = self._get_ordered_idx(mask_missing_values)
    self.n_features_with_missing_ = len(ordered_idx)

    abs_corr_mat = self._get_abs_corr_mat(Xt)

    n_samples, n_features = Xt.shape
    if self.verbose > 0:
        print("[IterativeImputer] Completing matrix with shape %s"
              % (X.shape,))
    start_t = time()
    if not self.sample_posterior:
        Xt_previous = Xt.copy()
        # Convergence is judged on the max absolute change, scaled by the
        # largest observed (non-missing) magnitude.
        normalized_tol = self.tol * np.max(
            np.abs(X[~mask_missing_values])
        )
    for self.n_iter_ in range(1, self.max_iter + 1):
        if self.imputation_order == 'random':
            # Draw a fresh random visiting order every round.
            ordered_idx = self._get_ordered_idx(mask_missing_values)

        for feat_idx in ordered_idx:
            neighbor_feat_idx = self._get_neighbor_feat_idx(n_features,
                                                            feat_idx,
                                                            abs_corr_mat)
            Xt, estimator = self._impute_one_feature(
                Xt, mask_missing_values, feat_idx, neighbor_feat_idx,
                estimator=None, fit_mode=True)
            # Record the (feature, neighbors, fitted estimator) triplet so
            # transform() can replay exactly the same sequence.
            estimator_triplet = _ImputerTriplet(feat_idx,
                                                neighbor_feat_idx,
                                                estimator)
            self.imputation_sequence_.append(estimator_triplet)

        if self.verbose > 1:
            print('[IterativeImputer] Ending imputation round '
                  '%d/%d, elapsed time %0.2f'
                  % (self.n_iter_, self.max_iter, time() - start_t))

        if not self.sample_posterior:
            inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf,
                                      axis=None)
            if self.verbose > 0:
                print('[IterativeImputer] '
                      'Change: {}, scaled tolerance: {} '.format(
                          inf_norm, normalized_tol))
            if inf_norm < normalized_tol:
                if self.verbose > 0:
                    print('[IterativeImputer] Early stopping criterion '
                          'reached.')
                break
            Xt_previous = Xt.copy()
    else:
        # for/else: the loop ran to completion without breaking, i.e.
        # early stopping never triggered.
        if not self.sample_posterior:
            warnings.warn("[IterativeImputer] Early stopping criterion not"
                          " reached.", ConvergenceWarning)
    # Observed values are authoritative; restore them over any values the
    # round-robin regression may have touched.
    Xt[~mask_missing_values] = X[~mask_missing_values]
    return super()._concatenate_indicator(Xt, X_indicator)
def transform(self, X):
    """Imputes all missing values in X.

    Note that this is stochastic, and that if random_state is not fixed,
    repeated calls, or permuted input, will yield different results.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The input data to complete.

    Returns
    -------
    Xt : array-like, shape (n_samples, n_features)
        The imputed input data.
    """
    check_is_fitted(self)

    X, Xt, mask_missing_values, complete_mask = self._initial_imputation(X)

    X_indicator = super()._transform_indicator(complete_mask)

    if self.n_iter_ == 0 or np.all(mask_missing_values):
        return super()._concatenate_indicator(Xt, X_indicator)

    # The fitted sequence holds n_iter_ rounds of per-feature estimators;
    # replay them in order, reusing each fitted estimator (fit_mode=False).
    imputations_per_round = len(self.imputation_sequence_) // self.n_iter_
    i_rnd = 0
    if self.verbose > 0:
        print("[IterativeImputer] Completing matrix with shape %s"
              % (X.shape,))
    start_t = time()
    for it, estimator_triplet in enumerate(self.imputation_sequence_):
        Xt, _ = self._impute_one_feature(
            Xt,
            mask_missing_values,
            estimator_triplet.feat_idx,
            estimator_triplet.neighbor_feat_idx,
            estimator=estimator_triplet.estimator,
            fit_mode=False
        )
        # Log once per completed round, not once per feature.
        if not (it + 1) % imputations_per_round:
            if self.verbose > 1:
                print('[IterativeImputer] Ending imputation round '
                      '%d/%d, elapsed time %0.2f'
                      % (i_rnd + 1, self.n_iter_, time() - start_t))
            i_rnd += 1

    # Observed values are authoritative; restore them before returning.
    Xt[~mask_missing_values] = X[~mask_missing_values]

    return super()._concatenate_indicator(Xt, X_indicator)
def fit(self, X, y=None):
    """Fits the imputer on X and return self.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data, where "n_samples" is the number of samples and
        "n_features" is the number of features.

    y : ignored

    Returns
    -------
    self : object
        Returns self.
    """
    # fit_transform does all of the work; only the fitted state is kept,
    # the transformed matrix is discarded.
    self.fit_transform(X)
    return self
| xuewei4d/scikit-learn | sklearn/impute/_iterative.py | Python | bsd-3-clause | 31,089 |
"""
Stackoverflow OAuth2 backend, docs at:
https://python-social-auth.readthedocs.io/en/latest/backends/stackoverflow.html
"""
from .oauth import BaseOAuth2
class StackoverflowOAuth2(BaseOAuth2):
    """Stackoverflow OAuth2 authentication backend"""
    name = 'stackoverflow'
    ID_KEY = 'user_id'
    AUTHORIZATION_URL = 'https://stackexchange.com/oauth'
    ACCESS_TOKEN_URL = 'https://stackexchange.com/oauth/access_token'
    ACCESS_TOKEN_METHOD = 'POST'
    SCOPE_SEPARATOR = ','
    EXTRA_DATA = [
        ('id', 'id'),
        ('expires', 'expires')
    ]

    def get_user_details(self, response):
        """Return user details from Stackoverflow account"""
        fullname, first_name, last_name = self.get_user_names(
            response.get('display_name')
        )
        # The profile link's last path segment doubles as the username.
        profile_link = response.get('link')
        username = profile_link.rsplit('/', 1)[-1]
        return {
            'username': username,
            'full_name': fullname,
            'first_name': first_name,
            'last_name': last_name,
        }

    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from service"""
        params = {
            'site': 'stackoverflow',
            'access_token': access_token,
            'key': self.setting('API_KEY'),
        }
        payload = self.get_json(
            'https://api.stackexchange.com/2.1/me', params=params
        )
        return payload['items'][0]

    def request_access_token(self, *args, **kwargs):
        # The token endpoint answers with a querystring, not JSON.
        return self.get_querystring(*args, **kwargs)
| tobias47n9e/social-core | social_core/backends/stackoverflow.py | Python | bsd-3-clause | 1,459 |
##########################################################################
#
# Copyright (c) 2019, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest

import IECore

import Gaffer
import GafferTest
class NumericBookmarkSetTest( GafferTest.TestCase ) :

	def testAccessors( self ) :

		# A NumericBookmarkSet tracks a single numeric bookmark slot on a
		# script.
		s = Gaffer.ScriptNode()

		b = Gaffer.NumericBookmarkSet( s, 1 )
		self.assertEqual( b.getBookmark(), 1 )

		# Slots 1-9 are accepted.
		for i in range( 1, 10 ) :
			b.setBookmark( i )
			self.assertEqual( b.getBookmark(), i )

		# Anything outside 1-9 is rejected.
		for i in ( 0, 10 ) :
			with self.assertRaises( RuntimeError ) :
				b.setBookmark( i )

	def testBookmarkUpdates( self ) :

		s = Gaffer.ScriptNode()

		s["a"] = Gaffer.Node()
		s["b"] = Gaffer.Node()
		s["c"] = Gaffer.Node()

		# The set mirrors whichever node currently holds the watched slot.
		b = Gaffer.NumericBookmarkSet( s, 1 )
		self.assertEqual( b.size(), 0 )

		Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["a"] )
		self.assertEqual( set(b), { s["a"] } )

		Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["b"] )
		self.assertEqual( set(b), { s["b"] } )

		Gaffer.MetadataAlgo.setNumericBookmark( s, 1, None )
		self.assertEqual( b.size(), 0 )

		# Sets watching different slots are independent of one another.
		Gaffer.MetadataAlgo.setNumericBookmark( s, 2, s["c"] )
		b2 = Gaffer.NumericBookmarkSet( s, 2 )
		self.assertEqual( set(b2), { s["c"] } )

		Gaffer.MetadataAlgo.setNumericBookmark( s, 2, s["a"] )
		self.assertEqual( set(b2), { s["a"] } )

	def testSignals( self ) :

		s = Gaffer.ScriptNode()

		s["a"] = Gaffer.Node()
		s["b"] = Gaffer.Node()

		# Maintain a mirror of the set's membership purely from its
		# added/removed signals, and check it stays in sync.
		mirror = set()

		def added( _, member ) :
			mirror.add( member )

		def removed( _, member ) :
			mirror.remove( member )

		b = Gaffer.NumericBookmarkSet( s, 1 )

		# Keep references to the connections so they stay alive for the
		# duration of the test.
		ca = b.memberAddedSignal().connect( added )
		cr = b.memberRemovedSignal().connect( removed )

		self.assertEqual( set(b), mirror )

		Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["a"] )
		self.assertEqual( set(b), mirror )

		Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["b"] )
		self.assertEqual( set(b), mirror )

	def testSignalOrder( self ) :

		s = Gaffer.ScriptNode()

		s["a"] = Gaffer.Node()
		s["b"] = Gaffer.Node()

		b = Gaffer.NumericBookmarkSet( s, 1 )

		# Failures inside the callbacks below cannot use assert directly
		# (exceptions raised in signal handlers are swallowed), so they
		# are counted here and asserted on afterwards.
		callbackFailures = { "added" : 0, "removed" : 0 }

		# Check we have no members when one is removed as we're
		# defined as only ever containing one node. We can't assert
		# here as the exception gets eaten and the test passes anyway
		def removed( _, member ) :
			if set(b) != set() :
				callbackFailures["removed"] += 1

		cr = b.memberRemovedSignal().connect( removed )

		Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["a"] )
		Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["b"] )
		self.assertEqual( callbackFailures["removed"], 0 )

		# Check member is added before signal, same deal re: asserts
		def added( _, member ) :
			if set(b) != { s["a"] } :
				callbackFailures["added"] += 1

		ca = b.memberAddedSignal().connect( added )

		Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["a"] )
		self.assertEqual( callbackFailures["added"], 0 )
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| andrewkaufman/gaffer | python/GafferTest/NumericBookmarkSetTest.py | Python | bsd-3-clause | 4,620 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry.story import shared_state
from telemetry import user_story
from telemetry.user_story import user_story_set
from telemetry.util import cloud_storage
# pylint: disable=abstract-method
class SharedStateBar(shared_state.SharedState):
    # Stub SharedState used only as a constructor argument for UserStoryFoo;
    # abstract methods are intentionally left unimplemented (see the pylint
    # disable above).
    pass
class UserStoryFoo(user_story.UserStory):
    # Concrete UserStory wired to SharedStateBar, for use in the tests below.
    def __init__(self, name='', labels=None):
        super(UserStoryFoo, self).__init__(
            SharedStateBar, name, labels)
class UserStorySetFoo(user_story_set.UserStorySet):
    """ UserStorySetFoo is a user story created for testing purpose. """
    # NOTE: the exact docstring text above (including the surrounding
    # spaces) is asserted verbatim by testUserStoryTestDescription below --
    # do not edit it.
    pass
class UserStorySetTest(unittest.TestCase):

    def testUserStoryTestName(self):
        # Name() is derived from the defining module's file name.
        self.assertEqual('user_story_set_unittest', UserStorySetFoo.Name())

    def testUserStoryTestDescription(self):
        # Description() is the class docstring, verbatim.
        self.assertEqual(
            ' UserStorySetFoo is a user story created for testing purpose. ',
            UserStorySetFoo.Description())

    def testBaseDir(self):
        story_set = UserStorySetFoo()
        self.assertTrue(os.path.isdir(story_set.base_dir))
        self.assertEqual(story_set.base_dir, os.path.dirname(__file__))

    def testFilePath(self):
        story_set = UserStorySetFoo()
        expected = os.path.abspath(__file__).replace('.pyc', '.py')
        self.assertEqual(expected, story_set.file_path)

    def testCloudBucket(self):
        # No bucket by default.
        self.assertEqual(user_story_set.UserStorySet().bucket, None)
        # Each recognised bucket constant round-trips unchanged.
        for bucket in (cloud_storage.PUBLIC_BUCKET,
                       cloud_storage.PARTNER_BUCKET,
                       cloud_storage.INTERNAL_BUCKET):
            story_set = user_story_set.UserStorySet(
                cloud_storage_bucket=bucket)
            self.assertEqual(story_set.bucket, bucket)
        # Unknown bucket names are rejected.
        with self.assertRaises(ValueError):
            user_story_set.UserStorySet(cloud_storage_bucket='garbage_bucket')

    def testRemoveWithEmptySetRaises(self):
        story_set = user_story_set.UserStorySet()
        with self.assertRaises(ValueError):
            story_set.RemoveUserStory(UserStoryFoo())

    def testBasicAddRemove(self):
        story_set = user_story_set.UserStorySet()
        story = UserStoryFoo()
        story_set.AddUserStory(story)
        self.assertEqual([story], story_set.user_stories)
        story_set.RemoveUserStory(story)
        self.assertEqual([], story_set.user_stories)
| guorendong/iridium-browser-ubuntu | tools/telemetry/telemetry/user_story/user_story_set_unittest.py | Python | bsd-3-clause | 2,649 |
import unittest
import numpy
import six
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer import utils
@testing.parameterize(*(
    # Multi-dimensional inputs crossed with every supported axis form.
    testing.product({
        'shape': [(3, 2, 4)],
        'axis': [None, 0, 1, 2, -1, (0, 1), (1, -1)],
        'dtype': [numpy.float16, numpy.float32, numpy.float64],
        'use_weights': [True, False],
        'keepdims': [True, False],
        'use_variable_method': [True, False],
    }) +
    # Zero-dimensional (scalar) input.
    testing.product({
        'shape': [()],
        'axis': [None],
        'dtype': [numpy.float16, numpy.float32, numpy.float64],
        'use_weights': [True, False],
        'keepdims': [True, False],
        'use_variable_method': [True, False],
    })))
@testing.fix_random()
@testing.inject_backend_tests(
    None,
    # CPU tests
    [
        {},
    ]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
        'cuda_device': [0, 1],
    })
    # ChainerX tests
    + testing.product({
        'use_chainerx': [True],
        'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
    }))
class TestAverage(testing.FunctionTestCase):
    # Checks functions.average / Variable.mean against numpy.average.

    def setUp(self):
        self.skip_double_backward_test = True
        # float16 needs looser tolerances than float32/float64.
        if self.dtype == numpy.float16:
            self.check_forward_options.update({'atol': 5e-3, 'rtol': 5e-3})
            self.check_backward_options.update({'atol': 1e-2, 'rtol': 1e-1})
        else:
            self.check_backward_options.update({'atol': 1e-2, 'rtol': 1e-2})

    def before_test(self, test_name):
        if self.use_weights and isinstance(self.axis, tuple):
            # This condition is not supported
            raise unittest.SkipTest(
                'Tuple axis is not supported when weights is given')

    def generate_inputs(self):
        x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # The weight array's shape must match the reduced axes.
        if self.axis is None:
            w_shape = self.shape
        elif isinstance(self.axis, int):
            axis = self.axis
            if axis < 0:
                ndim = len(self.shape)
                axis += ndim
            w_shape = self.shape[axis],
        else:
            w_shape = tuple(self.shape[a] for a in self.axis)

        # Sample weights. Weights should not sum to 0.
        while True:
            w = numpy.random.uniform(-2, 2, w_shape).astype(self.dtype)
            w_sum_eps = 1.0 if self.dtype == numpy.float16 else 5e-2
            if abs(w.sum()) > w_sum_eps:
                break

        return x, w

    def forward(self, inputs, device):
        x, w = inputs
        if not self.use_weights:
            w = None
        # Exercise both the functional and the Variable-method spellings.
        if self.use_variable_method:
            y = x.mean(axis=self.axis, weights=w, keepdims=self.keepdims)
        else:
            y = functions.average(
                x, axis=self.axis, weights=w, keepdims=self.keepdims)
        return y,

    def forward_expected(self, inputs):
        x, w = inputs
        if not self.use_weights:
            w = None
        y_expect = numpy.average(x, axis=self.axis, weights=w)
        if self.keepdims:
            # numpy.average does not support keepdims, so reinsert the
            # reduced axes as length-1 dimensions manually.
            axis = self.axis
            if axis is None:
                axis = list(six.moves.range(x.ndim))
            elif isinstance(axis, int):
                axis = axis,
            shape = list(x.shape)
            for i in six.moves.range(len(shape)):
                if i in axis or i - len(shape) in axis:
                    shape[i] = 1
            y_expect = y_expect.reshape(shape)
        y_expect = utils.force_array(y_expect, dtype=self.dtype)
        return y_expect,
@testing.parameterize(*(
    testing.product({
        'shape': [(30, 20, 40)],
        'axis': [None, 0, 1, 2, -1, (0, 1), (1, -1)],
        'dtype': [numpy.float16],
        'use_weights': [False],  # np.average overflows when `weights` is used
        'keepdims': [True, False],
    })
))
@testing.inject_backend_tests(
    None,
    # CPU tests
    [
        {},
    ]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
        'cuda_device': [0, 1],
    })
    # ChainerX tests
    + testing.product({
        'use_chainerx': [True],
        'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
    }))
@attr.slow
@testing.with_requires('numpy>=1.12')  # NumPy #8222
class TestAverageOverflowingSum(testing.FunctionTestCase):
    # Checks float16 averaging where a naive float16 sum of the inputs
    # (values in [3000, 7000)) would overflow; the reference result is
    # computed in float64 and cast back.

    def setUp(self):
        # Loose tolerances: everything here is float16.
        self.check_forward_options.update({'atol': 1e-2, 'rtol': 2e-3})
        self.check_backward_options.update({'atol': 1e-2, 'rtol': 1e-2})
        self.check_double_backward_options.update({'atol': 1e-2, 'rtol': 1e-2})

    def generate_inputs(self):
        x = numpy.random.uniform(3000, 7000, self.shape).astype(self.dtype)
        return x,

    def forward(self, inputs, device):
        x, = inputs
        y = functions.average(
            x, self.axis, keepdims=self.keepdims)
        return y,

    def forward_expected(self, inputs):
        x, = inputs
        # Reference computed in float64 to sidestep the float16 overflow.
        y_expect = numpy.mean(
            x.astype(numpy.float64), self.axis, keepdims=self.keepdims
        ).astype(self.dtype)
        return utils.force_array(y_expect),
@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestAverageDuplicateValueInAxis(unittest.TestCase):
    """average() must reject axis tuples that name the same axis twice,
    and must reject weights combined with a tuple axis."""

    def _make_input(self):
        # Random (2, 3, 4) input in [-1, 1).
        return numpy.random.uniform(
            -1, 1, 24).reshape(2, 3, 4).astype(self.dtype)

    def test_duplicate_value(self):
        x = self._make_input()
        with self.assertRaises(ValueError):
            functions.average(x, axis=(0, 0))

    def test_duplicate_value_negative(self):
        # Axis 1 and -2 refer to the same dimension of a 3-d array.
        x = self._make_input()
        with self.assertRaises(ValueError):
            functions.average(x, axis=(1, -2))

    def test_weights_and_axis(self):
        x = self._make_input()
        w = numpy.random.uniform(-1, 1, 6).reshape(2, 3).astype(self.dtype)
        with self.assertRaises(ValueError):
            functions.average(x, axis=(0, 1), weights=w)
# Register this module's tests with chainer's test runner.
testing.run_module(__name__, __file__)
| okuta/chainer | tests/chainer_tests/functions_tests/math_tests/test_average.py | Python | mit | 6,055 |
from cms.utils.urlutils import admin_reverse
from django.core.urlresolvers import reverse
from cms.utils import get_language_from_request
from cms.utils.compat.dj import python_2_unicode_compatible
from django.db import models
from cms.models.fields import PlaceholderField
from hvad.models import TranslatableModel, TranslatedFields
def dynamic_placeholder_1(instance):
    """Resolve a placeholder slot name from the instance's char_1 value."""
    return getattr(instance, 'char_1')
def dynamic_placeholder_2(instance):
    """Resolve a placeholder slot name from the instance's char_2 value."""
    return getattr(instance, 'char_2')
@python_2_unicode_compatible
class Example1(models.Model):
    # Test model: four char fields, an optional date and one placeholder.
    char_1 = models.CharField(u'char_1', max_length=255)
    char_2 = models.CharField(u'char_2', max_length=255)
    char_3 = models.CharField(u'char_3', max_length=255)
    char_4 = models.CharField(u'char_4', max_length=255)
    date_field = models.DateField(null=True)
    placeholder = PlaceholderField('placeholder')

    # Class-level default; set_static_url() fills this per request.
    static_admin_url = ''

    def __init__(self, *args, **kwargs):
        # Delegates straight to Model.__init__; no extra behaviour.
        super(Example1, self).__init__(*args, **kwargs)

    def callable_item(self, request):
        # The request argument is accepted but unused.
        return self.char_1

    def __str__(self):
        return self.char_1

    def get_absolute_url(self):
        return reverse("example_detail", args=(self.pk,))

    def set_static_url(self, request):
        """Cache the admin edit-field URL for the request's language on this
        instance (saved objects only) and return the pk."""
        language = get_language_from_request(request)
        if self.pk:
            self.static_admin_url = admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language))
        return self.pk

    def dynamic_url(self, request):
        """Same admin edit-field URL as set_static_url, computed on demand."""
        language = get_language_from_request(request)
        return admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language))
class TwoPlaceholderExample(models.Model):
    # Model carrying two independent placeholders; each gets its own
    # related_name so the reverse accessors don't clash.
    char_1 = models.CharField(u'char_1', max_length=255)
    char_2 = models.CharField(u'char_2', max_length=255)
    char_3 = models.CharField(u'char_3', max_length=255)
    char_4 = models.CharField(u'char_4', max_length=255)
    placeholder_1 = PlaceholderField('placeholder_1', related_name='p1')
    placeholder_2 = PlaceholderField('placeholder_2', related_name='p2')
class DynamicPlaceholderSlotExample(models.Model):
    # Placeholder slot names are computed per instance by the
    # dynamic_placeholder_1/2 callables above (they return char_1/char_2).
    char_1 = models.CharField(u'char_1', max_length=255)
    char_2 = models.CharField(u'char_2', max_length=255)
    placeholder_1 = PlaceholderField(dynamic_placeholder_1, related_name='dynamic_pl_1')
    placeholder_2 = PlaceholderField(dynamic_placeholder_2, related_name='dynamic_pl_2')
class CharPksExample(models.Model):
    # Model with a non-integer (slug) primary key.
    char_1 = models.CharField(u'char_1', max_length=255)
    # NOTE(review): the verbose name u'char_1' on the slug field looks
    # copy-pasted from the field above -- confirm it shouldn't be u'slug'.
    slug = models.SlugField(u'char_1', max_length=255, primary_key=True)
    placeholder_1 = PlaceholderField('placeholder_1', related_name='charpk_p1')
@python_2_unicode_compatible
class MultilingualExample1(TranslatableModel):
    # char_1/char_2 live in a per-language translation table (django-hvad);
    # the placeholder is shared across languages.
    translations = TranslatedFields(
        char_1=models.CharField(u'char_1', max_length=255),
        char_2=models.CharField(u'char_2', max_length=255),
    )
    placeholder_1 = PlaceholderField('placeholder_1')

    def __str__(self):
        return self.char_1

    def get_absolute_url(self):
        return reverse("detail_multi", args=(self.pk,))
| amaozhao/basecms | cms/test_utils/project/placeholderapp/models.py | Python | mit | 3,065 |
import pkg_resources
import unittest
def with_requires(*requirements):
    """Run a test case only when given requirements are satisfied.

    .. admonition:: Example

       This test case runs only when `numpy>=1.10` is installed.

       >>> from chainer import testing
       ... class Test(unittest.TestCase):
       ...     @testing.with_requires('numpy>=1.10')
       ...     def test_for_numpy_1_10(self):
       ...         pass

    Args:
        requirements: A list of string representing requirement condition to
            run a given test case.
    """
    ws = pkg_resources.WorkingSet()
    try:
        ws.require(*requirements)
        skip = False
    except pkg_resources.ResolutionError:
        # ResolutionError is the common base of both VersionConflict (a
        # wrong version is installed) and DistributionNotFound (the package
        # is missing entirely). Catching only VersionConflict, as before,
        # let a missing package raise at decoration time instead of
        # skipping the test.
        skip = True

    msg = 'requires: {}'.format(','.join(requirements))
    return unittest.skipIf(skip, msg)
| AlpacaDB/chainer | chainer/testing/helper.py | Python | mit | 829 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLayoutItemMap.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2017 Nyall Dawson'
__date__ = '20/10/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.PyQt.QtCore import QFileInfo, QRectF, QDir
from qgis.PyQt.QtXml import QDomDocument
from qgis.PyQt.QtGui import QPainter, QColor
from qgis.core import (QgsLayoutItemMap,
QgsRectangle,
QgsRasterLayer,
QgsVectorLayer,
QgsLayout,
QgsMapSettings,
QgsProject,
QgsMultiBandColorRenderer,
QgsCoordinateReferenceSystem,
QgsTextFormat,
QgsFontUtils,
QgsPalLayerSettings,
QgsNullSymbolRenderer,
QgsPoint,
QgsFeature,
QgsVectorLayerSimpleLabeling,
QgsLabelingEngineSettings,
QgsLayoutMeasurement,
QgsUnitTypes,
QgsLayoutObject,
QgsProperty,
QgsReadWriteContext)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
from qgslayoutchecker import QgsLayoutChecker
from test_qgslayoutitem import LayoutItemTestCase
# Spin up the QGIS application once at import time; required before any
# QGIS API objects can be constructed.
start_app()
# Root directory of the shared unit-test data files.
TEST_DATA_DIR = unitTestDataPath()
class TestQgsLayoutMap(unittest.TestCase, LayoutItemTestCase):
@classmethod
def setUpClass(cls):
    # Tell the shared LayoutItemTestCase mixin which item type to exercise.
    cls.item_class = QgsLayoutItemMap
def setUp(self):
    # Fresh HTML report fragment for this test; tearDown() appends it to
    # the on-disk report.
    self.report = "<h1>Python QgsLayoutItemMap Tests</h1>\n"
def tearDown(self):
    """Append this test's checker report to the session-wide HTML report."""
    report_path = "%s/qgistest.html" % QDir.tempPath()
    with open(report_path, 'a') as report_file:
        report_file.write(self.report)
def __init__(self, methodName):
    """Construct shared test layers and a layout containing a map item.

    Runs for every test method, since unittest creates a fresh TestCase
    instance per test.
    """
    unittest.TestCase.__init__(self, methodName)

    myPath = os.path.join(TEST_DATA_DIR, 'rgb256x256.png')
    rasterFileInfo = QFileInfo(myPath)
    self.raster_layer = QgsRasterLayer(rasterFileInfo.filePath(),
                                       rasterFileInfo.completeBaseName())
    # Render raster bands 1/2/3 as RGB.
    rasterRenderer = QgsMultiBandColorRenderer(
        self.raster_layer.dataProvider(), 1, 2, 3)
    self.raster_layer.setRenderer(rasterRenderer)

    myPath = os.path.join(TEST_DATA_DIR, 'points.shp')
    vector_file_info = QFileInfo(myPath)
    self.vector_layer = QgsVectorLayer(vector_file_info.filePath(),
                                       vector_file_info.completeBaseName(), 'ogr')
    assert self.vector_layer.isValid()

    # pipe = mRasterLayer.pipe()
    # assert pipe.set(rasterRenderer), 'Cannot set pipe renderer'
    QgsProject.instance().addMapLayers([self.raster_layer, self.vector_layer])

    # create layout with layout map
    self.layout = QgsLayout(QgsProject.instance())
    self.layout.initializeDefaults()
    self.map = QgsLayoutItemMap(self.layout)
    self.map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
    self.map.setFrameEnabled(True)
    self.map.setLayers([self.raster_layer])
    self.layout.addLayoutItem(self.map)
def testMapCrs(self):
    # create layout with layout map
    map_settings = QgsMapSettings()
    map_settings.setLayers([self.vector_layer])
    layout = QgsLayout(QgsProject.instance())
    layout.initializeDefaults()

    # check that new maps inherit project CRS
    QgsProject.instance().setCrs(QgsCoordinateReferenceSystem('EPSG:4326'))
    map = QgsLayoutItemMap(layout)
    map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
    map.setFrameEnabled(True)
    rectangle = QgsRectangle(-13838977, 2369660, -8672298, 6250909)
    map.setExtent(rectangle)
    map.setLayers([self.vector_layer])
    layout.addLayoutItem(map)

    # With no explicit CRS set, crs() falls through to the project CRS and
    # presetCrs() stays invalid.
    self.assertEqual(map.crs().authid(), 'EPSG:4326')
    self.assertFalse(map.presetCrs().isValid())

    # overwrite CRS
    map.setCrs(QgsCoordinateReferenceSystem('EPSG:3857'))
    self.assertEqual(map.crs().authid(), 'EPSG:3857')
    self.assertEqual(map.presetCrs().authid(), 'EPSG:3857')
    checker = QgsLayoutChecker('composermap_crs3857', layout)
    checker.setControlPathPrefix("composer_map")
    result, message = checker.testLayout()
    self.report += checker.report()
    self.assertTrue(result, message)

    # overwrite CRS
    map.setCrs(QgsCoordinateReferenceSystem('EPSG:4326'))
    self.assertEqual(map.presetCrs().authid(), 'EPSG:4326')
    self.assertEqual(map.crs().authid(), 'EPSG:4326')
    # Re-zoom to a degree-based rectangle before rendering (presumably the
    # previous metre-based extent is not meaningful in EPSG:4326).
    rectangle = QgsRectangle(-124, 17, -78, 52)
    map.zoomToExtent(rectangle)
    checker = QgsLayoutChecker('composermap_crs4326', layout)
    checker.setControlPathPrefix("composer_map")
    result, message = checker.testLayout()
    self.report += checker.report()
    self.assertTrue(result, message)

    # change back to project CRS
    map.setCrs(QgsCoordinateReferenceSystem())
    self.assertEqual(map.crs().authid(), 'EPSG:4326')
    self.assertFalse(map.presetCrs().isValid())
def testContainsAdvancedEffects(self):
    # The map reports advanced effects only when one of its layers uses a
    # non-default blend mode.
    map_settings = QgsMapSettings()
    map_settings.setLayers([self.vector_layer])
    layout = QgsLayout(QgsProject.instance())
    map = QgsLayoutItemMap(layout)

    self.assertFalse(map.containsAdvancedEffects())
    self.vector_layer.setBlendMode(QPainter.CompositionMode_Darken)
    # Capture the result, then restore the shared layer's blend mode
    # before asserting so a failure can't leak state into other tests.
    result = map.containsAdvancedEffects()
    self.vector_layer.setBlendMode(QPainter.CompositionMode_SourceOver)
    self.assertTrue(result)
def testRasterization(self):
    map_settings = QgsMapSettings()
    map_settings.setLayers([self.vector_layer])
    layout = QgsLayout(QgsProject.instance())
    map = QgsLayoutItemMap(layout)

    self.assertFalse(map.requiresRasterization())
    # A blend mode alone marks the map as having advanced effects, but
    # does not yet force rasterization while the background is opaque.
    self.vector_layer.setBlendMode(QPainter.CompositionMode_Darken)
    self.assertFalse(map.requiresRasterization())
    self.assertTrue(map.containsAdvancedEffects())

    # Disabling the background forces rasterization...
    map.setBackgroundEnabled(False)
    self.assertTrue(map.requiresRasterization())

    # ...as does a semi-transparent background colour.
    map.setBackgroundEnabled(True)
    map.setBackgroundColor(QColor(1, 1, 1, 1))
    self.assertTrue(map.requiresRasterization())

    # Restore the shared layer's blend mode for subsequent tests.
    self.vector_layer.setBlendMode(QPainter.CompositionMode_SourceOver)
def testLabelMargin(self):
    """
    Test rendering map item with a label margin set
    """
    format = QgsTextFormat()
    format.setFont(QgsFontUtils.getStandardTestFont("Bold"))
    format.setSize(20)
    format.setNamedStyle("Bold")
    format.setColor(QColor(0, 0, 0))
    settings = QgsPalLayerSettings()
    settings.setFormat(format)
    # Constant expression label so every point gets the same text.
    settings.fieldName = "'X'"
    settings.isExpression = True
    settings.placement = QgsPalLayerSettings.OverPoint

    vl = QgsVectorLayer("Point?crs=epsg:4326&field=id:integer", "vl", "memory")
    vl.setRenderer(QgsNullSymbolRenderer())
    f = QgsFeature(vl.fields(), 1)
    # Build a 15x15 grid of points.
    for x in range(15):
        for y in range(15):
            f.setGeometry(QgsPoint(x, y))
            vl.dataProvider().addFeature(f)
    vl.setLabeling(QgsVectorLayerSimpleLabeling(settings))
    vl.setLabelsEnabled(True)

    p = QgsProject()

    # Draw label rectangles only (no text) for stable reference images.
    engine_settings = QgsLabelingEngineSettings()
    engine_settings.setFlag(QgsLabelingEngineSettings.UsePartialCandidates, False)
    engine_settings.setFlag(QgsLabelingEngineSettings.DrawLabelRectOnly, True)
    p.setLabelingEngineSettings(engine_settings)

    p.addMapLayer(vl)
    layout = QgsLayout(p)
    layout.initializeDefaults()
    p.setCrs(QgsCoordinateReferenceSystem('EPSG:4326'))
    map = QgsLayoutItemMap(layout)
    map.attemptSetSceneRect(QRectF(10, 10, 180, 180))
    map.setFrameEnabled(True)
    map.zoomToExtent(vl.extent())
    map.setLayers([vl])
    layout.addLayoutItem(map)

    # Baseline: no label margin.
    checker = QgsLayoutChecker('composermap_label_nomargin', layout)
    checker.setControlPathPrefix("composer_map")
    result, message = checker.testLayout()
    self.report += checker.report()
    self.assertTrue(result, message)

    # Margin in millimeters.
    map.setLabelMargin(QgsLayoutMeasurement(15, QgsUnitTypes.LayoutMillimeters))
    checker = QgsLayoutChecker('composermap_label_margin', layout)
    checker.setControlPathPrefix("composer_map")
    result, message = checker.testLayout()
    self.report += checker.report()
    self.assertTrue(result, message)

    # Margin in centimeters.
    map.setLabelMargin(QgsLayoutMeasurement(3, QgsUnitTypes.LayoutCentimeters))
    checker = QgsLayoutChecker('composermap_label_cm_margin', layout)
    checker.setControlPathPrefix("composer_map")
    result, message = checker.testLayout()
    self.report += checker.report()
    self.assertTrue(result, message)

    # Margin must still be respected when the map is rotated.
    map.setMapRotation(45)
    map.zoomToExtent(vl.extent())
    map.setScale(map.scale() * 1.2)
    checker = QgsLayoutChecker('composermap_rotated_label_margin', layout)
    checker.setControlPathPrefix("composer_map")
    result, message = checker.testLayout()
    self.report += checker.report()
    self.assertTrue(result, message)

    # data defined
    map.setMapRotation(0)
    map.zoomToExtent(vl.extent())
    map.dataDefinedProperties().setProperty(QgsLayoutObject.MapLabelMargin, QgsProperty.fromExpression('1+3'))
    map.refresh()
    checker = QgsLayoutChecker('composermap_dd_label_margin', layout)
    checker.setControlPathPrefix("composer_map")
    result, message = checker.testLayout()
    self.report += checker.report()
    self.assertTrue(result, message)
    def testPartialLabels(self):
        """
        Test rendering map item with a show partial labels flag
        """
        # Label each point with a constant 'X' expression placed over the point
        format = QgsTextFormat()
        format.setFont(QgsFontUtils.getStandardTestFont("Bold"))
        format.setSize(20)
        format.setNamedStyle("Bold")
        format.setColor(QColor(0, 0, 0))
        settings = QgsPalLayerSettings()
        settings.setFormat(format)
        settings.fieldName = "'X'"
        settings.isExpression = True
        settings.placement = QgsPalLayerSettings.OverPoint
        # Memory layer holding a 15x15 grid of points; symbols suppressed so
        # only the label rectangles are rendered
        vl = QgsVectorLayer("Point?crs=epsg:4326&field=id:integer", "vl", "memory")
        vl.setRenderer(QgsNullSymbolRenderer())
        f = QgsFeature(vl.fields(), 1)
        for x in range(15):
            for y in range(15):
                f.setGeometry(QgsPoint(x, y))
                vl.dataProvider().addFeature(f)
        vl.setLabeling(QgsVectorLayerSimpleLabeling(settings))
        vl.setLabelsEnabled(True)
        p = QgsProject()
        # Draw only label rects (not text) for stable control-image comparison
        engine_settings = QgsLabelingEngineSettings()
        engine_settings.setFlag(QgsLabelingEngineSettings.UsePartialCandidates, False)
        engine_settings.setFlag(QgsLabelingEngineSettings.DrawLabelRectOnly, True)
        p.setLabelingEngineSettings(engine_settings)
        p.addMapLayer(vl)
        layout = QgsLayout(p)
        layout.initializeDefaults()
        p.setCrs(QgsCoordinateReferenceSystem('EPSG:4326'))
        map = QgsLayoutItemMap(layout)
        map.attemptSetSceneRect(QRectF(10, 10, 180, 180))
        map.setFrameEnabled(True)
        map.zoomToExtent(vl.extent())
        map.setLayers([vl])
        layout.addLayoutItem(map)
        # default should always be to hide partial labels
        self.assertFalse(map.mapFlags() & QgsLayoutItemMap.ShowPartialLabels)
        # hiding partial labels (the default)
        map.setMapFlags(QgsLayoutItemMap.MapItemFlags())
        checker = QgsLayoutChecker('composermap_label_nomargin', layout)
        checker.setControlPathPrefix("composer_map")
        result, message = checker.testLayout()
        self.report += checker.report()
        self.assertTrue(result, message)
        # showing partial labels
        map.setMapFlags(QgsLayoutItemMap.ShowPartialLabels)
        checker = QgsLayoutChecker('composermap_show_partial_labels', layout)
        checker.setControlPathPrefix("composer_map")
        result, message = checker.testLayout()
        self.report += checker.report()
        self.assertTrue(result, message)
    def testBlockingItems(self):
        """
        Test rendering map item with blocking items
        """
        # Label each point with a constant 'X' expression placed over the point
        format = QgsTextFormat()
        format.setFont(QgsFontUtils.getStandardTestFont("Bold"))
        format.setSize(20)
        format.setNamedStyle("Bold")
        format.setColor(QColor(0, 0, 0))
        settings = QgsPalLayerSettings()
        settings.setFormat(format)
        settings.fieldName = "'X'"
        settings.isExpression = True
        settings.placement = QgsPalLayerSettings.OverPoint
        # Memory layer holding a 15x15 grid of points, no symbols rendered
        vl = QgsVectorLayer("Point?crs=epsg:4326&field=id:integer", "vl", "memory")
        vl.setRenderer(QgsNullSymbolRenderer())
        f = QgsFeature(vl.fields(), 1)
        for x in range(15):
            for y in range(15):
                f.setGeometry(QgsPoint(x, y))
                vl.dataProvider().addFeature(f)
        vl.setLabeling(QgsVectorLayerSimpleLabeling(settings))
        vl.setLabelsEnabled(True)
        p = QgsProject()
        # Draw only label rects for stable control-image comparison
        engine_settings = QgsLabelingEngineSettings()
        engine_settings.setFlag(QgsLabelingEngineSettings.DrawLabelRectOnly, True)
        p.setLabelingEngineSettings(engine_settings)
        p.addMapLayer(vl)
        layout = QgsLayout(p)
        layout.initializeDefaults()
        p.setCrs(QgsCoordinateReferenceSystem('EPSG:4326'))
        # main map, which renders the labels
        map = QgsLayoutItemMap(layout)
        map.attemptSetSceneRect(QRectF(10, 10, 180, 180))
        map.setFrameEnabled(True)
        map.zoomToExtent(vl.extent())
        map.setLayers([vl])
        map.setId('map')
        layout.addLayoutItem(map)
        # two overlapping map items registered as label-blocking regions
        map2 = QgsLayoutItemMap(layout)
        map2.attemptSetSceneRect(QRectF(0, 5, 50, 80))
        map2.setFrameEnabled(True)
        map2.setBackgroundEnabled(False)
        map2.setId('map2')
        layout.addLayoutItem(map2)
        map3 = QgsLayoutItemMap(layout)
        map3.attemptSetSceneRect(QRectF(150, 160, 50, 50))
        map3.setFrameEnabled(True)
        map3.setBackgroundEnabled(False)
        map3.setId('map3')
        layout.addLayoutItem(map3)
        map.addLabelBlockingItem(map2)
        map.addLabelBlockingItem(map3)
        map.setMapFlags(QgsLayoutItemMap.MapItemFlags())
        checker = QgsLayoutChecker('composermap_label_blockers', layout)
        checker.setControlPathPrefix("composer_map")
        result, message = checker.testLayout()
        self.report += checker.report()
        self.assertTrue(result, message)
        # Round-trip through XML and check the blocking links are restored
        doc = QDomDocument("testdoc")
        elem = layout.writeXml(doc, QgsReadWriteContext())
        l2 = QgsLayout(p)
        self.assertTrue(l2.readXml(elem, doc, QgsReadWriteContext()))
        map_restore = [i for i in l2.items() if isinstance(i, QgsLayoutItemMap) and i.id() == 'map'][0]
        map2_restore = [i for i in l2.items() if isinstance(i, QgsLayoutItemMap) and i.id() == 'map2'][0]
        map3_restore = [i for i in l2.items() if isinstance(i, QgsLayoutItemMap) and i.id() == 'map3'][0]
        self.assertTrue(map_restore.isLabelBlockingItem(map2_restore))
        self.assertTrue(map_restore.isLabelBlockingItem(map3_restore))
# Run the whole test module when executed directly (rather than via pytest)
if __name__ == '__main__':
    unittest.main()
| geopython/QGIS | tests/src/python/test_qgslayoutmap.py | Python | gpl-2.0 | 16,013 |
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0103
"""BibEdit Templates."""
__revision__ = "$Id$"
from invenio.config import CFG_SITE_URL
from invenio.messages import gettext_set_language
class Template:

    """BibEdit Templates Class.

    Renders the server-side HTML fragments used by the BibEdit record
    editor: the left-hand menu panels (:meth:`menu`), the revision
    comparison box (:meth:`history_comparebox`), HTML escaping helpers
    (:meth:`clean_value`) and the "focus on" filter box (:meth:`focuson`).
    """

    def __init__(self):
        """Initialize."""
        pass

    def menu(self):
        """Create the menu.

        :return: the full ``<div id="bibEditMenu">`` HTML fragment
        """
        # -- "Record" section: search box, record browser, submit/cancel ----
        recordmenu = '<div class="bibEditMenuSectionHeader">\n' \
            '          %(imgCompressMenuSection)sRecord\n' \
            '          %(imgNewRecord)s\n' \
            '          %(imgCloneRecord)s\n' \
            '          %(imgTemplateRecord)s\n' \
            '        </div>\n' \
            '        <table>\n' \
            '          <col width="28px">\n' \
            '          <col width="40px">\n' \
            '          <col width="40px">\n' \
            '          <col width="28px">\n' \
            '          <tr>\n' \
            '            <td colspan="2">\n' \
            '              <form onsubmit="return false;">\n' \
            '                %(txtSearchPattern)s\n' \
            '              </form>\n' \
            '            <td colspan="2">%(sctSearchType)s</td>\n' \
            '          </tr>\n' \
            '          <tr>\n' \
            '            <td colspan="4">%(btnSearch)s</td>\n' \
            '          </tr>\n' \
            '          <tr id="rowRecordBrowser" style="display: none">\n' \
            '            <td>%(btnPrev)s</td>\n' \
            '            <td colspan="2" id="cellRecordNo"\n' \
            '              style="text-align: center">1/1</td>\n' \
            '            <td>%(btnNext)s</td>\n' \
            '          </tr>\n' \
            '          <tr>\n' \
            '            <td colspan="2">%(btnSubmit)s</td>\n' \
            '            <td colspan="2">%(btnCancel)s</td>\n' \
            '          </tr>\n' \
            '          <tr>\n' \
            '            <td id="tickets" colspan="4"><!--filled by bibedit_menu.js--></td>\n' \
            '          </tr>\n' \
            '          <tr class="bibEditMenuMore">\n' \
            '            <td>%(imgDeleteRecord)s</td>\n' \
            '            <td colspan="3">%(btnDeleteRecord)s</td>\n' \
            '          </tr>\n' \
            '          <tr class="bibEditMenuMore">\n' \
            '            <td>Switch to:</td>\n' \
            '            <td colspan="3">%(btnSwitchReadOnly)s</td>\n' \
            '          </tr>' \
            '        </table>' % {
                # fixed the class-name typo 'bibEditmenuMore' above so the
                # "Switch to" row collapses with the rest of the section
                'imgCompressMenuSection': img('/img/bullet_toggle_minus.png',
                    'bibEditImgCompressMenuSection', id='ImgRecordMenu'),
                'imgNewRecord': img('/img/table.png', 'bibEditImgCtrlEnabled',
                    id='imgNewRecord', title='New record'),
                'imgCloneRecord': img('/img/table_multiple.png',
                    'bibEditImgCtrlDisabled', id='imgCloneRecord',
                    title='Clone record'),
                'imgTemplateRecord': img('/img/page_edit.png',
                    'bibEditImgCtrlEnabled', id='imgTemplateRecord',
                    title='Manage templates'),
                'txtSearchPattern': inp('text', id='txtSearchPattern'),
                'sctSearchType': '<select id="sctSearchType">\n' \
                    '            <option value="recID">Rec ID</option>\n' \
                    '            <option value="reportnumber">Rep No</option>\n' \
                    '            <option value="anywhere">Anywhere</option>\n' \
                    '          </select>',
                'btnSearch': button('button', 'Search', 'bibEditBtnBold',
                    id='btnSearch'),
                # browser arrows must stay HTML-escaped: raw '<'/'>' would
                # break the generated markup
                'btnPrev': button('button', '&lt;', id='btnPrev', disabled='disabled'),
                'btnNext': button('button', '&gt;', id='btnNext', disabled='disabled'),
                'btnSubmit': button('button', 'Submit', 'bibEditBtnBold',
                    id='btnSubmit', disabled='disabled'),
                'btnCancel': button('button', 'Cancel', id='btnCancel',
                    disabled='disabled'),
                'imgDeleteRecord': img('/img/table_delete.png'),
                'btnDeleteRecord': button('button', 'Delete',
                    id='btnDeleteRecord', disabled='disabled'),
                'btnSwitchReadOnly': button('button', 'Read-only',
                    id='btnSwitchReadOnly')
                }

        # -- "Fields" section: add/delete field controls --------------------
        fieldmenu = '<div class="bibEditMenuSectionHeader">\n' \
            '          %(imgCompressMenuSection)sFields\n' \
            '        </div>\n' \
            '        <table class="bibEditMenuMore">\n' \
            '          <col width="28px">\n' \
            '          <col>\n' \
            '          <tr>\n' \
            '            <td>%(imgAddField)s</td>\n' \
            '            <td>%(btnAddField)s</td>\n' \
            '          </tr>\n' \
            '          <tr>\n' \
            '            <td>%(imgDeleteSelected)s</td>\n' \
            '            <td>%(btnDeleteSelected)s</td>\n' \
            '          </tr>\n' \
            '        </table>' % {
                'imgCompressMenuSection': img('/img/bullet_toggle_minus.png',
                    'bibEditImgCompressMenuSection', id='ImgFieldMenu'),
                'imgAddField': img('/img/table_row_insert.png'),
                'btnAddField': button('button', 'Add', id='btnAddField',
                    disabled='disabled'),
                'imgDeleteSelected': img('/img/table_row_delete.png'),
                'btnDeleteSelected': button('button', 'Delete selected',
                    id='btnDeleteSelected', disabled='disabled')}

        # -- "View" section: MARC / human-readable tag toggle ---------------
        viewmenu = '<div class="bibEditMenuSectionHeader">\n' \
            '          %(imgCompressMenuSection)sView\n' \
            '        </div>\n' \
            '        <table>\n' \
            '          <col width="68px">\n' \
            '          <col width="68px">\n' \
            '          <tr class="bibEditMenuMore">\n' \
            '            <td>%(btnTagMARC)s</td>\n' \
            '            <td>%(btnTagNames)s</td>\n' \
            '          </tr>\n' \
            '        </table>' % {
                'imgCompressMenuSection': img('/img/bullet_toggle_minus.png',
                    'bibEditImgCompressMenuSection', id='ImgViewMenu'),
                'btnTagMARC': button('button', 'MARC', id='btnMARCTags',
                    disabled='disabled'),
                'btnTagNames': button('button', 'Human', id='btnHumanTags',
                    disabled='disabled')
                }

        # -- "History" section: revision list filled client side ------------
        historymenu = '<div class="bibEditMenuSectionHeader">\n' \
            '          %(imgCompressMenuSection)sHistory\n' \
            '        </div>\n' \
            '        <div class="bibEditRevHistoryMenuSection">\n' \
            '          <table>\n' \
            '            <col width="136px">\n' \
            '            <tr class="bibEditMenuMore">\n' \
            '              <td id="bibEditRevisionsHistory"></td>' \
            '            </tr>\n' \
            '          </table>\n' \
            '        </div>\n' % {
                'imgCompressMenuSection': img('/img/bullet_toggle_minus.png',
                    'bibEditImgCompressMenuSection', id='ImgHistoryMenu')
                }

        # -- "Undo/Redo" section -------------------------------------------
        undoredosection = '<div class="bibEditMenuSectionHeader">\n' \
            '          %(imgCompressMenuSection)sUndo/Redo\n' \
            '        </div>\n<table>' \
            '        <tr class="bibEditMenuMore"><td>' \
            '          <div class="bibEditURMenuSection">\n' \
            '            <div class="bibEditURDetailsSection" id="bibEditURUndoListLayer">\n' \
            '              <div class="bibEditURButtonLayer"><button id="btnUndo" class="menu-btn">&lt;</button></div>\n' \
            '              <div id="undoOperationVisualisationField" class="bibEditHiddenElement bibEditURPreviewBox">\n' \
            '                <div id="undoOperationVisualisationFieldContent"></div>\n' \
            '              </div>\n' \
            '            </div>' \
            '            <div class="bibEditURDetailsSection" id="bibEditURRedoListLayer">\n' \
            '              <div class="bibEditURButtonLayer"><button id="btnRedo" class="menu-btn">&gt;</button></div>' \
            '              <div id="redoOperationVisualisationField" class="bibEditHiddenElement bibEditURPreviewBox">\n' \
            '                <div id="redoOperationVisualisationFieldContent"></div>' \
            '              </div>\n' \
            '            </div>\n' \
            '          </div></td></tr></table>\n' % {
                'imgCompressMenuSection': img('/img/bullet_toggle_minus.png',
                    'bibEditImgCompressMenuSection', id='ImgUndoRedoMenu')}

        # -- status area: AJAX activity indicator + status text -------------
        statusarea = '<table>\n' \
            '        <tr>\n' \
            '          <td id="cellIndicator">%(imgIndicator)s</td>\n' \
            '          <td id="cellStatus">%(lblChecking)s</td>\n' \
            '        </table>' % {
                'imgIndicator': img('/img/indicator.gif'),
                'lblChecking': 'Checking status' + '...'
                }

        # -- "Holding Pen" section: toolbar filled client side --------------
        holdingpenpanel = '<div class="bibEditMenuSectionHeader">\n' \
            '          %(imgCompressMenuSection)sHolding Pen\n' \
            '        <table class="bibEditMenuMore">\n<tr><td>' \
            '          <div id="bibEditHoldingPenToolbar"> ' \
            '            <div id="bibeditHPChanges"></div>' \
            '          </div> </td></tr></table>' \
            '        </div>\n' % {
                'imgCompressMenuSection': img('/img/bullet_toggle_minus.png',
                    'bibEditImgCompressMenuSection', id='ImgHoldingPenMenu')}

        # -- "Physical Copies" section: BibCirculation connection -----------
        bibcirculationpanel = \
            '<div class="bibEditMenuSection" ' \
            '     id="bibEditBibCircConnection">\n' \
            '<div class="bibEditMenuSectionHeader">\n' \
            '          %(imgCompressMenuSection)sPhysical Copies\n' \
            '        <table class="bibEditMenuMore">\n<tr><td ' \
            '             class="bibEditBibCircPanel">' \
            '          Number of copies: ' \
            '          <div id="bibEditBibCirculationCopies">0</div><br/>' \
            '          <button id="bibEditBibCirculationBtn" class="menu-btn">' \
            'Edit physical copies</button>' \
            '        </td></tr></table></div></div>' \
            % {
                'imgCompressMenuSection': img('/img/bullet_toggle_minus.png',
                    'bibEditImgCompressMenuSection', id='ImgBibCirculationMenu')
                }

        lnkSpecialChar = link('Special symbols', href='#', id='lnkSpecSymbols')
        # help link opens the admin guide in a popup window
        lnkhelp = img('/img/help.png', '', style='vertical-align: bottom') + \
            link('Help', href='#', onclick='window.open(' \
            '\'%s/help/admin/bibedit-admin-guide#2\', \'\', \'width=640,' \
            'height=600,left=150,top=150,resizable=yes,scrollbars=yes\');' \
            'return false;' % CFG_SITE_URL)

        # NOTE(review): the statusarea wrapper below uses id="bibEditMenuSection"
        # while every other wrapper uses class= -- looks like an upstream typo,
        # kept as-is to avoid breaking CSS/JS selectors.
        return '    <div id="bibEditMenu">\n' \
            '      <div class="bibEditMenuSection">\n' \
            '        %(recordmenu)s\n' \
            '      </div>\n' \
            '      <div class="bibEditMenuSection">\n' \
            '        %(fieldmenu)s\n' \
            '      </div>\n' \
            '      <div class="bibEditMenuSection">\n' \
            '        %(viewmenu)s\n' \
            '      </div>\n' \
            '      <div class="bibEditMenuSection">\n' \
            '        %(holdingpenpanel)s\n' \
            '      </div>' \
            '      <div class="bibEditMenuSection">\n' \
            '        %(undoredosection)s\n' \
            '      </div>\n' \
            '      <div class="bibEditMenuSection">\n' \
            '        %(historymenu)s\n' \
            '      </div>\n' \
            '      %(circulationmenu)s\n' \
            '      <div id="bibEditMenuSection">\n' \
            '        %(statusarea)s\n' \
            '      </div>\n' \
            '      <div class="bibEditMenuSection" align="right">\n' \
            '        %(lnkSpecialChar)s %(lnkhelp)s\n' \
            '      </div>\n' \
            '    </div>\n' % {
                'recordmenu': recordmenu,
                'viewmenu': viewmenu,
                'fieldmenu': fieldmenu,
                'statusarea': statusarea,
                'lnkhelp': lnkhelp,
                'lnkSpecialChar': lnkSpecialChar,
                'holdingpenpanel': holdingpenpanel,
                'historymenu': historymenu,
                'undoredosection': undoredosection,
                'circulationmenu': bibcirculationpanel
                }

    def history_comparebox(self, ln, revdate, revdate_cmp, comparison):
        """ Display the bibedit history comparison box.

        :param ln: language code used to translate the box labels
        :param revdate: date of the newer revision
        :param revdate_cmp: date of the older revision compared against
        :param comparison: pre-rendered HTML diff of the two revisions
        :return: the comparison box HTML
        """
        _ = gettext_set_language(ln)
        title = '<b>%(comp)s</b><br /><span class="diff_field_added">%(rev)s %(revdate)s</span>\
<br /><span class="diff_field_deleted">%(rev)s %(revdate_cmp)s</span>' % {
            'comp': _('Comparison of:'),
            'rev': _('Revision'),
            'revdate': revdate,
            'revdate_cmp': revdate_cmp}
        return '''
       <div class="bibEditHistCompare">
         <p>%s</p>
         <p>
           %s
         </p>
       </div>''' % (title, comparison)

    def clean_value(self, value, format):
        """ This function clean value for HTML interface and inverse.

        When ``format`` is anything but ``"html"``, escape the HTML
        metacharacters; when it is ``"html"``, reverse the escaping.
        (Restored the entity strings -- they had been corrupted into
        no-op replacements like ``replace('"', '"')``.)
        """
        if format != "html":
            value = value.replace('"', '&quot;')
            value = value.replace('<', '&lt;')
            value = value.replace('>', '&gt;')
        else:
            value = value.replace('&quot;', '"')
            value = value.replace('&lt;', '<')
            value = value.replace('&gt;', '>')
        return value

    def focuson(self):
        """Return the "Display" filter box (references/authors/others)."""
        html = """
        <div id='display_div'>
        <strong>Display</strong> <br />
        <ul id='focuson_list' class='list-plain'>
            <li>
                <input type="checkbox" name="references" id="focuson_references" value="references" checked/>
                <label for="focuson_references">References</label>
            </li>
            <li>
                <input type="checkbox" name="authors" id="focuson_authors" value="authors" checked/>
                <label for="focuson_authors">Authors</label>
            </li>
            <li>
                <input type="checkbox" name="others" id="focuson_others" value="others" checked/>
                <label for="focuson_others">Others</label>
            </li>
        </ul>
        </div>
        """
        return html
def img(src, _class='', **kargs):
    """Create an HTML <img> element.

    :param src: value of the ``src`` attribute
    :param _class: optional value of the ``class`` attribute
    :param kargs: any further attributes, rendered as ``key="value"``
    :return: the serialized ``<img ... />`` tag
    """
    src = 'src="%s" ' % src
    if _class:
        _class = 'class="%s" ' % _class
    # build the extra attributes in one pass instead of quadratic string +=
    args = ''.join('%s="%s" ' % (karg, value) for karg, value in kargs.items())
    return '<img %s%s%s/>' % (src, _class, args)
def inp(_type, _class='', **kargs):
    """Create an HTML <input> element of the given type.

    ``_class`` and any keyword arguments become additional attributes.
    """
    attributes = ['type="%s" ' % _type]
    if _class:
        attributes.append('class="%s" ' % _class)
    for name in kargs:
        attributes.append('%s="%s" ' % (name, kargs[name]))
    return '<input %s/>' % ''.join(attributes)
def button(_type, value, _class="", **kargs):
    """Create an HTML <button> element.

    Every button carries the base CSS class ``menu-btn``; ``_class``
    appends additional classes.  Keyword arguments become extra
    attributes rendered as ``key="value"``.
    """
    _type = 'type="%s" ' % _type
    class_result = "class='menu-btn "
    if _class:
        class_result += "%s' " % _class
    else:
        # close the quote AND keep a trailing space: without it the next
        # attribute was glued to the class (e.g. class='menu-btn 'id="btnPrev")
        class_result += "' "
    args = ''
    for karg in kargs:
        args += '%s="%s" ' % (karg, kargs[karg])
    return '<button %s%s%s>%s</button>' % (_type, class_result, args, value)
def link(value, _class='', **kargs):
    """Create an HTML <a> (link) element wrapping *value*."""
    class_attr = 'class="%s" ' % _class if _class else ''
    extra = ''.join('%s="%s" ' % (key, val) for key, val in kargs.items())
    return '<a %s%s>%s</a>' % (class_attr, extra, value)
| AlbertoPeon/invenio | modules/bibedit/lib/bibedit_templates.py | Python | gpl-2.0 | 16,853 |
#!/usr/bin/python
import os
from autotest.client import utils
version = 1
def setup(tarball, topdir):
    """Unpack, patch, build and install libaio under ``topdir``.

    :param tarball: path to the libaio source tarball
    :param topdir: dependency top directory; sources are extracted to
        ``topdir/src`` and the library installed with ``prefix=topdir``
    """
    srcdir = os.path.join(topdir, 'src')
    utils.extract_tarball_to_dir(tarball, srcdir)
    os.chdir(srcdir)
    # apply the Debian architecture patch (expected one level up, in topdir)
    utils.system('patch -p1 < ../00_arches.patch')
    utils.make()
    utils.make('prefix=%s install' % topdir)
    os.chdir(topdir)
# old source was
# http://www.kernel.org/pub/linux/kernel/people/bcrl/aio/libaio-0.3.92.tar.bz2
# now grabbing from debian
# http://ftp.debian.org/debian/pool/main/liba/libaio/libaio_0.3.106.orig.tar.gz
# architecture patch from here
# http://git.hadrons.org/?p=debian/pkgs/libaio.git;a=tree;f=debian/patches
pwd = os.getcwd()
tarball = os.path.join(pwd, 'libaio_0.3.106.orig.tar.gz')
# update_version only re-runs setup() when `version` changes (build cache)
utils.update_version(pwd+'/src', False, version, setup, tarball, pwd)
| spcui/autotest | client/deps/libaio/libaio.py | Python | gpl-2.0 | 809 |
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The iosxr_lacp class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to it's desired end-state is
created
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.network.common.cfg.base import ConfigBase
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.network.iosxr.facts.facts import Facts
from ansible.module_utils.network.common.utils import dict_diff
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.common.utils import remove_empties
from ansible.module_utils.network.iosxr. \
utils.utils import flatten_dict
class Lacp(ConfigBase):
    """
    The iosxr_lacp class.

    Compares the current device configuration (gathered as facts) with the
    configuration provided to the module, and generates the command set
    needed to reach the desired end state.
    """

    # facts subsets passed to Facts(): exclude the legacy 'all'/'min' subsets
    gather_subset = [
        '!all',
        '!min',
    ]

    # only the 'lacp' network resource facts are needed by this module
    gather_network_resources = [
        'lacp',
    ]

    def __init__(self, module):
        super(Lacp, self).__init__(module)

    def get_lacp_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
        lacp_facts = facts['ansible_network_resources'].get('lacp')
        if not lacp_facts:
            return {}
        return lacp_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        commands = list()
        warnings = list()
        existing_lacp_facts = self.get_lacp_facts()
        commands.extend(self.set_config(existing_lacp_facts))
        if commands:
            # in check mode the commands are reported but never pushed
            if not self._module.check_mode:
                self._connection.edit_config(commands)
            result['changed'] = True
        result['commands'] = commands
        changed_lacp_facts = self.get_lacp_facts()
        result['before'] = existing_lacp_facts
        # 'after' is only meaningful when something actually changed
        if result['changed']:
            result['after'] = changed_lacp_facts
        result['warnings'] = warnings
        return result

    def set_config(self, existing_lacp_facts):
        """ Collect the configuration from the args passed to the module,
            collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params.get('config')
        if not want:
            want = {}
        have = existing_lacp_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        # valid states are constrained by the module's argument spec
        state = self._module.params['state']
        if state == 'deleted':
            commands = self._state_deleted(want, have)
        elif state == 'merged':
            commands = self._state_merged(want, have)
        elif state == 'replaced':
            commands = self._state_replaced(want, have)
        return commands

    @staticmethod
    def _state_replaced(want, have):
        """ The command generator when state is replaced

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        commands = []
        # replaced = delete everything not wanted, then merge the wanted config
        commands.extend(
            Lacp._state_deleted(want, have)
        )
        commands.extend(
            Lacp._state_merged(want, have)
        )
        return commands

    @staticmethod
    def _state_merged(want, have):
        """ The command generator when state is merged

        :rtype: A list
        :returns: the commands necessary to merge the provided into
                  the current configuration
        """
        commands = []
        updates = dict_diff(have, want)
        if updates:
            # e.g. {'system': {'mac': {'address': X}}} -> 'lacp system mac X'
            for key, value in iteritems(flatten_dict(remove_empties(updates['system']))):
                commands.append('lacp system {0} {1}'.format(key.replace('address', 'mac'), value))
        return commands

    @staticmethod
    def _state_deleted(want, have):
        """ The command generator when state is deleted

        :rtype: A list
        :returns: the commands necessary to remove the current configuration
                  of the provided objects
        """
        commands = []
        # negate every currently-set system attribute that is absent from want
        for x in [k for k in have.get('system', {}) if k not in remove_empties(want.get('system', {}))]:
            commands.append('no lacp system {0}'.format(x))
        return commands
| resmo/ansible | lib/ansible/module_utils/network/iosxr/config/lacp/lacp.py | Python | gpl-3.0 | 5,221 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.exceptions import UserError
import pytz
from datetime import datetime, date
from dateutil.relativedelta import relativedelta
from odoo.tests.common import TransactionCase
class TestRecurrentEvents(TransactionCase):
    """Common fixture for the recurrence tests: force a Monday week start and
    provide a helper comparing an event recordset against expected dates."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        lang = cls.env['res.lang']._lang_get(cls.env.user.lang)
        lang.week_start = '1'  # Monday

    def assertEventDates(self, events, dates):
        """Assert that `events` (a calendar.event recordset), sorted by start,
        matches the expected list of (start, stop) datetime pairs and that
        every event is active."""
        events = events.sorted('start')
        self.assertEqual(len(events), len(dates), "Wrong number of events in the recurrence")
        self.assertTrue(all(events.mapped('active')), "All events should be active")
        # unpack each expected pair directly; the original loop rebound the
        # `dates` parameter name inside the loop, shadowing it
        for event, (start, stop) in zip(events, dates):
            self.assertEqual(event.start, start)
            self.assertEqual(event.stop, stop)
class TestCreateRecurrentEvents(TestRecurrentEvents):
    """Creation of recurrences: weekly/monthly/yearly rules, count vs until
    end conditions, and daylight-saving-time edge cases."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # base 3-day event; each test applies different recurrence values to it
        cls.event = cls.env['calendar.event'].create({
            'name': 'Recurrent Event',
            'start': datetime(2019, 10, 21, 8, 0),
            'stop': datetime(2019, 10, 23, 18, 0),
            'recurrency': True,
        })

    def test_weekly_count(self):
        """ Every week, on Tuesdays, for 3 occurences """
        detached_events = self.event._apply_recurrence_values({
            'rrule_type': 'weekly',
            'tue': True,
            'interval': 1,
            'count': 3,
            'event_tz': 'UTC',
        })
        self.assertEqual(detached_events, self.event, "It should be detached from the recurrence")
        self.assertFalse(self.event.recurrence_id, "It should be detached from the recurrence")
        recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
        events = recurrence.calendar_event_ids
        self.assertEqual(len(events), 3, "It should have 3 events in the recurrence")
        self.assertEventDates(events, [
            (datetime(2019, 10, 22, 8, 0), datetime(2019, 10, 24, 18, 0)),
            (datetime(2019, 10, 29, 8, 0), datetime(2019, 10, 31, 18, 0)),
            (datetime(2019, 11, 5, 8, 0), datetime(2019, 11, 7, 18, 0)),
        ])

    def test_weekly_interval_2(self):
        """ Every 2 weeks, on Tuesdays, for 2 occurences """
        self.event._apply_recurrence_values({
            'interval': 2,
            'rrule_type': 'weekly',
            'tue': True,
            'count': 2,
            'event_tz': 'UTC',
        })
        recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
        events = recurrence.calendar_event_ids
        self.assertEventDates(events, [
            (datetime(2019, 10, 22, 8, 0), datetime(2019, 10, 24, 18, 0)),
            (datetime(2019, 11, 5, 8, 0), datetime(2019, 11, 7, 18, 0)),
        ])

    def test_weekly_interval_2_week_start_sunday(self):
        """ Same as above, but dates must not shift when weeks start on Sunday """
        lang = self.env['res.lang']._lang_get(self.env.user.lang)
        lang.week_start = '7'  # Sunday
        self.event._apply_recurrence_values({
            'interval': 2,
            'rrule_type': 'weekly',
            'tue': True,
            'count': 2,
            'event_tz': 'UTC',
        })
        recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
        events = recurrence.calendar_event_ids
        self.assertEventDates(events, [
            (datetime(2019, 10, 22, 8, 0), datetime(2019, 10, 24, 18, 0)),
            (datetime(2019, 11, 5, 8, 0), datetime(2019, 11, 7, 18, 0)),
        ])
        # restore the Monday week start for the other tests
        lang.week_start = '1'  # Monday

    def test_weekly_until(self):
        """ Every 2 weeks, on Tuesdays, until 15th November 2019 """
        self.event._apply_recurrence_values({
            'rrule_type': 'weekly',
            'tue': True,
            'interval': 2,
            'end_type': 'end_date',
            'until': datetime(2019, 11, 15),
            'event_tz': 'UTC',
        })
        recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
        events = recurrence.calendar_event_ids
        self.assertEqual(len(events), 2, "It should have 2 events in the recurrence")
        self.assertEventDates(events, [
            (datetime(2019, 10, 22, 8, 0), datetime(2019, 10, 24, 18, 0)),
            (datetime(2019, 11, 5, 8, 0), datetime(2019, 11, 7, 18, 0)),
        ])

    def test_monthly_count_by_date(self):
        """ Every 2 months, on the 27th, for 3 occurences """
        self.event._apply_recurrence_values({
            'rrule_type': 'monthly',
            'interval': 2,
            'month_by': 'date',
            'day': 27,
            'end_type': 'count',
            'count': 3,
            'event_tz': 'UTC',
        })
        recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
        events = recurrence.calendar_event_ids
        self.assertEqual(len(events), 3, "It should have 3 events in the recurrence")
        self.assertEventDates(events, [
            (datetime(2019, 10, 27, 8, 0), datetime(2019, 10, 29, 18, 0)),
            (datetime(2019, 12, 27, 8, 0), datetime(2019, 12, 29, 18, 0)),
            (datetime(2020, 2, 27, 8, 0), datetime(2020, 2, 29, 18, 0)),
        ])

    def test_monthly_count_by_date_31(self):
        """ Every month on the 31st: months without a 31st are skipped """
        self.event._apply_recurrence_values({
            'rrule_type': 'monthly',
            'interval': 1,
            'month_by': 'date',
            'day': 31,
            'end_type': 'count',
            'count': 3,
            'event_tz': 'UTC',
        })
        recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
        events = recurrence.calendar_event_ids
        self.assertEqual(len(events), 3, "It should have 3 events in the recurrence")
        self.assertEventDates(events, [
            (datetime(2019, 10, 31, 8, 0), datetime(2019, 11, 2, 18, 0)),
            # Missing 31th in November
            (datetime(2019, 12, 31, 8, 0), datetime(2020, 1, 2, 18, 0)),
            (datetime(2020, 1, 31, 8, 0), datetime(2020, 2, 2, 18, 0)),
        ])

    def test_monthly_until_by_day(self):
        """ Every 2 months, on the third Tuesday, until 27th March 2020 """
        self.event.start = datetime(2019, 10, 1, 8, 0)
        self.event.stop = datetime(2019, 10, 3, 18, 0)
        self.event._apply_recurrence_values({
            'rrule_type': 'monthly',
            'interval': 2,
            'month_by': 'day',
            'byday': '3',
            'weekday': 'TUE',
            'end_type': 'end_date',
            'until': date(2020, 3, 27),
            'event_tz': 'UTC',
        })
        recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
        events = recurrence.calendar_event_ids
        self.assertEqual(len(events), 3, "It should have 3 events in the recurrence")
        self.assertEventDates(events, [
            (datetime(2019, 10, 15, 8, 0), datetime(2019, 10, 17, 18, 0)),
            (datetime(2019, 12, 17, 8, 0), datetime(2019, 12, 19, 18, 0)),
            (datetime(2020, 2, 18, 8, 0), datetime(2020, 2, 20, 18, 0)),
        ])

    def test_monthly_until_by_day_last(self):
        """ Every 2 months, on the last Wednesday, until 15th January 2020 """
        self.event._apply_recurrence_values({
            'interval': 2,
            'rrule_type': 'monthly',
            'month_by': 'day',
            'weekday': 'WED',
            'byday': '-1',
            'end_type': 'end_date',
            'until': date(2020, 1, 15),
            'event_tz': 'UTC',
        })
        recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
        events = recurrence.calendar_event_ids
        # NOTE(review): assertion message is stale -- 2 events are expected here
        self.assertEqual(len(events), 2, "It should have 3 events in the recurrence")
        self.assertEventDates(events, [
            (datetime(2019, 10, 30, 8, 0), datetime(2019, 11, 1, 18, 0)),
            (datetime(2019, 12, 25, 8, 0), datetime(2019, 12, 27, 18, 0)),
        ])

    def test_yearly_count(self):
        """ Every 2 years, for 2 occurences """
        self.event._apply_recurrence_values({
            'interval': 2,
            'rrule_type': 'yearly',
            'count': 2,
            'event_tz': 'UTC',
        })
        events = self.event.recurrence_id.calendar_event_ids
        # NOTE(review): assertion message is stale -- 2 events are expected here
        self.assertEqual(len(events), 2, "It should have 3 events in the recurrence")
        self.assertEventDates(events, [
            (self.event.start, self.event.stop),
            (self.event.start + relativedelta(years=2), self.event.stop + relativedelta(years=2)),
        ])

    def test_dst_timezone(self):
        """ Test hours stays the same, regardless of DST changes """
        self.event.start = datetime(2002, 10, 28, 10, 0)
        self.event.stop = datetime(2002, 10, 28, 12, 0)
        self.event._apply_recurrence_values({
            'interval': 2,
            'rrule_type': 'weekly',
            'mon': True,
            'count': '2',
            'event_tz': 'US/Eastern',  # DST change on 2002/10/27
        })
        recurrence = self.env['calendar.recurrence'].search([('base_event_id', '=', self.event.id)])
        self.assertEventDates(recurrence.calendar_event_ids, [
            (datetime(2002, 10, 28, 10, 0), datetime(2002, 10, 28, 12, 0)),
            (datetime(2002, 11, 11, 10, 0), datetime(2002, 11, 11, 12, 0)),
        ])

    def test_ambiguous_dst_time_winter(self):
        """ Test hours stays the same, regardless of DST changes """
        eastern = pytz.timezone('US/Eastern')
        dt = eastern.localize(datetime(2002, 10, 20, 1, 30, 00)).astimezone(pytz.utc).replace(tzinfo=None)
        # Next occurence happens at 1:30am on 27th Oct 2002 which happened twice in the US/Eastern
        # timezone when the clocks where put back at the end of Daylight Saving Time
        self.event.start = dt
        self.event.stop = dt + relativedelta(hours=1)
        self.event._apply_recurrence_values({
            'interval': 1,
            'rrule_type': 'weekly',
            'sun': True,
            'count': '2',
            'event_tz': 'US/Eastern'  # DST change on 2002/4/7
        })
        events = self.event.recurrence_id.calendar_event_ids
        self.assertEqual(events.mapped('duration'), [1, 1])
        self.assertEventDates(events, [
            (datetime(2002, 10, 20, 5, 30), datetime(2002, 10, 20, 6, 30)),
            (datetime(2002, 10, 27, 6, 30), datetime(2002, 10, 27, 7, 30)),
        ])

    def test_ambiguous_dst_time_spring(self):
        """ Test hours stays the same, regardless of DST changes """
        eastern = pytz.timezone('US/Eastern')
        dt = eastern.localize(datetime(2002, 3, 31, 2, 30, 00)).astimezone(pytz.utc).replace(tzinfo=None)
        # Next occurence happens 2:30am on 7th April 2002 which never happened at all in the
        # US/Eastern timezone, as the clocks where put forward at 2:00am skipping the entire hour
        self.event.start = dt
        self.event.stop = dt + relativedelta(hours=1)
        self.event._apply_recurrence_values({
            'interval': 1,
            'rrule_type': 'weekly',
            'sun': True,
            'count': '2',
            'event_tz': 'US/Eastern'  # DST change on 2002/4/7
        })
        events = self.event.recurrence_id.calendar_event_ids
        self.assertEqual(events.mapped('duration'), [1, 1])
        # The event begins at "the same time" (i.e. 2h30 after midnight), but that day, 2h30 after midnight happens to be at 3:30 am
        self.assertEventDates(events, [
            (datetime(2002, 3, 31, 7, 30), datetime(2002, 3, 31, 8, 30)),
            (datetime(2002, 4, 7, 7, 30), datetime(2002, 4, 7, 8, 30)),
        ])

    def test_ambiguous_full_day(self):
        """ Test date stays the same, regardless of DST changes """
        self.event.write({
            'start': datetime(2020, 3, 23, 0, 0),
            'stop': datetime(2020, 3, 23, 23, 59),
        })
        self.event.allday = True
        self.event._apply_recurrence_values({
            'interval': 1,
            'rrule_type': 'weekly',
            'mon': True,
            'count': 2,
            'event_tz': 'Europe/Brussels'  # DST change on 2020/3/23
        })
        events = self.event.recurrence_id.calendar_event_ids
        self.assertEventDates(events, [
            (datetime(2020, 3, 23, 0, 0), datetime(2020, 3, 23, 23, 59)),
            (datetime(2020, 3, 30, 0, 0), datetime(2020, 3, 30, 23, 59)),
        ])
class TestUpdateRecurrentEvents(TestRecurrentEvents):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # weekly Tuesday recurrence with 3 occurrences; tests below edit
        # individual occurrences and check how the recurrence splits
        event = cls.env['calendar.event'].create({
            'name': 'Recurrent Event',
            'start': datetime(2019, 10, 22, 1, 0),
            'stop': datetime(2019, 10, 24, 18, 0),
            'recurrency': True,
            'rrule_type': 'weekly',
            'tue': True,
            'interval': 1,
            'count': 3,
            'event_tz': 'Etc/GMT-4',
        })
        cls.recurrence = event.recurrence_id
        cls.events = event.recurrence_id.calendar_event_ids.sorted('start')
def test_shift_future(self):
event = self.events[1]
self.events[1].write({
'recurrence_update': 'future_events',
'start': event.start + relativedelta(days=4),
'stop': event.stop + relativedelta(days=5),
})
self.assertEqual(self.recurrence.end_type, 'end_date')
self.assertEqual(self.recurrence.until, date(2019, 10, 27))
self.assertEventDates(self.recurrence.calendar_event_ids, [
(datetime(2019, 10, 22, 1, 0), datetime(2019, 10, 24, 18, 0)),
])
new_recurrence = event.recurrence_id
self.assertNotEqual(self.recurrence, new_recurrence)
self.assertEqual(new_recurrence.count, 2)
self.assertEqual(new_recurrence.dtstart, datetime(2019, 11, 2, 1, 0))
self.assertFalse(new_recurrence.tue)
self.assertTrue(new_recurrence.sat)
self.assertEventDates(new_recurrence.calendar_event_ids, [
(datetime(2019, 11, 2, 1, 0), datetime(2019, 11, 5, 18, 0)),
(datetime(2019, 11, 9, 1, 0), datetime(2019, 11, 12, 18, 0)),
])
def test_shift_future_first(self):
event = self.events[0]
self.events[0].write({
'recurrence_update': 'future_events',
'start': event.start + relativedelta(days=4),
'stop': event.stop + relativedelta(days=5),
})
new_recurrence = event.recurrence_id
self.assertFalse(self.recurrence.exists())
self.assertEqual(new_recurrence.count, 3)
self.assertEqual(new_recurrence.dtstart, datetime(2019, 10, 26, 1, 0))
self.assertFalse(new_recurrence.tue)
self.assertTrue(new_recurrence.sat)
self.assertEventDates(new_recurrence.calendar_event_ids, [
(datetime(2019, 10, 26, 1, 0), datetime(2019, 10, 29, 18, 0)),
(datetime(2019, 11, 2, 1, 0), datetime(2019, 11, 5, 18, 0)),
(datetime(2019, 11, 9, 1, 0), datetime(2019, 11, 12, 18, 0)),
])
def test_shift_reapply(self):
event = self.events[2]
self.events[2].write({
'recurrence_update': 'future_events',
'start': event.start + relativedelta(days=4),
'stop': event.stop + relativedelta(days=5),
})
# re-Applying the first recurrence should be idempotent
self.recurrence._apply_recurrence()
self.assertEventDates(self.recurrence.calendar_event_ids, [
(datetime(2019, 10, 22, 1, 0), datetime(2019, 10, 24, 18, 0)),
(datetime(2019, 10, 29, 1, 0), datetime(2019, 10, 31, 18, 0)),
])
def test_shift_all(self):
event = self.events[1]
self.assertEventDates(event.recurrence_id.calendar_event_ids, [
(datetime(2019, 10, 22, 1, 0), datetime(2019, 10, 24, 18, 0)),
(datetime(2019, 10, 29, 1, 0), datetime(2019, 10, 31, 18, 0)),
(datetime(2019, 11, 5, 1, 0), datetime(2019, 11, 7, 18, 0)),
])
event.write({
'recurrence_update': 'all_events',
'tue': False,
'fri': False,
'sat': True,
'start': event.start + relativedelta(days=4),
'stop': event.stop + relativedelta(days=5),
})
recurrence = self.env['calendar.recurrence'].search([])
self.assertEventDates(recurrence.calendar_event_ids, [
(datetime(2019, 10, 26, 1, 0), datetime(2019, 10, 29, 18, 0)),
(datetime(2019, 11, 2, 1, 0), datetime(2019, 11, 5, 18, 0)),
(datetime(2019, 11, 9, 1, 0), datetime(2019, 11, 12, 18, 0)),
])
def test_change_week_day_rrule(self):
recurrence = self.events.recurrence_id
recurrence.rrule = 'FREQ=WEEKLY;COUNT=3;BYDAY=WE' # from TU to WE
self.assertFalse(self.recurrence.tue)
self.assertTrue(self.recurrence.wed)
def test_shift_all_base_inactive(self):
self.recurrence.base_event_id.active = False
event = self.events[1]
event.write({
'recurrence_update': 'all_events',
'start': event.start + relativedelta(days=4),
'stop': event.stop + relativedelta(days=5),
})
self.assertFalse(self.recurrence.calendar_event_ids, "Inactive event should not create recurrent events")
def test_shift_all_with_outlier(self):
outlier = self.events[1]
outlier.write({
'recurrence_update': 'self_only',
'start': datetime(2019, 10, 31, 1, 0), # Thursday
'stop': datetime(2019, 10, 31, 18, 0),
})
event = self.events[0]
event.write({
'recurrence_update': 'all_events',
'tue': False,
'fri': False,
'sat': True,
'start': event.start + relativedelta(days=4),
'stop': event.stop + relativedelta(days=4),
})
self.assertEventDates(event.recurrence_id.calendar_event_ids, [
(datetime(2019, 10, 26, 1, 0), datetime(2019, 10, 28, 18, 0)),
(datetime(2019, 11, 2, 1, 0), datetime(2019, 11, 4, 18, 0)),
(datetime(2019, 11, 9, 1, 0), datetime(2019, 11, 11, 18, 0))
])
self.assertFalse(outlier.exists(), 'The outlier should have been deleted')
def test_update_recurrence_future(self):
event = self.events[1]
event.write({
'recurrence_update': 'future_events',
'fri': True, # recurrence is now Tuesday AND Friday
'count': 4,
})
self.assertEventDates(self.recurrence.calendar_event_ids, [
(datetime(2019, 10, 22, 1, 0), datetime(2019, 10, 24, 18, 0)), # Tu
])
self.assertEventDates(event.recurrence_id.calendar_event_ids, [
(datetime(2019, 10, 29, 1, 0), datetime(2019, 10, 31, 18, 0)), # Tu
(datetime(2019, 11, 1, 1, 0), datetime(2019, 11, 3, 18, 0)), # Fr
(datetime(2019, 11, 5, 1, 0), datetime(2019, 11, 7, 18, 0)), # Tu
(datetime(2019, 11, 8, 1, 0), datetime(2019, 11, 10, 18, 0)), # Fr
])
events = event.recurrence_id.calendar_event_ids.sorted('start')
self.assertEqual(events[0], self.events[1], "Events on Tuesdays should not have changed")
self.assertEqual(events[2], self.events[2], "Events on Tuesdays should not have changed")
self.assertNotEqual(events.recurrence_id, self.recurrence, "Events should no longer be linked to the original recurrence")
self.assertEqual(events.recurrence_id.count, 4, "The new recurrence should have 4")
self.assertTrue(event.recurrence_id.tue)
self.assertTrue(event.recurrence_id.fri)
def test_update_recurrence_all(self):
self.events[1].write({
'recurrence_update': 'all_events',
'mon': True, # recurrence is now Tuesday AND Monday
})
recurrence = self.env['calendar.recurrence'].search([])
self.assertEventDates(recurrence.calendar_event_ids, [
(datetime(2019, 10, 22, 1, 0), datetime(2019, 10, 24, 18, 0)),
(datetime(2019, 10, 28, 1, 0), datetime(2019, 10, 30, 18, 0)),
(datetime(2019, 10, 29, 1, 0), datetime(2019, 10, 31, 18, 0)),
])
def test_shift_single(self):
event = self.events[1]
event.write({
'recurrence_update': 'self_only',
'name': "Updated event",
'start': event.start - relativedelta(hours=2)
})
self.events[0].write({
'recurrence_update': 'future_events',
'start': event.start + relativedelta(hours=4),
'stop': event.stop + relativedelta(hours=5),
})
def test_break_recurrence_future(self):
event = self.events[1]
event.write({
'recurrence_update': 'future_events',
'recurrency': False,
})
self.assertFalse(event.recurrence_id)
self.assertTrue(self.events[0].active)
self.assertTrue(self.events[1].active)
self.assertFalse(self.events[2].exists())
self.assertEqual(self.recurrence.until, date(2019, 10, 27))
self.assertEqual(self.recurrence.end_type, 'end_date')
self.assertEventDates(self.recurrence.calendar_event_ids, [
(datetime(2019, 10, 22, 1, 0), datetime(2019, 10, 24, 18, 0)),
])
def test_break_recurrence_all(self):
event = self.events[1]
event.write({
'recurrence_update': 'all_events',
'recurrency': False,
'count': 0, # In practice, JS framework sends updated recurrency fields, since they have been recomputed, triggered by the `recurrency` change
})
self.assertFalse(self.events[0].exists())
self.assertTrue(event.active)
self.assertFalse(self.events[2].exists())
self.assertFalse(event.recurrence_id)
self.assertFalse(self.recurrence.exists())
def test_all_day_shift(self):
recurrence = self.env['calendar.event'].create({
'name': 'Recurrent Event',
'start_date': datetime(2019, 10, 22),
'stop_date': datetime(2019, 10, 24),
'recurrency': True,
'rrule_type': 'weekly',
'tue': True,
'interval': 1,
'count': 3,
'event_tz': 'Etc/GMT-4',
'allday': True,
}).recurrence_id
events = recurrence.calendar_event_ids.sorted('start')
event = events[1]
event.write({
'recurrence_update': 'future_events',
'start': event.start + relativedelta(days=4),
'stop': event.stop + relativedelta(days=5),
})
self.assertEqual(recurrence.end_type, 'end_date')
self.assertEqual(recurrence.until, date(2019, 10, 27))
self.assertEventDates(recurrence.calendar_event_ids, [
(datetime(2019, 10, 22, 8, 0), datetime(2019, 10, 24, 18, 0)),
])
new_recurrence = event.recurrence_id
self.assertNotEqual(recurrence, new_recurrence)
self.assertEqual(new_recurrence.count, 2)
self.assertEqual(new_recurrence.dtstart, datetime(2019, 11, 2, 8, 0))
self.assertFalse(new_recurrence.tue)
self.assertTrue(new_recurrence.sat)
self.assertEventDates(new_recurrence.calendar_event_ids, [
(datetime(2019, 11, 2, 8, 0), datetime(2019, 11, 5, 18, 0)),
(datetime(2019, 11, 9, 8, 0), datetime(2019, 11, 12, 18, 0)),
])
def test_archive_recurrence_all(self):
self.events[1].action_mass_archive('all_events')
self.assertEqual([False, False, False], self.events.mapped('active'))
def test_archive_recurrence_future(self):
event = self.events[1]
event.action_mass_archive('future_events')
self.assertEqual([True, False, False], self.events.mapped('active'))
def test_unlink_recurrence_all(self):
event = self.events[1]
event.action_mass_deletion('all_events')
self.assertFalse(self.recurrence.exists())
self.assertFalse(self.events.exists())
def test_unlink_recurrence_future(self):
event = self.events[1]
event.action_mass_deletion('future_events')
self.assertTrue(self.recurrence)
self.assertEqual(self.events.exists(), self.events[0])
class TestUpdateMultiDayWeeklyRecurrentEvents(TestRecurrentEvents):
    """Updates on a weekly recurrence spanning two weekdays (Tuesday and
    Friday, 3 occurrences)."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        event = cls.env['calendar.event'].create({
            'name': 'Recurrent Event',
            'start': datetime(2019, 10, 22, 1, 0),
            'stop': datetime(2019, 10, 24, 18, 0),
            'recurrency': True,
            'rrule_type': 'weekly',
            'tue': True,
            'fri': True,
            'interval': 1,
            'count': 3,
            'event_tz': 'Etc/GMT-4',
        })
        cls.recurrence = event.recurrence_id
        cls.events = event.recurrence_id.calendar_event_ids.sorted('start')
        # Tuesday datetime(2019, 10, 22, 1, 0)
        # Friday datetime(2019, 10, 25, 1, 0)
        # Tuesday datetime(2019, 10, 29, 1, 0)
    def test_shift_all_multiple_weekdays(self):
        event = self.events[0]  # Tuesday
        # We go from 2 days a week Tuesday and Friday to one day a week, Thursday
        event.write({
            'recurrence_update': 'all_events',
            'tue': False,
            'thu': True,
            'fri': False,
            'start': event.start + relativedelta(days=2),
            'stop': event.stop + relativedelta(days=2),
        })
        recurrence = self.env['calendar.recurrence'].search([])
        # We don't try to do magic tricks. First event is moved, other remain
        self.assertEventDates(recurrence.calendar_event_ids, [
            (datetime(2019, 10, 24, 1, 0), datetime(2019, 10, 26, 18, 0)),
            (datetime(2019, 10, 31, 1, 0), datetime(2019, 11, 2, 18, 0)),
            (datetime(2019, 11, 7, 1, 0), datetime(2019, 11, 9, 18, 0)),
        ])
    def test_shift_all_multiple_weekdays_duration(self):
        # Same as above, but the shift also lengthens the event by one day.
        event = self.events[0]  # Tuesday
        event.write({
            'recurrence_update': 'all_events',
            'tue': False,
            'thu': True,
            'fri': False,
            'start': event.start + relativedelta(days=2),
            'stop': event.stop + relativedelta(days=3),
        })
        recurrence = self.env['calendar.recurrence'].search([])
        self.assertEventDates(recurrence.calendar_event_ids, [
            (datetime(2019, 10, 24, 1, 0), datetime(2019, 10, 27, 18, 0)),
            (datetime(2019, 10, 31, 1, 0), datetime(2019, 11, 3, 18, 0)),
            (datetime(2019, 11, 7, 1, 0), datetime(2019, 11, 10, 18, 0)),
        ])
    def test_shift_future_multiple_weekdays(self):
        # Shifting a Friday occurrence to Monday: the new recurrence keeps
        # Tuesday, swaps Friday for Monday, and covers the 2 remaining events.
        event = self.events[1]  # Friday
        event.write({
            'recurrence_update': 'future_events',
            'start': event.start + relativedelta(days=3),
            'stop': event.stop + relativedelta(days=3),
        })
        self.assertTrue(self.recurrence.fri)
        self.assertTrue(self.recurrence.tue)
        self.assertTrue(event.recurrence_id.tue)
        self.assertTrue(event.recurrence_id.mon)
        self.assertFalse(event.recurrence_id.fri)
        self.assertEqual(event.recurrence_id.count, 2)
class TestUpdateMonthlyByDay(TestRecurrentEvents):
    """Updates on a monthly recurrence anchored on a weekday position
    (every 3rd Tuesday of the month, 3 occurrences)."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        event = cls.env['calendar.event'].create({
            'name': 'Recurrent Event',
            'start': datetime(2019, 10, 15, 1, 0),
            'stop': datetime(2019, 10, 16, 18, 0),
            'recurrency': True,
            'rrule_type': 'monthly',
            'interval': 1,
            'count': 3,
            'month_by': 'day',
            'weekday': 'TUE',
            'byday': '3',
            'event_tz': 'Etc/GMT-4',
        })
        cls.recurrence = event.recurrence_id
        cls.events = event.recurrence_id.calendar_event_ids.sorted('start')
        # datetime(2019, 10, 15, 1, 0)
        # datetime(2019, 11, 19, 1, 0)
        # datetime(2019, 12, 17, 1, 0)
    def test_shift_all(self):
        # A pure time-of-day shift must keep every occurrence on the 3rd
        # Tuesday of its month.
        event = self.events[1]
        event.write({
            'recurrence_update': 'all_events',
            'start': event.start + relativedelta(hours=5),
            'stop': event.stop + relativedelta(hours=5),
        })
        recurrence = self.env['calendar.recurrence'].search([])
        self.assertEventDates(recurrence.calendar_event_ids, [
            (datetime(2019, 10, 15, 6, 0), datetime(2019, 10, 16, 23, 0)),
            (datetime(2019, 11, 19, 6, 0), datetime(2019, 11, 20, 23, 0)),
            (datetime(2019, 12, 17, 6, 0), datetime(2019, 12, 18, 23, 0)),
        ])
class TestUpdateMonthlyByDate(TestRecurrentEvents):
    """Updates on a monthly recurrence anchored on a fixed day-of-month
    (the 22nd of each month, 3 occurrences)."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        event = cls.env['calendar.event'].create({
            'name': 'Recurrent Event',
            'start': datetime(2019, 10, 22, 1, 0),
            'stop': datetime(2019, 10, 24, 18, 0),
            'recurrency': True,
            'rrule_type': 'monthly',
            'interval': 1,
            'count': 3,
            'month_by': 'date',
            'day': 22,
            'event_tz': 'Etc/GMT-4',
        })
        cls.recurrence = event.recurrence_id
        cls.events = event.recurrence_id.calendar_event_ids.sorted('start')
        # datetime(2019, 10, 22, 1, 0)
        # datetime(2019, 11, 22, 1, 0)
        # datetime(2019, 12, 22, 1, 0)
    def test_shift_future(self):
        # Moving the 2nd occurrence from the 22nd to the 26th splits the
        # recurrence; the new one fires on the 26th of each month.
        event = self.events[1]
        event.write({
            'recurrence_update': 'future_events',
            'start': event.start + relativedelta(days=4),
            'stop': event.stop + relativedelta(days=5),
        })
        self.assertEventDates(self.recurrence.calendar_event_ids, [
            (datetime(2019, 10, 22, 1, 0), datetime(2019, 10, 24, 18, 0)),
        ])
        self.assertEventDates(event.recurrence_id.calendar_event_ids, [
            (datetime(2019, 11, 26, 1, 0), datetime(2019, 11, 29, 18, 0)),
            (datetime(2019, 12, 26, 1, 0), datetime(2019, 12, 29, 18, 0)),
        ])
    def test_update_all(self):
        # Changing the anchor day with 'all_events' rewrites every occurrence.
        event = self.events[1]
        event.write({
            'recurrence_update': 'all_events',
            'day': 25,
        })
        recurrence = self.env['calendar.recurrence'].search([('day', '=', 25)])
        self.assertEventDates(recurrence.calendar_event_ids, [
            (datetime(2019, 10, 25, 1, 0), datetime(2019, 10, 27, 18, 0)),
            (datetime(2019, 11, 25, 1, 0), datetime(2019, 11, 27, 18, 0)),
            (datetime(2019, 12, 25, 1, 0), datetime(2019, 12, 27, 18, 0)),
        ])
| jeremiahyan/odoo | addons/calendar/tests/test_event_recurrence.py | Python | gpl-3.0 | 30,782 |
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import tryUrlencode, ss
from couchpotato.core.helpers.variable import cleanHost, mergeDicts
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from datetime import timedelta
from urllib2 import URLError
import json
import traceback
# Module-level logger for this downloader plugin.
log = CPLog(__name__)
class Sabnzbd(Downloader):
    """Downloader plugin for SABnzbd: submits NZBs (by URL or as raw data)
    through the SABnzbd HTTP API and reports queue/history status back."""

    type = ['nzb']

    def download(self, data = None, movie = None, filedata = None):
        """Send a release to SABnzbd.

        Uses 'addfile' mode when raw NZB data is supplied, 'addurl' otherwise.
        Returns a download-id wrapper for file uploads, True for URL hand-offs,
        or False on any failure.
        """
        # Fixed: mutable default arguments ({} shared across calls); an absent
        # dict still behaves like an empty one.
        data = data or {}
        movie = movie or {}

        log.info('Sending "%s" to SABnzbd.', data.get('name'))

        req_params = {
            'cat': self.conf('category'),
            'mode': 'addurl',
            'nzbname': self.createNzbName(data, movie),
        }

        nzb_filename = None
        if filedata:
            # Anything shorter than 50 bytes cannot be a real NZB document.
            if len(filedata) < 50:
                log.error('No proper nzb available: %s', (filedata))
                return False

            # If it's a .rar, it adds the .rar extension, otherwise it stays .nzb
            nzb_filename = self.createFileName(data, filedata, movie)
            req_params['mode'] = 'addfile'
        else:
            req_params['name'] = data.get('url')

        try:
            # Fixed: was `is 'addfile'` -- identity comparison with a string
            # literal only works by accident of CPython interning; use ==.
            if req_params.get('mode') == 'addfile':
                sab_data = self.call(req_params, params = {'nzbfile': (ss(nzb_filename), filedata)}, multipart = True)
            else:
                sab_data = self.call(req_params)
        except URLError:
            log.error('Failed sending release, probably wrong HOST: %s', traceback.format_exc(0))
            return False
        except:
            log.error('Failed sending release, use API key, NOT the NZB key: %s', traceback.format_exc(0))
            return False

        log.debug('Result from SAB: %s', sab_data)
        if sab_data.get('status') and not sab_data.get('error'):
            log.info('NZB sent to SAB successfully.')
            if filedata:
                # SAB returns the queue ids of the added NZBs; track the first.
                return self.downloadReturnId(sab_data.get('nzo_ids')[0])
            else:
                return True
        else:
            log.error('Error getting data from SABNZBd: %s', sab_data)
            return False

    def getAllDownloadStatus(self):
        """Query SABnzbd's queue and history and translate both into a
        StatusList. Returns False when either API call fails."""
        log.debug('Checking SABnzbd download status.')

        # Go through Queue
        try:
            queue = self.call({
                'mode': 'queue',
            })
        except:
            log.error('Failed getting queue: %s', traceback.format_exc(1))
            return False

        # Go through history items
        try:
            history = self.call({
                'mode': 'history',
                'limit': 15,
            })
        except:
            log.error('Failed getting history json: %s', traceback.format_exc(1))
            return False

        statuses = StatusList(self)

        # Get busy releases
        for item in queue.get('slots', []):
            statuses.append({
                'id': item['nzo_id'],
                'name': item['filename'],
                'original_status': item['status'],
                # -1 signals "unknown" time left while the queue is paused.
                'timeleft': item['timeleft'] if not queue['paused'] else -1,
            })

        # Get old releases
        for item in history.get('slots', []):
            status = 'busy'
            # SAB can mark a failure as 'Completed' with a fail message set.
            if item['status'] == 'Failed' or (item['status'] == 'Completed' and item['fail_message'].strip()):
                status = 'failed'
            elif item['status'] == 'Completed':
                status = 'completed'
            statuses.append({
                'id': item['nzo_id'],
                'name': item['name'],
                'status': status,
                'original_status': item['status'],
                'timeleft': str(timedelta(seconds = 0)),
                'folder': item['storage'],
            })

        return statuses

    def removeFailed(self, item):
        """Delete a failed item (and its files) from SABnzbd's history."""
        log.info('%s failed downloading, deleting...', item['name'])
        try:
            self.call({
                'mode': 'history',
                'name': 'delete',
                'del_files': '1',
                'value': item['id']
            }, use_json = False)
        except:
            log.error('Failed deleting: %s', traceback.format_exc(0))
            return False
        return True

    def call(self, request_params, use_json = True, **kwargs):
        """Perform a SABnzbd API request.

        Returns the decoded JSON payload for the requested mode (or the raw
        response body when use_json is False); {} when SAB reports an error.
        """
        url = cleanHost(self.conf('host')) + 'api?' + tryUrlencode(mergeDicts(request_params, {
            'apikey': self.conf('api_key'),
            'output': 'json'
        }))
        data = self.urlopen(url, timeout = 60, show_error = False, headers = {'User-Agent': Env.getIdentifier()}, **kwargs)
        if use_json:
            d = json.loads(data)
            if d.get('error'):
                log.error('Error getting data from SABNZBd: %s', d.get('error'))
                return {}
            return d.get(request_params['mode']) or d
        else:
            return data
| coolbombom/CouchPotatoServer | couchpotato/core/downloaders/sabnzbd/main.py | Python | gpl-3.0 | 4,865 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_ssl_certificate
description:
- An SslCertificate resource, used for HTTPS load balancing. This resource provides
a mechanism to upload an SSL key and certificate to the load balancer to serve secure
connections from the user.
short_description: Creates a GCP SslCertificate
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
certificate:
description:
- The certificate in PEM format.
- The certificate chain must be no greater than 5 certs long.
- The chain must include at least one intermediate cert.
required: true
description:
description:
- An optional description of this resource.
required: false
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: false
private_key:
description:
- The write-only private key in PEM format.
required: true
extends_documentation_fragment: gcp
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/sslCertificates)'
- 'Official Documentation: U(https://cloud.google.com/load-balancing/docs/ssl-certificates)'
'''
EXAMPLES = '''
- name: create a ssl certificate
gcp_compute_ssl_certificate:
name: "test_object"
description: A certificate for testing. Do not use this certificate in production
certificate: |
-----BEGIN CERTIFICATE-----
MIICqjCCAk+gAwIBAgIJAIuJ+0352Kq4MAoGCCqGSM49BAMCMIGwMQswCQYDVQQG
EwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjERMA8GA1UEBwwIS2lya2xhbmQxFTAT
BgNVBAoMDEdvb2dsZSwgSW5jLjEeMBwGA1UECwwVR29vZ2xlIENsb3VkIFBsYXRm
b3JtMR8wHQYDVQQDDBZ3d3cubXktc2VjdXJlLXNpdGUuY29tMSEwHwYJKoZIhvcN
AQkBFhJuZWxzb25hQGdvb2dsZS5jb20wHhcNMTcwNjI4MDQ1NjI2WhcNMjcwNjI2
MDQ1NjI2WjCBsDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCldhc2hpbmd0b24xETAP
BgNVBAcMCEtpcmtsYW5kMRUwEwYDVQQKDAxHb29nbGUsIEluYy4xHjAcBgNVBAsM
FUdvb2dsZSBDbG91ZCBQbGF0Zm9ybTEfMB0GA1UEAwwWd3d3Lm15LXNlY3VyZS1z
aXRlLmNvbTEhMB8GCSqGSIb3DQEJARYSbmVsc29uYUBnb29nbGUuY29tMFkwEwYH
KoZIzj0CAQYIKoZIzj0DAQcDQgAEHGzpcRJ4XzfBJCCPMQeXQpTXwlblimODQCuQ
4mzkzTv0dXyB750fOGN02HtkpBOZzzvUARTR10JQoSe2/5PIwaNQME4wHQYDVR0O
BBYEFKIQC3A2SDpxcdfn0YLKineDNq/BMB8GA1UdIwQYMBaAFKIQC3A2SDpxcdfn
0YLKineDNq/BMAwGA1UdEwQFMAMBAf8wCgYIKoZIzj0EAwIDSQAwRgIhALs4vy+O
M3jcqgA4fSW/oKw6UJxp+M6a+nGMX+UJR3YgAiEAvvl39QRVAiv84hdoCuyON0lJ
zqGNhIPGq2ULqXKK8BY=
-----END CERTIFICATE-----
private_key: |
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIObtRo8tkUqoMjeHhsOh2ouPpXCgBcP+EDxZCB/tws15oAoGCCqGSM49
AwEHoUQDQgAEHGzpcRJ4XzfBJCCPMQeXQpTXwlblimODQCuQ4mzkzTv0dXyB750f
OGN02HtkpBOZzzvUARTR10JQoSe2/5PIwQ==
-----END EC PRIVATE KEY-----
project: "test_project"
auth_kind: "serviceaccount"
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
certificate:
description:
- The certificate in PEM format.
- The certificate chain must be no greater than 5 certs long.
- The chain must include at least one intermediate cert.
returned: success
type: str
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
privateKey:
description:
- The write-only private key in PEM format.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
    """Entry point: declare the module arguments and converge the GCP
    SslCertificate resource to the requested state (present/absent)."""
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            certificate=dict(required=True, type='str'),
            description=dict(type='str'),
            name=dict(type='str'),
            private_key=dict(required=True, type='str')
        )
    )

    # Default OAuth scope for the compute API when none was supplied.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    state = module.params['state']
    kind = 'compute#sslCertificate'

    # Look up the current remote state, then create/update/delete as needed.
    fetch = fetch_resource(module, self_link(module), kind)
    changed = False

    if fetch:
        if state == 'present':
            if is_different(module, fetch):
                update(module, self_link(module), kind)
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, collection(module), kind)
            changed = True
        else:
            fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)
def create(module, link, kind):
    # POST the new certificate and block until the returned operation is done.
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
    # The compute API provides no update call for SslCertificate resources;
    # any change requires delete + recreate, so fail explicitly.
    module.fail_json(msg="SslCertificate cannot be edited")
def delete(module, link, kind):
    # DELETE the certificate and block until the returned operation is done.
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
    """Build the API request body from the module parameters, dropping any
    entry whose value is empty/falsy."""
    params = module.params
    request = {
        u'kind': 'compute#sslCertificate',
        u'certificate': params.get('certificate'),
        u'description': params.get('description'),
        u'name': params.get('name'),
        u'privateKey': params.get('private_key')
    }
    # Only keep populated fields.
    return dict((field, value) for field, value in request.items() if value)
def fetch_resource(module, link, kind, allow_not_found=True):
    """GET ``link`` and return the decoded resource dict, or None when a 404
    is tolerated (``allow_not_found``)."""
    auth = GcpSession(module, 'compute')
    return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
    """Return the resource-specific REST URL of this SSL certificate."""
    template = ("https://www.googleapis.com/compute/v1/projects/%(project)s"
                "/global/sslCertificates/%(name)s")
    return template % module.params
def collection(module):
    """Return the REST URL of the project's sslCertificates collection."""
    template = ("https://www.googleapis.com/compute/v1/projects/%(project)s"
                "/global/sslCertificates")
    return template % module.params
def return_if_object(module, response, kind, allow_not_found=False):
    """Decode an API response into a dict.

    Returns None for tolerated 404s and empty 204 responses; fails the module
    run on HTTP errors, invalid JSON, or an embedded error payload.
    """
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    # json.decoder.JSONDecodeError only exists on py3; fall back to ValueError.
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result
def is_different(module, response):
    """Compare the desired request against the current API state and report
    whether an update is needed."""
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Restrict both sides to their shared keys so output-only response fields
    # and unset request fields never register as a difference.
    response_vals = dict((key, val) for key, val in response.items() if key in request)
    request_vals = dict((key, val) for key, val in request.items() if key in response)

    return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Normalize an API response for comparison against the request."""
    return {
        u'certificate': response.get(u'certificate'),
        u'creationTimestamp': response.get(u'creationTimestamp'),
        u'description': response.get(u'description'),
        u'id': response.get(u'id'),
        u'name': response.get(u'name'),
        # privateKey is write-only in the API (never returned), so echo the
        # module parameter back instead of reading it from the response.
        u'privateKey': module.params.get('private_key')
    }
}
def async_op_url(module, extra_data=None):
    """Build the URL used to poll a global compute operation by id."""
    template = "https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
    # Module params take precedence over any caller-provided values.
    merged = dict(extra_data or {})
    merged.update(module.params)
    return template.format(**merged)
def wait_for_operation(module, response):
    """Resolve an async compute operation response: wait until it completes,
    then fetch and return the resulting SslCertificate resource."""
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    # The finished operation points at the created/affected resource.
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#sslCertificate')
def wait_for_completion(status, op_result, module):
    """Poll the compute operation until it reaches DONE, failing the module
    run if the operation reports errors. Returns the final operation dict."""
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        # BUG FIX: raise_if_errors expects the module object as its third
        # argument (it calls module.fail_json); previously the string
        # 'message' was passed, which crashed with AttributeError whenever
        # the operation actually carried errors.
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation')
        status = navigate_hash(op_result, ['status'])
    return op_result
def raise_if_errors(response, err_path, module):
    """Fail the module run when ``response`` carries errors at ``err_path``."""
    errors = navigate_hash(response, err_path)
    if errors is None:
        return
    module.fail_json(msg=errors)
# Run the module when executed directly by Ansible.
if __name__ == '__main__':
    main()
| veger/ansible | lib/ansible/modules/cloud/google/gcp_compute_ssl_certificate.py | Python | gpl-3.0 | 11,771 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright 2012 Camptocamp SA
# Copyright 2012 Endian Solutions BV
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import Model
from openerp.osv import fields
import decimal_precision as dp
class product_product(Model):
    """Extend product.product with a fixed cost added on top of the
    BoM-computed purchase price (legacy OpenERP 6.1/7.0 ORM API)."""
    _inherit = 'product.product'

    def _cost_price(self, cr, uid, ids, field_name, arg, context=None):
        """Function-field getter for ``cost_price``: the purchase price
        (standard price or BoM-based cost) plus ``fixed_cost_price``.

        ``product_uom`` and ``properties`` can be passed through the context
        to influence the underlying costing.
        """
        if context is None:
            context = {}
        product_uom = context.get('product_uom')
        bom_properties = context.get('properties')
        # _compute_purchase_price returns {product_id: price}; it is provided
        # by a sibling costing module, not defined here -- TODO confirm which
        # module supplies it (see the module's dependencies).
        res = self._compute_purchase_price(cr, uid, ids, product_uom,
                                           bom_properties, context=context)
        for self_obj in self.browse(cr, uid, ids, context=context):
            res[self_obj.id] = res[self_obj.id] + self_obj.fixed_cost_price
        return res

    _columns = {
        # Flat amount manually maintained on the product.
        'fixed_cost_price': fields.float(
            'Fixed Cost Price', digits_compute = dp.get_precision('Sale Price')),
        'cost_price': fields.function(_cost_price,
            string='Cost Price (incl. BoM)',
            digits_compute=dp.get_precision('Sale Price'),
            help="The cost price is the standard price or, if the product has a BoM, "
                 "the sum of all standard prices of its components. It also takes care of the "
                 "BoM costing like cost per cylce.")
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| acsone/margin-analysis | product_cost_incl_costs_with_bom/product.py | Python | agpl-3.0 | 2,373 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A command that reads JSON data and lists it."""
import json
import sys
from googlecloudsdk.calliope import base
class ListFromJson(base.ListCommand):
  """Read JSON data and list it on the standard output.

  *{command}* is a test harness for resource output formatting and filtering.
  It behaves like any other `gcloud ... list` command except that the resources
  are read from a JSON data file.

  The input JSON data is either a single resource object or a list of resource
  objects of the same type. The resources are printed on the standard output.
  The default output format is *json*.
  """

  @staticmethod
  def Args(parser):
    base.URI_FLAG.RemoveFromParser(parser)
    parser.add_argument(
        'json_file',
        metavar='JSON-FILE',
        nargs='?',
        default=None,
        help=('A file containing JSON data for a single resource or a list of'
              ' resources of the same type. If omitted then the standard input'
              ' is read.'))

  @staticmethod
  def GetUriCacheUpdateOp():
    """No resource URIs."""
    return None

  def Run(self, args):
    # Fall back to the standard input when no file path was supplied.
    if not args.json_file:
      return json.load(sys.stdin)
    with open(args.json_file, 'r') as json_stream:
      return json.load(json_stream)

  def Format(self, unused_args):
    return 'json'
| KaranToor/MA450 | google-cloud-sdk/lib/surface/meta/list_from_json.py | Python | apache-2.0 | 1,923 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.python.randbytes}.
"""
import os
from twisted.trial import unittest
from twisted.python import randbytes
class SecureRandomTestCaseBase(object):
    """
    Base class for secureRandom test cases.
    """
    def _check(self, source):
        """
        The given random bytes source should return the number of bytes
        requested each time it is called and should probably not return the
        same bytes on two consecutive calls (although this is a perfectly
        legitimate occurrence and rejecting it may generate a spurious failure
        -- maybe we'll get lucky and the heat death with come first).

        @param source: a callable taking a byte count and returning that many
            random bytes.
        """
        for nbytes in range(17, 25):
            s = source(nbytes)
            self.assertEqual(len(s), nbytes)
            s2 = source(nbytes)
            self.assertEqual(len(s2), nbytes)
            # This is crude but hey.  Use assertNotEqual rather than the
            # deprecated assertNotEquals alias (removed from unittest in
            # Python 3.12).
            self.assertNotEqual(s2, s)
class SecureRandomTestCase(SecureRandomTestCaseBase, unittest.TestCase):
    """
    Test secureRandom under normal conditions.
    """
    def test_normal(self):
        """
        L{randbytes.secureRandom} should return a string of the requested
        length and make some effort to make its result otherwise unpredictable.
        """
        # _check asserts the requested length and that two consecutive calls
        # do not return identical bytes.
        self._check(randbytes.secureRandom)
class ConditionalSecureRandomTestCase(SecureRandomTestCaseBase,
                                      unittest.TestCase):
    """
    Test random sources one by one, then remove it to.
    """

    def setUp(self):
        """
        Create a L{randbytes.RandomFactory} to use in the tests.
        """
        self.factory = randbytes.RandomFactory()


    def errorFactory(self, nbytes):
        """
        A factory raising an error when a source is not available.
        """
        raise randbytes.SourceNotAvailable()


    def test_osUrandom(self):
        """
        L{RandomFactory._osUrandom} should work as a random source whenever
        L{os.urandom} is available.
        """
        self._check(self.factory._osUrandom)


    def test_withoutAnything(self):
        """
        Remove all secure sources and assert it raises a failure. Then try the
        fallback parameter.
        """
        # Replace the only secure source with one that always fails.
        self.factory._osUrandom = self.errorFactory
        self.assertRaises(randbytes.SecureRandomNotAvailable,
                          self.factory.secureRandom, 18)

        def fallbackToInsecure():
            return self.factory.secureRandom(18, fallback=True)

        result = self.assertWarns(
            RuntimeWarning,
            "urandom unavailable - "
            "proceeding with non-cryptographically secure random source",
            __file__,
            fallbackToInsecure)
        self.assertEqual(len(result), 18)
class RandomTestCaseBase(SecureRandomTestCaseBase, unittest.TestCase):
    """
    'Normal' random test cases.
    """

    def test_normal(self):
        """
        Test basic case.
        """
        self._check(randbytes.insecureRandom)


    def test_withoutGetrandbits(self):
        """
        Test C{insecureRandom} without C{random.getrandbits}.
        """
        # Disable the getrandbits shortcut and exercise the fallback path.
        fallbackFactory = randbytes.RandomFactory()
        fallbackFactory.getrandbits = None
        self._check(fallbackFactory.insecureRandom)
| nlloyd/SubliminalCollaborator | libs/twisted/test/test_randbytes.py | Python | apache-2.0 | 3,309 |
"""Home Assistant Switcher Component."""
from asyncio import QueueEmpty, TimeoutError as Asyncio_TimeoutError, wait_for
from datetime import datetime, timedelta
import logging
from typing import Dict, Optional
from aioswitcher.api import SwitcherV2Api
from aioswitcher.bridge import SwitcherV2Bridge
from aioswitcher.consts import COMMAND_ON
import voluptuous as vol
from homeassistant.auth.permissions.const import POLICY_EDIT
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.const import ATTR_ENTITY_ID, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback, split_entity_id
from homeassistant.exceptions import Unauthorized, UnknownUser
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.discovery import async_listen_platform, async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import (
ContextType,
DiscoveryInfoType,
EventType,
HomeAssistantType,
ServiceCallType,
)
from homeassistant.loader import bind_hass
_LOGGER = logging.getLogger(__name__)
# Integration domain and YAML configuration keys.
DOMAIN = "switcher_kis"
CONF_AUTO_OFF = "auto_off"
CONF_TIMER_MINUTES = "timer_minutes"
CONF_DEVICE_ID = "device_id"
CONF_DEVICE_PASSWORD = "device_password"
CONF_PHONE_ID = "phone_id"
# Keys/signals used to share the device state with the switch platform.
DATA_DEVICE = "device"
SIGNAL_SWITCHER_DEVICE_UPDATE = "switcher_device_update"
# Extra state attribute names exposed by the switch entity.
ATTR_AUTO_OFF_SET = "auto_off_set"
ATTR_ELECTRIC_CURRENT = "electric_current"
ATTR_REMAINING_TIME = "remaining_time"
# YAML schema: all three device credentials are required.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_PHONE_ID): cv.string,
                vol.Required(CONF_DEVICE_ID): cv.string,
                vol.Required(CONF_DEVICE_PASSWORD): cv.string,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
# Service for setting the device's auto-shutdown period.
SERVICE_SET_AUTO_OFF_NAME = "set_auto_off"
SERVICE_SET_AUTO_OFF_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ENTITY_ID): cv.entity_id,
        vol.Required(CONF_AUTO_OFF): cv.time_period_str,
    }
)
# Service for turning the device on with a timer (1-90 minutes).
SERVICE_TURN_ON_WITH_TIMER_NAME = "turn_on_with_timer"
SERVICE_TURN_ON_WITH_TIMER_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ENTITY_ID): cv.entity_id,
        vol.Required(CONF_TIMER_MINUTES): vol.All(
            cv.positive_int, vol.Range(min=1, max=90)
        ),
    }
)
@bind_hass
async def _validate_edit_permission(
    hass: HomeAssistantType, context: ContextType, entity_id: str
) -> None:
    """Use for validating user control permissions."""
    # Only entities of this integration's switch platform may be controlled.
    domain, object_id = split_entity_id(entity_id)
    if domain != SWITCH_DOMAIN or not object_id.startswith(DOMAIN):
        raise Unauthorized(context=context, entity_id=entity_id, permission=POLICY_EDIT)

    user = await hass.auth.async_get_user(context.user_id)
    if user is None:
        raise UnknownUser(context=context, entity_id=entity_id, permission=POLICY_EDIT)

    if not user.permissions.check_entity(entity_id, POLICY_EDIT):
        raise Unauthorized(context=context, entity_id=entity_id, permission=POLICY_EDIT)
async def async_setup(hass: HomeAssistantType, config: Dict) -> bool:
    """Set up the switcher component.

    Starts the SwitcherV2 bridge, waits up to 10 seconds for the first
    device report, registers the device services once the switch platform
    is discovered, and then polls the bridge queue every 4 seconds.
    """
    phone_id = config[DOMAIN][CONF_PHONE_ID]
    device_id = config[DOMAIN][CONF_DEVICE_ID]
    device_password = config[DOMAIN][CONF_DEVICE_PASSWORD]
    v2bridge = SwitcherV2Bridge(hass.loop, phone_id, device_id, device_password)
    await v2bridge.start()
    async def async_stop_bridge(event: EventType) -> None:
        """On Home Assistant stop, gracefully stop the bridge if running."""
        await v2bridge.stop()
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_bridge)
    try:
        # The device must report at least once before setup can continue;
        # bail out (and stop the bridge) if nothing arrives within 10s.
        device_data = await wait_for(v2bridge.queue.get(), timeout=10.0)
    except (Asyncio_TimeoutError, RuntimeError):
        _LOGGER.exception("Failed to get response from device")
        await v2bridge.stop()
        return False
    hass.data[DOMAIN] = {DATA_DEVICE: device_data}
    async def async_switch_platform_discovered(
        platform: str, discovery_info: DiscoveryInfoType
    ) -> None:
        """Use for registering services after switch platform is discovered."""
        if platform != DOMAIN:
            return
        async def async_set_auto_off_service(service: ServiceCallType) -> None:
            """Use for handling setting device auto-off service calls."""
            await _validate_edit_permission(
                hass, service.context, service.data[ATTR_ENTITY_ID]
            )
            async with SwitcherV2Api(
                hass.loop, device_data.ip_addr, phone_id, device_id, device_password
            ) as swapi:
                await swapi.set_auto_shutdown(service.data[CONF_AUTO_OFF])
        async def async_turn_on_with_timer_service(service: ServiceCallType) -> None:
            """Use for handling turning device on with a timer service calls."""
            await _validate_edit_permission(
                hass, service.context, service.data[ATTR_ENTITY_ID]
            )
            async with SwitcherV2Api(
                hass.loop, device_data.ip_addr, phone_id, device_id, device_password
            ) as swapi:
                await swapi.control_device(COMMAND_ON, service.data[CONF_TIMER_MINUTES])
        hass.services.async_register(
            DOMAIN,
            SERVICE_SET_AUTO_OFF_NAME,
            async_set_auto_off_service,
            schema=SERVICE_SET_AUTO_OFF_SCHEMA,
        )
        hass.services.async_register(
            DOMAIN,
            SERVICE_TURN_ON_WITH_TIMER_NAME,
            async_turn_on_with_timer_service,
            schema=SERVICE_TURN_ON_WITH_TIMER_SCHEMA,
        )
    async_listen_platform(hass, SWITCH_DOMAIN, async_switch_platform_discovered)
    hass.async_create_task(async_load_platform(hass, SWITCH_DOMAIN, DOMAIN, {}, config))
    @callback
    def device_updates(timestamp: Optional[datetime]) -> None:
        """Use for updating the device data from the queue."""
        # Non-blocking read: an empty queue just means no fresh report yet.
        if v2bridge.running:
            try:
                device_new_data = v2bridge.queue.get_nowait()
                if device_new_data:
                    async_dispatcher_send(
                        hass, SIGNAL_SWITCHER_DEVICE_UPDATE, device_new_data
                    )
            except QueueEmpty:
                pass
    async_track_time_interval(hass, device_updates, timedelta(seconds=4))
    return True
| tboyce021/home-assistant | homeassistant/components/switcher_kis/__init__.py | Python | apache-2.0 | 6,458 |
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
import logging
import copy
from urlparse import urlparse
from org.o3project.odenos.core.util.request_parser import RequestParser
from org.o3project.odenos.remoteobject.message.request import Request
from org.o3project.odenos.remoteobject.message.response import Response
from org.o3project.odenos.remoteobject.object_property import ObjectProperty
from org.o3project.odenos.remoteobject.remote_object_manager import RemoteObjectManager
from org.o3project.odenos.remoteobject.manager.component.component_type\
import ComponentType
class ComponentManager(RemoteObjectManager):
    """Remote-object manager specialized for ODENOS components.

    Registers the available component classes and serves a REST-style API
    (GET component_types, GET/PUT/DELETE components/...) through the
    request parser inherited from RemoteObjectManager.
    """
    DESCRIPTION = "python's ComponentManager"
    COMPONENT_TYPES = "component_types"

    def __init__(self, object_id, dispatcher):
        RemoteObjectManager.__init__(self, object_id, dispatcher)
        # Nothing registered yet, so advertise an empty type list.
        self._object_property.set_property(ComponentManager.COMPONENT_TYPES, "")

    def register_components(self, components):
        """Register component classes and publish their type names.

        :param components: iterable of component classes to make available.
        """
        self.register_remote_objects(components)
        types = ",".join(self.remote_object_classes.keys())
        self._object_property.set_property(ComponentManager.COMPONENT_TYPES,
                                           types)

    def _add_rules(self):
        # Map the REST-style paths onto handler methods.  PARAMS is the
        # number of arguments (body and/or path captures) the handler takes.
        rules = []
        rules.append({RequestParser.PATTERN: r"^component_types/?$",
                      RequestParser.METHOD: Request.Method.GET,
                      RequestParser.FUNC: self._do_get_component_types,
                      RequestParser.PARAMS: 0})
        rules.append({RequestParser.PATTERN: r"^components/?$",
                      RequestParser.METHOD: Request.Method.GET,
                      RequestParser.FUNC: self._do_get_remote_objects,
                      RequestParser.PARAMS: 0})
        rules.append({RequestParser.PATTERN: r"^components/"
                                             + "([a-zA-Z0-9_-]+)/?$",
                      RequestParser.METHOD: Request.Method.PUT,
                      RequestParser.FUNC: self._do_put_remote_object,
                      RequestParser.PARAMS: 2})
        rules.append({RequestParser.PATTERN: r"^components/"
                                             + "([a-zA-Z0-9_-]+)/?$",
                      RequestParser.METHOD: Request.Method.GET,
                      RequestParser.FUNC: self._do_get_remote_object,
                      RequestParser.PARAMS: 1})
        rules.append({RequestParser.PATTERN: r"^components/"
                                             + "([a-zA-Z0-9_-]+)/?$",
                      RequestParser.METHOD: Request.Method.DELETE,
                      RequestParser.FUNC: self._do_delete_remote_object,
                      RequestParser.PARAMS: 1})
        self._parser.add_rule(rules)

    def _do_get_component_types(self):
        """Build a Response describing every registered component type."""
        comp_types = {}
        try:
            for type_name, clazz in self.remote_object_classes.items():
                # Instantiate a throw-away component (no dispatcher) just to
                # read its object property block.
                comp_id = "%s_%s" % (self.object_id, type_name)
                obj_prop = clazz(comp_id, None).object_property
                # Renamed from "type" to avoid shadowing the builtin.
                obj_type = obj_prop.get_property(ObjectProperty.OBJECT_TYPE)
                super_type = obj_prop.get_property(
                    ObjectProperty.OBJECT_SUPER_TYPE)
                # CONNECTION_TYPES is a comma-separated list of "name:type"
                # pairs; malformed entries are silently skipped.
                connection_types = {}
                connection_types_str = obj_prop.get_property(
                    ObjectProperty.CONNECTION_TYPES)
                for type_elem in connection_types_str.split(","):
                    type_elem_list = type_elem.split(":")
                    if len(type_elem_list) == 2:
                        connection_types[type_elem_list[0]] = type_elem_list[1]
                description = obj_prop.get_property(ObjectProperty.DESCRIPTION)
                target = ComponentType(obj_type, super_type,
                                       connection_types, description)
                comp_types[type_name] = target.packed_object()
        except Exception as e:  # "as" syntax works on Python 2.6+ and 3.x
            return Response(Response.StatusCode.INTERNAL_SERVER_ERROR,
                            str(e))
        return Response(Response.StatusCode.OK, comp_types)
| nis-sdn/odenos | src/main/python/org/o3project/odenos/core/manager/component_manager.py | Python | apache-2.0 | 5,084 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateless random ops."""
import functools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_stateless_random_ops_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops as stateless
from tensorflow.python.platform import test
# Note that in theory each test will reset the eager context and may choose to
# hide some devices, so we shouldn't cache this transient info. Tests in this
# file don't make those config changes, so caching is fine. It provides a good
# speed-up.
_cached_device = None  # lazily populated by get_device()
def get_device():
  """Return (and cache) a logical device, preferring XLA_GPU > GPU > XLA_CPU > CPU."""
  global _cached_device
  if _cached_device is None:
    # Probe device types from highest to lowest precedence and keep the
    # first logical device found.
    for device_type in ('XLA_GPU', 'GPU', 'XLA_CPU', 'CPU'):
      candidates = config.list_logical_devices(device_type)
      if candidates:
        _cached_device = candidates[0]
        break
    else:
      raise ValueError('Cannot find any suitable device. Available devices: %s' %
                       config.list_logical_devices())
  return _cached_device
# Forward-compatibility horizons bracketing the 2020-10-25 expiry date used
# by _test_old_and_new_stateless_match.
BEFORE_EXPIRE = (2020, 10, 24)
AFTER_EXPIRE = (2020, 10, 26)
def invert_philox(key, value):
  """Invert the Philox bijection."""
  key = np.array(key, dtype=np.uint32)
  state = np.array(value, dtype=np.uint32)
  step = np.array([0x9E3779B9, 0xBB67AE85], dtype=np.uint32)
  # Undo the 10 Philox rounds in reverse order.
  for rnd in reversed(range(10)):
    key0, key1 = key + rnd * step
    v0 = state[3] * 0x991a7cdb & 0xffffffff
    v2 = state[1] * 0x6d7cae67 & 0xffffffff
    hi0 = v0 * 0xD2511F53 >> 32
    hi1 = v2 * 0xCD9E8D57 >> 32
    v1 = hi1 ^ state[0] ^ key0
    v3 = hi0 ^ state[2] ^ key1
    state = v0, v1, v2, v3
  return np.array(state)
# Seed pairs and seed dtypes shared by the parameterized test cases below.
SEEDS = ((7, 17), (11, 5), (2, 3))
SEED_TYPES = [dtypes.int32, dtypes.int64]
def float_cases(shape_dtypes=(None,)):
  """Yield (name, stateless_fn, stateful_fn) float-distribution test cases."""
  cases = (
      # Uniform distribution, with and without range
      ('uniform', stateless.stateless_random_uniform, random_ops.random_uniform,
       {}),
      ('uniform2', stateless.stateless_random_uniform,
       random_ops.random_uniform, dict(minval=2.2, maxval=7.1)),
      # Normal distribution, with and without mean+stddev
      ('normal', stateless.stateless_random_normal, random_ops.random_normal,
       {}),
      ('normal2', stateless.stateless_random_normal, random_ops.random_normal,
       dict(mean=2, stddev=3)),
      # Truncated normal distribution, with and without mean+stddev
      ('trnorm', stateless.stateless_truncated_normal,
       random_ops.truncated_normal, {}),
      ('trnorm2', stateless.stateless_truncated_normal,
       random_ops.truncated_normal, dict(mean=3, stddev=4)),
  )
  # Explicitly passing in params because capturing cell variable from loop is
  # problematic in Python
  def wrap(op, dtype, shape, shape_dtype, seed, **kwargs):
    device_type = get_device().device_type
    # Some dtypes are not supported on some devices
    if (dtype == dtypes.float16 and device_type in ('XLA_GPU', 'XLA_CPU') or
        dtype == dtypes.bfloat16 and device_type == 'GPU'):
      dtype = dtypes.float32
    shape_ = (constant_op.constant(shape, dtype=shape_dtype)
              if shape_dtype is not None else shape)
    return op(seed=seed, shape=shape_, dtype=dtype, **kwargs)
  def _name(a):
    # DTypes carry a .name attribute; plain values (e.g. None) are used as-is.
    if hasattr(a, 'name'):
      return a.name
    else:
      return a
  for dtype in dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64:
    for shape_dtype in shape_dtypes:
      for shape in (), (3,), (2, 5):
        for name, stateless_op, stateful_op, kwargs in cases:
          yield (('%s_%s_%s_%s' %
                  (name, _name(dtype), shape, _name(shape_dtype))).replace(
                      ' ', ''),
                 functools.partial(wrap, stateless_op, dtype, shape,
                                   shape_dtype, **kwargs),
                 functools.partial(wrap, stateful_op, dtype, shape, shape_dtype,
                                   **kwargs))
def int_cases(shape_dtypes=(None,), minval_maxval=None):
  """Yield (name, stateless_fn, stateful_fn) uniform-integer test cases."""

  def _build(op, low, high, shape, shape_dtype, dtype, seed, **kwargs):
    # Materialize the shape as a constant tensor when a shape dtype is given.
    if shape_dtype is not None:
      shape_arg = constant_op.constant(shape, dtype=shape_dtype)
    else:
      shape_arg = shape
    return op(
        seed=seed, shape=shape_arg, minval=low, maxval=high, dtype=dtype,
        **kwargs)

  bounds = ((2, 11111),) if minval_maxval is None else minval_maxval
  for low, high in bounds:
    for shape_dtype in shape_dtypes:
      for shape in (), (3,), (2, 5):
        for dtype in (dtypes.int32, dtypes.int64):
          yield ('uniform_%s_%s' % (low, high),
                 functools.partial(_build, stateless.stateless_random_uniform,
                                   low, high, shape, shape_dtype, dtype),
                 functools.partial(_build, random_ops.random_uniform,
                                   low, high, shape, shape_dtype, dtype))
def multinomial_cases():
  """Yield (name, stateless_fn, stateful_fn) multinomial test cases."""
  num_samples = 10

  def _build(op, logits, logits_dtype, output_dtype, seed):
    logits_t = constant_op.constant(logits, dtype=logits_dtype)
    return op(seed=seed, logits=logits_t, num_samples=num_samples,
              output_dtype=output_dtype)

  for logits_dtype in (np.float16, np.float32, np.float64):
    for output_dtype in (dtypes.int32, dtypes.int64):
      for logits in ([[0.1, 0.25, 0.5, 0.15]],
                     [[0.5, 0.5], [0.8, 0.2], [0.25, 0.75]]):
        yield ('multinomial',
               functools.partial(_build, stateless.stateless_multinomial,
                                 logits, logits_dtype, output_dtype),
               functools.partial(_build, random_ops.multinomial,
                                 logits, logits_dtype, output_dtype))
def gamma_cases():
  """Yield (name, stateless_fn, stateful_fn) gamma test cases."""

  def _build(op, alpha, dtype, shape, seed):
    return op(seed=seed, shape=shape,
              alpha=constant_op.constant(alpha, dtype=dtype), dtype=dtype)

  for dtype in (np.float16, np.float32, np.float64):
    for alpha in ([[.5, 1., 2.]], [[0.5, 0.5], [0.8, 0.2], [0.25, 0.75]]):
      # The stateless op takes the full output shape; the stateful op only
      # takes the sample-count prefix and broadcasts over alpha.
      yield ('gamma',
             functools.partial(_build, stateless.stateless_random_gamma,
                               alpha, dtype, (10,) + tuple(np.shape(alpha))),
             functools.partial(_build, random_ops.random_gamma,
                               alpha, dtype, (10,)))
def poisson_cases():
  """Yield (name, stateless_fn, stateful_fn) Poisson test cases."""

  def _build(op, lam, lam_dtype, out_dtype, shape, seed):
    lam_t = constant_op.constant(lam_dtype(lam), dtype=lam_dtype)
    return op(seed=seed, shape=shape, lam=lam_t, dtype=out_dtype)

  for lam_dtype in (np.float16, np.float32, np.float64, np.int32, np.int64):
    for out_dtype in (np.float16, np.float32, np.float64, np.int32, np.int64):
      for lam in ([[5.5, 1., 2.]], [[7.5, 10.5], [3.8, 8.2], [1.25, 9.75]]):
        # The stateless op takes the full output shape; the stateful op only
        # takes the sample-count prefix and broadcasts over lam.
        yield ('poisson',
               functools.partial(_build, stateless.stateless_random_poisson,
                                 lam, lam_dtype, out_dtype,
                                 (10,) + tuple(np.shape(lam))),
               functools.partial(_build, random_ops.random_poisson,
                                 lam, lam_dtype, out_dtype, (10,)))
@test_util.with_eager_op_as_function
class StatelessOpsTest(test.TestCase, parameterized.TestCase):
def _test_match(self, case, seed):
# Stateless ops should be the same as stateful ops on the first call
# after seed scrambling.
key = 0x3ec8f720, 0x02461e29
preseed = invert_philox(key, (seed[0], 0, seed[1], 0)).astype(np.uint64)
preseed = preseed[::2] | preseed[1::2] << 32
with ops.device(get_device().name):
_, stateless_op, stateful_op = case
random_seed.set_random_seed(seed[0])
stateful = stateful_op(seed=seed[1])
pure = stateless_op(seed=preseed)
self.assertAllEqual(stateful, pure)
def _test_match_stateless_cpu_gpu(self, case, seed):
# Stateless ops should produce the same result on CPUs and GPUs.
_, stateless_op, _ = case
with ops.device('CPU'):
result_cpu = stateless_op(seed=seed)
with ops.device(get_device().name):
result_gpu = stateless_op(seed=seed)
self.assertAllClose(result_cpu, result_gpu)
def _test_old_and_new_stateless_match(self, case, seed):
"""Tests that the new stateless ops match the old stateless ones."""
with ops.device(get_device().name):
_, stateless_op, _ = case
with compat.forward_compatibility_horizon(*BEFORE_EXPIRE):
old = stateless_op(seed=seed)
with compat.forward_compatibility_horizon(*AFTER_EXPIRE):
new = stateless_op(seed=seed)
self.assertAllClose(old, new)
def _test_explicit_alg(self, case, seed):
"""Tests that alg=philox and alg=None are the same (on CPU/GPU)."""
with ops.device(get_device().name):
_, stateless_op, _ = case
implicit_alg = stateless_op(seed=seed)
# All device types allowed in this test will result in Philox
explicit_alg = stateless_op(seed=seed, alg='philox')
self.assertAllClose(implicit_alg, explicit_alg)
def _test_determinism(self, case, seed_type):
# Stateless values should be equal iff the seeds are equal (roughly)
seeds = [(x, y) for x in range(5) for y in range(5)] * 3 # pylint: disable=g-complex-comprehension
with self.test_session(), ops.device(get_device().name):
_, stateless_op, _ = case
if context.executing_eagerly():
values = [
(seed, stateless_op(seed=constant_op.constant(seed, seed_type)))
for seed in seeds]
else:
# Have this branch because the above branch is too slow in graph
# mode
seed_t = array_ops.placeholder(seed_type, shape=[2])
pure = stateless_op(seed=seed_t)
values = [
(seed, pure.eval(feed_dict={seed_t: seed})) for seed in seeds
]
for s0, v0 in values:
for s1, v1 in values:
if dtypes.as_dtype(v0.dtype) != dtypes.bfloat16:
self.assertEqual(s0 == s1, np.all(v0 == v1))
elif s0 == s1:
# Skip the s0 != s1 case because v0 and v1 can be either equal or
# unequal in that case due to bfloat16's low precision
self.assertAllEqual(v0, v1)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(float_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testMatchFloat(self, case, seed):
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Skip on XLA because XLA kernels do not support int64 '
'seeds needed by this test.')
self._test_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(int_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testMatchInt(self, case, seed):
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Skip on XLA because XLA kernels do not support int64 '
'seeds needed by this test.')
self._test_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(multinomial_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testMatchMultinomial(self, case, seed):
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(gamma_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testMatchGamma(self, case, seed):
if get_device().device_type == 'GPU':
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking GPU kernel')
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(gamma_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testStatelessGammaCpuGpuMatch(self, case, seed):
if get_device().device_type != 'GPU':
# This test compares the numbers produced by the CPU and GPU kernel for
# stateless_random_gamma.
self.skipTest('This test requires GPU')
self._test_match_stateless_cpu_gpu(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(poisson_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testMatchPoisson(self, case, seed):
if get_device().device_type == 'GPU':
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking GPU kernel')
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(float_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testOldAndNewStatelessMatchFloat(self, case, seed):
self._test_old_and_new_stateless_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(
int_cases(minval_maxval=((2, 11111), (None, None)))))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testOldAndNewStatelessMatchInt(self, case, seed):
self._test_old_and_new_stateless_match(case, seed)
@parameterized.named_parameters(
('_%s_%s' % (case[0], case_id), case)
for case_id, case in enumerate(float_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testExplicitAlgFloat(self, case):
seed = (7, 17)
self._test_explicit_alg(case, seed)
@parameterized.named_parameters(
('_%s_%s' % (case[0], case_id), case)
for case_id, case in enumerate(
int_cases(minval_maxval=((2, 11111), (None, None)))))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testExplicitAlgInt(self, case):
seed = (7, 17)
self._test_explicit_alg(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension
for seed_type in SEED_TYPES
for case_id, case in enumerate(
float_cases(shape_dtypes=(dtypes.int32, dtypes.int64))))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testDeterminismFloat(self, case, seed_type):
if seed_type == dtypes.int64 and get_device().device_type in ('XLA_GPU',
'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest(
'Skip on XLA because XLA kernels do not support int64 seeds.')
self._test_determinism(case, seed_type)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension
for seed_type in SEED_TYPES
for case_id, case in enumerate(
int_cases(shape_dtypes=(dtypes.int32, dtypes.int64))))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testDeterminismInt(self, case, seed_type):
if seed_type == dtypes.int64 and get_device().device_type in ('XLA_GPU',
'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest(
'Skip on XLA because XLA kernels do not support int64 seeds.')
self._test_determinism(case, seed_type)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension
for seed_type in SEED_TYPES
for case_id, case in enumerate(multinomial_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testDeterminismMultinomial(self, case, seed_type):
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_determinism(case, seed_type)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension
for seed_type in SEED_TYPES
for case_id, case in enumerate(gamma_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testDeterminismGamma(self, case, seed_type):
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_determinism(case, seed_type)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension
for seed_type in SEED_TYPES
for case_id, case in enumerate(poisson_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testDeterminismPoisson(self, case, seed_type):
if get_device().device_type == 'GPU':
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking GPU kernel')
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_determinism(case, seed_type)
@test_util.run_v2_only
def testGetKeyCounterAlg(self):
seed = [1, 2]
key, counter = gen_stateless_random_ops_v2.stateless_random_get_key_counter(
seed)
self.assertAllEqual(key.shape, [1])
self.assertAllEqual(counter.shape, [2])
alg = gen_stateless_random_ops_v2.stateless_random_get_alg()
self.assertAllEqual(alg.shape, [])
def assertDTypeEqual(self, a, b):
self.assertEqual(dtypes.as_dtype(a), dtypes.as_dtype(b))
def assertNoEqualPair(self, ls):
for i in range(len(ls)):
for j in range(i + 1, len(ls)):
self.assertFalse(math_ops.reduce_all(ls[i] == ls[j]))
@parameterized.parameters(['int32', 'int64'])
@test_util.run_v2_only
def testSplit(self, dtype):
"""Test for `split`."""
seed = constant_op.constant([1, 2], dtype=dtype)
new_seed = stateless.split(seed, 3)
self.assertEqual(new_seed.shape, [3, 2])
self.assertDTypeEqual(new_seed.dtype, dtype)
self.assertNoEqualPair([seed] + array_ops.unstack(new_seed))
@parameterized.parameters(['int32', 'int64'])
@test_util.run_v2_only
def testFoldIn(self, dtype):
"""Test for `fold_in`."""
orig_seed = constant_op.constant([1, 2], dtype='int32')
seed = stateless.fold_in(orig_seed, constant_op.constant(3, dtype=dtype))
new_seeds = []
new_seeds.append(seed)
seed = stateless.fold_in(seed, constant_op.constant(4, dtype=dtype))
new_seeds.append(seed)
for s in new_seeds:
self.assertEqual(s.shape, [2])
self.assertDTypeEqual(s.dtype, dtype)
self.assertNoEqualPair([math_ops.cast(orig_seed, dtype)] + new_seeds)
  @test_util.run_v2_only
  def testErrors(self):
    """Tests that proper errors are raised.
    """
    shape = [2, 3]
    # minval must be a scalar: passing a tensor of `shape` has to raise a
    # ValueError mentioning the offending shape, even under tf.function.
    with self.assertRaisesWithPredicateMatch(
        ValueError,
        'minval must be a scalar; got a tensor of shape '):
      @def_function.function
      def f():
        stateless.stateless_random_uniform(
            shape=shape, seed=[1, 2], minval=array_ops.zeros(shape, 'int32'),
            maxval=100, dtype='int32')
      f()
    # Same check for a non-scalar maxval.
    with self.assertRaisesWithPredicateMatch(
        ValueError,
        'maxval must be a scalar; got a tensor of shape '):
      @def_function.function
      def f2():
        stateless.stateless_random_uniform(
            shape=shape, seed=[1, 2], minval=0,
            maxval=array_ops.ones(shape, 'int32') * 100,
            dtype='int32')
      f2()
if __name__ == '__main__':
  # Disable soft device placement so a missing kernel fails the test instead
  # of silently falling back to the CPU implementation.
  config.set_soft_device_placement(False)
  context.context().enable_xla_devices()
  test.main()
| tensorflow/tensorflow | tensorflow/python/kernel_tests/random/stateless_random_ops_test.py | Python | apache-2.0 | 23,279 |
# This file is part of Peach-Py package and is licensed under the Simplified BSD license.
# See license.rst for the full text of the license.
from peachpy.x86_64 import *
from peachpy import *
# PeachPy metaprogram: emits an x86-64 function that transposes a 4x4 float32
# matrix in place, using the classic UNPCK/MOVLHPS/MOVHLPS SSE shuffle ladder.
# The argument is a pointer to 16 contiguous floats (row-major m00..m33).
matrix = Argument(ptr(float_))
with Function("transpose4x4_opt", (matrix,)):
    reg_matrix = GeneralPurposeRegister64()
    LOAD.ARGUMENT(reg_matrix, matrix)
    # Load the four rows with unaligned loads (caller's buffer may be unaligned).
    xmm_rows = [XMMRegister() for _ in range(4)]
    for i, xmm_row in enumerate(xmm_rows):
        MOVUPS(xmm_row, [reg_matrix + i * XMMRegister.size])
    # First interleave stage: pair up rows 0/1 and 2/3.
    xmm_temps = [XMMRegister() for _ in range(2)]
    # xmm_temps[0] = ( m00, m01, m02, m03 )
    MOVAPS(xmm_temps[0], xmm_rows[0])
    # xmm_temps[1] = ( m20, m21, m22, m23 )
    MOVAPS(xmm_temps[1], xmm_rows[2])
    # xmm_rows[0] = ( m00, m10, m01, m11 )
    UNPCKLPS(xmm_rows[0], xmm_rows[1])
    # xmm_rows[2] = ( m20, m30, m21, m31 )
    UNPCKLPS(xmm_rows[2], xmm_rows[3])
    # xmm_rows[1] = ( m02, m12, m03, m13 )
    UNPCKHPS(xmm_temps[0], xmm_rows[1])
    xmm_rows[1] = xmm_temps[0]
    # xmm_rows[3] = ( m22, m32, m23, m33 )
    UNPCKHPS(xmm_temps[1], xmm_rows[3])
    xmm_rows[3] = xmm_temps[1]
    # Second stage: combine low/high 64-bit halves to finish each column,
    # storing each transposed row back as soon as it is complete.
    xmm_temps = [XMMRegister() for _ in range(2)]
    # xmm_temps[0] = ( m00, m10, m01, m11 )
    MOVAPS(xmm_temps[0], xmm_rows[0])
    # xmm_temps[1] = ( m02, m12, m03, m13 )
    MOVAPS(xmm_temps[1], xmm_rows[1])
    # xmm_rows[0] = ( m00, m10, m20, m30 )
    MOVLHPS(xmm_rows[0], xmm_rows[2])
    MOVUPS([reg_matrix], xmm_rows[0])
    # xmm_rows[2] = ( m01, m11, m21, m31 )
    MOVHLPS(xmm_rows[2], xmm_temps[0])
    MOVUPS([reg_matrix + 16], xmm_rows[2])
    # xmm_rows[1] = ( m02, m12, m22, m32 )
    MOVLHPS(xmm_rows[1], xmm_rows[3])
    MOVUPS([reg_matrix + 32], xmm_rows[1])
    # xmm_rows[3] = ( m03, m13, m23, m33 )
    MOVHLPS(xmm_rows[3], xmm_temps[1])
    MOVUPS([reg_matrix + 48], xmm_rows[3])
    RETURN()
| silky/PeachPy | examples/nmake/transpose4x4-opt.py | Python | bsd-2-clause | 1,848 |
# Copyright (c) 2015, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import unittest
import numpy as np
import GPy
class MFtests(unittest.TestCase):
    """Gradient checks for GP models built with various mean functions."""
    def test_simple_mean_function(self):
        """
        The simplest possible mean function. No parameters, just a simple Sinusoid.
        """
        # create a parameterless mean function wrapping np.sin
        mf = GPy.core.Mapping(1,1)
        mf.f = np.sin
        mf.update_gradients = lambda a,b: None
        X = np.linspace(0,10,50).reshape(-1,1)
        Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape)
        k =GPy.kern.RBF(1)
        lik = GPy.likelihoods.Gaussian()
        m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf)
        self.assertTrue(m.checkgrad())
    def test_parametric_mean_function(self):
        """
        A piecewise-linear mean function with parameters that we'll learn alongside the kernel
        """
        X = np.linspace(-1,10,50).reshape(-1,1)
        # tent-shaped signal peaking at X=6, plus structured noise
        Y = 3-np.abs((X-6))
        Y += .5*np.cos(3*X) + 0.3*np.random.randn(*X.shape)
        mf = GPy.mappings.PiecewiseLinear(1, 1, [-1,1], [9,2])
        k =GPy.kern.RBF(1)
        lik = GPy.likelihoods.Gaussian()
        m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf)
        self.assertTrue(m.checkgrad())
    def test_parametric_mean_function_composition(self):
        """
        A composed (Linear o Kernel) mean function whose parameters are learned
        alongside the kernel
        """
        X = np.linspace(0,10,50).reshape(-1,1)
        Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape) + 3*X
        mf = GPy.mappings.Compound(GPy.mappings.Linear(1,1),
                                   GPy.mappings.Kernel(1, 1, np.random.normal(0,1,(1,1)),
                                                       GPy.kern.RBF(1))
                                   )
        k =GPy.kern.RBF(1)
        lik = GPy.likelihoods.Gaussian()
        m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf)
        self.assertTrue(m.checkgrad())
    def test_parametric_mean_function_additive(self):
        """
        An additive (Constant + MLP + Identity) mean function whose parameters
        are learned alongside the kernel
        """
        X = np.linspace(0,10,50).reshape(-1,1)
        Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape) + 3*X
        mf = GPy.mappings.Additive(GPy.mappings.Constant(1,1,3),
                                   GPy.mappings.Additive(GPy.mappings.MLP(1,1),
                                                         GPy.mappings.Identity(1,1)
                                                         )
                                   )
        k =GPy.kern.RBF(1)
        lik = GPy.likelihoods.Gaussian()
        m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf)
        self.assertTrue(m.checkgrad())
    def test_svgp_mean_function(self):
        # an instance of the SVGP model with a mean function
        X = np.linspace(0,10,500).reshape(-1,1)
        Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape)
        Y = np.where(Y>0, 1,0) # binarize to make a classification problem
        mf = GPy.mappings.Linear(1,1)
        # 50 inducing points spread over the input range
        Z = np.linspace(0,10,50).reshape(-1,1)
        lik = GPy.likelihoods.Bernoulli()
        k =GPy.kern.RBF(1) + GPy.kern.White(1, 1e-4)
        m = GPy.core.SVGP(X, Y,Z=Z, kernel=k, likelihood=lik, mean_function=mf)
        self.assertTrue(m.checkgrad())
| befelix/GPy | GPy/testing/meanfunc_tests.py | Python | bsd-3-clause | 3,340 |
# coding: utf-8
from livereload import Server, shell
# Rebuild the Sphinx HTML docs whenever a source file changes, then serve the
# result with live reload, opening the browser on start.
live_server = Server()
live_server.watch('docs/*.rst', shell('make html'))
live_server.serve(root='docs/_build/html', open_url=True)
| mgedmin/python-livereload | server.py | Python | bsd-3-clause | 173 |
# Copyright (c) 2003-2010, Berend-Jan "SkyLined" Wever <berendjanwever@gmail.com>
# Project homepage: http://code.google.com/p/alpha3/
# All rights reserved. See COPYRIGHT.txt for details.
import charsets, encode, io
import x86, x64, test
import os, re, sys
#_______________________________________________________________________________________________________________________
#
# ,sSSs,,s, ,sSSSs, : ALPHA3 - Alphanumeric shellcode encoder.
# dS" Y$P" YS" ,SY : Version 1.0 alpha
# iS' dY ssS" : Copyright (C) 2003-2009 by SkyLined.
# YS, dSb SP, ;SP : <berendjanwever@gmail.com>
# `"YSS'"S' "YSSSY" : http://skypher.com/wiki/index.php/ALPHA3
#_______________________________________________________________________________________________________________________
#
# Encoder-selection settings; None means "not specified on the command line".
_settings = {
  "architecture": None,
  "character encoding": None,
  "case": None
}
# Defaults applied to any setting the user leaves unspecified when encoding.
_default_settings = {
  "architecture": "x86",
  "character encoding": "ascii",
  "case": "mixedcase"
}
# Recognized values for each setting (used to classify bare command-line words).
_valid_settings = {
  "case": charsets.valid_character_casings,
  "character encoding": charsets.valid_character_encodings,
  "architecture": ["x86", "x64"]
}
# Positional arguments (currently only the decoder's base-address expression).
_arguments = {
  "base address": None
}
# "--name=value" options.
_switches = {
  "input": None,
  "output": None
}
# "--name" options; each value counts how often the flag was given.
_flags = {
  "verbose": 0,
  "help": 0,
  "test": 0,
  "int3": 0
}
# Filled at startup with the encoder definitions from the x86/x64 modules.
encoders = [];
import print_functions;
from print_functions import *
def ParseCommandLine():
  """Parse sys.argv into the module-level _settings, _arguments, _switches
  and _flags dicts. Returns True on success; on any unrecognized option an
  error is printed to stderr and False is returned. (Python 2 source.)"""
  global _settings, _arguments, _switches, _flags;
  # Parse settings, arguments, switches and flags from the command line:
  if len(sys.argv) == 1:
    # No arguments at all: show the help text.
    _flags["help"] = 1;
  else:
    for i in range(1, len(sys.argv)):
      arg = sys.argv[i];
      if arg[:2] == "--":
        end_switch_name = arg.find("=");
        if end_switch_name != -1:
          # "--name=value" style option: a switch.
          switch_name = arg[2:end_switch_name];
          switch_value = arg[end_switch_name + 1:];
          for valid_switch_name in _switches:
            if switch_name == valid_switch_name:
              _switches[switch_name] = switch_value;
              break;
          else:
            # Report just the switch name, not the "name=value" blob.
            print >>sys.stderr, "Unknown switch '%s'!" % switch_name;
            return False;
        else:
          # "--name" style option: a flag (repeatable to increase its level).
          flag_name = arg[2:]
          for valid_flag_name in _flags:
            if flag_name == valid_flag_name:
              _flags[flag_name] += 1;
              break
          else:
            # BUG FIX: report the flag the user actually supplied; the old
            # code printed `valid_flag_name`, i.e. whatever valid flag the
            # loop happened to iterate over last.
            print >>sys.stderr, "Unknown flag '%s'!" % flag_name;
            return False;
      else:
        # Bare word: either a recognized setting value or a positional argument.
        for setting_name in _valid_settings:
          if arg in _valid_settings[setting_name]:
            _settings[setting_name] = arg;
            break;
        else:
          for argument_name in _arguments:
            if _arguments[argument_name] == None:
              _arguments[argument_name] = arg;
              break;
          else:
            print >>sys.stderr, "Unknown _arguments: %s." % repr(arg);
            return False;
  return True;
def PrintLogo():
  """Print the ALPHA3 ASCII-art banner through the shared PrintInfo helper."""
  PrintInfo([
    (None, "____________________________________________________________________________"),
    (None, """ ,sSSs,,s, ,sSSSs, ALPHA3 - Alphanumeric shellcode encoder."""),
    (None, """ dS" Y$P" YS" ,SY Version 1.0 alpha"""),
    (None, """ iS' dY ssS" Copyright (C) 2003-2009 by SkyLined."""),
    (None, """ YS, dSb SP, ;SP <berendjanwever@gmail.com>"""),
    (None, """ `"YSS'"S' "YSSSY" http://skypher.com/wiki/index.php/ALPHA3"""),
    (None, "____________________________________________________________________________"),
  ]);
def PrintHelp():
  """Print the command-line usage text (sections: usage, encoder settings,
  I/O settings, flags, notes)."""
  PrintInfo([
    (None, "[Usage]"),
    (" ", "ALPHA3.py [ encoder settings | I/O settings | flags ]"),
    (None, ""),
    (None, "[Encoder setting]"),
    (" architecture ", "Which processor architecture to target (x86, x64)."),
    (" character encoding ", "Which character encoding to use (ascii, cp437, latin-1, utf-16)."),
    (" casing ", "Which character casing to use (uppercase, mixedcase, lowercase)."),
    (" base address ", "How to determine the base address in the decoder code (each encoder has its own set of "
                                "valid values)."),
    (None, ""),
    (None, "[I/O Setting]"),
    (" --input=\"file\"", "Path to a file that contains the shellcode to be encoded (Optional, default is to read "
                                "input from stdin)."),
    (" --output=\"file\"", "Path to a file that will receive the encoded shellcode (Optional, default is to write "
                                "output to stdout)."),
    (None, ""),
    (None, "[Flags]"),
    (" --verbose", "Display verbose information while executing. Use this flag twice to output progress "
                                "during encoding."),
    (" --help", "Display this message and quit."),
    (" --test", "Run all available tests for all encoders. (Useful while developing/testing new "
                                "encoders)."),
    (" --int3", "Trigger a breakpoint before executing the result of a test. (Use in combination with "
                                "--test)."),
    (None, ""),
    (None, "[Notes]"),
    (" ", "You can provide encoder settings in combination with the --help and --test switches to filter which "
          "encoders you get help information for and which get tested, respectively.")
  ]);
def Main():
  """Dispatch based on the parsed command line: show help, run encoder tests,
  or encode shellcode with the first encoder matching the settings/arguments.
  Returns True on success, False if any problem was reported. (Python 2.)"""
  # Print header
  if _flags["help"]:
    # Print the main help body before displaying encoder specific help:
    PrintLogo();
    PrintWrappedLine();
    PrintHelp();
    PrintWrappedLine();
    encoding = False;
  elif not _flags["test"]:
    if _flags["verbose"]:
      PrintLogo();
    encoding = True;
  else:
    if _flags["verbose"]:
      PrintLogo();
      PrintWrappedLine();
    # We're testing our encoders
    encoding = False;
  # Apply defaults for any setting the user did not provide (when encoding)
  # and print the effective settings in verbose mode.
  # BUG FIX: defaults used to be applied only inside "if verbose", so a
  # non-verbose run kept settings at None and matched multiple encoders.
  for name in _settings:
    if _settings[name] is not None:
      if _flags["verbose"]:
        PrintInfo([(name, _settings[name])]);
    elif encoding:
      _settings[name] = _default_settings[name];
      if _flags["verbose"]:
        PrintInfo([(name, _settings[name] + " (default)")]);
  if _flags["verbose"]:
    for name in _arguments:
      if _arguments[name] is not None:
        PrintInfo([(name, _arguments[name])]);
  # If the user wants to encode shellcode, it needs to be read from stdin or a file:
  if encoding:
    if _switches["input"] is not None:
      shellcode = io.ReadFile(_switches["input"]);
    else:
      shellcode = sys.stdin.read();
  # Scan all encoders to see which match the given _settings/_arguments and take action:
  results = [];
  errors = False;
  help_results = {};
  at_least_one_encoder_found = False;
  for encoder_settings in encoders:
    for name in _settings:
      if not name in encoder_settings:
        raise AssertionError("One of the encoders is missing the '%s' setting: %s" % (name, encoder_settings["name"]));
      if _settings[name] != None and _settings[name] != encoder_settings[name]:
        # This setting is specified but does not match this encoder: skip it.
        break;
    else: # All _settings match
      # Check "base address" argument:
      if (_arguments["base address"] is None or
          re.match(encoder_settings["base address"], _arguments["base address"], re.IGNORECASE)):
        at_least_one_encoder_found = True;
        if _flags["test"]:
          problems = test.TestEncoder(encoder_settings, _arguments["base address"], _flags["int3"] > 0);
          if problems is not None: # None => No test was found for the given base address
            at_least_one_encoder_found = True;
            # NOTE(review): errors is set whenever a test ran, even with an
            # empty problems list — looks suspicious, but preserved as-is.
            results.extend(problems);
            errors = True;
        elif _flags["help"]:
          # Group the base-address samples by encoder settings for display.
          encoder_settings_string = "%s %s %s" % (encoder_settings["architecture"],
              encoder_settings["character encoding"], encoder_settings["case"]);
          if encoder_settings_string not in help_results:
            help_results[encoder_settings_string] = [];
          help_results[encoder_settings_string].append((
              encoder_settings["name"], " ".join(encoder_settings["base address samples"])));
        else:
          encoder_function = encoder_settings["function"];
          if "function args" in encoder_settings:
            encoder_function_args = encoder_settings["function args"];
          else:
            encoder_function_args = {};
          # BUG FIX: encode first, then route the result to the requested
          # destination. The old --output path wrote an undefined "result"
          # variable to the nonexistent _settings["output file"] entry and
          # never encoded anything.
          encoded_shellcode = encoder_function(_arguments["base address"], shellcode, *encoder_function_args);
          results += test.CheckEncodedShellcode(encoded_shellcode, encoder_settings);
          if _switches["output"] is not None:
            io.WriteFile(_switches["output"], encoded_shellcode);
          else:
            sys.stdout.write(encoded_shellcode);
  if _flags["help"]:
    if not help_results:
      PrintWrappedLine("No encoder found that can encode using the given settings and arguments.");
      errors = True;
    else:
      PrintWrappedLine("Valid base address examples for each encoder, ordered by encoder settings, are:");
      help_results_encoder_settings = help_results.keys();
      help_results_encoder_settings.sort();
      for encoder_settings_string in help_results_encoder_settings:
        PrintWrappedLine("");
        PrintWrappedLine("[%s]" % encoder_settings_string);
        for encoder_name, valid_base_address_samples in help_results[encoder_settings_string]:
          PrintInfo([(' ' + encoder_name, valid_base_address_samples)]);
  else:
    if not at_least_one_encoder_found:
      results.append("No encoder exists for the given settings.");
      errors = True;
  if results:
    PrintWrappedLine("");
    PrintWrappedLine("The following problems were found:");
    for result in results:
      PrintWrappedLine(result);
  return not errors;
def toInt(s):
  """Convert a decimal ("123") or "0x"-prefixed hexadecimal string to int."""
  return int(s[2:], 16) if s.startswith("0x") else int(s)
if __name__ == "__main__":
  # Register all known encoders for both supported architectures.
  encoders.extend(x86.encoders);
  encoders.extend(x64.encoders);
  success = ParseCommandLine();
  if success:
    # Propagate the verbosity level to the shared print helpers before running.
    print_functions.g_output_verbosity_level = _flags["verbose"];
    success = Main();
  # Shell convention: 0 for success, 1 for failure.
  exit_code = {True:0, False:1}[success];
  exit(exit_code);
| zachriggle/alpha3 | ALPHA3.py | Python | bsd-3-clause | 11,046 |
from contextlib import contextmanager
import json
import mock
# Mock tastypi API.
class ProjectData(object):
    """Stand-in for the tastypie project endpoint used by the tests."""

    def get(self):
        """Return an empty project payload (a fresh dict per call)."""
        return {}
def mock_version(repo):
    """Build a MockVersion class whose instances mimic the tastypie version
    endpoint; the fixture's project "repo" field is set to *repo*."""
    class MockVersion(object):
        def __init__(self, x=None):
            pass
        def put(self, x=None):
            # Echo the payload back, like a successful PUT.
            return x
        def get(self, **kwargs):
            # Canned API fixtures for a version and its project.
            version = json.loads("""
                {
                    "active": false,
                    "built": false,
                    "id": "12095",
                    "identifier": "remotes/origin/zip_importing",
                    "resource_uri": "/api/v1/version/12095/",
                    "slug": "zip_importing",
                    "uploaded": false,
                    "verbose_name": "zip_importing"
                }""")
            project = json.loads("""
                {
                    "absolute_url": "/projects/docs/",
                    "analytics_code": "",
                    "copyright": "",
                    "default_branch": "",
                    "default_version": "latest",
                    "description": "Make docs.readthedocs.org work :D",
                    "django_packages_url": "",
                    "documentation_type": "sphinx",
                    "id": "2599",
                    "modified_date": "2012-03-12T19:59:09.130773",
                    "name": "docs",
                    "project_url": "",
                    "pub_date": "2012-02-19T18:10:56.582780",
                    "repo": "git://github.com/rtfd/readthedocs.org",
                    "repo_type": "git",
                    "requirements_file": "",
                    "resource_uri": "/api/v1/project/2599/",
                    "slug": "docs",
                    "subdomain": "http://docs.readthedocs.org/",
                    "suffix": ".rst",
                    "theme": "default",
                    "use_virtualenv": false,
                    "users": [
                        "/api/v1/user/1/"
                    ],
                    "version": ""
                }""")
            version['project'] = project
            # Override the canned repo URL with the one under test.
            project['repo'] = repo
            if 'slug' in kwargs:
                # Filtered queries return a result list plus the project.
                return {'objects': [version], 'project': project}
            else:
                return version
    return MockVersion
class MockApi(object):
    """Minimal mock of the slumber-style API client used by the tests."""
    def __init__(self, repo):
        # ``version`` acts as the version endpoint factory for *repo*.
        self.version = mock_version(repo)
    def project(self, x):
        # Any project id maps to the same empty project endpoint.
        return ProjectData()
@contextmanager
def mock_api(repo):
    """Context manager that patches every API entry point with one MockApi.

    BUG FIX: the original chained the patchers with ``and`` inside a single
    ``with``; ``a and b and c`` evaluates to only the *last* patcher, so only
    ``readthedocs.projects.tasks.api_v1`` was actually patched. Listing all
    patchers in one ``with`` statement applies every one of them.
    """
    api_mock = MockApi(repo)
    with mock.patch('readthedocs.restapi.client.api', api_mock), \
            mock.patch('readthedocs.api.client.api', api_mock), \
            mock.patch('readthedocs.projects.tasks.api_v2', api_mock), \
            mock.patch('readthedocs.projects.tasks.api_v1', api_mock):
        yield api_mock
| GovReady/readthedocs.org | readthedocs/rtd_tests/mocks/mock_api.py | Python | mit | 2,893 |
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright 2015 Umbrella Tech.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__author__ = 'Kelson da Costa Medeiros <kelsoncm@gmail.com>'
| kelsoncm/suapwebconf | tests/__init__.py | Python | mit | 1,170 |
import ocl
import pyocl
import camvtk
import time
import vtk
import datetime
import math
# Python 2 demo script: drop-cutter a grid of CL points onto an STL model and
# visualize the result with VTK. Requires opencamlib (ocl/pyocl) and camvtk.
if __name__ == "__main__":
    print ocl.revision()
    myscreen = camvtk.VTKScreen()
    #stl = camvtk.STLSurf("../stl/gnu_tux_mod.stl")
    stl = camvtk.STLSurf("../stl/mount_rush.stl")
    myscreen.addActor(stl)
    stl.SetWireframe()
    stl.SetColor((0.5,0.5,0.5))
    # convert the VTK polydata into an OCL STL surface
    polydata = stl.src.GetOutput()
    s = ocl.STLSurf()
    camvtk.vtkPolyData2OCLSTL(polydata, s)
    print "STL surface with", s.size(), "triangles read"
    # define a cutter (alternative cutter shapes left commented for reference)
    length=5
    cutter = ocl.BallCutter(15.4321, length)
    #cutter = ocl.CylCutter(1.123, length)
    #cutter = ocl.BullCutter(1.123, 0.2, length)
    #cutter = ocl.ConeCutter(0.43, math.pi/7, length)
    print cutter
    # define grid of CL-points covering the model, all starting at height z
    minx=-42
    dx=2
    maxx=47
    miny=-27
    dy=1
    maxy=20
    z=-55
    clpoints = pyocl.CLPointGrid(minx,dx,maxx,miny,dy,maxy,z)
    print "generated grid with", len(clpoints)," CL-points"
    # batchdropcutter: drops every CL point onto the surface in one batch
    bdc1 = ocl.BatchDropCutter()
    bdc1.setSTL(s)
    bdc1.setCutter(cutter)
    # push the points to ocl
    for p in clpoints:
        bdc1.appendPoint(p)
    # run the actual calculation (timed for the on-screen stats)
    t_before = time.time()
    bdc1.run()
    t_after = time.time()
    calctime = t_after-t_before
    print " done in ", calctime," s"
    # get back results from ocl
    clpts = bdc1.getCLPoints()
    # draw the results
    print "rendering...",
    camvtk.drawCLPointCloud(myscreen, clpts)
    print "done"
    myscreen.camera.SetPosition(25, 23, 15)
    myscreen.camera.SetFocalPoint(4, 5, 0)
    # ocl text overlay (top-right corner)
    t = camvtk.Text()
    t.SetText("OpenCAMLib")
    t.SetPos( (myscreen.width-200, myscreen.height-30) )
    myscreen.addActor( t)
    # statistics overlay: triangle count, point count and calculation time
    t2 = camvtk.Text()
    stltext = "%i triangles\n%i CL-points\n%0.1f seconds" % (s.size(), len(clpts), calctime)
    t2.SetText(stltext)
    t2.SetPos( (50, myscreen.height-100) )
    myscreen.addActor( t2)
    t3 = camvtk.Text()
    ctext = "Cutter: %s" % ( str(cutter) )
    t3.SetText(ctext)
    t3.SetPos( (50, myscreen.height-150) )
    myscreen.addActor( t3)
    myscreen.render()
    myscreen.iren.Start()
    raw_input("Press Enter to terminate")
| AlanZatarain/opencamlib | scripts/batchdropcutter_mtrush.py | Python | gpl-3.0 | 2,327 |
"""setuptools.command.egg_info
Create a distribution's .egg-info directory and contents"""
from distutils.filelist import FileList as _FileList
from distutils.errors import DistutilsInternalError
from distutils.util import convert_path
from distutils import log
import distutils.errors
import distutils.filelist
import os
import re
import sys
import io
import warnings
import time
import collections
from setuptools import Command
from setuptools.command.sdist import sdist
from setuptools.command.sdist import walk_revctrl
from setuptools.command.setopt import edit_config
from setuptools.command import bdist_egg
from pkg_resources import (
parse_requirements, safe_name, parse_version,
safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
import setuptools.unicode_utils as unicode_utils
from setuptools.glob import glob
from setuptools.extern import packaging
from setuptools import SetuptoolsDeprecationWarning
def translate_pattern(glob):
    """
    Translate a file path glob like '*.txt' in to a regular expression.
    This differs from fnmatch.translate which allows wildcards to match
    directory separators. It also knows about '**/' which matches any number of
    directories.
    """
    pat = ''
    # This will split on '/' within [character classes]. This is deliberate.
    chunks = glob.split(os.path.sep)
    sep = re.escape(os.sep)
    # A "name character" is anything except the directory separator.
    valid_char = '[^%s]' % (sep,)
    for c, chunk in enumerate(chunks):
        last_chunk = c == len(chunks) - 1
        # Chunks that are a literal ** are globstars. They match anything.
        if chunk == '**':
            if last_chunk:
                # Match anything if this is the last component
                pat += '.*'
            else:
                # Match '(name/)*'
                pat += '(?:%s+%s)*' % (valid_char, sep)
            continue  # Break here as the whole path component has been handled
        # Find any special characters in the remainder
        i = 0
        chunk_len = len(chunk)
        while i < chunk_len:
            char = chunk[i]
            if char == '*':
                # Match any number of name characters
                pat += valid_char + '*'
            elif char == '?':
                # Match a name character
                pat += valid_char
            elif char == '[':
                # Character class
                inner_i = i + 1
                # Skip initial !/] chars (']' right after '[' or '[!' is literal)
                if inner_i < chunk_len and chunk[inner_i] == '!':
                    inner_i = inner_i + 1
                if inner_i < chunk_len and chunk[inner_i] == ']':
                    inner_i = inner_i + 1
                # Loop till the closing ] is found
                while inner_i < chunk_len and chunk[inner_i] != ']':
                    inner_i = inner_i + 1
                if inner_i >= chunk_len:
                    # Got to the end of the string without finding a closing ]
                    # Do not treat this as a matching group, but as a literal [
                    pat += re.escape(char)
                else:
                    # Grab the insides of the [brackets]
                    inner = chunk[i + 1:inner_i]
                    char_class = ''
                    # Class negation: glob '!' maps to regex '^'
                    if inner[0] == '!':
                        char_class = '^'
                        inner = inner[1:]
                    char_class += re.escape(inner)
                    pat += '[%s]' % (char_class,)
                    # Skip to the end ]
                    i = inner_i
            else:
                pat += re.escape(char)
            i += 1
        # Join each chunk with the dir separator
        if not last_chunk:
            pat += sep
    # Anchor at end-of-string; DOTALL/MULTILINE keep '.' and '\Z' well-behaved
    # for paths containing newline characters.
    pat += r'\Z'
    return re.compile(pat, flags=re.MULTILINE | re.DOTALL)
class InfoCommon:
    """Mix-in with egg name/version helpers shared by the info commands."""

    tag_build = None
    tag_date = None

    @property
    def name(self):
        """The distribution name, escaped for filesystem use."""
        return safe_name(self.distribution.get_name())

    def tagged_version(self):
        """The distribution version with the configured tags appended."""
        return safe_version(self._maybe_tag(self.distribution.get_version()))

    def _maybe_tag(self, version):
        """
        egg_info may be called more than once for a distribution,
        in which case the version string already contains all tags.
        """
        # Append the tag suffix only when it is non-empty and not yet present.
        if self.vtags and version.endswith(self.vtags):
            return version
        return version + self.vtags

    def tags(self):
        """Build the tag suffix from ``tag_build`` and (optionally) the date."""
        parts = []
        if self.tag_build:
            parts.append(self.tag_build)
        if self.tag_date:
            parts.append(time.strftime("-%Y%m%d"))
        return ''.join(parts)
    vtags = property(tags)
class egg_info(InfoCommon, Command):
    """Create and populate a distribution's .egg-info directory."""
    description = "create a distribution's .egg-info directory"
    user_options = [
        ('egg-base=', 'e', "directory containing .egg-info directories"
                           " (default: top of the source tree)"),
        ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
        ('tag-build=', 'b', "Specify explicit tag to add to version number"),
        ('no-date', 'D', "Don't include date stamp [default]"),
    ]
    boolean_options = ['tag-date']
    negative_opt = {
        'no-date': 'tag-date',
    }
    def initialize_options(self):
        # All resolved in finalize_options().
        self.egg_base = None
        self.egg_name = None
        self.egg_info = None
        self.egg_version = None
        self.broken_egg_info = False
    ####################################
    # allow the 'tag_svn_revision' to be detected and
    # set, supporting sdists built on older Setuptools.
    @property
    def tag_svn_revision(self):
        pass
    @tag_svn_revision.setter
    def tag_svn_revision(self, value):
        pass
    ####################################
    def save_version_info(self, filename):
        """
        Materialize the value of date into the
        build tag. Install build keys in a deterministic order
        to avoid arbitrary reordering on subsequent builds.
        """
        egg_info = collections.OrderedDict()
        # follow the order these keys would have been added
        # when PYTHONHASHSEED=0
        egg_info['tag_build'] = self.tags()
        egg_info['tag_date'] = 0
        edit_config(filename, dict(egg_info=egg_info))
    def finalize_options(self):
        """Resolve the egg name/version/base and validate them."""
        # Note: we need to capture the current value returned
        # by `self.tagged_version()`, so we can later update
        # `self.distribution.metadata.version` without
        # repercussions.
        self.egg_name = self.name
        self.egg_version = self.tagged_version()
        parsed_version = parse_version(self.egg_version)
        try:
            # Legacy (non-PEP 440) versions need the '===' arbitrary-equality
            # operator; proper versions use '=='.
            is_version = isinstance(parsed_version, packaging.version.Version)
            spec = (
                "%s==%s" if is_version else "%s===%s"
            )
            list(
                parse_requirements(spec % (self.egg_name, self.egg_version))
            )
        except ValueError as e:
            raise distutils.errors.DistutilsOptionError(
                "Invalid distribution name or version syntax: %s-%s" %
                (self.egg_name, self.egg_version)
            ) from e
        if self.egg_base is None:
            dirs = self.distribution.package_dir
            self.egg_base = (dirs or {}).get('', os.curdir)
        self.ensure_dirname('egg_base')
        self.egg_info = to_filename(self.egg_name) + '.egg-info'
        if self.egg_base != os.curdir:
            self.egg_info = os.path.join(self.egg_base, self.egg_info)
        if '-' in self.egg_name:
            self.check_broken_egg_info()
        # Set package version for the benefit of dumber commands
        # (e.g. sdist, bdist_wininst, etc.)
        #
        self.distribution.metadata.version = self.egg_version
        # If we bootstrapped around the lack of a PKG-INFO, as might be the
        # case in a fresh checkout, make sure that any special tags get added
        # to the version info
        #
        pd = self.distribution._patched_dist
        if pd is not None and pd.key == self.egg_name.lower():
            pd._version = self.egg_version
            pd._parsed_version = parse_version(self.egg_version)
            self.distribution._patched_dist = None
    def write_or_delete_file(self, what, filename, data, force=False):
        """Write `data` to `filename` or delete if empty
        If `data` is non-empty, this routine is the same as ``write_file()``.
        If `data` is empty but not ``None``, this is the same as calling
        ``delete_file(filename)`. If `data` is ``None``, then this is a no-op
        unless `filename` exists, in which case a warning is issued about the
        orphaned file (if `force` is false), or deleted (if `force` is true).
        """
        if data:
            self.write_file(what, filename, data)
        elif os.path.exists(filename):
            if data is None and not force:
                log.warn(
                    "%s not set in setup(), but %s exists", what, filename
                )
                return
            else:
                self.delete_file(filename)
    def write_file(self, what, filename, data):
        """Write `data` to `filename` (if not a dry run) after announcing it
        `what` is used in a log message to identify what is being written
        to the file.
        """
        log.info("writing %s to %s", what, filename)
        data = data.encode("utf-8")
        if not self.dry_run:
            f = open(filename, 'wb')
            f.write(data)
            f.close()
    def delete_file(self, filename):
        """Delete `filename` (if not a dry run) after announcing it"""
        log.info("deleting %s", filename)
        if not self.dry_run:
            os.unlink(filename)
    def run(self):
        """Create the .egg-info dir and run every registered metadata writer."""
        self.mkpath(self.egg_info)
        # Touch the directory so its mtime reflects this run.
        os.utime(self.egg_info, None)
        installer = self.distribution.fetch_build_egg
        for ep in iter_entry_points('egg_info.writers'):
            ep.require(installer=installer)
            writer = ep.resolve()
            writer(self, ep.name, os.path.join(self.egg_info, ep.name))
        # Get rid of native_libs.txt if it was put there by older bdist_egg
        nl = os.path.join(self.egg_info, "native_libs.txt")
        if os.path.exists(nl):
            self.delete_file(nl)
        self.find_sources()
    def find_sources(self):
        """Generate SOURCES.txt manifest file"""
        manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
        mm = manifest_maker(self.distribution)
        mm.manifest = manifest_filename
        mm.run()
        self.filelist = mm.filelist
    def check_broken_egg_info(self):
        """Warn when a legacy '-' egg-info directory exists and fall back to it."""
        bei = self.egg_name + '.egg-info'
        if self.egg_base != os.curdir:
            bei = os.path.join(self.egg_base, bei)
        if os.path.exists(bei):
            log.warn(
                "-" * 78 + '\n'
                "Note: Your current .egg-info directory has a '-' in its name;"
                '\nthis will not work correctly with "setup.py develop".\n\n'
                'Please rename %s to %s to correct this problem.\n' + '-' * 78,
                bei, self.egg_info
            )
            self.broken_egg_info = self.egg_info
            self.egg_info = bei  # make it work for now
class FileList(_FileList):
# Implementations of the various MANIFEST.in commands
    def process_template_line(self, line):
        """Execute one MANIFEST.in template line against this file list."""
        # Parse the line: split it up, make sure the right number of words
        # is there, and return the relevant words. 'action' is always
        # defined: it's the first word of the line. Which of the other
        # three are defined depends on the action; it'll be either
        # patterns, (dir and patterns), or (dir_pattern).
        (action, patterns, dir, dir_pattern) = self._parse_template_line(line)
        # OK, now we know that the action is valid and we have the
        # right number of words on the line for that action -- so we
        # can proceed with minimal error-checking.
        if action == 'include':
            self.debug_print("include " + ' '.join(patterns))
            for pattern in patterns:
                if not self.include(pattern):
                    log.warn("warning: no files found matching '%s'", pattern)
        elif action == 'exclude':
            self.debug_print("exclude " + ' '.join(patterns))
            for pattern in patterns:
                if not self.exclude(pattern):
                    log.warn(("warning: no previously-included files "
                              "found matching '%s'"), pattern)
        elif action == 'global-include':
            self.debug_print("global-include " + ' '.join(patterns))
            for pattern in patterns:
                if not self.global_include(pattern):
                    log.warn(("warning: no files found matching '%s' "
                              "anywhere in distribution"), pattern)
        elif action == 'global-exclude':
            self.debug_print("global-exclude " + ' '.join(patterns))
            for pattern in patterns:
                if not self.global_exclude(pattern):
                    log.warn(("warning: no previously-included files matching "
                              "'%s' found anywhere in distribution"),
                             pattern)
        elif action == 'recursive-include':
            self.debug_print("recursive-include %s %s" %
                             (dir, ' '.join(patterns)))
            for pattern in patterns:
                if not self.recursive_include(dir, pattern):
                    log.warn(("warning: no files found matching '%s' "
                              "under directory '%s'"),
                             pattern, dir)
        elif action == 'recursive-exclude':
            self.debug_print("recursive-exclude %s %s" %
                             (dir, ' '.join(patterns)))
            for pattern in patterns:
                if not self.recursive_exclude(dir, pattern):
                    log.warn(("warning: no previously-included files matching "
                              "'%s' found under directory '%s'"),
                             pattern, dir)
        elif action == 'graft':
            self.debug_print("graft " + dir_pattern)
            if not self.graft(dir_pattern):
                log.warn("warning: no directories found matching '%s'",
                         dir_pattern)
        elif action == 'prune':
            self.debug_print("prune " + dir_pattern)
            if not self.prune(dir_pattern):
                log.warn(("no previously-included directories found "
                          "matching '%s'"), dir_pattern)
        else:
            # _parse_template_line should have rejected anything else already.
            raise DistutilsInternalError(
                "this cannot happen: invalid action '%s'" % action)
def _remove_files(self, predicate):
"""
Remove all files from the file list that match the predicate.
Return True if any matching files were removed
"""
found = False
for i in range(len(self.files) - 1, -1, -1):
if predicate(self.files[i]):
self.debug_print(" removing " + self.files[i])
del self.files[i]
found = True
return found
def include(self, pattern):
"""Include files that match 'pattern'."""
found = [f for f in glob(pattern) if not os.path.isdir(f)]
self.extend(found)
return bool(found)
def exclude(self, pattern):
"""Exclude files that match 'pattern'."""
match = translate_pattern(pattern)
return self._remove_files(match.match)
def recursive_include(self, dir, pattern):
"""
Include all files anywhere in 'dir/' that match the pattern.
"""
full_pattern = os.path.join(dir, '**', pattern)
found = [f for f in glob(full_pattern, recursive=True)
if not os.path.isdir(f)]
self.extend(found)
return bool(found)
def recursive_exclude(self, dir, pattern):
"""
Exclude any file anywhere in 'dir/' that match the pattern.
"""
match = translate_pattern(os.path.join(dir, '**', pattern))
return self._remove_files(match.match)
def graft(self, dir):
"""Include all files from 'dir/'."""
found = [
item
for match_dir in glob(dir)
for item in distutils.filelist.findall(match_dir)
]
self.extend(found)
return bool(found)
def prune(self, dir):
"""Filter out files from 'dir/'."""
match = translate_pattern(os.path.join(dir, '**'))
return self._remove_files(match.match)
def global_include(self, pattern):
"""
Include all files anywhere in the current directory that match the
pattern. This is very inefficient on large file trees.
"""
if self.allfiles is None:
self.findall()
match = translate_pattern(os.path.join('**', pattern))
found = [f for f in self.allfiles if match.match(f)]
self.extend(found)
return bool(found)
def global_exclude(self, pattern):
"""
Exclude all files anywhere that match the pattern.
"""
match = translate_pattern(os.path.join('**', pattern))
return self._remove_files(match.match)
def append(self, item):
if item.endswith('\r'): # Fix older sdists built on Windows
item = item[:-1]
path = convert_path(item)
if self._safe_path(path):
self.files.append(path)
def extend(self, paths):
self.files.extend(filter(self._safe_path, paths))
def _repair(self):
"""
Replace self.files with only safe paths
Because some owners of FileList manipulate the underlying
``files`` attribute directly, this method must be called to
repair those paths.
"""
self.files = list(filter(self._safe_path, self.files))
    def _safe_path(self, path):
        """Return True if 'path' is decodable, utf-8 encodable and exists.

        Returns False for undecodable/unencodable paths.  NOTE: if
        neither spelling of the path exists, or a UnicodeEncodeError is
        raised while probing, the method falls through and implicitly
        returns None (falsy), which callers treat the same as False.
        """
        enc_warn = "'%s' not %s encodable -- skipping"

        # To avoid accidental trans-codings errors, first to unicode
        u_path = unicode_utils.filesys_decode(path)
        if u_path is None:
            log.warn("'%s' in unexpected encoding -- skipping" % path)
            return False

        # Must ensure utf-8 encodability
        utf8_path = unicode_utils.try_encode(u_path, "utf-8")
        if utf8_path is None:
            log.warn(enc_warn, path, 'utf-8')
            return False

        try:
            # accept if either spelling of the path checks out on disk
            if os.path.exists(u_path) or os.path.exists(utf8_path):
                return True
        # this will catch any encode errors decoding u_path
        except UnicodeEncodeError:
            log.warn(enc_warn, path, sys.getfilesystemencoding())
class manifest_maker(sdist):
    """sdist subclass that only (re)generates the MANIFEST file.

    Builds the file list from the sdist defaults, the MANIFEST.in
    template and revision control, prunes build artifacts, then writes
    the sorted, de-duplicated list to ``self.manifest``.
    """

    template = "MANIFEST.in"

    def initialize_options(self):
        # Always rebuild the manifest from defaults + template; this
        # command never produces an actual source distribution.
        self.use_defaults = 1
        self.prune = 1
        self.manifest_only = 1
        self.force_manifest = 1

    def finalize_options(self):
        pass

    def run(self):
        """Collect, prune, sort and write the manifest file list."""
        self.filelist = FileList()
        if not os.path.exists(self.manifest):
            self.write_manifest()  # it must exist so it'll get in the list
        self.add_defaults()
        if os.path.exists(self.template):
            self.read_template()
        self.prune_file_list()
        self.filelist.sort()
        self.filelist.remove_duplicates()
        self.write_manifest()

    def _manifest_normalize(self, path):
        # Manifest entries always use forward slashes, on any platform.
        path = unicode_utils.filesys_decode(path)
        return path.replace(os.sep, '/')

    def write_manifest(self):
        """
        Write the file list in 'self.filelist' to the manifest file
        named by 'self.manifest'.
        """
        self.filelist._repair()

        # _repair() has dropped non-encodable paths; now normalize the
        # survivors to unicode with '/' separators before writing.
        files = [self._manifest_normalize(f) for f in self.filelist.files]
        msg = "writing manifest file '%s'" % self.manifest
        self.execute(write_file, (self.manifest, files), msg)

    def warn(self, msg):
        if not self._should_suppress_warning(msg):
            sdist.warn(self, msg)

    @staticmethod
    def _should_suppress_warning(msg):
        """
        Suppress missing-file warnings from sdist.
        """
        return re.match(r"standard file .*not found", msg)

    def add_defaults(self):
        """Add the sdist defaults plus template, manifest, VCS-tracked
        files, setup.py and the egg-info directory itself."""
        sdist.add_defaults(self)
        self.check_license()
        self.filelist.append(self.template)
        self.filelist.append(self.manifest)
        rcfiles = list(walk_revctrl())
        if rcfiles:
            self.filelist.extend(rcfiles)
        elif os.path.exists(self.manifest):
            # No revision control available: reuse the previous manifest.
            self.read_manifest()

        if os.path.exists("setup.py"):
            # setup.py should be included by default, even if it's not
            # the script called to create the sdist
            self.filelist.append("setup.py")

        ei_cmd = self.get_finalized_command('egg_info')
        self.filelist.graft(ei_cmd.egg_info)

    def prune_file_list(self):
        """Drop build output, the dist base dir and VCS bookkeeping dirs."""
        build = self.get_finalized_command('build')
        base_dir = self.distribution.get_fullname()
        self.filelist.prune(build.build_base)
        self.filelist.prune(base_dir)
        sep = re.escape(os.sep)
        self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
                                      is_regex=1)
def write_file(filename, contents):
    """Create (or overwrite) 'filename' and write 'contents' to it.

    'contents' is a sequence of strings without line terminators; a
    newline is inserted between successive entries.
    """
    joined = "\n".join(contents)

    # assuming the contents has been vetted for utf-8 encoding
    encoded = joined.encode("utf-8")

    # binary mode keeps the manifest POSIX-style on every platform
    with open(filename, "wb") as stream:
        stream.write(encoded)
def write_pkg_info(cmd, basename, filename):
    """Write PKG-INFO metadata plus the zip-safe flag to the egg-info dir.

    Temporarily swaps the distribution's name/version for the egg's
    values (cmd.egg_name / cmd.egg_version) so the emitted metadata
    matches the egg, restoring the originals afterwards.
    """
    log.info("writing %s", filename)
    if not cmd.dry_run:
        metadata = cmd.distribution.metadata
        metadata.version, oldver = cmd.egg_version, metadata.version
        metadata.name, oldname = cmd.egg_name, metadata.name

        try:
            # write unescaped data to PKG-INFO, so older pkg_resources
            # can still parse it
            metadata.write_pkg_info(cmd.egg_info)
        finally:
            # restore the real name/version even if the write failed
            metadata.name, metadata.version = oldname, oldver

        safe = getattr(cmd.distribution, 'zip_safe', None)

        bdist_egg.write_safety_flag(cmd.egg_info, safe)
def warn_depends_obsolete(cmd, basename, filename):
    """Warn when an obsolete depends.txt file is present."""
    if not os.path.exists(filename):
        return
    log.warn(
        "WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
        "Use the install_requires/extras_require setup() args instead."
    )
def _write_requirements(stream, reqs):
    """Write one requirement per line (with trailing newline) to stream."""
    for requirement in yield_lines(reqs or ()):
        stream.write(requirement + '\n')
def write_requirements(cmd, basename, filename):
    """Write install_requires plus a section per extra to requirements."""
    dist = cmd.distribution
    buffer = io.StringIO()
    _write_requirements(buffer, dist.install_requires)
    extras_require = dist.extras_require or {}
    for extra in sorted(extras_require):
        buffer.write('\n[{}]\n'.format(extra))
        _write_requirements(buffer, extras_require[extra])
    cmd.write_or_delete_file("requirements", filename, buffer.getvalue())
def write_setup_requirements(cmd, basename, filename):
    """Write the distribution's setup_requires to setup-requirements."""
    buffer = io.StringIO()
    _write_requirements(buffer, cmd.distribution.setup_requires)
    cmd.write_or_delete_file("setup-requirements", filename, buffer.getvalue())
def write_toplevel_names(cmd, basename, filename):
    """Write the sorted set of top-level package/module names."""
    top_level = {
        name.split('.', 1)[0]
        for name in cmd.distribution.iter_distribution_names()
    }
    cmd.write_file("top-level names", filename,
                   '\n'.join(sorted(top_level)) + '\n')
def overwrite_arg(cmd, basename, filename):
    # Delegates to write_arg with force=True so the file is always rewritten.
    write_arg(cmd, basename, filename, True)
def write_arg(cmd, basename, filename, force=False):
    """Write the distribution attribute named by 'basename' (one entry
    per line), or delete the file when the attribute is unset."""
    argname, _ = os.path.splitext(basename)
    entries = getattr(cmd.distribution, argname, None)
    payload = None if entries is None else '\n'.join(entries) + '\n'
    cmd.write_or_delete_file(argname, filename, payload, force)
def write_entries(cmd, basename, filename):
    """Write the distribution's entry points to 'filename' in INI form.

    ``entry_points`` may be a pre-formatted string (written verbatim),
    None (the file is deleted), or a mapping of section name -> entries.
    """
    ep = cmd.distribution.entry_points

    if isinstance(ep, str) or ep is None:
        data = ep
    else:
        # Mapping of section -> entries.  (The original guarded this
        # branch with a redundant `elif ep is not None`, which is always
        # true here; a plain `else` expresses the same thing.)
        data = []
        for section, contents in sorted(ep.items()):
            if not isinstance(contents, str):
                contents = EntryPoint.parse_group(section, contents)
                contents = '\n'.join(sorted(map(str, contents.values())))
            data.append('[%s]\n%s\n\n' % (section, contents))
        data = ''.join(data)

    cmd.write_or_delete_file('entry points', filename, data, True)
def get_pkg_info_revision():
    """
    Get a -r### off of PKG-INFO Version in case this is an sdist of
    a subversion revision.  Returns 0 when no revision is found.
    """
    warnings.warn(
        "get_pkg_info_revision is deprecated.", EggInfoDeprecationWarning)
    if not os.path.exists('PKG-INFO'):
        return 0
    with io.open('PKG-INFO') as pkg_info:
        for line in pkg_info:
            match = re.match(r"Version:.*-r(\d+)\s*$", line)
            if match:
                return int(match.group(1))
    return 0
# Subclassing SetuptoolsDeprecationWarning (rather than the stock
# DeprecationWarning) keeps these warnings visible even where the default
# DeprecationWarning filter would hide them.
class EggInfoDeprecationWarning(SetuptoolsDeprecationWarning):
    """Deprecated behavior warning for EggInfo, bypassing suppression."""
| RalfBarkow/Zettelkasten | venv/lib/python3.9/site-packages/setuptools/command/egg_info.py | Python | gpl-3.0 | 25,495 |
""" Client-side transfer class for monitoring system
"""
import time
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC import S_OK
class SiteMapClient:
  """Caching client for the Framework/SiteMap service.

  Fetched site data is cached for 300 seconds before being refreshed.
  """

  ###########################################################################
  def __init__( self, getRPCClient = None ):
    # Optional factory for RPC clients (used by tests); falls back to
    # a real RPCClient when not supplied.
    self.getRPCClient = getRPCClient
    self.lastDataRetrievalTime = 0
    self.sitesData = {}

  def __getRPCClient( self ):
    if self.getRPCClient:
      return self.getRPCClient( "Framework/SiteMap" )
    return RPCClient( "Framework/SiteMap" )

  ###########################################################################
  def getSitesData( self ):
    """ Return the sites data, refreshing it from the service when the
        cached copy is older than 300 seconds (or was never fetched).
    """
    # BUG FIX: the original condition
    #   self.lastDataRetrievalTime - time.time() < 300
    # is always true (the last retrieval time can never be in the
    # future), so the cache was never used and every call hit the
    # service.  Refresh only when the cached data is stale.
    if time.time() - self.lastDataRetrievalTime > 300:
      result = self.__getRPCClient().getSitesData()
      if 'rpcStub' in result:
        del( result[ 'rpcStub' ] )
      if not result[ 'OK' ]:
        return result
      self.sitesData = result[ 'Value' ]
      if self.sitesData:
        self.lastDataRetrievalTime = time.time()
    return S_OK( self.sitesData )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| Sbalbp/DIRAC | FrameworkSystem/Client/SiteMapClient.py | Python | gpl-3.0 | 1,184 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Michael DeHaan <michael@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: say
version_added: "1.2"
short_description: Makes a computer to speak.
description:
- makes a computer speak! Amuse your friends, annoy your coworkers!
notes:
- In 2.5, this module has been renamed from C(osx_say) to M(say).
- If you like this module, you may also be interested in the osx_say callback plugin.
options:
msg:
description:
What to say
required: true
voice:
description:
What voice to use
required: false
requirements: [ say or espeak or espeak-ng ]
author:
- "Ansible Core Team"
- "Michael DeHaan (@mpdehaan)"
'''
EXAMPLES = '''
- say:
msg: '{{ inventory_hostname }} is all done'
voice: Zarvox
delegate_to: localhost
'''
import os
from ansible.module_utils.basic import AnsibleModule, get_platform
def say(module, executable, msg, voice):
    """Invoke the speech executable with the message and optional voice."""
    args = [executable, msg]
    if voice:
        args += ['-v', voice]
    module.run_command(args, check_rc=True)
def main():
    """Module entry point: locate a speech binary and speak the message."""
    module = AnsibleModule(
        argument_spec=dict(
            msg=dict(required=True),
            voice=dict(required=False),
        ),
        supports_check_mode=True
    )

    msg = module.params['msg']
    voice = module.params['voice']
    possibles = ('say', 'espeak', 'espeak-ng')

    if get_platform() != 'Darwin':
        # Off macOS, a 'say' binary might be the GNUstep tool, which
        # doesn't support the 'voice' parameter -- drop it to be safe.
        voice = None

    # Use the first speech binary found on the PATH.
    for possible in possibles:
        executable = module.get_bin_path(possible)
        if executable:
            break
    else:
        module.fail_json(msg='Unable to find either %s' % ', '.join(possibles))

    if module.check_mode:
        # Check mode: report what would be said without invoking anything.
        module.exit_json(msg=msg, changed=False)

    say(module, executable, msg, voice)

    module.exit_json(msg=msg, changed=True)
if __name__ == '__main__':
main()
| skg-net/ansible | lib/ansible/modules/notification/say.py | Python | gpl-3.0 | 2,275 |
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import hashlib
import functools
import bleach
# BEGIN PATCH
import html5lib
from html5lib.serializer.htmlserializer import HTMLSerializer
def _serialize(domtree):
    """Serialize an html5lib DOM tree back to HTML markup.

    Configured with quoted attribute values, no optional-tag omission
    and alphabetical attribute order; patched into bleach below.
    """
    tree_walker = html5lib.treewalkers.getTreeWalker('etree')
    serializer = HTMLSerializer(quote_attr_values=True,
                                omit_optional_tags=False,
                                alphabetical_attributes=True)
    return serializer.render(tree_walker(domtree))
bleach._serialize = _serialize
# END PATCH
from django.core.cache import cache
from django.utils.encoding import force_bytes
from markdown import Markdown
from .extensions.autolink import AutolinkExtension
from .extensions.automail import AutomailExtension
from .extensions.semi_sane_lists import SemiSaneListExtension
from .extensions.spaced_link import SpacedLinkExtension
from .extensions.strikethrough import StrikethroughExtension
from .extensions.wikilinks import WikiLinkExtension
from .extensions.emojify import EmojifyExtension
from .extensions.mentions import MentionsExtension
from .extensions.references import TaigaReferencesExtension
from .extensions.target_link import TargetBlankLinkExtension
# Bleach configuration
bleach.ALLOWED_TAGS += ["p", "table", "thead", "tbody", "th", "tr", "td", "h1",
"h2", "h3", "h4", "h5", "h6", "div", "pre", "span",
"hr", "dl", "dt", "dd", "sup", "img", "del", "br",
"ins"]
bleach.ALLOWED_STYLES.append("background")
bleach.ALLOWED_ATTRIBUTES["a"] = ["href", "title", "alt", "target"]
bleach.ALLOWED_ATTRIBUTES["img"] = ["alt", "src"]
bleach.ALLOWED_ATTRIBUTES["*"] = ["class", "style"]
def _make_extensions_list(project=None):
    """Build the ordered Markdown extension list used by the renderers.

    The trailing entries are names of stock Markdown extensions; the
    rest are Taiga-specific extension instances (some project-aware).
    """
    extensions = [
        AutolinkExtension(),
        AutomailExtension(),
        SemiSaneListExtension(),
        SpacedLinkExtension(),
        StrikethroughExtension(),
        WikiLinkExtension(project),
        EmojifyExtension(),
        MentionsExtension(),
        TaigaReferencesExtension(project),
        TargetBlankLinkExtension(),
    ]
    extensions += ["extra", "codehilite", "sane_lists", "toc", "nl2br"]
    return extensions
import diff_match_patch
def cache_by_sha(func):
    """Memoize ``func(project, text)`` keyed by (sha1(text), project.id).

    Entries are stored in Django's cache with ``timeout=None`` (cache
    forever under Django's cache API), so identical text for the same
    project is only rendered once.
    """
    @functools.wraps(func)
    def _decorator(project, text):
        sha1_hash = hashlib.sha1(force_bytes(text)).hexdigest()
        key = "{}-{}".format(sha1_hash, project.id)

        # Try to get it from the cache
        cached = cache.get(key)
        if cached is not None:
            return cached

        returned_value = func(project, text)
        cache.set(key, returned_value, timeout=None)
        return returned_value

    return _decorator
def _get_markdown(project):
    """Create a Markdown renderer configured for ``project``.

    The renderer carries an ``extracted_data`` dict that the mention and
    reference extensions fill in during conversion.
    """
    md = Markdown(extensions=_make_extensions_list(project=project))
    md.extracted_data = {"mentions": [], "references": []}
    return md
@cache_by_sha
def render(project, text):
    # Markdown -> HTML, sanitized with bleach; results are memoized per
    # (sha1(text), project.id) by the cache_by_sha decorator.
    md = _get_markdown(project)
    return bleach.clean(md.convert(text))
def render_and_extract(project, text):
    """Render markdown and also return the mentions/references found.

    Unlike render(), this is uncached so extracted_data is always fresh.
    """
    md = _get_markdown(project)
    html = bleach.clean(md.convert(text))
    return (html, md.extracted_data)
class DiffMatchPatch(diff_match_patch.diff_match_patch):
    def diff_pretty_html(self, diffs):
        """Render a diff as HTML using ins/del/span fragments.

        BUG FIX: the escaping calls had degenerated into no-ops
        (e.g. ``replace("&", "&")``) -- the HTML entity references were
        lost, leaving raw markup unescaped in the output.  Restored the
        standard ``&amp;`` / ``&lt;`` / ``&gt;`` replacements.
        """
        html = []
        for (op, data) in diffs:
            text = (data.replace("&", "&amp;").replace("<", "&lt;")
                        .replace(">", "&gt;").replace("\n", "<br />"))
            if op == self.DIFF_INSERT:
                html.append("<ins style=\"background:#e6ffe6;\">%s</ins>" % text)
            elif op == self.DIFF_DELETE:
                html.append("<del style=\"background:#ffe6e6;\">%s</del>" % text)
            elif op == self.DIFF_EQUAL:
                html.append("<span>%s</span>" % text)
        return "".join(html)
def get_diff_of_htmls(html1, html2):
    """Return an HTML-formatted semantic diff between two HTML strings."""
    differ = DiffMatchPatch()
    diff = differ.diff_main(html1, html2)
    differ.diff_cleanupSemantic(diff)
    return differ.diff_pretty_html(diff)
__all__ = ["render", "get_diff_of_htmls", "render_and_extract"]
| rajiteh/taiga-back | taiga/mdrender/service.py | Python | agpl-3.0 | 4,927 |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_NV_register_combiners'
def _f( function ):
    # Autogenerated binding helper: registers `function` against the GL
    # platform for this extension, attaching the standard GL error checker.
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_NV_register_combiners',error_checker=_errors._error_checker)
GL_BIAS_BY_NEGATIVE_ONE_HALF_NV=_C('GL_BIAS_BY_NEGATIVE_ONE_HALF_NV',0x8541)
GL_COLOR_SUM_CLAMP_NV=_C('GL_COLOR_SUM_CLAMP_NV',0x854F)
GL_COMBINER0_NV=_C('GL_COMBINER0_NV',0x8550)
GL_COMBINER1_NV=_C('GL_COMBINER1_NV',0x8551)
GL_COMBINER2_NV=_C('GL_COMBINER2_NV',0x8552)
GL_COMBINER3_NV=_C('GL_COMBINER3_NV',0x8553)
GL_COMBINER4_NV=_C('GL_COMBINER4_NV',0x8554)
GL_COMBINER5_NV=_C('GL_COMBINER5_NV',0x8555)
GL_COMBINER6_NV=_C('GL_COMBINER6_NV',0x8556)
GL_COMBINER7_NV=_C('GL_COMBINER7_NV',0x8557)
GL_COMBINER_AB_DOT_PRODUCT_NV=_C('GL_COMBINER_AB_DOT_PRODUCT_NV',0x8545)
GL_COMBINER_AB_OUTPUT_NV=_C('GL_COMBINER_AB_OUTPUT_NV',0x854A)
GL_COMBINER_BIAS_NV=_C('GL_COMBINER_BIAS_NV',0x8549)
GL_COMBINER_CD_DOT_PRODUCT_NV=_C('GL_COMBINER_CD_DOT_PRODUCT_NV',0x8546)
GL_COMBINER_CD_OUTPUT_NV=_C('GL_COMBINER_CD_OUTPUT_NV',0x854B)
GL_COMBINER_COMPONENT_USAGE_NV=_C('GL_COMBINER_COMPONENT_USAGE_NV',0x8544)
GL_COMBINER_INPUT_NV=_C('GL_COMBINER_INPUT_NV',0x8542)
GL_COMBINER_MAPPING_NV=_C('GL_COMBINER_MAPPING_NV',0x8543)
GL_COMBINER_MUX_SUM_NV=_C('GL_COMBINER_MUX_SUM_NV',0x8547)
GL_COMBINER_SCALE_NV=_C('GL_COMBINER_SCALE_NV',0x8548)
GL_COMBINER_SUM_OUTPUT_NV=_C('GL_COMBINER_SUM_OUTPUT_NV',0x854C)
GL_CONSTANT_COLOR0_NV=_C('GL_CONSTANT_COLOR0_NV',0x852A)
GL_CONSTANT_COLOR1_NV=_C('GL_CONSTANT_COLOR1_NV',0x852B)
GL_DISCARD_NV=_C('GL_DISCARD_NV',0x8530)
GL_EXPAND_NEGATE_NV=_C('GL_EXPAND_NEGATE_NV',0x8539)
GL_EXPAND_NORMAL_NV=_C('GL_EXPAND_NORMAL_NV',0x8538)
GL_E_TIMES_F_NV=_C('GL_E_TIMES_F_NV',0x8531)
GL_FOG=_C('GL_FOG',0x0B60)
GL_HALF_BIAS_NEGATE_NV=_C('GL_HALF_BIAS_NEGATE_NV',0x853B)
GL_HALF_BIAS_NORMAL_NV=_C('GL_HALF_BIAS_NORMAL_NV',0x853A)
GL_MAX_GENERAL_COMBINERS_NV=_C('GL_MAX_GENERAL_COMBINERS_NV',0x854D)
GL_NONE=_C('GL_NONE',0)
GL_NUM_GENERAL_COMBINERS_NV=_C('GL_NUM_GENERAL_COMBINERS_NV',0x854E)
GL_PRIMARY_COLOR_NV=_C('GL_PRIMARY_COLOR_NV',0x852C)
GL_REGISTER_COMBINERS_NV=_C('GL_REGISTER_COMBINERS_NV',0x8522)
GL_SCALE_BY_FOUR_NV=_C('GL_SCALE_BY_FOUR_NV',0x853F)
GL_SCALE_BY_ONE_HALF_NV=_C('GL_SCALE_BY_ONE_HALF_NV',0x8540)
GL_SCALE_BY_TWO_NV=_C('GL_SCALE_BY_TWO_NV',0x853E)
GL_SECONDARY_COLOR_NV=_C('GL_SECONDARY_COLOR_NV',0x852D)
GL_SIGNED_IDENTITY_NV=_C('GL_SIGNED_IDENTITY_NV',0x853C)
GL_SIGNED_NEGATE_NV=_C('GL_SIGNED_NEGATE_NV',0x853D)
GL_SPARE0_NV=_C('GL_SPARE0_NV',0x852E)
GL_SPARE0_PLUS_SECONDARY_COLOR_NV=_C('GL_SPARE0_PLUS_SECONDARY_COLOR_NV',0x8532)
GL_SPARE1_NV=_C('GL_SPARE1_NV',0x852F)
GL_TEXTURE0_ARB=_C('GL_TEXTURE0_ARB',0x84C0)
GL_TEXTURE1_ARB=_C('GL_TEXTURE1_ARB',0x84C1)
GL_UNSIGNED_IDENTITY_NV=_C('GL_UNSIGNED_IDENTITY_NV',0x8536)
GL_UNSIGNED_INVERT_NV=_C('GL_UNSIGNED_INVERT_NV',0x8537)
GL_VARIABLE_A_NV=_C('GL_VARIABLE_A_NV',0x8523)
GL_VARIABLE_B_NV=_C('GL_VARIABLE_B_NV',0x8524)
GL_VARIABLE_C_NV=_C('GL_VARIABLE_C_NV',0x8525)
GL_VARIABLE_D_NV=_C('GL_VARIABLE_D_NV',0x8526)
GL_VARIABLE_E_NV=_C('GL_VARIABLE_E_NV',0x8527)
GL_VARIABLE_F_NV=_C('GL_VARIABLE_F_NV',0x8528)
GL_VARIABLE_G_NV=_C('GL_VARIABLE_G_NV',0x8529)
GL_ZERO=_C('GL_ZERO',0)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum)
def glCombinerInputNV(stage,portion,variable,input,mapping,componentUsage):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLboolean,_cs.GLboolean,_cs.GLboolean)
def glCombinerOutputNV(stage,portion,abOutput,cdOutput,sumOutput,scale,bias,abDotProduct,cdDotProduct,muxSum):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLfloat)
def glCombinerParameterfNV(pname,param):pass
@_f
@_p.types(None,_cs.GLenum,arrays.GLfloatArray)
def glCombinerParameterfvNV(pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint)
def glCombinerParameteriNV(pname,param):pass
@_f
@_p.types(None,_cs.GLenum,arrays.GLintArray)
def glCombinerParameterivNV(pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum)
def glFinalCombinerInputNV(variable,input,mapping,componentUsage):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum,arrays.GLfloatArray)
def glGetCombinerInputParameterfvNV(stage,portion,variable,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetCombinerInputParameterivNV(stage,portion,variable,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,arrays.GLfloatArray)
def glGetCombinerOutputParameterfvNV(stage,portion,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetCombinerOutputParameterivNV(stage,portion,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLfloatArray)
def glGetFinalCombinerInputParameterfvNV(variable,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetFinalCombinerInputParameterivNV(variable,pname,params):pass
| stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/raw/GL/NV/register_combiners.py | Python | lgpl-3.0 | 5,260 |
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
L{DSSKey}
"""
from Crypto.PublicKey import DSA
from Crypto.Hash import SHA
from paramiko.common import *
from paramiko import util
from paramiko.ssh_exception import SSHException
from paramiko.message import Message
from paramiko.ber import BER, BERException
from paramiko.pkey import PKey
class DSSKey (PKey):
    """
    Representation of a DSS key which can be used to sign and verify SSH2
    data.

    NOTE: this is Python 2 / PyCrypto-era paramiko code (see the
    ``except BERException, x`` syntax below).
    """

    def __init__(self, msg=None, data=None, filename=None, password=None, vals=None, file_obj=None):
        # Public parameters (p, q, g, y); x is the private exponent and
        # stays None for public-only keys.
        self.p = None
        self.q = None
        self.g = None
        self.y = None
        self.x = None
        if file_obj is not None:
            self._from_private_key(file_obj, password)
            return
        if filename is not None:
            self._from_private_key_file(filename, password)
            return
        if (msg is None) and (data is not None):
            msg = Message(data)
        if vals is not None:
            self.p, self.q, self.g, self.y = vals
        else:
            # Parse the SSH2 public-key blob: type string + 4 mpints.
            if msg is None:
                raise SSHException('Key object may not be empty')
            if msg.get_string() != 'ssh-dss':
                raise SSHException('Invalid key')
            self.p = msg.get_mpint()
            self.q = msg.get_mpint()
            self.g = msg.get_mpint()
            self.y = msg.get_mpint()
        self.size = util.bit_length(self.p)

    def __str__(self):
        # SSH2 wire encoding of the public-key blob.
        m = Message()
        m.add_string('ssh-dss')
        m.add_mpint(self.p)
        m.add_mpint(self.q)
        m.add_mpint(self.g)
        m.add_mpint(self.y)
        return str(m)

    def __hash__(self):
        # Hash only the public parameters, so a public and a private key
        # with the same parameters hash equal.
        h = hash(self.get_name())
        h = h * 37 + hash(self.p)
        h = h * 37 + hash(self.q)
        h = h * 37 + hash(self.g)
        h = h * 37 + hash(self.y)
        # h might be a long by now...
        return hash(h)

    def get_name(self):
        return 'ssh-dss'

    def get_bits(self):
        return self.size

    def can_sign(self):
        # Only keys holding the private exponent x can sign.
        return self.x is not None

    def sign_ssh_data(self, rng, data):
        """Sign 'data' with this key; returns an ssh-dss signature Message."""
        digest = SHA.new(data).digest()
        dss = DSA.construct((long(self.y), long(self.g), long(self.p), long(self.q), long(self.x)))
        # generate a suitable (random, 2 < k < q) per-signature nonce k
        qsize = len(util.deflate_long(self.q, 0))
        while True:
            k = util.inflate_long(rng.read(qsize), 1)
            if (k > 2) and (k < self.q):
                break
        r, s = dss.sign(util.inflate_long(digest, 1), k)
        m = Message()
        m.add_string('ssh-dss')
        # apparently, in rare cases, r or s may be shorter than 20 bytes!
        rstr = util.deflate_long(r, 0)
        sstr = util.deflate_long(s, 0)
        if len(rstr) < 20:
            rstr = '\x00' * (20 - len(rstr)) + rstr
        if len(sstr) < 20:
            sstr = '\x00' * (20 - len(sstr)) + sstr
        m.add_string(rstr + sstr)
        return m

    def verify_ssh_sig(self, data, msg):
        """Return 1/True if 'msg' holds a valid ssh-dss signature of 'data'."""
        if len(str(msg)) == 40:
            # spies.com bug: signature has no header
            sig = str(msg)
        else:
            kind = msg.get_string()
            if kind != 'ssh-dss':
                return 0
            sig = msg.get_string()

        # pull out (r, s) which are NOT encoded as mpints
        sigR = util.inflate_long(sig[:20], 1)
        sigS = util.inflate_long(sig[20:], 1)
        sigM = util.inflate_long(SHA.new(data).digest(), 1)

        dss = DSA.construct((long(self.y), long(self.g), long(self.p), long(self.q)))
        return dss.verify(sigM, (sigR, sigS))

    def _encode_key(self):
        # BER-encode the private key in OpenSSL's DSAPrivateKey layout.
        if self.x is None:
            raise SSHException('Not enough key information')
        keylist = [ 0, self.p, self.q, self.g, self.y, self.x ]
        try:
            b = BER()
            b.encode(keylist)
        except BERException:
            raise SSHException('Unable to create ber encoding of key')
        return str(b)

    def write_private_key_file(self, filename, password=None):
        self._write_private_key_file('DSA', filename, self._encode_key(), password)

    def write_private_key(self, file_obj, password=None):
        self._write_private_key('DSA', file_obj, self._encode_key(), password)

    def generate(bits=1024, progress_func=None):
        """
        Generate a new private DSS key.  This factory function can be used to
        generate a new host key or authentication key.

        @param bits: number of bits the generated key should be.
        @type bits: int
        @param progress_func: an optional function to call at key points in
            key generation (used by C{pyCrypto.PublicKey}).
        @type progress_func: function
        @return: new private key
        @rtype: L{DSSKey}
        """
        # NOTE(review): `rng` is not defined in this method -- presumably it
        # comes from the `from paramiko.common import *` at module top;
        # verify before relying on this code path.
        dsa = DSA.generate(bits, rng.read, progress_func)
        key = DSSKey(vals=(dsa.p, dsa.q, dsa.g, dsa.y))
        key.x = dsa.x
        return key
    generate = staticmethod(generate)

    ### internals...

    def _from_private_key_file(self, filename, password):
        data = self._read_private_key_file('DSA', filename, password)
        self._decode_key(data)

    def _from_private_key(self, file_obj, password):
        data = self._read_private_key('DSA', file_obj, password)
        self._decode_key(data)

    def _decode_key(self, data):
        # private key file contains:
        # DSAPrivateKey = { version = 0, p, q, g, y, x }
        try:
            keylist = BER(data).decode()
        except BERException, x:
            raise SSHException('Unable to parse key file: ' + str(x))
        if (type(keylist) is not list) or (len(keylist) < 6) or (keylist[0] != 0):
            raise SSHException('not a valid DSA private key file (bad ber encoding)')
        self.p = keylist[1]
        self.q = keylist[2]
        self.g = keylist[3]
        self.y = keylist[4]
        self.x = keylist[5]
        self.size = util.bit_length(self.p)
| noslenfa/tdjangorest | uw/lib/python2.7/site-packages/paramiko/dsskey.py | Python | apache-2.0 | 6,726 |
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared testing utilities."""
# Avoid the grpc and google.cloud.grpc collision.
from __future__ import absolute_import
class _Monkey(object):
    """Context manager: temporarily replace attributes on ``module``.

    Every keyword argument names an attribute to patch; the original
    values are restored on exit.
    """

    def __init__(self, module, **kw):
        self.module = module
        if not kw:  # pragma: NO COVER
            raise ValueError('_Monkey was used with nothing to monkey-patch')
        self.to_restore = {name: getattr(module, name) for name in kw}
        for name, replacement in kw.items():
            setattr(module, name, replacement)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        for name, original in self.to_restore.items():
            setattr(self.module, name, original)
class _NamedTemporaryFile(object):
    """Context manager exposing the name of a fresh temporary file.

    The file is created immediately (its handle closed), and removed
    on context exit.
    """

    def __init__(self, suffix=''):
        import os
        import tempfile
        descriptor, self.name = tempfile.mkstemp(suffix=suffix)
        os.close(descriptor)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        import os
        os.remove(self.name)
def _tempdir_maker():
    """Return a context-manager factory that yields throwaway directories."""
    import contextlib
    import shutil
    import tempfile

    @contextlib.contextmanager
    def _tempdir_mgr():
        scratch_dir = tempfile.mkdtemp()
        yield scratch_dir
        shutil.rmtree(scratch_dir)

    return _tempdir_mgr


# Expose the manager and hide the factory.
_tempdir = _tempdir_maker()
del _tempdir_maker
class _GAXBaseAPI(object):
    """Base fake GAX API object; stub attributes are supplied as kwargs."""

    _random_gax_error = False

    def __init__(self, **kw):
        self.__dict__.update(kw)

    def _make_grpc_error(self, status_code, trailing=None):
        # Build a GrpcRendezvous exception carrying the given status code,
        # mimicking what a live gRPC call would raise.
        from grpc._channel import _RPCState
        from google.cloud.exceptions import GrpcRendezvous

        details = 'Some error details.'
        exc_state = _RPCState((), None, trailing, status_code, details)
        return GrpcRendezvous(exc_state, None, None, None)

    def _make_grpc_not_found(self):
        from grpc import StatusCode
        return self._make_grpc_error(StatusCode.NOT_FOUND)

    def _make_grpc_failed_precondition(self):
        from grpc import StatusCode
        return self._make_grpc_error(StatusCode.FAILED_PRECONDITION)

    def _make_grpc_deadline_exceeded(self):
        from grpc import StatusCode
        return self._make_grpc_error(StatusCode.DEADLINE_EXCEEDED)
class _GAXPageIterator(object):
    """Fake GAX page iterator returning canned pages.

    :param pages: sequences returned by successive ``next`` calls.
    :param page_token: optional token stored for inspection by tests.
    """

    def __init__(self, *pages, **kwargs):
        self._pages = iter(pages)
        self.page_token = kwargs.get('page_token')

    def next(self):
        # Use the builtin next() instead of six.next(): the builtin has
        # been available since Python 2.6 and behaves identically, so the
        # third-party compatibility shim (and its lazy import) is
        # unnecessary.
        return next(self._pages)

    __next__ = next
"""
Some helper functions to analyze the output of sys.getdxp() (which is
only available if Python was built with -DDYNAMIC_EXECUTION_PROFILE).
These will tell you which opcodes have been executed most frequently
in the current process, and, if Python was also built with -DDXPAIRS,
will tell you which instruction _pairs_ were executed most frequently,
which may help in choosing new instructions.
If Python was built without -DDYNAMIC_EXECUTION_PROFILE, importing
this module will raise a RuntimeError.
If you're running a script you want to profile, a simple way to get
the common pairs is:
$ PYTHONPATH=$PYTHONPATH:<python_srcdir>/Tools/scripts \
./python -i -O the_script.py --args
...
> from analyze_dxp import *
> s = render_common_pairs()
> open('/tmp/some_file', 'w').write(s)
"""
import copy
import opcode
import operator
import sys
import threading
if not hasattr(sys, "getdxp"):
raise RuntimeError("Can't import analyze_dxp: Python built without"
" -DDYNAMIC_EXECUTION_PROFILE.")
_profile_lock = threading.RLock()
_cumulative_profile = sys.getdxp()
# If Python was built with -DDXPAIRS, sys.getdxp() returns a list of
# lists of ints. Otherwise it returns just a list of ints.
def has_pairs(profile):
    """Returns True if the Python that produced the argument profile
    was built with -DDXPAIRS (i.e. entries are per-pair count lists)."""
    return bool(profile) and isinstance(profile[0], list)
def reset_profile():
    """Forgets any execution profile that has been gathered so far."""
    with _profile_lock:
        # Two getdxp() calls: the first clears the interpreter's internal
        # counters, the second captures the now-zeroed profile as our
        # new baseline.
        sys.getdxp()  # Resets the internal profile
        global _cumulative_profile
        _cumulative_profile = sys.getdxp()  # 0s out our copy.
def merge_profile():
    """Reads sys.getdxp() and merges it into this module's cached copy.

    We need this because sys.getdxp() 0s itself every time it's called."""
    with _profile_lock:
        new_profile = sys.getdxp()
        if has_pairs(new_profile):
            # Pair profile: a matrix indexed by (first, second) opcode.
            for first_inst in range(len(_cumulative_profile)):
                for second_inst in range(len(_cumulative_profile[first_inst])):
                    _cumulative_profile[first_inst][second_inst] += (
                        new_profile[first_inst][second_inst])
        else:
            # Flat profile: one counter per opcode.
            for inst in range(len(_cumulative_profile)):
                _cumulative_profile[inst] += new_profile[inst]
def snapshot_profile():
    """Returns the cumulative execution profile until this call.

    The result is a deep copy, so mutating it does not affect the
    module's internal tally."""
    with _profile_lock:
        merge_profile()
        return copy.deepcopy(_cumulative_profile)
def common_instructions(profile):
    """Returns the most common opcodes in order of descending frequency.

    The result is a list of tuples of the form
        (opcode, opname, # of occurrences)
    """
    # A pair profile keeps the single-opcode counters in its last row.
    if profile and isinstance(profile[0], list):
        counts = profile[-1]
    else:
        counts = profile
    frequent = [(op, opcode.opname[op], count)
                for op, count in enumerate(counts)
                if count > 0]
    frequent.sort(key=operator.itemgetter(2), reverse=True)
    return frequent
def common_pairs(profile):
    """Return ((op1, op2), (name1, name2), count) tuples, most frequent first.

    Only meaningful for -DDXPAIRS profiles; returns [] otherwise.  The
    final row of a pair profile holds the single-opcode counts and is
    therefore skipped.
    """
    if not has_pairs(profile):
        return []
    ranked = [((first, second),
               (opcode.opname[first], opcode.opname[second]),
               n)
              # profile[:-1] drops the trailing single-op row
              for first, row in enumerate(profile[:-1])
              for second, n in enumerate(row)
              if n > 0]
    ranked.sort(key=lambda entry: entry[2], reverse=True)
    return ranked
def render_common_pairs(profile=None):
    """Render the most common opcode pairs, one per line, as a string.

    Each line reads "<count>: (<opname1>, <opname2>)", most frequent
    first.  When *profile* is None, the cumulative snapshot is used.
    """
    if profile is None:
        profile = snapshot_profile()
    lines = ["%s: %s\n" % (count, names)
             for _, names, count in common_pairs(profile)]
    return ''.join(lines)
| google/google-ctf | third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/analyze_dxp.py | Python | apache-2.0 | 4,312 |
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Connection plugin."""
import gettext
from otopi import constants as otopicons
from otopi import plugin, util
from ovirt_engine import configfile
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine_common import constants as oengcommcons
from ovirt_engine_setup.engine_common import database
def _(m):
    """Translate message *m* in the ovirt-engine-setup gettext domain."""
    return gettext.dgettext(domain='ovirt-engine-setup', message=m)
@util.export
class Plugin(plugin.PluginBase):
    """Connection plugin.

    Manages the engine database connection environment: hides the DB
    password from the logs at boot, seeds defaults for every engine-DB
    environment key at init, and at setup attempts to reuse the
    credentials already stored in the engine service config files.
    """
    def __init__(self, context):
        super(Plugin, self).__init__(context=context)
    @plugin.event(
        stage=plugin.Stages.STAGE_BOOT,
    )
    def _boot(self):
        # Never let the engine DB password leak into otopi's log files.
        self.environment[
            otopicons.CoreEnv.LOG_FILTER_KEYS
        ].append(
            oenginecons.EngineDBEnv.PASSWORD
        )
    @plugin.event(
        stage=plugin.Stages.STAGE_INIT,
    )
    def _init(self):
        # Seed every engine-DB environment key so later stages can rely
        # on its presence; None means "not decided yet".
        self.environment.setdefault(
            oenginecons.EngineDBEnv.HOST,
            None
        )
        self.environment.setdefault(
            oenginecons.EngineDBEnv.PORT,
            None
        )
        self.environment.setdefault(
            oenginecons.EngineDBEnv.SECURED,
            None
        )
        self.environment.setdefault(
            oenginecons.EngineDBEnv.SECURED_HOST_VALIDATION,
            None
        )
        self.environment.setdefault(
            oenginecons.EngineDBEnv.USER,
            None
        )
        self.environment.setdefault(
            oenginecons.EngineDBEnv.PASSWORD,
            None
        )
        self.environment.setdefault(
            oenginecons.EngineDBEnv.DATABASE,
            None
        )
        # Dump/restore tooling defaults.
        self.environment.setdefault(
            oenginecons.EngineDBEnv.DUMPER,
            oenginecons.Defaults.DEFAULT_DB_DUMPER
        )
        self.environment.setdefault(
            oenginecons.EngineDBEnv.FILTER,
            oenginecons.Defaults.DEFAULT_DB_FILTER
        )
        self.environment.setdefault(
            oenginecons.EngineDBEnv.RESTORE_JOBS,
            oenginecons.Defaults.DEFAULT_DB_RESTORE_JOBS
        )
        # Runtime connection state, populated once a connection is made.
        self.environment[oenginecons.EngineDBEnv.CONNECTION] = None
        self.environment[oenginecons.EngineDBEnv.STATEMENT] = None
        self.environment[oenginecons.EngineDBEnv.NEW_DATABASE] = True
    @plugin.event(
        stage=plugin.Stages.STAGE_SETUP,
        name=oengcommcons.Stages.DB_CONNECTION_SETUP,
    )
    def _setup(self):
        dbovirtutils = database.OvirtUtils(
            plugin=self,
            dbenvkeys=oenginecons.Const.ENGINE_DB_ENV_KEYS,
        )
        dbovirtutils.detectCommands()
        # Read the engine service configuration: defaults first, then
        # the local override file.
        config = configfile.ConfigFile([
            oenginecons.FileLocations.OVIRT_ENGINE_SERVICE_CONFIG_DEFAULTS,
            oenginecons.FileLocations.OVIRT_ENGINE_SERVICE_CONFIG
        ])
        if config.get('ENGINE_DB_PASSWORD'):
            # A password is already configured: try to reuse the stored
            # credentials instead of prompting the user later.
            try:
                dbenv = {}
                # String-valued settings.
                for e, k in (
                    (oenginecons.EngineDBEnv.HOST, 'ENGINE_DB_HOST'),
                    (oenginecons.EngineDBEnv.PORT, 'ENGINE_DB_PORT'),
                    (oenginecons.EngineDBEnv.USER, 'ENGINE_DB_USER'),
                    (oenginecons.EngineDBEnv.PASSWORD, 'ENGINE_DB_PASSWORD'),
                    (oenginecons.EngineDBEnv.DATABASE, 'ENGINE_DB_DATABASE'),
                ):
                    dbenv[e] = config.get(k)
                # Boolean-valued settings.
                for e, k in (
                    (oenginecons.EngineDBEnv.SECURED, 'ENGINE_DB_SECURED'),
                    (
                        oenginecons.EngineDBEnv.SECURED_HOST_VALIDATION,
                        'ENGINE_DB_SECURED_VALIDATION'
                    )
                ):
                    dbenv[e] = config.getboolean(k)
                # Raises RuntimeError on failure; on success, commit the
                # credentials to the setup environment.
                dbovirtutils.tryDatabaseConnect(dbenv)
                self.environment.update(dbenv)
                self.environment[
                    oenginecons.EngineDBEnv.NEW_DATABASE
                ] = dbovirtutils.isNewDatabase()
            except RuntimeError as e:
                self.logger.debug(
                    'Existing credential use failed',
                    exc_info=True,
                )
                msg = _(
                    'Cannot connect to Engine database using existing '
                    'credentials: {user}@{host}:{port}'
                ).format(
                    host=dbenv[oenginecons.EngineDBEnv.HOST],
                    port=dbenv[oenginecons.EngineDBEnv.PORT],
                    database=dbenv[oenginecons.EngineDBEnv.DATABASE],
                    user=dbenv[oenginecons.EngineDBEnv.USER],
                )
                # During removal a dead connection is not fatal: warn
                # and continue so cleanup can proceed.
                if self.environment[
                    osetupcons.CoreEnv.ACTION
                ] == osetupcons.Const.ACTION_REMOVE:
                    self.logger.warning(msg)
                else:
                    raise RuntimeError(msg)
# vim: expandtab tabstop=4 shiftwidth=4
| walteryang47/ovirt-engine | packaging/setup/plugins/ovirt-engine-common/ovirt-engine/db/connection.py | Python | apache-2.0 | 5,588 |
#!/usr/bin/env python
"""
@package mi.dataset.driver.dosta_ln.wfp
@file marine-integrations/mi/dataset/driver/dosta_ln/wfp/dosta_ln_wfp.py
@author Tapana Gupta
@brief Driver for the dosta_ln_wfp instrument
Release notes:
Initial Release
"""
from mi.core.log import get_logger
from mi.dataset.parser.WFP_E_file_common import StateKey
from mi.dataset.parser.dosta_ln_wfp import DostaLnWfpParser
from mi.dataset.dataset_driver import DataSetDriver
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.core.versioning import version
@version("15.6.1")
def parse(unused, source_file_path, particle_data_handler):
    """Parse a dosta_ln_wfp file and feed its particles to the handler.

    Opens *source_file_path*, runs the DostaLnWfpParser over it via a
    DataSetDriver, and returns the (mutated) particle_data_handler.
    Parser exceptions are logged and flagged on the handler.
    """
    log = get_logger()

    parser_config = {
        DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.dosta_ln_wfp',
        DataSetDriverConfigKeys.PARTICLE_CLASS: 'DostaLnWfpInstrumentParserDataParticle',
    }

    with open(source_file_path, "r") as file_handle:
        def exception_callback(exception):
            # Record the failure on the handler but keep parsing.
            log.debug("Exception: %s", exception)
            particle_data_handler.setParticleDataCaptureFailure()

        parser = DostaLnWfpParser(
            parser_config,
            {StateKey.POSITION: 0},
            file_handle,
            lambda state, ingested: None,  # state callback: unused here
            lambda data: None,             # publish callback: unused here
            exception_callback)

        DataSetDriver(parser, particle_data_handler).processFileStream()

    return particle_data_handler
| janeen666/mi-instrument | mi/dataset/driver/dosta_ln/wfp/dosta_ln_wfp_driver.py | Python | bsd-2-clause | 1,413 |
from wxPython._controls import wxLIST_MASK_STATE
from wxPython._controls import wxLIST_STATE_SELECTED
import os.path
# Modified by Francois Malan, LUMC / TU Delft
# December 2009
#
# based on the SkeletonAUIViewer:
# skeleton of an AUI-based viewer module
# Copyright (c) Charl P. Botha, TU Delft.
# set to False for 3D viewer, True for 2D image viewer
IMAGE_VIEWER = False
# import the frame, i.e. the wx window containing everything
import MaskComBinarFrame
# and do a reload, so that the GUI is also updated at reloads of this
# module.
reload(MaskComBinarFrame)
from module_base import ModuleBase
from module_mixins import IntrospectModuleMixin
import module_utils
import os
import vtk
import itk
import wx
import copy
import subprocess
#import numpy as np
from OverlaySliceViewer import OverlaySliceViewer
class Mask(object):
    """A named binary mask.

    Bundles the display name shown in the GUI mask lists, the file the
    mask was loaded from (or last saved to), and the voxel data itself.
    """

    def __init__(self, name, file_path, image_data):
        self.name = name            # display name used as the key in MaskComBinar.masks
        self.file_path = file_path  # source/destination file path ('' if not file-backed)
        self.data = image_data      # vtkImageData holding the binary voxel labels
class MaskComBinar(IntrospectModuleMixin, ModuleBase):
    def __init__(self, module_manager):
        """Standard constructor. All DeVIDE modules have these, we do
        the required setup actions.
        """
        # we record the setting here, in case the user changes it
        # during the lifetime of this model, leading to different
        # states at init and shutdown.
        self.IMAGE_VIEWER = IMAGE_VIEWER
        ModuleBase.__init__(self, module_manager)
        # create the view frame
        self._view_frame = module_utils.instantiate_module_view_frame(
            self, self._module_manager,
            MaskComBinarFrame.MaskComBinarFrame)
        # change the title to something more spectacular
        self._view_frame.SetTitle('MaskComBinar - a tool for measuring and manipulating binary masks')
        #initialise data structures
        self._init_data_structures()
        # set up the 2D slice viewer and the 3D surface renderer
        self._init_2d_render_window()
        self._init_3d_render_window()
        # when True, displaying a mask re-frames the camera around it
        self.reset_camera_on_mask_display = True
        # NOTE(review): not referenced in this chunk -- presumably a
        # one-time save-warning flag; confirm before removing.
        self.first_save_warning = True
        # hook up all event handlers
        self._bind_events()
        # anything you stuff into self._config will be saved
        self._config.last_used_dir = ''
        # make our window appear (this is a viewer after all)
        self.view()
        # all modules should toggle this once they have shown their
        # views.
        self.view_initialised = True
        # apply config information to underlying logic
        self.sync_module_logic_with_config()
        # then bring it all the way up again to the view
        self.sync_module_view_with_logic()
    #This tool can be used for introspection of wx components
    #
    def _init_2d_render_window(self):
        """Set up the 2D slice viewer with one colour overlay per role."""
        #create the necessary VTK objects for the 2D window. We use Charl's CMSliceViewer
        #which defines all the nice goodies we'll need
        self.ren2d = vtk.vtkRenderer()
        self.ren2d.SetBackground(0.4,0.4,0.4)
        self.slice_viewer = OverlaySliceViewer(self._view_frame.rwi2d, self.ren2d)
        self._view_frame.rwi2d.GetRenderWindow().AddRenderer(self.ren2d)
        # RGBA overlays, keyed by the ids used throughout this module
        self.slice_viewer.add_overlay('a', [0, 0, 1, 1]) #Blue for selection A
        self.slice_viewer.add_overlay('b', [1, 0, 0, 1]) #Red for selection B
        self.slice_viewer.add_overlay('intersect', [1, 1, 0, 1]) #Yellow for intersection
    def _init_3d_render_window(self):
        """Set up the 3D renderer; the RWI in the view frame provides the rest."""
        # create the necessary VTK objects for the 3D window: we only need a renderer,
        # the RenderWindowInteractor in the view_frame has the rest.
        self.ren3d = vtk.vtkRenderer()
        self.ren3d.SetBackground(0.6,0.6,0.6)
        self._view_frame.rwi3d.GetRenderWindow().AddRenderer(self.ren3d)
    def _init_data_structures(self):
        """Initialise all bookkeeping used by the 2D and 3D viewers."""
        self.opacity_3d = 0.5      # opacity used for 3D surface actors
        self.rgb_blue = [0,0,1]    # colour for selection A
        self.rgb_red = [1,0,0]     # colour for selection B
        self.rgb_yellow = [1,1,0]  # colour for the A/B intersection
        self.masks = {}            # mask name -> Mask instance
        self.surfaces = {} #This prevents recomputing surface meshes
        self.actors3d = {}         # actor key (id + mask name) -> vtkActor in the 3D scene
        # which mask names are currently rendered for each selection list
        self.rendered_masks_in_a = set()
        self.rendered_masks_in_b = set()
        # True while the 3D scene shows the special three-region overlap render
        self.rendered_overlap = False
    def _load_mask_from_file(self, file_path):
        """Read a .vti or .mha file and return a deep-copied vtkImageData.

        Shows an error dialog and returns None for any other extension.
        """
        print "Opening file: %s" % (file_path)
        filename = os.path.split(file_path)[1]
        reader = None
        extension = os.path.splitext(filename)[1]
        if extension == '.vti': # VTI
            reader = vtk.vtkXMLImageDataReader()
        elif extension == '.mha': # MHA
            reader = vtk.vtkMetaImageReader()
        else:
            self._view_frame.dialog_error('Unknown file extension: %s' % extension, 'Unable to handle extension')
            return
        reader.SetFileName(file_path)
        reader.Update()
        # deep-copy so the result stays valid after the reader is released
        result = vtk.vtkImageData()
        result.DeepCopy(reader.GetOutput())
        return result
def load_binary_mask_from_file(self, file_path):
mask_image_data = self._load_mask_from_file(file_path)
filename = os.path.split(file_path)[1]
fileBaseName =os.path.splitext(filename)[0]
mask = Mask(fileBaseName, file_path, mask_image_data)
self.add_mask(mask)
def load_multi_mask_from_file(self, file_path):
mask_image_data = self._load_mask_from_file(file_path)
filename = os.path.split(file_path)[1]
fileBaseName =os.path.splitext(filename)[0]
#Now we have to create a separate mask for each integer level.
accumulator = vtk.vtkImageAccumulate()
accumulator.SetInput(mask_image_data)
accumulator.Update()
max_label = int(accumulator.GetMax()[0])
#We assume all labels to have positive values.
for i in range(1,max_label+1):
label_data = self._threshold_image(mask_image_data, i, i)
new_name = '%s_%d' % (fileBaseName, i)
mask = Mask(new_name, file_path, label_data)
self.add_mask(mask)
    def save_mask_to_file(self, mask_name, file_path):
        """Write the named mask to *file_path*, asking before overwriting.

        On a write the mask's stored file_path is updated to the new
        destination.
        """
        if os.path.exists(file_path):
            result = self._view_frame.dialog_yesno("%s already exists! \nOverwrite?" % file_path,"File already exists")
            if result == False:
                print 'Skipped writing %s' % file_path
                return #skip this file if overwrite is denied
        mask = self.masks[mask_name]
        mask.file_path = file_path
        self._save_image_to_file(mask.data, file_path)
        print 'Wrote mask %s to %s' % (mask_name, file_path)
    def _save_image_to_file(self, imagedata, file_path):
        """Write *imagedata* to *file_path* as .vti or .mha.

        The writer is chosen from the file extension; any other
        extension raises an error dialog and aborts.  Success and
        failure are both reported to the user via dialogs.
        """
        filename = os.path.split(file_path)[1]
        extension = os.path.splitext(filename)[1]
        writer = None
        if extension == '.vti': # VTI
            writer = vtk.vtkXMLImageDataWriter()
        elif extension == '.mha': # MHA
            print 'Attempting to create an mha writer. This has failed in the past (?)'
            writer = vtk.vtkMetaImageWriter()
            writer.SetCompression(True)
        else:
            self._view_frame.dialog_error('Unknown file extension: %s' % extension, 'Unable to handle extension')
            return
        writer.SetInput(imagedata)
        writer.SetFileName(file_path)
        writer.Update()
        # Write() returns 0 on failure
        result = writer.Write()
        if result == 0:
            self._view_frame.dialog_error('Error writing %s' % filename, 'Error writing file')
            print 'ERROR WRITING FILE!!!'
        else:
            self._view_frame.dialog_info('Successfully wrote %s' % filename, 'Success')
            print 'Successfully wrote %s' % file_path
    def add_mask(self, mask):
        """Register *mask* under a user-chosen name and add it to the GUI list.

        The user is prompted with the mask's current name as default; if
        the accepted name already exists, a numeric suffix is appended
        until it is unique.
        """
        [accept, name] = self._view_frame.dialog_inputtext('Please choose a name for the new mask','Choose a name', mask.name)
        if accept:
            mask.name = name
            if self.masks.has_key(name):
                # append 1, 2, ... until the name is unique
                i=1
                new_name = '%s%d' % (name, i)
                while self.masks.has_key(new_name):
                    i += 1
                    new_name = '%s%d' % (mask.name, i)
                mask.name = new_name
        # NOTE(review): if the user cancels the dialog the original name
        # skips the uniqueness check above, so an existing mask with the
        # same name is silently replaced here -- confirm this is intended.
        self.masks[mask.name] = mask
        self._view_frame.add_mask(mask.name)
    def delete_masks(self, mask_names):
        """Delete the given set of mask names after one confirmation dialog.

        Clears the 2D viewer's pickable input plane when the last mask
        is removed.
        """
        temp = mask_names.copy()
        # build a comma-separated listing by draining the set, then restore it
        if len(mask_names) > 0:
            mask_names_str = mask_names.pop()
            while len(mask_names) > 0:
                mask_names_str = mask_names_str + ',%s' % mask_names.pop()
        mask_names = temp
        if self._view_frame.dialog_yesno('Are you sure you want to delete the following masks: %s' % mask_names_str, 'Delete masks?'):
            for mask_name in mask_names:
                print 'deleting mask: %s' % mask_name
                if self.masks.has_key(mask_name):
                    self.masks.pop(mask_name)
                    self._view_frame.delete_mask(mask_name)
                else:
                    self._view_frame.dialog_error('Mask "%s" not found in internal mask list!' % mask_name, 'Mask not found')
            if len(self.masks) == 0: #If there are no masks left we disable the 2D viewer's pickable plane
                self.slice_viewer.set_input(0, None)
    def close(self):
        """Clean-up method called on all DeVIDE modules when they are
        deleted.

        Tears down VTK props and render windows first, then the wx view
        frame, then the introspection mixin.
        """
        # with this complicated de-init, we make sure that VTK is
        # properly taken care of
        self.ren2d.RemoveAllViewProps()
        self.ren3d.RemoveAllViewProps()
        # this finalize makes sure we don't get any strange X
        # errors when we kill the module.
        self._view_frame.rwi2d.GetRenderWindow().Finalize()
        self._view_frame.rwi2d.SetRenderWindow(None)
        del self._view_frame.rwi2d
        self._view_frame.rwi3d.GetRenderWindow().Finalize()
        self._view_frame.rwi3d.SetRenderWindow(None)
        del self._view_frame.rwi3d
        # done with VTK de-init
        # now take care of the wx window
        self._view_frame.close()
        # then shutdown our introspection mixin
        IntrospectModuleMixin.close(self)
    # --- DeVIDE module interface -------------------------------------
    # This viewer takes no network inputs or outputs and keeps no
    # transferable config state, so all of these are no-ops.
    def get_input_descriptions(self):
        # define this as a tuple of input descriptions if you want to
        # take input data e.g. return ('vtkPolyData', 'my kind of
        # data')
        return ()
    def get_output_descriptions(self):
        # define this as a tuple of output descriptions if you want to
        # generate output data.
        return ()
    def set_input(self, idx, input_stream):
        # this gets called right before you get executed. take the
        # input_stream and store it so that it's available during
        # execute_module()
        pass
    def get_output(self, idx):
        # this can get called at any time when a consumer module wants
        # your output data.
        pass
    def execute_module(self):
        # when it's your turn to execute as part of a network
        # execution, this gets called.
        pass
    def logic_to_config(self):
        pass
    def config_to_logic(self):
        pass
    def config_to_view(self):
        pass
    def view_to_config(self):
        pass
    def view(self):
        """Show and raise the module window, then do an initial render."""
        self._view_frame.Show()
        self._view_frame.Raise()
        # because we have an RWI involved, we have to do this
        # SafeYield, so that the window does actually appear before we
        # call the render. If we don't do this, we get an initial
        # empty renderwindow.
        wx.SafeYield()
        self.render()
def _update_3d_masks(self, id, removed, added):
rgb_colour = [0,0,0]
if id == 'a':
rgb_colour = self.rgb_blue
elif id == 'b':
rgb_colour = self.rgb_red
for name in removed:
key = id + name
self.ren3d.RemoveActor(self.actors3d[key])
self.render()
for name in added:
self._render_3d_mask(id, name, rgb_colour, self.opacity_3d)
def _update_3d_masks_overlapping(self, mask_a, mask_b, mask_intersect):
self._clear_3d_window()
self._render_3d_data('a_not_b', mask_a.data, self.rgb_blue, self.opacity_3d)
self._render_3d_data('b_not_a', mask_b.data, self.rgb_red, self.opacity_3d)
self._render_3d_data('a_and_b', mask_intersect.data, self.rgb_yellow, self.opacity_3d)
    def _clear_3d_window(self):
        """Remove every actor from the 3D scene and reset the render bookkeeping."""
        for actor in self.actors3d.values():
            self.ren3d.RemoveActor(actor)
        self.ren3d.Clear()
        # nothing is on screen any more
        self.rendered_masks_in_a = set()
        self.rendered_masks_in_b = set()
        self.rendered_overlap = False
def _render_2d_mask(self, id, mask):
mask_data = None
if mask != None:
mask_data = mask.data
self.slice_viewer.set_input(id, mask_data)
if self.reset_camera_on_mask_display:
self.slice_viewer.reset_camera()
#self.slice_viewer.reset_to_default_view(2)
self.slice_viewer.render()
    def _render_3d_mask(self, id, name, rgb_colour, opacity):
        """Add the given mask to the 3D display window.
        An iso-surface of colour rgb_colour is rendered at value = 1.

        Surfaces are cached per mask name in self.surfaces so repeated
        (de)selections do not re-run marching cubes; the created actor
        is registered in self.actors3d under key id+name.
        """
        surface = None
        mask = self.masks[name]
        if not self.surfaces.has_key(name):
            surface_creator = vtk.vtkDiscreteMarchingCubes()
            surface_creator.SetInput(mask.data)
            surface_creator.Update()
            surface = surface_creator.GetOutput()
            self.surfaces[name] = surface
        else:
            surface = self.surfaces[name]
        m = vtk.vtkPolyDataMapper()
        m.SetInput(surface)
        m.ScalarVisibilityOff()
        actor = vtk.vtkActor()
        actor.SetMapper(m)
        actor.SetPosition(mask.data.GetOrigin())
        actor.GetProperty().SetColor(rgb_colour)
        actor.GetProperty().SetOpacity(opacity)
        #actor.GetProperty().SetInterpolationToFlat()
        self.ren3d.AddActor(actor)
        self.actors3d[id+name] = actor
        if self.reset_camera_on_mask_display:
            self.ren3d.ResetCamera()
        self.render()
    def _render_3d_data(self, id, data, rgb_colour, opacity):
        """Add the given mask to the 3D display window.
        An iso-surface of colour rgb_colour is rendered at value = 1.

        Unlike _render_3d_mask this takes raw vtkImageData and does not
        cache the surface -- used for the transient overlap regions.
        """
        surface_creator = vtk.vtkDiscreteMarchingCubes()
        surface_creator.SetInput(data)
        surface_creator.Update()
        surface = surface_creator.GetOutput()
        m = vtk.vtkPolyDataMapper()
        m.SetInput(surface)
        m.ScalarVisibilityOff()
        actor = vtk.vtkActor()
        actor.SetMapper(m)
        actor.SetPosition(data.GetOrigin())
        actor.GetProperty().SetColor(rgb_colour)
        actor.GetProperty().SetOpacity(opacity)
        #actor.GetProperty().SetInterpolationToFlat()
        self.ren3d.AddActor(actor)
        self.actors3d[id] = actor
        if self.reset_camera_on_mask_display:
            self.ren3d.ResetCamera()
        self.render()
    def _bind_events(self):
        """Bind wx events to Python callable object event handlers.
        """
        vf = self._view_frame
        # menu items
        vf.Bind(wx.EVT_MENU, self._handler_open_binary_mask,
                id = vf.id_open_binary_mask)
        vf.Bind(wx.EVT_MENU, self._handler_open_multi_mask,
                id = vf.id_open_multi_mask)
        vf.Bind(wx.EVT_MENU, self._handler_save_multi_mask,
                id = vf.id_save_multi_mask)
        vf.Bind(wx.EVT_MENU, self._handler_open_mask_dir,
                id = vf.id_open_mask_dir)
        vf.Bind(wx.EVT_MENU, self._handler_save_mask,
                id = vf.id_save_mask)
        vf.Bind(wx.EVT_MENU, self._handler_close,
                id = vf.id_quit)
        vf.Bind(wx.EVT_MENU, self._handler_introspect,
                id = vf.id_introspect)
        vf.Bind(wx.EVT_MENU, self._handler_about,
                id = vf.id_about)
        # viewer buttons
        self._view_frame.reset_cam2d_button.Bind(wx.EVT_BUTTON,
                self._handler_reset_cam2d_button)
        self._view_frame.reset_cam3d_button.Bind(wx.EVT_BUTTON,
                self._handler_reset_cam3d_button)
        self._view_frame.clear_selection_button.Bind(wx.EVT_BUTTON,
                self._handler_clear_selection_button)
        # both mask lists share one (de)selection handler
        self._view_frame.list_ctrl_maskA.Bind(wx.EVT_LIST_ITEM_SELECTED, self._handler_listctrl)
        self._view_frame.list_ctrl_maskA.Bind(wx.EVT_LIST_ITEM_DESELECTED, self._handler_listctrl)
        self._view_frame.list_ctrl_maskB.Bind(wx.EVT_LIST_ITEM_SELECTED, self._handler_listctrl)
        self._view_frame.list_ctrl_maskB.Bind(wx.EVT_LIST_ITEM_DESELECTED, self._handler_listctrl)
        self._view_frame.list_ctrl_maskA.Bind(wx.EVT_LIST_KEY_DOWN, self._handler_delete_mask_a)
        self._view_frame.list_ctrl_maskB.Bind(wx.EVT_LIST_KEY_DOWN, self._handler_delete_mask_b)
        #Mask operations
        self._view_frame.mask_join_button.Bind(wx.EVT_BUTTON, self._handler_mask_join)
        self._view_frame.mask_subtract_button.Bind(wx.EVT_BUTTON, self._handler_mask_subtract)
        self._view_frame.mask_intersect_button.Bind(wx.EVT_BUTTON, self._handler_mask_intersect)
        self._view_frame.mask_align_metadata_button.Bind(wx.EVT_BUTTON, self._handler_align_masks_metadata)
        self._view_frame.mask_align_icp_button.Bind(wx.EVT_BUTTON, self._handler_align_masks_icp)
        self._view_frame.split_disconnected_button.Bind(wx.EVT_BUTTON, self._handler_split_disconnected)
        #Mask diagnostics
        self._view_frame.test_all_dimensions_button.Bind(wx.EVT_BUTTON, self._handler_test_all_dimensions)
        self._view_frame.test_selected_dimensions_button.Bind(wx.EVT_BUTTON, self._handler_test_selected_dimensions)
        self._view_frame.test_all_intersections_button.Bind(wx.EVT_BUTTON, self._handler_test_all_intersections)
        self._view_frame.test_selected_intersections_button.Bind(wx.EVT_BUTTON, self._handler_test_selected_intersections)
        #Mask metrics
        self._view_frame.volume_button.Bind(wx.EVT_BUTTON, self._handler_compute_volume)
        self._view_frame.dice_coefficient_button.Bind(wx.EVT_BUTTON, self._handler_compute_dice_coefficient)
        self._view_frame.hausdorff_distance_button.Bind(wx.EVT_BUTTON, self._handler_compute_hausdorff_distance)
        self._view_frame.mean_hausdorff_distance_button.Bind(wx.EVT_BUTTON, self._handler_compute_mean_hausdorff_distance)
        #self._view_frame.Bind(wx.EVT_SLIDER, self._handler_slider_update)
    def _handler_reset_cam2d_button(self, event):
        """Reset the 2D slice viewer to its default view (view index 2)."""
        #self.slice_viewer.reset_camera()
        self.slice_viewer.reset_to_default_view(2)
        self.render()
    def _handler_reset_cam3d_button(self, event):
        """Re-frame the 3D camera around all visible actors and redraw."""
        self.ren3d.ResetCamera()
        self.render()
    def _handler_clear_selection_button(self, event):
        """Deselect everything and blank both the 2D overlays and 3D scene."""
        self._view_frame.clear_selections()
        self._clear_3d_window()
        # clear the pickable input plane and all three colour overlays
        self.slice_viewer.set_input(0, None)
        self.slice_viewer.set_input('a', None)
        self.slice_viewer.set_input('b', None)
        self.slice_viewer.set_input('intersect', None)
        self.render()
def _handler_delete_mask_a(self, event):
'''Handler for deleting an mask from either of the two lists (acts on both)'''
if event.KeyCode == 127: #This is the keycode for "delete"
names_a = self._view_frame.get_selected_mask_names_a()
if len(names_a) > 0:
self.delete_masks(names_a)
def _handler_delete_mask_b(self, event):
'''Handler for deleting an mask from either of the two lists (acts on both)'''
if event.KeyCode == 127: #This is the keycode for "delete"
names_b = self._view_frame.get_selected_mask_names_b()
if len(names_b) > 0:
self.delete_masks(names_b)
    def _handler_listctrl(self, event):
        """Called when a mask is (de)selected in either list control.

        Computes the selection delta against what is currently rendered,
        updates the 2D overlays (A, B and their intersection), and either
        incrementally updates the 3D scene or -- when A and B overlap --
        rebuilds it as three disjoint coloured regions.
        """
        # the overlap render cannot be updated incrementally, start fresh
        if self.rendered_overlap:
            self._clear_3d_window()
            self.rendered_overlap = False
        names_a = self._view_frame.get_selected_mask_names_a()
        names_b = self._view_frame.get_selected_mask_names_b()
        new_in_a = set()
        new_in_b = set()
        gone_from_a = set()
        gone_from_b = set()
        #Check what has changed
        for name in names_a:
            if not name in self.rendered_masks_in_a:
                new_in_a.add(name)
        for name in self.rendered_masks_in_a:
            if not name in names_a:
                gone_from_a.add(name)
        #Update the list of selected items
        self.rendered_masks_in_a = names_a
        for name in names_b:
            if not name in self.rendered_masks_in_b:
                new_in_b.add(name)
        for name in self.rendered_masks_in_b:
            if not name in names_b:
                gone_from_b.add(name)
        #Update the list of selected items
        self.rendered_masks_in_b = names_b
        overlap = None
        union_masks_a = None
        union_masks_b = None
        # only re-render 2D if the selection actually changed
        if (len(gone_from_a) > 0) or (len(new_in_a) > 0) or (len(gone_from_b) > 0) or (len(new_in_b) > 0):
            union_masks_a = self.compute_mask_union(names_a)
            union_masks_b = self.compute_mask_union(names_b)
            self._render_2d_mask('a',union_masks_a)
            self._render_2d_mask('b',union_masks_b)
            overlap = self._logical_intersect_masks(union_masks_a, union_masks_b)
            if self._is_empty_mask(overlap):
                overlap = None
            self._render_2d_mask('intersect',overlap)
        if overlap == None:
            #We don't need to render any custom mask - only a list of existing selected masks
            self._update_3d_masks('a', gone_from_a, new_in_a)
            self._update_3d_masks('b', gone_from_b, new_in_b)
        else:
            #We require a more expensive custom render to show overlapping areas in 3D
            a_not_b = self._logical_subtract_masks(union_masks_a, overlap)
            b_not_a = self._logical_subtract_masks(union_masks_b, overlap)
            self._update_3d_masks_overlapping(a_not_b, b_not_a, overlap)
            # NOTE(review): these are reset to dicts while elsewhere they
            # are sets; membership tests still work, but it is inconsistent.
            self.rendered_masks_in_a = {}
            self.rendered_masks_in_b = {}
            self.rendered_overlap = True
def _handler_open_binary_mask(self, event):
"""Opens a binary mask file"""
filters = 'Mask files (*.vti;*.mha)|*.vti;*.mha'
dlg = wx.FileDialog(self._view_frame, "Choose a binary mask file", self._config.last_used_dir, "", filters, wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
filename=dlg.GetFilename()
self._config.last_used_dir=dlg.GetDirectory()
full_file_path = "%s\\%s" % (self._config.last_used_dir, filename)
self.load_binary_mask_from_file(full_file_path)
dlg.Destroy()
def _handler_open_multi_mask(self, event):
"""Opens an integer-labeled multi-material mask file"""
filters = 'Mask files (*.vti;*.mha)|*.vti;*.mha'
dlg = wx.FileDialog(self._view_frame, "Choose a multi-label mask file", self._config.last_used_dir, "", filters, wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
filename=dlg.GetFilename()
self._config.last_used_dir=dlg.GetDirectory()
full_file_path = "%s\\%s" % (self._config.last_used_dir, filename)
self.load_multi_mask_from_file(full_file_path)
dlg.Destroy()
    def _handler_open_mask_dir(self, event):
        """Load every mask file found in a user-chosen directory.

        NOTE(review): only *.vti files are picked up here although the
        single-file loaders also accept *.mha -- confirm whether .mha
        should be included.
        """
        dlg = wx.DirDialog(self._view_frame, "Choose a directory containing masks", self._config.last_used_dir)
        if dlg.ShowModal() == wx.ID_OK:
            dir_name=dlg.GetPath()
            self._config.last_used_dir=dir_name
            all_files = os.listdir(dir_name)
            #First we set up actor list of files with the correct extension
            file_list = []
            source_ext = '.vti'
            for f in all_files:
                file_name = os.path.splitext(f)
                if file_name[1] == source_ext:
                    file_list.append(f)
            for filename in file_list:
                full_file_path = "%s\\%s" % (dir_name, filename)
                self.load_binary_mask_from_file(full_file_path)
        dlg.Destroy()
        print 'Done!'
def _specify_output_file_path(self):
file_path = None
filters = 'Mask files (*.vti;*.mha)|*.vti;*.mha'
dlg = wx.FileDialog(self._view_frame, "Choose a destination", self._config.last_used_dir, "", filters, wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
filename=dlg.GetFilename()
self._config.last_used_dir=dlg.GetDirectory()
file_path = "%s\\%s" % (self._config.last_used_dir, filename)
dlg.Destroy()
return file_path
    def _handler_save_multi_mask(self, event):
        """Saves a multi-label mask file.

        All masks selected in A and B are combined into one volume:
        the first mask keeps label 1, each further mask is multiplied
        by its label number (2, 3, ...) and added in.

        NOTE(review): label numbers follow set-iteration order, so they
        are not deterministic; overlapping masks will have their labels
        summed rather than one winning -- confirm both are acceptable.
        """
        if self.test_valid_mask_selection_multiple():
            file_path = self._specify_output_file_path()
            if file_path != None:
                names_a = self._view_frame.get_selected_mask_names_a()
                names_b = self._view_frame.get_selected_mask_names_b()
                # merge the two selections, dropping duplicates
                names = set()
                for mask_name in names_a:
                    names.add(mask_name)
                for mask_name in names_b:
                    names.add(mask_name)
                mask_name = names.pop()
                imagedata = vtk.vtkImageData()
                maskdata = self.masks[mask_name].data
                imagedata.DeepCopy(maskdata)
                k = 1
                for mask_name in names:
                    k = k+1
                    maskdata = self.masks[mask_name].data
                    # scale the binary mask to its label value k ...
                    imath = vtk.vtkImageMathematics()
                    imath.SetOperationToMultiplyByK()
                    imath.SetConstantK(k)
                    print 'Multiplying %s with %d and adding to volume' % (mask_name, k)
                    imath.SetInput(maskdata)
                    imath.Update()
                    # ... and accumulate it into the output volume
                    adder = vtk.vtkImageMathematics()
                    adder.SetOperationToAdd()
                    adder.SetInput1(imagedata)
                    adder.SetInput2(imath.GetOutput())
                    adder.Update()
                    imagedata.DeepCopy(adder.GetOutput())
                self._save_image_to_file(imagedata, file_path)
                print 'Wrote multi-label mask with %d labels to %s' % (k, file_path)
def _handler_save_mask(self, event):
"""Saves a mask file"""
if self.test_single_mask_selection():
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
mask_name = ''
if len(names_b) == 1:
mask_name = names_b.pop()
else:
mask_name = names_a.pop()
file_path = self._specify_output_file_path()
if mask_name != None:
self.save_mask_to_file(mask_name, file_path)
else:
self._view_frame.dialog_exclaim("No valid file name specified")
    def _handler_align_masks_metadata(self, event):
        """Aligns two masks by copying metadata from the first to the second (origin, spacing, extent, wholeextent)
        As always, creates a new mask in the list of masks as output.

        The new mask is B's voxel data with A's geometry metadata and is
        registered under the name "<B>_a".
        """
        if self.test_single_mask_pair_selection():
            #We know that there is only a single mask selected in each of A and B, therefor we only index the 0th element in each
            names_a = self._view_frame.get_selected_mask_names_a()
            names_b = self._view_frame.get_selected_mask_names_b()
            maskA = self.masks[names_a.pop()]
            maskB = self.masks[names_b.pop()]
            # work on a copy so the original mask B stays untouched
            mask_data = vtk.vtkImageData()
            mask_data.DeepCopy(maskB.data)
            mask_data.SetOrigin(maskA.data.GetOrigin())
            mask_data.SetExtent(maskA.data.GetExtent())
            mask_data.SetWholeExtent(maskA.data.GetWholeExtent())
            mask_data.SetSpacing(maskA.data.GetSpacing())
            mask = Mask('%s_a' % maskB.name, maskB.file_path, mask_data)
            self.add_mask(mask)
def _handler_split_disconnected(self, event):
"""Splits the selected mask into disconnected regions"""
if self.test_single_mask_selection():
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
mask_name = ''
if len(names_b) == 1:
mask_name = names_b.pop()
else:
mask_name = names_a.pop()
self._split_disconnected_objects(mask_name)
    def _handler_align_masks_icp(self, event):
        """Aligns two masks by using the Iterative Closest Point algorithm (rigid transformation)
        As always, creates a new mask in the list of masks as output.

        NOTE(review): the ICP transform is computed A->B but is then
        applied while reslicing mask B's data; confirm the intended
        direction of the alignment.
        """
        if self.test_single_mask_pair_selection():
            #We know that there is only a single mask selected in each of A and B, therefor we only index the 0th element in each
            names_a = self._view_frame.get_selected_mask_names_a()
            names_b = self._view_frame.get_selected_mask_names_b()
            maskA = self.masks[names_a.pop()]
            maskB = self.masks[names_b.pop()]
            #We need meshes (polydata) as input to the ICP algorithm
            meshA = None
            meshB = None
            # reuse the cached surface if one exists, else build it
            #actually this should never happen, but let's keep it for making double sure
            if not self.surfaces.has_key(maskA.name):
                surface_creator_A = vtk.vtkDiscreteMarchingCubes()
                surface_creator_A.SetInput(maskA.data)
                surface_creator_A.Update()
                meshA = surface_creator_A.GetOutput()
            else:
                meshA = self.surfaces[maskA.name]
            #actually this should never happen, but let's keep it for making double sure
            if not self.surfaces.has_key(maskB.name):
                surface_creator_B = vtk.vtkDiscreteMarchingCubes()
                surface_creator_B.SetInput(maskB.data)
                surface_creator_B.Update()
                meshB = surface_creator_B.GetOutput()
            else:
                meshB = self.surfaces[maskB.name]
            icp = vtk.vtkIterativeClosestPointTransform()
            icp.SetMaximumNumberOfIterations(50)
            icp.SetSource(meshA)
            icp.SetTarget(meshB)
            print 'Executing ICP alorithm'
            icp.Update()
            del meshA, meshB
            # resample B's voxels through the computed rigid transform;
            # nearest-neighbour keeps the mask strictly binary
            reslicer = vtk.vtkImageReslice()
            reslicer.SetInterpolationModeToNearestNeighbor()
            #reslicer.SetInterpolationModeToCubic()
            reslicer.SetInput(maskB.data)
            reslicer.SetResliceTransform(icp)
            reslicer.Update()
            del maskA, maskB
            result = vtk.vtkImageData()
            result.DeepCopy(reslicer.GetOutput())
            self.add_mask(Mask('Aligned','',result))
def _handler_compute_volume(self, event):
"""Computes the volume of of mask A (in milliliters)"""
if self.test_valid_mask_selection_a():
names_a = self._view_frame.get_selected_mask_names_a()
union_masksA = self.compute_mask_union(names_a)
spacing = union_masksA.data.GetSpacing()
voxel_volume = spacing[0] * spacing[1] * spacing[2]
accumulator = vtk.vtkImageAccumulate()
accumulator.SetInput(union_masksA.data)
accumulator.Update()
nonzero_count = accumulator.GetMean()[0] * accumulator.GetVoxelCount()
volume = voxel_volume * nonzero_count / 1000.0
print "Volume = %.2f ml" % (volume)
copy_to_clipboard = self._view_frame.dialog_yesno('Volume = %f ml\n\nCopy to clipboard?' % volume, 'Volume = %.1f%% ml' % (volume))
if copy_to_clipboard:
self._view_frame.copy_text_to_clipboard('%f' % volume)
def _is_empty_mask(self, mask):
if mask == None:
return True
else:
accumulator = vtk.vtkImageAccumulate()
accumulator.SetInput(mask.data)
accumulator.Update()
return accumulator.GetMax()[0] == 0
def _handler_compute_dice_coefficient(self, event):
"""Computes the Dice coefficient between selections in A and B
Implementation from Charl's coderunner code"""
if self.test_valid_mask_selection_a_and_b():
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
union_masksA = self.compute_mask_union(names_a)
union_masksB = self.compute_mask_union(names_b)
# Given two binary volumes, this CodeRunner will implement
# the percentage volume overlap. This is useful for
# doing validation with ground truth / golden standard /
# manually segmented volumes. This is also called the Dice
# coefficient and ranges from 0.0 to 1.0.
# interesting paper w.r.t. segmentation validation:
# Valmet: A new validation tool for assessing and improving 3D object segmentation
# basic idea:
# threshold data (so we have >0 == 1 and everything else 0)
# then histogram into two bins.
threshes = []
for _ in range(2):
t = vtk.vtkImageThreshold()
threshes.append(t)
# anything equal to or lower than 0.0 will be "In"
t.ThresholdByLower(0.0)
# <= 0 -> 0
t.SetInValue(0)
# > 0 -> 1
t.SetOutValue(1)
t.SetOutputScalarTypeToUnsignedChar()
# have to stuff all components into one image
iac = vtk.vtkImageAppendComponents()
iac.SetInput(0, threshes[0].GetOutput())
iac.SetInput(1, threshes[1].GetOutput())
# generate 2 by 2 matrix (histogram)
ia = vtk.vtkImageAccumulate()
ia.SetInput(iac.GetOutput())
ia.SetComponentExtent(0,1, 0,1, 0,0)
threshes[0].SetInput(union_masksA.data)
threshes[1].SetInput(union_masksB.data)
ia.Update()
iasc = ia.GetOutput().GetPointData().GetScalars()
cells = [0] * 4
for i in range(4):
cells[i] = iasc.GetTuple1(i)
# tuple 0: not in actor, not in b
# tuple 1: in actor, not in b
# tuple 2: in b, not in actor
# tuple 3: in actor, in b
# percentage overlap: (a intersect b) / (a union b)
dice_coeff = (2 * cells[3] / (2* cells[3] + cells[1] + cells[2]))
print "Dice Coefficiet = %.2f" % (dice_coeff)
copy_to_clipboard = self._view_frame.dialog_yesno('Dice coefficient = %f\n\nCopy to clipboard?' % dice_coeff, '%.1f%% overlap' % (100*dice_coeff))
if copy_to_clipboard:
self._view_frame.copy_text_to_clipboard('%f' % dice_coeff)
    def _compute_hausdorff_distances(self, maskA, maskB):
        """
        Computes the Hausdorff Distance between selections in A and B.
        Uses the external software tool Metro to do point-based mesh sampling

        Returns [hausdorff_distance, mean_hausdorff_distance] in mm.
        NOTE(review): returns None (bare 'return') when the metro tool is
        missing; callers unpack the result, so that path raises TypeError
        at the call site -- confirm intended behaviour.
        """
        #We need meshes (polydata) for computing the Hausdorff distances
        meshA = None
        meshB = None
        #actually this should never happen, but let's keep it for making double sure
        if not self.surfaces.has_key(maskA.name):
            self._view_frame.dialog_exclaim('Mesh belonging to Mask A not found in list, and created on the fly. This is unexpected...', 'Unexpected program state')
            surface_creator_A = vtk.vtkDiscreteMarchingCubes()
            surface_creator_A.SetInput(maskA.data)
            surface_creator_A.Update()
            meshA = surface_creator_A.GetOutput()
        else:
            meshA = self.surfaces[maskA.name]
        #actually this should never happen, but let's keep it for making double sure
        if not self.surfaces.has_key(maskB.name):
            self._view_frame.dialog_exclaim('Mesh belonging to Mask B not found in list, and created on the fly. This is unexpected...', 'Unexpected program state')
            surface_creator_B = vtk.vtkDiscreteMarchingCubes()
            surface_creator_B.SetInput(maskB.data)
            surface_creator_B.Update()
            meshB = surface_creator_B.GetOutput()
        else:
            meshB = self.surfaces[maskB.name]
        # metro is an external executable, so both meshes are written to
        # temporary PLY files in the working directory for it to read
        filename_a = '@temp_mesh_a.ply'
        filename_b = '@temp_mesh_b.ply'
        ply_writer = vtk.vtkPLYWriter()
        ply_writer.SetFileTypeToBinary()
        print 'Writing temporary PLY mesh A = %s' % filename_a
        ply_writer.SetFileName(filename_a)
        ply_writer.SetInput(meshA)
        ply_writer.Update()
        print 'Writing temporary PLY mesh B = %s' % filename_b
        ply_writer.SetFileName(filename_b)
        ply_writer.SetInput(meshB)
        ply_writer.Update()
        command = 'metro.exe %s %s' % (filename_a, filename_b)
        p = subprocess.Popen(command, shell=True, stdout = subprocess.PIPE)
        outp = p.stdout.read() #The command line output from metro
        # heuristic: metro's real report is long, so a very short output
        # means it failed to start (e.g. not on the PATH)
        if len(outp) < 50:
            self._view_frame.dialog_error('Hausdorff distance computation requires Metro to be installed and available in the system path.\n\nMetro failed to execute.\n\nAborting.\n\nMetro may be downloaded from http://vcg.sourceforge.net/index.php/Metro', 'Metro was not found')
            return
        print 'Executing: %s' % command
        print '....................................'
        print outp
        print '....................................'
        # scrape the forward/backward distances out of metro's text report;
        # the slice offsets assume metro's report format -- TODO confirm
        # against the installed metro version
        index = outp.find('max')
        hdf = float(outp[index+6:index+54].split()[0]) #Forward Hausdorff distance
        index = outp.find('max', index+3)
        hdb = float(outp[index+6:index+54].split()[0]) #Backward Hausdorff distance
        index = outp.find('mean')
        mhdf = float(outp[index+7:index+35].split()[0]) #Forward Mean Hausdorff distance
        index = outp.find('mean', index+4)
        mhdb = float(outp[index+7:index+35].split()[0]) #Backward Mean Hausdorff distance
        # symmetric Hausdorff distance = max of the two directed distances
        hausdorff_distance = max(hdf, hdb)
        mean_hausdorff_distance = 0.5 * (mhdf + mhdb)
        print 'removing temporary files'
        os.remove(filename_a)
        os.remove(filename_b)
        print 'done!'
        print '\nSampled Hausdorff distance = %.4f\nSampled Mean Hausdorff distance = %.4f\n' % (hausdorff_distance, mean_hausdorff_distance)
        return [hausdorff_distance, mean_hausdorff_distance]
    def _handler_compute_hausdorff_distance(self, event):
        """
        Computes the Hausdorff Distance between meshes in A and B.
        Uses the external software tool Metro to do point-based mesh sampling
        """
        if self.test_single_mask_pair_selection():
            names_a = self._view_frame.get_selected_mask_names_a()
            names_b = self._view_frame.get_selected_mask_names_b()
            # exactly one name per column (validated above), so pop() is safe
            maskA = self.masks[names_a.pop()]
            maskB = self.masks[names_b.pop()]
            # NOTE(review): _compute_hausdorff_distances returns None when
            # metro is unavailable; this unpack then raises TypeError.
            [hausdorff_distance, _] = self._compute_hausdorff_distances(maskA, maskB)
            copy_to_clipboard = self._view_frame.dialog_yesno('Hausdorff distance = %.4f mm\n\nCopy to clipboard?' % hausdorff_distance, 'Hausdorff Distance')
            if copy_to_clipboard:
                self._view_frame.copy_text_to_clipboard('%f' % hausdorff_distance)
    def _handler_compute_mean_hausdorff_distance(self, event):
        """
        Computes the Mean Hausdorff Distance between meshes in A and B.
        Uses the external software tool Metro to do point-based mesh sampling
        """
        if self.test_single_mask_pair_selection():
            names_a = self._view_frame.get_selected_mask_names_a()
            names_b = self._view_frame.get_selected_mask_names_b()
            # exactly one name per column (validated above), so pop() is safe
            maskA = self.masks[names_a.pop()]
            maskB = self.masks[names_b.pop()]
            # NOTE(review): _compute_hausdorff_distances returns None when
            # metro is unavailable; this unpack then raises TypeError.
            [_, mean_hausdorff_distance] = self._compute_hausdorff_distances(maskA, maskB)
            copy_to_clipboard = self._view_frame.dialog_yesno('Mean Hausdorff distance = %.4f mm\n\nCopy to clipboard?' % mean_hausdorff_distance, 'Mean Hausdorff distance')
            if copy_to_clipboard:
                self._view_frame.copy_text_to_clipboard('%f' % mean_hausdorff_distance)
    def _handler_mask_join(self, event):
        """Computes the union of the masks selected in boxes A and B.
        Saves the result as a new Mask
        """
        if self.test_valid_mask_selection_any():
            names_a = self._view_frame.get_selected_mask_names_a()
            names_b = self._view_frame.get_selected_mask_names_b()
            # a union needs at least two masks overall; silently ignore less
            if len(names_a) + len(names_b) < 2:
                return
            union_masksA = self.compute_mask_union(names_a)
            union_masksB = self.compute_mask_union(names_b)
            new_mask = None
            # an empty column yields union None; use the other side as-is
            if len(names_a) == 0:
                new_mask = union_masksB
            elif len(names_b) == 0:
                new_mask = union_masksA
            else:
                new_mask = self._logical_unite_masks(union_masksA, union_masksB)
            self.add_mask(new_mask)
    def _handler_mask_subtract(self, event):
        """Subtracts the union of the masks selected in box B from the union of the masks selected in box A.
        Saves the result as a new Mask
        """
        if self.test_valid_mask_selection_a_and_b():
            names_a = self._view_frame.get_selected_mask_names_a()
            names_b = self._view_frame.get_selected_mask_names_b()
            union_masksA = self.compute_mask_union(names_a)
            union_masksB = self.compute_mask_union(names_b)
            # result = A AND (NOT B)
            new_mask = self._logical_subtract_masks(union_masksA, union_masksB)
            self.add_mask(new_mask)
    def _handler_mask_intersect(self, event):
        """Intersects the union of the masks selected in box A with the union of the masks selected in box B.
        Saves the result as a new Mask
        """
        if self.test_valid_mask_selection_a_and_b():
            names_a = self._view_frame.get_selected_mask_names_a()
            names_b = self._view_frame.get_selected_mask_names_b()
            union_masksA = self.compute_mask_union(names_a)
            union_masksB = self.compute_mask_union(names_b)
            # result = A AND B
            new_mask = self._logical_intersect_masks(union_masksA, union_masksB)
            self.add_mask(new_mask)
    def _test_intersections(self, mask_name_list):
        """
        Tests for intersections between the masks listed in mask_names

        Strategy: sum all masks voxel-wise. For binary (0/1) masks any
        voxel > 1 proves an overlap. A maximum of exactly 255 is taken as
        evidence of a non-binary 8-bit (0/255) mask, which is reported as
        an error instead.
        """
        mask_names = copy.copy(mask_name_list)
        first_name = mask_names.pop()
        data = self.masks[first_name].data
        intersections_found = False
        eight_bit = False
        # accumulate the voxel-wise sum of all selected masks
        for mask_name in mask_names:
            print 'adding %s' % mask_name
            data2 = self.masks[mask_name].data
            adder = vtk.vtkImageMathematics()
            adder.SetOperationToAdd()
            adder.SetInput1(data)
            adder.SetInput2(data2)
            adder.Update()
            data = adder.GetOutput()
        accumulator = vtk.vtkImageAccumulate()
        accumulator.SetInput(data)
        accumulator.Update()
        # NOTE(review): 'max' shadows the builtin; also, a sum that happens
        # to reach exactly 255 through many binary overlaps would be
        # misclassified as an 8-bit mask -- confirm acceptable.
        max = accumulator.GetMax()[0]
        if max == 255:
            eight_bit = True
        elif max > 1:
            intersections_found = True
        else:
            self._view_frame.dialog_info("No intersections found.\n(duplicate selections in A and B ignored).", "No intersections")
        if eight_bit:
            # identify exactly which masks are 0/255 instead of 0/1
            eight_bit_mask_names = ''
            mask_names = copy.copy(mask_name_list)
            for mask_name in mask_names:
                accumulator = vtk.vtkImageAccumulate()
                accumulator.SetInput(self.masks[mask_name].data)
                accumulator.Update()
                if accumulator.GetMax()[0] == 255:
                    eight_bit_mask_names = '%s, "%s"' % (eight_bit_mask_names, mask_name)
            eight_bit_mask_names = eight_bit_mask_names[2:] #Remove the first two characters for neat display purposes
            self._view_frame.dialog_error("Masks should be binary. The following masks were found to be 8-bit:\n%s" % eight_bit_mask_names,"Non-binary mask found!")
        elif intersections_found:
            # pairwise re-test to report exactly which pairs overlap
            mask_name_pair_list = ''
            mask_names = copy.copy(mask_name_list)
            while len(mask_names) > 0:
                name1 = mask_names.pop()
                for name2 in mask_names:
                    adder = vtk.vtkImageMathematics()
                    adder.SetOperationToAdd()
                    adder.SetInput1(self.masks[name1].data)
                    adder.SetInput2(self.masks[name2].data)
                    adder.Update()
                    accumulator = vtk.vtkImageAccumulate()
                    accumulator.SetInput(adder.GetOutput())
                    accumulator.Update()
                    if accumulator.GetMax()[0] == 2:
                        mask_name_pair_list = '%s,\n ("%s","%s")' % (mask_name_pair_list, name1, name2)
            mask_name_pair_list = mask_name_pair_list[2:] #Remove the first two characters for neat display purposes
            self._view_frame.dialog_exclaim("Intersections found between the following mask pairs:\n%s" % mask_name_pair_list,"Intersections found!")
def _test_dimensions(self, mask_names, msg):
"""
Tests whether the given masks have matching volumetric dimensions.
In practice mismatches can occur due to problems with feature generation algorithms (such as filtered backprojection)
"""
masks_by_dimensions = {}
masks_by_extent = {}
masks_by_whole_extent = {}
masks_by_spacing = {}
for mask_name in mask_names:
maskdata = self.masks[mask_name].data
dimensions = maskdata.GetDimensions()
spacing = maskdata.GetSpacing()
extent = maskdata.GetExtent()
whole_extent = maskdata.GetWholeExtent()
if not masks_by_dimensions.has_key(dimensions):
masks_by_dimensions[dimensions] = [str(mask_name)]
else:
masks_by_dimensions[dimensions].append(str(mask_name))
if not masks_by_spacing.has_key(spacing):
masks_by_spacing[spacing] = [str(mask_name)]
else:
masks_by_spacing[spacing].append(str(mask_name))
if not masks_by_extent.has_key(extent):
masks_by_extent[extent] = [str(mask_name)]
else:
masks_by_extent[extent].append(str(mask_name))
if not masks_by_whole_extent.has_key(whole_extent):
masks_by_whole_extent[whole_extent] = [str(mask_name)]
else:
masks_by_whole_extent[whole_extent].append(str(mask_name))
if len(masks_by_dimensions.keys()) == 1 and len(masks_by_spacing.keys()) == 1 and len(masks_by_extent.keys()) == 1 and len(masks_by_whole_extent.keys()):
dimension_report = '%s masks have the same dimensions, spacing, extent and whole extent:\n\n' % msg
dimensions = masks_by_dimensions.keys().pop()
dimension_report = '%s dimensions = %s\n' % (dimension_report, str(dimensions))
dimensions = masks_by_spacing.keys().pop()
dimension_report = '%s spacing = %s\n' % (dimension_report, str(dimensions))
dimensions = masks_by_extent.keys().pop()
dimension_report = '%s extent = %s\n' % (dimension_report, str(dimensions))
dimensions = masks_by_whole_extent.keys().pop()
dimension_report = '%s whole extent = %s\n' % (dimension_report, str(dimensions))
self._view_frame.dialog_info(dimension_report, 'No mismatches')
else:
dimension_report = '% masks possess %d unique sets of dimensions. See below:\n' % (msg, len(masks_by_dimensions))
for k in masks_by_dimensions.keys():
dimension_report = '%s\n%s => %s' % (dimension_report, str(k), str( masks_by_dimensions[k]))
dimension_report = '%s\n\n%d unique spacings with their defining masks:\n' % (dimension_report, len(masks_by_spacing))
for k in masks_by_spacing.keys():
dimension_report = '%s\n%s => %s' % (dimension_report, str(k), str( masks_by_spacing[k]))
dimension_report = '%s\n\n%d unique extents with their defining masks:\n' % (dimension_report, len(masks_by_extent))
for k in masks_by_extent.keys():
dimension_report = '%s\n%s => %s' % (dimension_report, str(k), str( masks_by_extent[k]))
dimension_report = '%s\n\n%d unique whole_extents with their defining masks:\n' % (dimension_report, len(masks_by_whole_extent))
for k in masks_by_whole_extent.keys():
dimension_report = '%s\n%s => %s' % (dimension_report, str(k), str( masks_by_whole_extent[k]))
self._view_frame.dialog_exclaim(dimension_report,"Mismatches found!")
def _handler_test_all_dimensions(self, event):
"""
Tests whether any of the loaded masks have mismatching volume dimensions
"""
if len(self.masks) < 2:
self._view_frame.dialog_info("At least 2 masks need to be loaded to compare dimensions!","Fewer than two masks loaded")
return
mask_names = self.masks.keys()
self._test_dimensions(mask_names, 'All')
def _handler_test_selected_dimensions(self, event):
"""
Tests the selected masks have mismatching volume dimensions
"""
if self.test_valid_mask_selection_multiple():
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
mask_names = names_a.copy()
for name in names_b:
mask_names.add(name)
self._test_dimensions(mask_names, 'Selected')
def _handler_test_all_intersections(self, event):
"""
Tests whether there is an intersection between any of the loaded masks
"""
if len(self.masks) < 2:
self._view_frame.dialog_info("At least 2 masks need to be loaded to detect intersections!","Fewer than two masks loaded")
return
mask_names = self.masks.keys()
self._test_intersections(mask_names)
def _handler_test_selected_intersections(self, event):
"""
Tests whether there is an intersection between the selected masks
"""
if self.test_valid_mask_selection_multiple():
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
mask_names = names_a.copy()
for name in names_b:
mask_names.add(name)
self._test_intersections(mask_names)
def compute_mask_union(self, mask_names_set):
'''Computes and returns the union of a set of masks, identified by a set of mask names.'''
mask_names = mask_names_set.copy() #To prevent changes to the passed set due to popping
united_mask = None
if len(mask_names) > 0:
mask_name = mask_names.pop()
united_mask = self.masks[mask_name]
for mask_name in mask_names:
united_mask = self._logical_unite_masks(united_mask, self.masks[mask_name])
return united_mask
def test_single_mask_selection(self):
selectionCountA = self._view_frame.list_ctrl_maskA.GetSelectedItemCount()
selectionCountB = self._view_frame.list_ctrl_maskB.GetSelectedItemCount()
if selectionCountA + selectionCountB == 0:
self._view_frame.dialog_info("No masks are selected in either column A or B.\nThis operation requires a single mask, either in A or B.","No masks selected - invalid operation")
return False
elif selectionCountA + selectionCountB > 1:
self._view_frame.dialog_info("Multiple masks are selected in columns A and/or B.\nThis operation requires a single mask, either in A or B (but not both).","Multiple masks selected - invalid operation")
return False
return True
def test_single_mask_pair_selection(self):
selectionCountA = self._view_frame.list_ctrl_maskA.GetSelectedItemCount()
selectionCountB = self._view_frame.list_ctrl_maskB.GetSelectedItemCount()
if selectionCountA == 0:
self._view_frame.dialog_info("No mask selected in column A.\nThis operation requires a single input each, for A and B.","Too few masks selected - invalid operation")
return False
if selectionCountB == 0:
self._view_frame.dialog_info("No mask selected in column B.\nThis operation requires a single input each, for A and B.","Too few masks selected - invalid operation")
return False
if selectionCountA > 1:
self._view_frame.dialog_info("Multiple masks are selected in column A.\nThis operation requires a single input each, for A and B.","Multiple maks selected - invalid operation")
return False
elif selectionCountB > 1:
self._view_frame.dialog_info("Multiple masks are selected in column B.\nThis operation requires a single input each, for A and B.","Multiple maks selected - invalid operation")
return False
return True
def test_valid_mask_selection_any(self, warn = True):
selectionCountA = self._view_frame.list_ctrl_maskA.GetSelectedItemCount()
selectionCountB = self._view_frame.list_ctrl_maskB.GetSelectedItemCount()
if selectionCountA == 0 and selectionCountB == 0:
if warn:
self._view_frame.dialog_info("No masks are selected.","No masks selected")
return False
return True
def test_valid_mask_selection_multiple(self, warn = True):
names = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
for name in names_b:
names.add(name)
if len(names) < 2:
if warn:
self._view_frame.dialog_info("Fewer than two unique masks selected.","Too few masks selected")
return False
return True
def test_valid_mask_selection_a_and_b(self, warn = True):
selectionCountA = self._view_frame.list_ctrl_maskA.GetSelectedItemCount()
selectionCountB = self._view_frame.list_ctrl_maskB.GetSelectedItemCount()
if selectionCountA == 0:
if warn:
self._view_frame.dialog_info("No mask is selected in column A.\nThis operation requires inputs A and B.","Mask A not defined")
return False
elif selectionCountB == 0:
if warn:
self._view_frame.dialog_info("No mask is selected in column B.\nThis operation requires inputs A and B.","Mask B not defined")
return False
return True
def test_valid_mask_selection_a(self, warn = True):
selection_count_a = self._view_frame.list_ctrl_maskA.GetSelectedItemCount()
if selection_count_a == 0:
if warn:
self._view_frame.dialog_info("This operation requires input from column A.","Mask A not defined")
return False
return True
def test_valid_mask_selection_b(self, warn = True):
selection_count_b = self._view_frame.list_ctrl_maskB.GetSelectedItemCount()
if selection_count_b == 0:
if warn:
self._view_frame.dialog_info("This operation requires input from column B.","Mask B not defined")
return False
return True
def _handler_close(self, event):
"Closes this program"
self.close()
    def _handler_introspect(self, event):
        # Open the object introspection/configuration window on this module
        # (miscObjectConfigure is presumably supplied by a DeVIDE mixin --
        # confirm against the module base classes).
        self.miscObjectConfigure(self._view_frame, self, 'MaskComBinar')
    def _handler_about(self, event):
        # Show the about box with the program description and author.
        self._view_frame.dialog_info("MaskComBinar:\nA tool for measuring and manipulating binary masks\n\nby Francois Malan","About MaskComBinar")
def render(self):
"""Method that calls Render() on the embedded RenderWindow.
Use this after having made changes to the scene.
"""
self._view_frame.render()
def _logical_unite_masks(self, maskA, maskB):
"""Returns logical addition of maskA and maskB => maskA OR maskB"""
if maskA == None:
return maskB
elif maskB == None:
return maskA
print 'Joining masks %s and %s' % (maskA.name, maskB.name)
logicOR = vtk.vtkImageLogic()
logicOR.SetOperationToOr()
logicOR.SetInput1(maskA.data)
logicOR.SetInput2(maskB.data)
logicOR.Update()
result = self._threshold_image(logicOR.GetOutput(), 1, 255)
return Mask('Merged','',result)
def _logical_intersect_masks(self, maskA, maskB):
if maskA == None or maskB == None:
return None
print 'Intersecting masks %s and %s' % (maskA.name, maskB.name)
logicAND = vtk.vtkImageLogic()
logicAND.SetOperationToAnd()
logicAND.SetInput1(maskA.data)
logicAND.SetInput2(maskB.data)
logicAND.Update()
result = self._threshold_image(logicAND.GetOutput(), 1, 255)
return Mask('Intersect','',result)
def _logical_subtract_masks(self, maskA, maskB):
"""Returns logical subtraction of maskB from maskA => maskA AND (NOT maskB)"""
if maskB == None:
return maskA
print 'Subtracting mask %s and %s' % (maskA.name, maskB.name)
logicNOT = vtk.vtkImageLogic()
logicNOT.SetOperationToNot()
logicNOT.SetInput1(maskB.data)
logicNOT.Update()
logicAND = vtk.vtkImageLogic()
logicAND.SetOperationToAnd()
logicAND.SetInput1(maskA.data)
logicAND.SetInput2(logicNOT.GetOutput())
logicAND.Update()
result = self._threshold_image(logicAND.GetOutput(), 1, 255)
return Mask('Diff','',result)
    def _threshold_image(self, image, lower, upper):
        """Thresholds a VTK image, returning an unsigned char mask with 1
        inside and 0 outside [lower, upper].

        (The output scalar type is unsigned char, as set below.) The
        result is deep-copied so it is detached from the thresholder
        pipeline.
        """
        thresholder = vtk.vtkImageThreshold()
        thresholder.SetInput(image)
        thresholder.ThresholdBetween(lower, upper)
        thresholder.SetInValue(1)
        thresholder.SetOutValue(0)
        thresholder.SetOutputScalarTypeToUnsignedChar()
        thresholder.Update()
        result = vtk.vtkImageData()
        result.DeepCopy(thresholder.GetOutput())
        return result
    def _split_disconnected_objects(self, mask_name):
        """Split the named mask into its disconnected components (largest
        first) and save the user-chosen number of them as new masks."""
        #This is done by labelling the objects from large to small
        #Convert to ITK
        mask = self.masks[mask_name]
        # binarise to 0/1 short, which the ITK connected-component filter
        # expects
        thresholder = vtk.vtkImageThreshold()
        thresholder.SetInput(mask.data)
        thresholder.ThresholdBetween(1, 9999)
        thresholder.SetInValue(1)
        thresholder.SetOutValue(0)
        thresholder.SetOutputScalarTypeToShort()
        thresholder.Update()
        v2i = itk.VTKImageToImageFilter[itk.Image.SS3].New()
        v2i.SetInput(thresholder.GetOutput())
        ccf = itk.ConnectedComponentImageFilter.ISS3ISS3.New()
        ccf.SetInput(v2i.GetOutput())
        # relabel so that component 1 is the largest, 2 the next, etc.
        relabeller = itk.RelabelComponentImageFilter.ISS3ISS3.New()
        relabeller.SetInput(ccf.GetOutput())
        #convert back to VTK
        i2v = itk.ImageToVTKImageFilter[itk.Image.SS3].New()
        i2v.SetInput(relabeller.GetOutput())
        i2v.Update()
        labeled = i2v.GetOutput()
        # the maximum label equals the number of components found
        accumulator = vtk.vtkImageAccumulate()
        accumulator.SetInput(labeled)
        accumulator.Update()
        nr_of_components = accumulator.GetMax()[0]
        print 'Found %d disconnected mask components' % nr_of_components
        message = '%d disconnected components found.\nHow many do you want to accept (large to small)?' % nr_of_components
        nr_to_process_str = self._view_frame.dialog_inputtext(message, 'Choose number of disconnected components', '1')[1]
        try:
            nr_to_process = int(nr_to_process_str)
        except:
            # NOTE(review): bare except; int() failures are the intended
            # case, but this also swallows any other error
            self._view_frame.dialog_error('Invalid numeric input: %s' % nr_to_process_str, "Invalid input")
            return
        # NOTE(review): 0 passes this check although the message says the
        # number must be between 1 and nr_of_components -- confirm intent
        if (nr_to_process < 0) or (nr_to_process > nr_of_components):
            self._view_frame.dialog_error('Number must be between 1 and %d' % nr_of_components, "Invalid input")
            return
        print 'Saving the largest %d components to new masks' % nr_to_process
        # re-use one thresholder, isolating label i on each pass
        thresholder = vtk.vtkImageThreshold()
        thresholder.SetInput(labeled)
        thresholder.SetInValue(1)
        thresholder.SetOutValue(0)
        thresholder.SetOutputScalarTypeToUnsignedChar()
        for i in range(1, nr_to_process+1):
            thresholder.ThresholdBetween(i, i)
            thresholder.Update()
            mask_data = vtk.vtkImageData()
            mask_data.DeepCopy(thresholder.GetOutput())
            new_mask = Mask('comp_%d' % i,'',mask_data)
self.add_mask(new_mask) | nagyistoce/devide | modules/viewers/MaskComBinar.py | Python | bsd-3-clause | 62,014 |
# -*- test-case-name: twisted.conch.test.test_ckeygen -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation module for the `ckeygen` command.
"""
import sys, os, getpass, socket
if getpass.getpass == getpass.unix_getpass:
try:
import termios # hack around broken termios
termios.tcgetattr, termios.tcsetattr
except (ImportError, AttributeError):
sys.modules['termios'] = None
reload(getpass)
from twisted.conch.ssh import keys
from twisted.python import failure, filepath, log, usage, randbytes
class GeneralOptions(usage.Options):
    """Command-line option definitions for the ckeygen tool."""
    synopsis = """Usage: ckeygen [options]
 """
    longdesc = "ckeygen manipulates public/private keys in various ways."
    # options taking a value
    optParameters = [['bits', 'b', 1024, 'Number of bits in the key to create.'],
                     ['filename', 'f', None, 'Filename of the key file.'],
                     ['type', 't', None, 'Specify type of key to create.'],
                     ['comment', 'C', None, 'Provide new comment.'],
                     ['newpass', 'N', None, 'Provide new passphrase.'],
                     ['pass', 'P', None, 'Provide old passphrase.']]
    # boolean flags
    optFlags = [['fingerprint', 'l', 'Show fingerprint of key file.'],
                ['changepass', 'p', 'Change passphrase of private key file.'],
                ['quiet', 'q', 'Quiet.'],
                ['no-passphrase', None, "Create the key with no passphrase."],
                ['showpub', 'y', 'Read private key file and print public key.']]
    # shell tab-completion metadata for the --type option
    compData = usage.Completions(
        optActions={"type": usage.CompleteList(["rsa", "dsa"])})
def run():
    """Entry point: parse sys.argv and dispatch to the requested key
    operation (generate, fingerprint, change passphrase or show public
    key)."""
    options = GeneralOptions()
    try:
        options.parseOptions(sys.argv[1:])
    except usage.UsageError, u:
        print 'ERROR: %s' % u
        options.opt_help()
        sys.exit(1)
    log.discardLogs()
    log.deferr = handleError # HACK
    # dispatch precedence: key generation first, then the remaining
    # mutually exclusive inspection/maintenance flags
    if options['type']:
        if options['type'] == 'rsa':
            generateRSAkey(options)
        elif options['type'] == 'dsa':
            generateDSAkey(options)
        else:
            sys.exit('Key type was %s, must be one of: rsa, dsa' % options['type'])
    elif options['fingerprint']:
        printFingerprint(options)
    elif options['changepass']:
        changePassPhrase(options)
    elif options['showpub']:
        displayPublicKey(options)
    else:
        # no recognised operation requested: show help and fail
        options.opt_help()
        sys.exit(1)
def handleError():
    """Log the current exception, record a nonzero exit status in the
    module-level exitStatus flag, then re-raise (bare 'raise' re-raises
    the active exception)."""
    global exitStatus
    exitStatus = 2
    log.err(failure.Failure())
    raise
def generateRSAkey(options):
    """Generate an RSA key pair of options['bits'] bits and save it via
    _saveKey."""
    # local import: PyCrypto is only required when actually generating
    from Crypto.PublicKey import RSA
    print 'Generating public/private rsa key pair.'
    key = RSA.generate(int(options['bits']), randbytes.secureRandom)
    _saveKey(key, options)
def generateDSAkey(options):
    """Generate a DSA key pair of options['bits'] bits and save it via
    _saveKey."""
    # local import: PyCrypto is only required when actually generating
    from Crypto.PublicKey import DSA
    print 'Generating public/private dsa key pair.'
    key = DSA.generate(int(options['bits']), randbytes.secureRandom)
    _saveKey(key, options)
def printFingerprint(options):
    """Print '<bits> <fingerprint> <basename>' for the key file named in
    options['filename'] (prompting for a filename if none was given).

    Prefers the '.pub' sibling of the named file when it exists.
    """
    if not options['filename']:
        filename = os.path.expanduser('~/.ssh/id_rsa')
        options['filename'] = raw_input('Enter file in which the key is (%s): ' % filename)
    if os.path.exists(options['filename']+'.pub'):
        options['filename'] += '.pub'
    try:
        key = keys.Key.fromFile(options['filename'])
        obj = key.keyObject
        # size() + 1: PyCrypto's size() is presumably the bit size minus
        # one -- confirm against the installed Crypto version
        print '%s %s %s' % (
            obj.size() + 1,
            key.fingerprint(),
            os.path.basename(options['filename']))
    except:
        # NOTE(review): bare except hides the actual failure (missing
        # file, parse error, ...) behind a generic message
        sys.exit('bad key')
def changePassPhrase(options):
    """Re-encrypt the private key in options['filename'] under a new
    passphrase, prompting interactively for any missing filename or
    passphrases, and overwrite the file in place."""
    if not options['filename']:
        filename = os.path.expanduser('~/.ssh/id_rsa')
        options['filename'] = raw_input(
            'Enter file in which the key is (%s): ' % filename)
    try:
        key = keys.Key.fromFile(options['filename']).keyObject
    except keys.EncryptedKeyError as e:
        # Raised if password not supplied for an encrypted key
        if not options.get('pass'):
            options['pass'] = getpass.getpass('Enter old passphrase: ')
        try:
            key = keys.Key.fromFile(
                options['filename'], passphrase=options['pass']).keyObject
        except keys.BadKeyError:
            sys.exit('Could not change passphrase: old passphrase error')
        except keys.EncryptedKeyError as e:
            sys.exit('Could not change passphrase: %s' % (e,))
    except keys.BadKeyError as e:
        sys.exit('Could not change passphrase: %s' % (e,))
    if not options.get('newpass'):
        # keep prompting until both entries of the new passphrase match
        while 1:
            p1 = getpass.getpass(
                'Enter new passphrase (empty for no passphrase): ')
            p2 = getpass.getpass('Enter same passphrase again: ')
            if p1 == p2:
                break
            print 'Passphrases do not match. Try again.'
        options['newpass'] = p1
    try:
        newkeydata = keys.Key(key).toString('openssh',
                                            extra=options['newpass'])
    except Exception as e:
        sys.exit('Could not change passphrase: %s' % (e,))
    # round-trip check: make sure the re-encrypted key can be read back
    # with the new passphrase before overwriting the file
    try:
        keys.Key.fromString(newkeydata, passphrase=options['newpass'])
    except (keys.EncryptedKeyError, keys.BadKeyError) as e:
        sys.exit('Could not change passphrase: %s' % (e,))
    fd = open(options['filename'], 'w')
    fd.write(newkeydata)
    fd.close()
    print 'Your identification has been saved with the new passphrase.'
def displayPublicKey(options):
    """Print the OpenSSH-format public half of the private key file named
    in options['filename'] (prompting for a filename if none was given)."""
    if not options['filename']:
        filename = os.path.expanduser('~/.ssh/id_rsa')
        options['filename'] = raw_input('Enter file in which the key is (%s): ' % filename)
    try:
        key = keys.Key.fromFile(options['filename']).keyObject
    except keys.EncryptedKeyError:
        # encrypted key: reuse a supplied passphrase or prompt, then retry
        if not options.get('pass'):
            options['pass'] = getpass.getpass('Enter passphrase: ')
        key = keys.Key.fromFile(
            options['filename'], passphrase = options['pass']).keyObject
    print keys.Key(key).public().toString('openssh')
def _saveKey(key, options):
    """Write a freshly generated key pair to disk: the private key to
    options['filename'] (encrypted with options['pass'] unless
    --no-passphrase was given) and the public key to the same path with a
    '.pub' suffix. Prompts interactively for any missing filename or
    passphrase."""
    if not options['filename']:
        # derive a default path from the key type (~/.ssh/id_rsa or id_dsa)
        kind = keys.objectType(key)
        kind = {'ssh-rsa':'rsa','ssh-dss':'dsa'}[kind]
        filename = os.path.expanduser('~/.ssh/id_%s'%kind)
        options['filename'] = raw_input('Enter file in which to save the key (%s): '%filename).strip() or filename
    if os.path.exists(options['filename']):
        print '%s already exists.' % options['filename']
        yn = raw_input('Overwrite (y/n)? ')
        if yn[0].lower() != 'y':
            sys.exit()
    if options.get('no-passphrase'):
        options['pass'] = b''
    elif not options['pass']:
        # keep prompting until both passphrase entries match
        while 1:
            p1 = getpass.getpass('Enter passphrase (empty for no passphrase): ')
            p2 = getpass.getpass('Enter same passphrase again: ')
            if p1 == p2:
                break
            print 'Passphrases do not match. Try again.'
        options['pass'] = p1
    keyObj = keys.Key(key)
    comment = '%s@%s' % (getpass.getuser(), socket.gethostname())
    filepath.FilePath(options['filename']).setContent(
        keyObj.toString('openssh', options['pass']))
    # 33152 == 0o100600 (S_IFREG | 0600): private key readable/writable
    # by the owner only
    os.chmod(options['filename'], 33152)
    filepath.FilePath(options['filename'] + '.pub').setContent(
        keyObj.public().toString('openssh', comment))
    print 'Your identification has been saved in %s' % options['filename']
    print 'Your public key has been saved in %s.pub' % options['filename']
    print 'The key fingerprint is:'
    print keyObj.fingerprint()
if __name__ == '__main__':
    # allow direct invocation; normal entry is via the 'ckeygen' script
    run()
| bdh1011/wau | venv/lib/python2.7/site-packages/twisted/conch/scripts/ckeygen.py | Python | mit | 7,540 |
class fooexception(Exception):
    # NOTE: this module is presumably fixture input for the PyDev
    # refactoring/rewriter tests -- the unusual constructs are intentional
    # and must be preserved verbatim.
    def __init__(self, msg):
        Exception.__init__(self)
        print msg
    # deliberately redefines the __init__ above; at class-creation time
    # the later definition wins
    def __init__(self):
        Exception.__init__(self)
        print "i am a fooexception"
# Fixture statements exercising the Python 2 'raise' forms (string
# exception, bare class, class + argument, class + argument + traceback
# expression). These lines exist to be parsed and rewritten by the test
# harness, not to run: the first raise would terminate execution.
data = 2
raise "foo"
raise "foo", data
# before
raise fooexception # on-line
# after
# before
raise fooexception, "bla" # on-line
# after
raise fooexception, [1, 2, 3]
raise fooexception, range(3)
raise fooexception, (1, 2, 3)
raise fooexception, (1, 2, 3), 1
# after
raise fooexception, (1, 2, 3), "foo" # on-line
# after
raise fooexception, (1, 2, 3), (1, 2, 3)
raise fooexception, (1, 2, 3), [1, 2, 3]
# after
raise fooexception, (1, 2, 3), range(1) # on-line
# after
raise fooexception, (1, 2, 3), (1 + 1)
raise
raise fooexception, (1, 2, 3), 1 + 1
raise | aptana/Pydev | tests/org.python.pydev.refactoring.tests/src/python/rewriter/testRaise.py | Python | epl-1.0 | 769 |
import itertools
from zope.interface import implements
from allmydata.interfaces import IDownloadStatus
class ReadEvent:
    """Mutable view onto a single read() event dict owned by a
    DownloadStatus.

    Counters accumulate as data is delivered; finished() stamps the
    completion time and updates the owner's last-timestamp bookkeeping.
    """
    def __init__(self, ev, ds):
        # ev: the event dict owned by the DownloadStatus
        # ds: the owning DownloadStatus
        self._ev = ev
        self._ds = ds

    def update(self, bytes, decrypttime, pausetime):
        """Accumulate bytes returned and decrypt/paused durations."""
        ev = self._ev
        ev["bytes_returned"] = ev["bytes_returned"] + bytes
        ev["decrypt_time"] = ev["decrypt_time"] + decrypttime
        ev["paused_time"] = ev["paused_time"] + pausetime

    def finished(self, finishtime):
        """Mark the read as finished at `finishtime`."""
        self._ev["finish_time"] = finishtime
        self._ds.update_last_timestamp(finishtime)
class SegmentEvent:
    """Tracks one segment request within a DownloadStatus event dict."""
    def __init__(self, ev, ds):
        # ev: the event dict owned by the DownloadStatus
        # ds: the owning DownloadStatus
        self._ev = ev
        self._ds = ds

    def activate(self, when):
        """Record the first time work began on this segment (idempotent)."""
        if self._ev["active_time"] is None:
            self._ev["active_time"] = when

    def deliver(self, when, start, length, decodetime):
        """Record successful decode/delivery of the segment."""
        assert self._ev["active_time"] is not None
        self._ev.update({"finish_time": when,
                         "success": True,
                         "decode_time": decodetime,
                         "segment_start": start,
                         "segment_length": length})
        self._ds.update_last_timestamp(when)

    def error(self, when):
        """Record that this segment request failed."""
        self._ev.update({"finish_time": when, "success": False})
        self._ds.update_last_timestamp(when)
class DYHBEvent:
    """Mutable handle for one do-you-have-block query in a DownloadStatus."""
    def __init__(self, ev, ds):
        self._record = ev
        self._owner = ds
    def error(self, when):
        """Record a failed query."""
        self._record.update({"finish_time": when, "success": False})
        self._owner.update_last_timestamp(when)
    def finished(self, shnums, when):
        """Record a successful reply listing the server's share numbers."""
        self._record.update({"finish_time": when,
                             "success": True,
                             "response_shnums": shnums})
        self._owner.update_last_timestamp(when)
class BlockRequestEvent:
    """Mutable handle for one share-data (block) request in a DownloadStatus."""
    def __init__(self, ev, ds):
        self._record = ev
        self._owner = ds
    def finished(self, received, when):
        """Record a successful response of ``received`` bytes."""
        self._record.update({"finish_time": when,
                             "success": True,
                             "response_length": received})
        self._owner.update_last_timestamp(when)
    def error(self, when):
        """Record a failed request."""
        self._record.update({"finish_time": when, "success": False})
        self._owner.update_last_timestamp(when)
class DownloadStatus:
    """Track all download activity for a single CiphertextFileNode.

    There is one DownloadStatus per CiphertextFileNode. It records read,
    segment, DYHB, and block-request activity and exposes the
    IDownloadStatus accessors used by the status-display code.
    """
    implements(IDownloadStatus)
    statusid_counter = itertools.count(0)

    def __init__(self, storage_index, size):
        self.storage_index = storage_index
        self.size = size
        # next() works on Python 2.6+ and Python 3, unlike .next()
        self.counter = next(self.statusid_counter)
        self.helper = False
        self.first_timestamp = None
        self.last_timestamp = None

        # all four of these _events lists are sorted by start_time, because
        # they are strictly append-only (some elements are later mutated in
        # place, but none are removed or inserted in the middle).

        # self.read_events tracks read() requests. It is a list of dicts,
        # each with the following keys:
        #  start,length  (of data requested)
        #  start_time
        #  finish_time (None until finished)
        #  bytes_returned (starts at 0, grows as segments are delivered)
        #  decrypt_time (time spent in decrypt, None for ciphertext-only reads)
        #  paused_time (time spent paused by client via pauseProducing)
        self.read_events = []

        # self.segment_events tracks segment requests and their resolution.
        # It is a list of dicts:
        #  segment_number
        #  start_time
        #  active_time (None until work has begun)
        #  decode_time (time spent in decode, None until delivered)
        #  finish_time (None until resolved)
        #  success (None until resolved, then boolean)
        #  segment_start (file offset of first byte, None until delivered)
        #  segment_length (None until delivered)
        self.segment_events = []

        # self.dyhb_requests tracks "do you have a share" requests and
        # responses. It is a list of dicts:
        #  server (instance of IServer)
        #  start_time
        #  success (None until resolved, then boolean)
        #  response_shnums (tuple, None until successful)
        #  finish_time (None until resolved)
        self.dyhb_requests = []

        # self.block_requests tracks share-data requests and responses. It is
        # a list of dicts:
        #  server (instance of IServer)
        #  shnum,
        #  start,length,  (of data requested)
        #  start_time
        #  finish_time (None until resolved)
        #  success (None until resolved, then bool)
        #  response_length (None until success)
        self.block_requests = []

        self.known_shares = [] # (server, shnum)
        self.problems = []
        self.misc_events = []

    def add_misc_event(self, what, start, finish=None):
        """Record an arbitrary named event with optional finish time."""
        self.misc_events.append( {"what": what,
                                  "start_time": start,
                                  "finish_time": finish,
                                  } )

    def add_read_event(self, start, length, when):
        """Register a new read() request; returns a ReadEvent handle."""
        if self.first_timestamp is None:
            self.first_timestamp = when
        r = { "start": start,
              "length": length,
              "start_time": when,
              "finish_time": None,
              "bytes_returned": 0,
              "decrypt_time": 0,
              "paused_time": 0,
              }
        self.read_events.append(r)
        return ReadEvent(r, self)

    def add_segment_request(self, segnum, when):
        """Register a new segment fetch; returns a SegmentEvent handle."""
        if self.first_timestamp is None:
            self.first_timestamp = when
        r = { "segment_number": segnum,
              "start_time": when,
              "active_time": None,
              "finish_time": None,
              "success": None,
              "decode_time": None,
              "segment_start": None,
              "segment_length": None,
              }
        self.segment_events.append(r)
        return SegmentEvent(r, self)

    def add_dyhb_request(self, server, when):
        """Register a do-you-have-block query; returns a DYHBEvent handle."""
        r = { "server": server,
              "start_time": when,
              "success": None,
              "response_shnums": None,
              "finish_time": None,
              }
        self.dyhb_requests.append(r)
        return DYHBEvent(r, self)

    def add_block_request(self, server, shnum, start, length, when):
        """Register a share-data request; returns a BlockRequestEvent handle."""
        r = { "server": server,
              "shnum": shnum,
              "start": start,
              "length": length,
              "start_time": when,
              "finish_time": None,
              "success": None,
              "response_length": None,
              }
        self.block_requests.append(r)
        return BlockRequestEvent(r, self)

    def update_last_timestamp(self, when):
        """Advance the most-recent-activity timestamp monotonically."""
        if self.last_timestamp is None or when > self.last_timestamp:
            self.last_timestamp = when

    def add_known_share(self, server, shnum): # XXX use me
        self.known_shares.append( (server, shnum) )

    def add_problem(self, p):
        self.problems.append(p)

    # IDownloadStatus methods

    def get_counter(self):
        return self.counter

    def get_storage_index(self):
        return self.storage_index

    def get_size(self):
        return self.size

    def get_status(self):
        """Return a one-line summary of outstanding/errorful segments."""
        # mention all outstanding segment requests
        # (the original assigned empty sets here first; that dead code
        # has been removed)
        outstanding = set([s_ev["segment_number"]
                           for s_ev in self.segment_events
                           if s_ev["finish_time"] is None])
        errorful = set([s_ev["segment_number"]
                        for s_ev in self.segment_events
                        if s_ev["success"] is False])
        def join(segnums):
            if len(segnums) == 1:
                return "segment %s" % list(segnums)[0]
            else:
                return "segments %s" % (",".join([str(i)
                                                  for i in sorted(segnums)]))
        error_s = ""
        if errorful:
            error_s = "; errors on %s" % join(errorful)
        if outstanding:
            s = "fetching %s" % join(outstanding)
        else:
            s = "idle"
        return s + error_s

    def get_progress(self):
        """Return fractional completion over the unfinished read events."""
        # measure all read events that aren't completely done, return the
        # total percentage complete for them
        if not self.read_events:
            return 0.0
        total_outstanding, total_received = 0, 0
        for r_ev in self.read_events:
            if r_ev["finish_time"] is None:
                total_outstanding += r_ev["length"]
                total_received += r_ev["bytes_returned"]
            # else ignore completed requests
        if not total_outstanding:
            return 1.0
        return 1.0 * total_received / total_outstanding

    def using_helper(self):
        return False

    def get_active(self):
        # a download is considered active if it has at least one outstanding
        # read() call
        for r_ev in self.read_events:
            if r_ev["finish_time"] is None:
                return True
        return False

    def get_started(self):
        return self.first_timestamp

    def get_results(self):
        return None # TODO
| pombredanne/tahoe-lafs | src/allmydata/immutable/downloader/status.py | Python | gpl-2.0 | 9,220 |
# -*- coding: utf-8 -*-
from ..internal.misc import json
from ..internal.MultiAccount import MultiAccount
class OverLoadMe(MultiAccount):
    __name__ = "OverLoadMe"
    __type__ = "account"
    __version__ = "0.13"
    __status__ = "testing"

    __config__ = [("mh_mode", "all;listed;unlisted", "Filter hosters to use", "all"),
                  ("mh_list", "str", "Hoster list (comma separated)", ""),
                  ("mh_interval", "int", "Reload interval in hours", 12)]

    __description__ = """Over-Load.me account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("marley", "marley@over-load.me")]

    def grab_hosters(self, user, password, data):
        """Return the non-empty hoster names advertised by the API."""
        html = self.load("https://api.over-load.me/hoster.php",
                         get={'auth': "0001-cb1f24dadb3aa487bda5afd3b76298935329be7700cd7-5329be77-00cf-1ca0135f"})
        names = (entry.strip() for entry in html.replace("\"", "").split(","))
        return [name for name in names if name]

    def grab_info(self, user, password, data):
        """Query account status and report premium flag, expiry and traffic."""
        html = self.load("https://api.over-load.me/account.php",
                         get={'user': user,
                              'auth': password}).strip()
        account = json.loads(html)
        self.log_debug(account)

        #: Check for premium
        if account['membership'] == "Free":
            return {'premium': False, 'validuntil': None, 'trafficleft': None}

        return {'premium': True,
                'validuntil': account['expirationunix'],
                'trafficleft': -1}

    def signin(self, user, password, data):
        """Validate the credentials; fail the login when the API flags an error."""
        html = self.load("https://api.over-load.me/account.php",
                         get={'user': user,
                              'auth': password}).strip()
        if json.loads(html)['err'] == 1:
            self.fail_login()
| Arno-Nymous/pyload | module/plugins/accounts/OverLoadMe.py | Python | gpl-3.0 | 1,824 |
from test import support
import unittest
import sys
# Skip test if nis module does not exist.
nis = support.import_module('nis')
class NisTests(unittest.TestCase):
def test_maps(self):
try:
maps = nis.maps()
except nis.error as msg:
# NIS is probably not active, so this test isn't useful
self.skipTest(str(msg))
try:
# On some systems, this map is only accessible to the
# super user
maps.remove("passwd.adjunct.byname")
except ValueError:
pass
done = 0
for nismap in maps:
mapping = nis.cat(nismap)
for k, v in mapping.items():
if not k:
continue
if nis.match(k, nismap) != v:
self.fail("NIS match failed for key `%s' in map `%s'" % (k, nismap))
else:
# just test the one key, otherwise this test could take a
# very long time
done = 1
break
if done:
break
# Allow running this test file directly: ``python test_nis.py``.
if __name__ == '__main__':
    unittest.main()
| Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/test/test_nis.py | Python | gpl-3.0 | 1,167 |
# -*- coding: utf-8 -*-
"""
Created on Fri July 6 11:04:03 2015
@author: boland
"""
import os
import datetime
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
from scipy import signal
from obspy import read
from scipy.signal import argrelextrema
from info_dataless import locs_from_dataless
from scipy import interpolate
from matplotlib.colors import LogNorm
import pickle
import fiona
from shapely import geometry
from shapely.geometry import asPolygon, Polygon
from math import sqrt, radians, cos, sin, asin
from info_dataless import locs_from_dataless
from descartes.patch import PolygonPatch
from matplotlib.colors import LogNorm
from scipy.spatial import ConvexHull
from scipy.cluster.vq import kmeans
from shapely.affinity import scale
from matplotlib.path import Path
from scipy.interpolate import griddata
#------------------------------------------------------------------------------
# CLASSES
#------------------------------------------------------------------------------
class InShape:
    """
    Wrap a shapefile boundary AND quickly check whether a given set of
    coordinates is contained within it. Uses the shapely module.
    """
    def __init__(self, input_shape, coords=0.):
        # path to the boundary shapefile
        self.boundary = input_shape
        # optional coordinate array input (unused by the methods below)
        self.dots = coords
        # cached boundary polygon (0. until shape_poly() is called)
        self.polygon = 0.
        # placeholder for coordinates found inside the polygon
        self.output = 0.

    def shape_poly(self):
        """Read the first record of the shapefile, cache it on
        self.polygon as a shapely geometry, and return it."""
        with fiona.open(self.boundary) as fiona_collection:
            # In this case, we'll assume the shapefile only has one layer
            # (next() works on both Python 2 and 3 iterators)
            shapefile_record = next(fiona_collection)
            # Use Shapely to create the polygon
            self.polygon = geometry.asShape( shapefile_record['geometry'] )
            return self.polygon

    def point_check(self, coord):
        """
        Take a single (2,1) coordinate, convert it into a
        shapely.geometry.Point and return the coord if it lies inside the
        shapefile boundary (None otherwise).
        """
        self.polygon = self.shape_poly()
        point = geometry.Point(coord[0], coord[1])
        if self.polygon.contains(point):
            return coord

    def shape_bounds(self):
        """Return the bounding box as (xmin, ymin, xmax, ymax) -- the
        order produced by shapely's ``bounds`` attribute."""
        self.polygon = self.shape_poly()
        return self.polygon.bounds

    def shape_buffer(self, shape=None, size=1., res=1):
        """
        Return a new polygon built from the exterior of ``shape`` buffered
        outwards by ``size``. ``shape`` defaults to self.shape_poly().

        BUG FIX: the original implementation ignored a ``shape`` argument
        and always buffered self.polygon; an explicitly passed polygon is
        now actually used.
        """
        if shape is None:
            shape = self.shape_poly()
            self.polygon = shape  # preserve the original caching side effect
        return asPolygon(shape.buffer(size, resolution=res).exterior)

    def extract_poly_coords(self, poly):
        """Return a stacked array of exterior coordinates, descending into
        each part of a MultiPolygon recursively."""
        if poly.type == 'Polygon':
            exterior_coords = poly.exterior.coords[:]
        elif poly.type == 'MultiPolygon':
            exterior_coords = []
            for part in poly:
                epc = np.asarray(self.extract_poly_coords(part)) # Recursive call
                exterior_coords.append(epc)
        else:
            raise ValueError('Unhandled geometry type: ' + repr(poly.type))
        return np.vstack(exterior_coords)

    def external_coords(self, shape=None, buff=None, size=1., res=1):
        """
        Return the external coords of a (possibly buffered) shapely
        polygon. Note that the ``shape`` input MUST be a shapely Polygon
        object; buffering is applied only when ``buff`` is given as well.
        """
        if shape is not None and buff is not None:
            poly = self.shape_buffer(shape=shape, size=size, res=res)
        elif shape is not None:
            poly = shape
        else:
            poly = self.shape_poly()
        return self.extract_poly_coords(poly)
#------------------------------------------------------------------------------
# IMPORT PATHS TO MSEED FILES
#------------------------------------------------------------------------------
def spectrum(tr):
    """Return a (255, 2) array of [frequency, RMS amplitude] computed from
    a flat-top Welch power spectrum of the trace, or 0. when fewer than
    256 frequency bins are available."""
    wave = tr.data  # sample array extracted from the (obspy-style) trace
    fs = tr.stats.sampling_rate
    freqs, power = signal.welch(wave, fs, 'flattop', nperseg=1024,
                                scaling='spectrum')
    if len(freqs) < 256:
        return 0.
    amplitude = np.abs(np.sqrt(power[:255]))
    return np.column_stack((freqs[:255], amplitude))
# x = np.linspace(0, 10, 1000)
# f_interp = interp1d(np.sqrt(Pxx_spec),f, kind='cubic')
#x.reverse()
#y.reverse()
# print f_interp(x)
#f,np.sqrt(Pxx_spec),'o',
# plt.figure()
# plt.plot(x,f_interp(x),'-' )
# plt.show()
def paths_sort(path):
    """
    Sort key for data file paths named like ``STATION.YYYY-MM-DD.ext``.

    Returns a (datetime, station_name) tuple so that ``sorted()`` orders
    the paths chronologically, then by station. Returns None when the
    second dot-separated field is not a parseable date (the original
    implementation silently swallowed the error via ``a=4`` and fell off
    the end, which returned None as well).
    """
    base_name = os.path.basename(path)
    stat_name = base_name.split('.')[0]
    date_str = base_name.split('.')[1]
    try:
        return datetime.datetime.strptime(date_str, '%Y-%m-%d'), stat_name
    except ValueError:
        # unparseable date: no usable key
        return None
def paths(folder_path, extension):
    """
    Recursively collect the absolute path of every file beneath
    ``folder_path`` whose extension matches ``extension`` (given without
    a leading dot, e.g. 'txt'), sorted chronologically via paths_sort().
    """
    wanted_suffix = '.{}'.format(extension)
    found = []
    for root, dirs, files in os.walk(folder_path):
        for file_name in files:
            full_path = os.path.join(root, file_name)
            if os.path.splitext(full_path)[1] == wanted_suffix:
                found.append(full_path)
    return sorted(found, key=paths_sort)
# import background shapefile location
# NOTE(review): hard-coded absolute paths below are only valid on the
# author's machine.
shape_path = "/home/boland/Dropbox/University/UniMelb\
/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
# generate shape object
# Generate InShape class
SHAPE = InShape(shape_path)
# Create shapely polygon from imported shapefile
UNIQUE_SHAPE = SHAPE.shape_poly()
# set plotting limits for shapefile boundaries
lonmin, latmin, lonmax, latmax = SHAPE.shape_bounds()
print lonmin, latmin, lonmax, latmax
#lonmin, lonmax, latmin, latmax = SHAPE.plot_lims()
# station coordinates read from the dataless SEED volume
dataless_path = 'ALL_AUSTRALIA.870093.dataless'
stat_locs = locs_from_dataless(dataless_path)
#folder_path = '/storage/ANT/INPUT/DATA/AU-2014'
folder_path = '/storage/ANT/INPUT/DATA/AU-2014'
extension = 'mseed'
paths_list = paths(folder_path, extension)
t0_total = datetime.datetime.now()
figs_counter = 0
# load the previously computed PDS maxima: rows of (lon, lat, peak value)
# NOTE(review): open(name=...) is Python 2 only; Python 3 spells it file=...
pickle_file = '/storage/ANT/spectral_density/station_pds_maxima/\
S Network 2014/noise_info0_SNetwork2014.pickle'
f = open(name=pickle_file, mode='rb')
noise_info0 = pickle.load(f)
f.close()
# dump noise_info1
fig = plt.figure(figsize=(15,10), dpi=1000)
plt.title('Average Seismic Noise First Peak Maximum PDS\n S Network | 2014')
plt.xlabel('Longitude (degrees)')
plt.ylabel('Latitude (degrees)')
# draw the continent outline underneath everything else (zorder=1)
patch = PolygonPatch(UNIQUE_SHAPE, facecolor='white',\
                     edgecolor='k', zorder=1)
ax = fig.add_subplot(111)
ax.add_patch(patch)
#create 5000 Random points distributed within the circle radius 100
x, y = noise_info0[:,0], noise_info0[:,1]
points = np.column_stack((x,y))
xmin, xmax = np.min(x), np.max(x)
ymin, ymax = np.min(y), np.max(y)
values = noise_info0[:,2]
#now we create a grid of values, interpolated from our random sample above
y = np.linspace(ymin, ymax, 100)
x = np.linspace(xmin, xmax, 100)
gridx, gridy = np.meshgrid(x, y)
heat_field = griddata(points, values, (gridx, gridy), method='cubic',fill_value=0)
print heat_field
# clamp negative cubic-interpolation artefacts to 1, then mask the zero
# fill value so it is not drawn on the log colour scale below
heat_field = np.where(heat_field < 0, 1, heat_field)
heat_field = np.ma.masked_where(heat_field==0,heat_field)
plt.pcolor(gridx, gridy, heat_field,
           cmap='rainbow',alpha=0.5, norm=LogNorm(vmin=100, vmax=3e4),
           zorder=2)
plt.scatter(noise_info0[:,0], noise_info0[:,1], c=noise_info0[:,2],
            norm=LogNorm(vmin=100, vmax=3e4), s=35, cmap='rainbow', zorder=3)
#cmin, cmax = np.min(noise_info0[:,2]), np.max(noise_info0[:,2])
#sc = plt.scatter(noise_info0[:,0], noise_info0[:,1], c=noise_info0[:,2],
#                 norm=LogNorm(vmin=100, vmax=3e4), s=50, cmap=cm, zorder=2)
col = plt.colorbar()
col.ax.set_ylabel('Maximum Power Density Spectrum (V RMS)')
# pad the axes 5% beyond the shapefile bounding box
ax.set_xlim(lonmin-0.05*abs(lonmax-lonmin), \
            lonmax+0.05*abs(lonmax-lonmin))
ax.set_ylim(latmin-0.05*abs(latmax-latmin), \
            latmax+0.05*abs(latmax-latmin))
fig.savefig('station_pds_maxima/check1.svg', format='SVG')
| boland1992/seissuite_iran | seissuite/spectrum/extrema_pickle.py | Python | gpl-3.0 | 9,300 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import connection
from django.db import models, migrations
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.management import update_all_contenttypes
def create_notifications(apps, schema_editor):
    """Data migration: copy every userstories_userstory_watchers row into
    notifications_watched so user-story watchers survive the removal of
    the M2M field performed by this migration.

    ``apps``/``schema_editor`` are supplied by RunPython but unused here:
    the copy is done with raw SQL on the default connection instead.
    """
    # refresh the content-type cache so the 'userstory' type row exists
    update_all_contenttypes(verbosity=0)
    sql="""
INSERT INTO notifications_watched (object_id, created_date, content_type_id, user_id, project_id)
SELECT userstory_id AS object_id, now() AS created_date, {content_type_id} AS content_type_id, user_id, project_id
FROM userstories_userstory_watchers INNER JOIN userstories_userstory ON userstories_userstory_watchers.userstory_id = userstories_userstory.id""".format(content_type_id=ContentType.objects.get(model='userstory').id)
    cursor = connection.cursor()
    cursor.execute(sql)
class Migration(migrations.Migration):
    """Run the watcher-row copy, then drop the now-redundant ``watchers``
    M2M field from UserStory."""

    dependencies = [
        ('notifications', '0004_watched'),
        ('userstories', '0009_remove_userstory_is_archived'),
    ]

    operations = [
        migrations.RunPython(create_notifications),
        migrations.RemoveField(
            model_name='userstory',
            name='watchers',
        ),
    ]
| CMLL/taiga-back | taiga/projects/userstories/migrations/0010_remove_userstory_watchers.py | Python | agpl-3.0 | 1,220 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from system_test import TestCase, Qdrouterd, main_module, TIMEOUT, Logger, TestTimeout
from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import Container
# How many worker threads each router is configured with.
W_THREADS = 2

# Condition name/description the router is expected to report when it
# force-closes a connection because a message exceeded maxMessageSize.
OVERSIZE_CONDITION_NAME = "amqp:connection:forced"
OVERSIZE_CONDITION_DESC = "Message size exceeded"
#
# DISPATCH-975 Detect that an oversize message is blocked.
# These tests check basic blocking where the the sender is blocked by
# the ingress routers. It does not check compound blocking where
# oversize is allowed or denied by an ingress edge router but also
# denied by the uplink interior router.
class OversizeMessageTransferTest(MessagingHandler):
    """
    This test connects a sender and a receiver. Then it tries to send _count_
    number of messages of the given size through the router or router network.

    With expect_block=True the ingress router should detect the sender's oversize
    message and close the sender connection. The receiver may receive
    aborted message indications but that is not guaranteed. If any aborted
    messages are received then the count must be at most one.
    The test is a success when the sender receives a connection error with
    oversize indication and the receiver has not received too many aborts.

    With expect_block=False sender messages should be received normally.
    The test is a success when n_sent == count and n_rcvd == count (see
    _check_done; the n_accepted counter is tracked but not consulted).
    """
    def __init__(self, sender_host, receiver_host, test_address,
                 message_size=100000, count=10, expect_block=True, print_to_console=False):
        super(OversizeMessageTransferTest, self).__init__()
        self.sender_host = sender_host
        self.receiver_host = receiver_host
        self.test_address = test_address
        self.msg_size = message_size
        self.count = count
        self.expect_block = expect_block
        self.sender_conn = None
        self.receiver_conn = None
        # first failure description; None means "no error so far"
        self.error = None
        self.sender = None
        self.receiver = None
        self.proxy = None
        # progress counters updated from the reactor callbacks below
        self.n_sent = 0
        self.n_rcvd = 0
        self.n_accepted = 0
        self.n_rejected = 0
        self.n_aborted = 0
        self.n_connection_error = 0
        self.shut_down = False
        self.logger = Logger(title=("OversizeMessageTransferTest - %s" % (self.test_address)), print_to_console=print_to_console)
        self.log_unhandled = False
    def timeout(self):
        # Fired by the reactor if the test does not complete within TIMEOUT.
        self.error = "Timeout Expired: n_sent=%d n_rcvd=%d n_rejected=%d n_aborted=%d" % \
                     (self.n_sent, self.n_rcvd, self.n_rejected, self.n_aborted)
        self.logger.log("self.timeout " + self.error)
        self._shut_down_test()
    def on_start(self, event):
        # Arm the failsafe timer, open both connections, and attach one
        # receiver and one sender to the test address.
        self.logger.log("on_start")
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.logger.log("on_start: opening receiver connection to %s" % (self.receiver_host.addresses[0]))
        self.receiver_conn = event.container.connect(self.receiver_host.addresses[0])
        self.logger.log("on_start: opening   sender connection to %s" % (self.sender_host.addresses[0]))
        self.sender_conn = event.container.connect(self.sender_host.addresses[0])
        self.logger.log("on_start: Creating receiver")
        self.receiver = event.container.create_receiver(self.receiver_conn, self.test_address)
        self.logger.log("on_start: Creating sender")
        self.sender = event.container.create_sender(self.sender_conn, self.test_address)
        self.logger.log("on_start: done")
    def send(self):
        # Send as long as credit is available and messages remain.
        while self.sender.credit > 0 and self.n_sent < self.count:
            # construct message in indentifiable chunks: each 50-byte chunk
            # is tagged with the address, message number and byte offset,
            # padded with a per-message pad character.
            body_msg = ""
            padchar = "abcdefghijklmnopqrstuvwxyz@#$%"[self.n_sent % 30]
            while len(body_msg) < self.msg_size:
                chunk = "[%s:%d:%d" % (self.test_address, self.n_sent, len(body_msg))
                padlen = 50 - len(chunk)
                chunk += padchar * padlen
                body_msg += chunk
            if len(body_msg) > self.msg_size:
                # trim the last chunk so the body is exactly msg_size bytes
                body_msg = body_msg[:self.msg_size]
            self.logger.log("send. address:%s message:%d of %s length=%d" %
                            (self.test_address, self.n_sent, self.count, self.msg_size))
            m = Message(body=body_msg)
            self.sender.send(m)
            self.n_sent += 1
    def on_sendable(self, event):
        if event.sender == self.sender:
            self.logger.log("on_sendable")
            self.send()
    def on_message(self, event):
        if self.expect_block:
            # All messages should violate maxMessageSize.
            # Receiving any is an error.
            self.error = "Received a message. Expected to receive no messages."
            self.logger.log(self.error)
            self._shut_down_test()
        else:
            self.n_rcvd += 1
            self.accept(event.delivery)
            self._check_done()
    def on_connection_remote_close(self, event):
        # Ignore closes triggered by our own teardown.
        if self.shut_down:
            return
        if event.connection == self.sender_conn:
            if event.connection.remote_condition is not None:
                if event.connection.remote_condition.name == OVERSIZE_CONDITION_NAME and \
                   event.connection.remote_condition.description == OVERSIZE_CONDITION_DESC:
                    # the expected oversize-denial close from the router
                    self.logger.log("on_connection_remote_close: sender closed with correct condition")
                    self.n_connection_error += 1
                    self.sender_conn.close()
                    self.sender_conn = None
                else:
                    # sender closed but for wrong reason
                    self.error = "sender close error: Expected name: %s, description: %s, but received name: %s, description: %s" % (
                                 OVERSIZE_CONDITION_NAME, OVERSIZE_CONDITION_DESC,
                                 event.connection.remote_condition.name, event.connection.remote_condition.description)
                    self.logger.log(self.error)
            else:
                self.error = "sender close error: Expected a remote_condition but there was none."
                self.logger.log(self.error)
        else:
            # connection error but not for sender
            self.error = "unexpected connection close error: wrong connection closed."
            self.logger.log(self.error)
        self._check_done()
    def _shut_down_test(self):
        # Idempotent teardown: cancel the timer and close links/connections.
        self.shut_down = True
        if self.timer:
            self.timer.cancel()
            self.timer = None
        if self.sender:
            self.sender.close()
            self.sender = None
        if self.receiver:
            self.receiver.close()
            self.receiver = None
        if self.sender_conn:
            self.sender_conn.close()
            self.sender_conn = None
        if self.receiver_conn:
            self.receiver_conn.close()
            self.receiver_conn = None
    def _check_done(self):
        # Evaluate the success/failure criteria after every state change.
        current = ("check_done: sent=%d rcvd=%d rejected=%d aborted=%d connection_error:%d" %
                   (self.n_sent, self.n_rcvd, self.n_rejected, self.n_aborted, self.n_connection_error))
        self.logger.log(current)
        if self.error is not None:
            self.logger.log("TEST FAIL")
            self._shut_down_test()
        else:
            # blocking tests pass on exactly one oversize connection error;
            # non-blocking tests pass when everything sent was received
            done = (self.n_connection_error == 1) \
                if self.expect_block else \
                (self.n_sent == self.count and self.n_rcvd == self.count)
            if done:
                self.logger.log("TEST DONE!!!")
                # self.log_unhandled = True # verbose debugging
                self._shut_down_test()
    def on_rejected(self, event):
        self.n_rejected += 1
        if self.expect_block:
            # rejections are acceptable while blocking is in progress
            self.logger.log("on_rejected: entry")
            self._check_done()
        else:
            self.error = "Unexpected on_reject"
            self.logger.log(self.error)
            self._check_done()
    def on_aborted(self, event):
        self.logger.log("on_aborted")
        self.n_aborted += 1
        self._check_done()
    def on_error(self, event):
        self.error = "Container error"
        self.logger.log(self.error)
        self._shut_down_test()
    def on_unhandled(self, method, *args):
        # Optional verbose tracing of every unrouted proton event.
        if self.log_unhandled:
            self.logger.log("on_unhandled: method: %s, args: %s" % (method, args))
    def run(self):
        try:
            Container(self).run()
        except Exception as e:
            self.error = "Container run exception: %s" % (e)
            self.logger.log(self.error)
            self.logger.dump()
# For the next test case define max sizes for each router.
# These are the configured maxMessageSize values
EA1_MAX_SIZE = 50000
INTA_MAX_SIZE = 100000
INTB_MAX_SIZE = 150000
EB1_MAX_SIZE = 200000

# DISPATCH-1645 S32 max size is chosen to expose signed 32-bit
# wraparound bug. Sizes with bit 31 set look negative when used as
# C 'int' and prevent any message from passing policy checks.
S32_MAX_SIZE = 2**31

# Interior routers enforce max size directly.
# Edge routers are also checked by the attached interior router.

# Block tests that use edge routers that send messages to the network must
# account for the fact that the attached interior router will apply
# another max size. These tests do not check against EB1 max for the
# sender if the receiver is on EA1, INTA, or INTB since INTB's max
# would kick in and cause a false positive.

# Tests that check for allowing near-max sizes use the minimum of
# the edge router's max and the attached interior router's max.

# The bytes-over and bytes-under max that should trigger allow or deny.
# Messages with content this much over should be blocked while
# messages with content this much under should be allowed.
# * client overhead is typically 16 bytes or so
# * interrouter overhead is much larger with annotations
OVER_UNDER = 200
class MaxMessageSizeBlockOversize(TestCase):
"""
verify that maxMessageSize blocks oversize messages
"""
@classmethod
def setUpClass(cls):
"""Start the router"""
super(MaxMessageSizeBlockOversize, cls).setUpClass()
def router(name, mode, max_size, extra):
config = [
('router', {'mode': mode,
'id': name,
'allowUnsettledMulticast': 'yes',
'workerThreads': W_THREADS}),
('listener', {'role': 'normal',
'port': cls.tester.get_port()}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
('policy', {'maxConnections': 100, 'enableVhostPolicy': 'true', 'maxMessageSize': max_size, 'defaultVhost': '$default'}),
('vhost', {'hostname': '$default',
'allowUnknownUser': 'true',
'groups': {
'$default': {
'users': '*',
'maxConnections': 100,
'remoteHosts': '*',
'sources': '*',
'targets': '*',
'allowAnonymousSender': 'true',
'allowWaypointLinks': 'true',
'allowDynamicSource': 'true'
}
}
})
]
if extra:
config.extend(extra)
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
return cls.routers[-1]
# configuration:
# two edge routers connected via 2 interior routers with max sizes
#
# +-------+ +---------+ +---------+ +-------+
# | EA1 |<==>| INT.A |<==>| INT.B |<==>| EB1 |
# | 50,000| | 100,000 | | 150,000 | |200,000|
# +-------+ +---------+ +---------+ +-------+
#
# Note:
# * Messages whose senders connect to INT.A or INT.B are subject to max message size
# defined for the ingress router only.
# * Message whose senders connect to EA1 or EA2 are subject to max message size
# defined for the ingress router. If the message is forwarded through the
# connected interior router then the message is subject to another max message size
# defined by the interior router.
cls.routers = []
interrouter_port = cls.tester.get_port()
cls.INTA_edge_port = cls.tester.get_port()
cls.INTB_edge_port = cls.tester.get_port()
router('INT.A', 'interior', INTA_MAX_SIZE,
[('listener', {'role': 'inter-router',
'port': interrouter_port}),
('listener', {'role': 'edge', 'port': cls.INTA_edge_port})])
cls.INT_A = cls.routers[0]
cls.INT_A.listener = cls.INT_A.addresses[0]
router('INT.B', 'interior', INTB_MAX_SIZE,
[('connector', {'name': 'connectorToA',
'role': 'inter-router',
'port': interrouter_port}),
('listener', {'role': 'edge',
'port': cls.INTB_edge_port})])
cls.INT_B = cls.routers[1]
cls.INT_B.listener = cls.INT_B.addresses[0]
router('EA1', 'edge', EA1_MAX_SIZE,
[('listener', {'name': 'rc', 'role': 'route-container',
'port': cls.tester.get_port()}),
('connector', {'name': 'uplink', 'role': 'edge',
'port': cls.INTA_edge_port})])
cls.EA1 = cls.routers[2]
cls.EA1.listener = cls.EA1.addresses[0]
router('EB1', 'edge', EB1_MAX_SIZE,
[('connector', {'name': 'uplink',
'role': 'edge',
'port': cls.INTB_edge_port,
'maxFrameSize': 1024}),
('listener', {'name': 'rc', 'role': 'route-container',
'port': cls.tester.get_port()})])
cls.EB1 = cls.routers[3]
cls.EB1.listener = cls.EB1.addresses[0]
router('S32', 'standalone', S32_MAX_SIZE, [])
cls.S32 = cls.routers[4]
cls.S32.listener = cls.S32.addresses[0]
cls.INT_A.wait_router_connected('INT.B')
cls.INT_B.wait_router_connected('INT.A')
cls.EA1.wait_connectors()
cls.EB1.wait_connectors()
def test_40_block_oversize_INTA_INTA(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_A,
MaxMessageSizeBlockOversize.INT_A,
"e40",
message_size=INTA_MAX_SIZE + OVER_UNDER,
expect_block=True,
print_to_console=False)
test.run()
if test.error is not None:
test.logger.log("test_40 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_41_block_oversize_INTA_INTB(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_A,
MaxMessageSizeBlockOversize.INT_B,
"e41",
message_size=INTA_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_41 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_42_block_oversize_INTA_EA1(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_A,
MaxMessageSizeBlockOversize.EA1,
"e42",
message_size=INTA_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_42 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_43_block_oversize_INTA_EB1(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_A,
MaxMessageSizeBlockOversize.EB1,
"e43",
message_size=INTA_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_43 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_44_block_oversize_INTB_INTA(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_B,
MaxMessageSizeBlockOversize.INT_A,
"e44",
message_size=INTB_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_44 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
    def test_45_block_oversize_INTB_INTB(self):
        """A message of INTB_MAX_SIZE + OVER_UNDER sent INT.B -> INT.B must be blocked."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_B,
                                           MaxMessageSizeBlockOversize.INT_B,
                                           "e45",
                                           message_size=INTB_MAX_SIZE + OVER_UNDER,
                                           expect_block=True)
        test.run()
        if test.error is not None:
            test.logger.log("test_45 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_46_block_oversize_INTB_EA1(self):
        """A message of INTB_MAX_SIZE + OVER_UNDER sent INT.B -> EA1 must be blocked."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_B,
                                           MaxMessageSizeBlockOversize.EA1,
                                           "e46",
                                           message_size=INTB_MAX_SIZE + OVER_UNDER,
                                           expect_block=True)
        test.run()
        if test.error is not None:
            test.logger.log("test_46 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_47_block_oversize_INTB_EB1(self):
        """A message of INTB_MAX_SIZE + OVER_UNDER sent INT.B -> EB1 must be blocked."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_B,
                                           MaxMessageSizeBlockOversize.EB1,
                                           "e47",
                                           message_size=INTB_MAX_SIZE + OVER_UNDER,
                                           expect_block=True)
        test.run()
        if test.error is not None:
            test.logger.log("test_47 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_48_block_oversize_EA1_INTA(self):
        """Oversize EA1 -> INT.A is blocked; skipped when INT.A's limit could fire first."""
        if EA1_MAX_SIZE >= INTA_MAX_SIZE:
            self.skipTest("EA1 sending to INT.A may be blocked by EA1 limit and also by INT.A limit. That condition is tested in compound test.")
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EA1,
                                           MaxMessageSizeBlockOversize.INT_A,
                                           "e48",
                                           message_size=EA1_MAX_SIZE + OVER_UNDER,
                                           expect_block=True)
        test.run()
        if test.error is not None:
            test.logger.log("test_48 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_49_block_oversize_EA1_INTB(self):
        """Oversize EA1 -> INT.B is blocked; skipped when INT.A's limit could fire first."""
        if EA1_MAX_SIZE >= INTA_MAX_SIZE:
            self.skipTest("EA1 sending to INT.B may be blocked by EA1 limit and also by INT.A limit. That condition is tested in compound test.")
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EA1,
                                           MaxMessageSizeBlockOversize.INT_B,
                                           "e49",
                                           message_size=EA1_MAX_SIZE + OVER_UNDER,
                                           expect_block=True)
        test.run()
        if test.error is not None:
            test.logger.log("test_49 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_4a_block_oversize_EA1_EA1(self):
        """A message of EA1_MAX_SIZE + OVER_UNDER sent EA1 -> EA1 must be blocked."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EA1,
                                           MaxMessageSizeBlockOversize.EA1,
                                           "e4a",
                                           message_size=EA1_MAX_SIZE + OVER_UNDER,
                                           expect_block=True)
        test.run()
        if test.error is not None:
            test.logger.log("test_4a test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_4b_block_oversize_EA1_EB1(self):
        """Oversize EA1 -> EB1 is blocked; skipped when INT.A's limit could fire first."""
        if EA1_MAX_SIZE >= INTA_MAX_SIZE:
            self.skipTest("EA1 sending to EB1 may be blocked by EA1 limit and also by INT.A limit. That condition is tested in compound test.")
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EA1,
                                           MaxMessageSizeBlockOversize.EB1,
                                           "e4b",
                                           message_size=EA1_MAX_SIZE + OVER_UNDER,
                                           expect_block=True)
        test.run()
        if test.error is not None:
            test.logger.log("test_4b test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_4c_block_oversize_EB1_INTA(self):
        """Oversize EB1 -> INT.A is blocked; skipped when INT.B's limit could fire first."""
        # NOTE(review): the EB1 guards use a strict '>' while the EA1 guards
        # (test_48/49/4b) use '>='. Confirm the asymmetry is intentional.
        if EB1_MAX_SIZE > INTB_MAX_SIZE:
            self.skipTest("EB1 sending to INT.A may be blocked by EB1 limit and also by INT.B limit. That condition is tested in compound test.")
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EB1,
                                           MaxMessageSizeBlockOversize.INT_A,
                                           "e4c",
                                           message_size=EB1_MAX_SIZE + OVER_UNDER,
                                           expect_block=True)
        test.run()
        if test.error is not None:
            test.logger.log("test_4c test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_4d_block_oversize_EB1_INTB(self):
        """Oversize EB1 -> INT.B is blocked; skipped when INT.B's limit could fire first."""
        if EB1_MAX_SIZE > INTB_MAX_SIZE:
            self.skipTest("EB1 sending to INT.B may be blocked by EB1 limit and also by INT.B limit. That condition is tested in compound test.")
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EB1,
                                           MaxMessageSizeBlockOversize.INT_B,
                                           "e4d",
                                           message_size=EB1_MAX_SIZE + OVER_UNDER,
                                           expect_block=True)
        test.run()
        if test.error is not None:
            test.logger.log("test_4d test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_4e_block_oversize_EB1_EA1(self):
        """Oversize EB1 -> EA1 is blocked; skipped when INT.B's limit could fire first."""
        if EB1_MAX_SIZE > INTB_MAX_SIZE:
            self.skipTest("EB1 sending to EA1 may be blocked by EB1 limit and also by INT.B limit. That condition is tested in compound test.")
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EB1,
                                           MaxMessageSizeBlockOversize.EA1,
                                           "e4e",
                                           message_size=EB1_MAX_SIZE + OVER_UNDER,
                                           expect_block=True)
        test.run()
        if test.error is not None:
            test.logger.log("test_4e test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_4f_block_oversize_EB1_EB1(self):
        """A message of EB1_MAX_SIZE + OVER_UNDER sent EB1 -> EB1 must be blocked."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EB1,
                                           MaxMessageSizeBlockOversize.EB1,
                                           "e4f",
                                           message_size=EB1_MAX_SIZE + OVER_UNDER,
                                           expect_block=True)
        test.run()
        if test.error is not None:
            test.logger.log("test_4f test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
#
# tests under maxMessageSize should not block
#
    def test_50_allow_undersize_INTA_INTA(self):
        """A message under INTA_MAX_SIZE sent INT.A -> INT.A must pass unblocked."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_A,
                                           MaxMessageSizeBlockOversize.INT_A,
                                           "e50",
                                           message_size=INTA_MAX_SIZE - OVER_UNDER,
                                           expect_block=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_50 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_51_allow_undersize_INTA_INTB(self):
        """A message under INTA_MAX_SIZE sent INT.A -> INT.B must pass unblocked."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_A,
                                           MaxMessageSizeBlockOversize.INT_B,
                                           "e51",
                                           message_size=INTA_MAX_SIZE - OVER_UNDER,
                                           expect_block=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_51 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_52_allow_undersize_INTA_EA1(self):
        """A message under INTA_MAX_SIZE sent INT.A -> EA1 must pass unblocked."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_A,
                                           MaxMessageSizeBlockOversize.EA1,
                                           "e52",
                                           message_size=INTA_MAX_SIZE - OVER_UNDER,
                                           expect_block=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_52 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_53_allow_undersize_INTA_EB1(self):
        """A message under INTA_MAX_SIZE sent INT.A -> EB1 must pass unblocked."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_A,
                                           MaxMessageSizeBlockOversize.EB1,
                                           "e53",
                                           message_size=INTA_MAX_SIZE - OVER_UNDER,
                                           expect_block=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_53 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_54_allow_undersize_INTB_INTA(self):
        """A message under INTB_MAX_SIZE sent INT.B -> INT.A must pass unblocked."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_B,
                                           MaxMessageSizeBlockOversize.INT_A,
                                           "e54",
                                           message_size=INTB_MAX_SIZE - OVER_UNDER,
                                           expect_block=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_54 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_55_allow_undersize_INTB_INTB(self):
        """A message under INTB_MAX_SIZE sent INT.B -> INT.B must pass unblocked."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_B,
                                           MaxMessageSizeBlockOversize.INT_B,
                                           "e55",
                                           message_size=INTB_MAX_SIZE - OVER_UNDER,
                                           expect_block=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_55 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_56_allow_undersize_INTB_EA1(self):
        """A message under INTB_MAX_SIZE sent INT.B -> EA1 must pass unblocked."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_B,
                                           MaxMessageSizeBlockOversize.EA1,
                                           "e56",
                                           message_size=INTB_MAX_SIZE - OVER_UNDER,
                                           expect_block=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_56 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_57_allow_undersize_INTB_EB1(self):
        """A message under INTB_MAX_SIZE sent INT.B -> EB1 must pass unblocked."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_B,
                                           MaxMessageSizeBlockOversize.EB1,
                                           "e57",
                                           message_size=INTB_MAX_SIZE - OVER_UNDER,
                                           expect_block=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_57 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_58_allow_undersize_EA1_INTA(self):
        """A message under both EA1 and INT.A limits sent EA1 -> INT.A must pass."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EA1,
                                           MaxMessageSizeBlockOversize.INT_A,
                                           "e58",
                                           message_size=min(EA1_MAX_SIZE, INTA_MAX_SIZE) - OVER_UNDER,
                                           expect_block=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_58 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_59_allow_undersize_EA1_INTB(self):
        """A message under both EA1 and INT.A limits sent EA1 -> INT.B must pass."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EA1,
                                           MaxMessageSizeBlockOversize.INT_B,
                                           "e59",
                                           message_size=min(EA1_MAX_SIZE, INTA_MAX_SIZE) - OVER_UNDER,
                                           expect_block=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_59 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_5a_allow_undersize_EA1_EA1(self):
        """A message under both EA1 and INT.A limits sent EA1 -> EA1 must pass."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EA1,
                                           MaxMessageSizeBlockOversize.EA1,
                                           "e5a",
                                           message_size=min(EA1_MAX_SIZE, INTA_MAX_SIZE) - OVER_UNDER,
                                           expect_block=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_5a test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_5b_allow_undersize_EA1_EB1(self):
        """A message under both EA1 and INT.A limits sent EA1 -> EB1 must pass."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EA1,
                                           MaxMessageSizeBlockOversize.EB1,
                                           "e5b",
                                           message_size=min(EA1_MAX_SIZE, INTA_MAX_SIZE) - OVER_UNDER,
                                           expect_block=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_5b test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_5c_allow_undersize_EB1_INTA(self):
        """A message under both EB1 and INT.B limits sent EB1 -> INT.A must pass."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EB1,
                                           MaxMessageSizeBlockOversize.INT_A,
                                           "e5c",
                                           message_size=min(EB1_MAX_SIZE, INTB_MAX_SIZE) - OVER_UNDER,
                                           expect_block=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_5c test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_5d_allow_undersize_EB1_INTB(self):
        """A message under both EB1 and INT.B limits sent EB1 -> INT.B must pass."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EB1,
                                           MaxMessageSizeBlockOversize.INT_B,
                                           "e5d",
                                           message_size=min(EB1_MAX_SIZE, INTB_MAX_SIZE) - OVER_UNDER,
                                           expect_block=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_5d test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_5e_allow_undersize_EB1_EA1(self):
        """A message under both EB1 and INT.B limits sent EB1 -> EA1 must pass."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EB1,
                                           MaxMessageSizeBlockOversize.EA1,
                                           "e5e",
                                           message_size=min(EB1_MAX_SIZE, INTB_MAX_SIZE) - OVER_UNDER,
                                           expect_block=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_5e test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_5f_allow_undersize_EB1_EB1(self):
        """A message under both EB1 and INT.B limits sent EB1 -> EB1 must pass."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EB1,
                                           MaxMessageSizeBlockOversize.EB1,
                                           "e5f",
                                           message_size=min(EB1_MAX_SIZE, INTB_MAX_SIZE) - OVER_UNDER,
                                           expect_block=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_5f test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
    def test_s32_allow_gt_signed_32bit_max(self):
        """A small (200-byte) message passes the S32 router; presumably S32 is
        configured with a maxMessageSize exceeding signed-32-bit range -- confirm
        against the router configuration created in setUpClass."""
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.S32,
                                           MaxMessageSizeBlockOversize.S32,
                                           "s32",
                                           message_size=200,
                                           expect_block=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_s32 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
# Allow running this module directly; main_module() is the project's
# test-runner helper (defined elsewhere in the test support code).
if __name__ == '__main__':
    unittest.main(main_module())
| ErnieAllen/qpid-dispatch | tests/system_tests_policy_oversize_basic.py | Python | apache-2.0 | 36,740 |
# Borrowed and modified from xbmcswift
import logging
import xbmc
from pulsar.addon import ADDON_ID
class XBMCHandler(logging.StreamHandler):
    """
    A logging handler that forwards records to Kodi/XBMC via C{xbmc.log}.

    Maps Python logging level names to XBMC log level integers.
    """
    # Keys must match logging.LogRecord.levelname values exactly.
    # BUG FIX: the critical entry was keyed 'LOGCRITICAL', but levelname for
    # logging.critical() records is 'CRITICAL', so those records were logged
    # with a None level. The key is now 'CRITICAL'.
    xbmc_levels = {
        'DEBUG': 0,
        'INFO': 2,
        'WARNING': 3,
        'ERROR': 4,
        'CRITICAL': 5,
    }

    def emit(self, record):
        """Format the record and hand it to xbmc.log at the mapped level."""
        # Fall back to 0 (debug) for unmapped level names instead of passing
        # None to xbmc.log.
        xbmc_level = self.xbmc_levels.get(record.levelname, 0)
        xbmc.log(self.format(record), xbmc_level)
def _get_logger():
    """Create and return the addon-wide logger wired to the XBMC handler."""
    addon_logger = logging.getLogger(ADDON_ID)
    addon_logger.setLevel(logging.DEBUG)
    xbmc_handler = XBMCHandler()
    xbmc_handler.setFormatter(logging.Formatter('[%(name)s] %(message)s'))
    addon_logger.addHandler(xbmc_handler)
    return addon_logger
log = _get_logger()
| steeve/plugin.video.pulsar | resources/site-packages/pulsar/logger.py | Python | bsd-3-clause | 680 |
# The MIT License (MIT)
#
# Copyright (c) 2013 Numenta, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from cept import Cept
from version import version as __version__
| ilblackdragon/nupic-hackathon-2014 | pycept/pycept/__init__.py | Python | mit | 1,184 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for large portions of L{twisted.mail}.
"""
import os
import errno
import shutil
import pickle
import StringIO
import email.message
import email.parser
import tempfile
import signal
import time
from hashlib import md5
from zope.interface.verify import verifyClass
from zope.interface import Interface, implementer
from twisted.trial import unittest
from twisted.mail import smtp
from twisted.mail import pop3
from twisted.names import dns
from twisted.internet import protocol
from twisted.internet import defer
from twisted.internet.defer import Deferred
from twisted.internet import reactor
from twisted.internet import interfaces
from twisted.internet import task
from twisted.internet.error import DNSLookupError, CannotListenError
from twisted.internet.error import ProcessDone, ProcessTerminated
from twisted.internet import address
from twisted.python import failure
from twisted.python.filepath import FilePath
from twisted.python import log
from twisted.mail.relaymanager import _AttemptManager
from twisted.test.proto_helpers import MemoryReactorClock, StringTransport
from twisted import mail
import twisted.mail.mail
import twisted.mail.maildir
import twisted.mail.relay
import twisted.mail.relaymanager
import twisted.mail.protocols
import twisted.mail.alias
from twisted.names.error import DNSNameError
from twisted.names.dns import RRHeader, Record_CNAME, Record_MX
from twisted import cred
import twisted.cred.credentials
import twisted.cred.checkers
import twisted.cred.portal
from twisted.test.proto_helpers import LineSendingProtocol
class DomainWithDefaultsTests(unittest.TestCase):
    """
    Tests for L{mail.mail.DomainWithDefaultDict}, a dict-like mapping that
    returns a default value for missing keys.
    """
    def testMethods(self):
        """
        Exercise the full dict-style API: iteration, item access, deletion
        (which exposes the default), update, clear, setdefault, popitem and
        copy.
        """
        d = dict([(x, x + 10) for x in range(10)])
        d = mail.mail.DomainWithDefaultDict(d, 'Default')
        self.assertEqual(len(d), 10)
        self.assertEqual(list(iter(d)), range(10))
        self.assertEqual(list(d.iterkeys()), list(iter(d)))
        items = list(d.iteritems())
        items.sort()
        self.assertEqual(items, [(x, x + 10) for x in range(10)])
        values = list(d.itervalues())
        values.sort()
        self.assertEqual(values, range(10, 20))
        items = d.items()
        items.sort()
        self.assertEqual(items, [(x, x + 10) for x in range(10)])
        values = d.values()
        values.sort()
        self.assertEqual(values, range(10, 20))
        for x in range(10):
            self.assertEqual(d[x], x + 10)
            self.assertEqual(d.get(x), x + 10)
            self.assertTrue(x in d)
        # Deleted keys fall back to the default value on lookup.
        del d[2], d[4], d[6]
        self.assertEqual(len(d), 7)
        self.assertEqual(d[2], 'Default')
        self.assertEqual(d[4], 'Default')
        self.assertEqual(d[6], 'Default')
        d.update({'a': None, 'b': (), 'c': '*'})
        self.assertEqual(len(d), 10)
        self.assertEqual(d['a'], None)
        self.assertEqual(d['b'], ())
        self.assertEqual(d['c'], '*')
        d.clear()
        self.assertEqual(len(d), 0)
        self.assertEqual(d.setdefault('key', 'value'), 'value')
        self.assertEqual(d['key'], 'value')
        self.assertEqual(d.popitem(), ('key', 'value'))
        self.assertEqual(len(d), 0)
        dcopy = d.copy()
        self.assertEqual(d.domains, dcopy.domains)
        self.assertEqual(d.default, dcopy.default)
    def _stringificationTest(self, stringifier):
        """
        Assert that the class name of a L{mail.mail.DomainWithDefaultDict}
        instance and the string-formatted underlying domain dictionary both
        appear in the string produced by the given string-returning function.
        @type stringifier: one-argument callable
        @param stringifier: either C{str} or C{repr}, to be used to get a
            string to make assertions against.
        """
        domain = mail.mail.DomainWithDefaultDict({}, 'Default')
        self.assertIn(domain.__class__.__name__, stringifier(domain))
        domain['key'] = 'value'
        self.assertIn(str({'key': 'value'}), stringifier(domain))
    def test_str(self):
        """
        L{DomainWithDefaultDict.__str__} should return a string including
        the class name and the domain mapping held by the instance.
        """
        self._stringificationTest(str)
    def test_repr(self):
        """
        L{DomainWithDefaultDict.__repr__} should return a string including
        the class name and the domain mapping held by the instance.
        """
        self._stringificationTest(repr)
    def test_has_keyDeprecation(self):
        """
        has_key is now deprecated.
        """
        sut = mail.mail.DomainWithDefaultDict({}, 'Default')
        sut.has_key('anything')
        message = (
            'twisted.mail.mail.DomainWithDefaultDict.has_key was deprecated '
            'in Twisted 16.3.0. Use the `in` keyword instead.'
        )
        warnings = self.flushWarnings(
            [self.test_has_keyDeprecation])
        self.assertEqual(1, len(warnings))
        self.assertEqual(DeprecationWarning, warnings[0]['category'])
        self.assertEqual(message, warnings[0]['message'])
class BounceTests(unittest.TestCase):
    """Tests for L{mail.mail.BounceDomain}, which refuses all mail."""
    def setUp(self):
        self.domain = mail.mail.BounceDomain()
    def testExists(self):
        """Looking up any user raises L{smtp.AddressError}."""
        self.assertRaises(smtp.AddressError, self.domain.exists, "any user")
    def testRelay(self):
        """A bounce domain never agrees to relay."""
        self.assertEqual(
            False,
            self.domain.willRelay("random q emailer", "protocol"))
    def testAddUser(self):
        """Even after addUser, lookups still fail with L{smtp.SMTPBadRcpt}."""
        self.domain.addUser("bob", "password")
        self.assertRaises(smtp.SMTPBadRcpt, self.domain.exists, "bob")
class BounceWithSMTPServerTests(unittest.TestCase):
    """
    Tests for L{twisted.mail.mail.BounceDomain} with
    L{twisted.mail.smtp.SMTPServer}.
    """
    def test_rejected(self):
        """
        Incoming emails to a SMTP server with L{twisted.mail.mail.BounceDomain}
        are rejected.
        """
        service = mail.mail.MailService()
        domain = mail.mail.BounceDomain()
        service.addDomain(b'foo.com', domain)
        factory = mail.protocols.SMTPFactory(service)
        protocol = factory.buildProtocol(None)
        deliverer = mail.protocols.SMTPDomainDelivery(service, None, None)
        protocol.delivery = deliverer
        transport = StringTransport()
        protocol.makeConnection(transport)
        # Drive a minimal SMTP conversation directly through lineReceived.
        protocol.lineReceived(b'HELO baz.net')
        protocol.lineReceived(b'MAIL FROM:<a@baz.net>')
        protocol.lineReceived(b'RCPT TO:<any@foo.com>')
        protocol.lineReceived(b'QUIT')
        self.assertTrue(transport.disconnecting)
        protocol.connectionLost(None)
        # [-2]: the second-to-last reply line is asserted to be the 550
        # rejection for RCPT (the last line presumably answers QUIT).
        self.assertEqual(transport.value().strip().split(b'\r\n')[-2],
                         b'550 Cannot receive for specified address')
class FileMessageTests(unittest.TestCase):
    """
    Tests for L{mail.mail.FileMessage}, which writes received message lines to
    a temporary file and renames it to its final name on end-of-message.
    """
    def setUp(self):
        self.name = "fileMessage.testFile"
        self.final = "final.fileMessage.testFile"
        self.f = open(self.name, 'w')
        self.fp = mail.mail.FileMessage(self.f, self.name, self.final)
    def tearDown(self):
        # Narrowed from bare `except:` clauses, which also swallowed
        # KeyboardInterrupt/SystemExit and could mask real cleanup failures.
        try:
            self.f.close()
        except (OSError, IOError):
            pass
        for path in (self.name, self.final):
            try:
                os.remove(path)
            except OSError:
                # The file may legitimately not exist, depending on which
                # test ran and whether the message was finalized.
                pass
    def testFinalName(self):
        """eomReceived() fires with the final path of the delivered message."""
        return self.fp.eomReceived().addCallback(self._cbFinalName)
    def _cbFinalName(self, result):
        self.assertEqual(result, self.final)
        self.assertTrue(self.f.closed)
        self.assertFalse(os.path.exists(self.name))
    def testContents(self):
        """All received lines end up in the finalized file."""
        contents = "first line\nsecond line\nthird line\n"
        for line in contents.splitlines():
            self.fp.lineReceived(line)
        self.fp.eomReceived()
        with open(self.final) as f:
            self.assertEqual(f.read(), contents)
    def testInterrupted(self):
        """A lost connection discards the partially written message."""
        contents = "first line\nsecond line\n"
        for line in contents.splitlines():
            self.fp.lineReceived(line)
        self.fp.connectionLost()
        self.assertFalse(os.path.exists(self.name))
        self.assertFalse(os.path.exists(self.final))
class MailServiceTests(unittest.TestCase):
    """
    Tests for L{mail.mail.MailService}: factory construction and portal
    lookup.
    """
    def setUp(self):
        self.service = mail.mail.MailService()
    def testFactories(self):
        """Each get*Factory method returns a server factory that builds protocols."""
        f = self.service.getPOP3Factory()
        self.assertTrue(isinstance(f, protocol.ServerFactory))
        self.assertTrue(f.buildProtocol(('127.0.0.1', 12345)), pop3.POP3)
        f = self.service.getSMTPFactory()
        self.assertTrue(isinstance(f, protocol.ServerFactory))
        self.assertTrue(f.buildProtocol(('127.0.0.1', 12345)), smtp.SMTP)
        f = self.service.getESMTPFactory()
        self.assertTrue(isinstance(f, protocol.ServerFactory))
        self.assertTrue(f.buildProtocol(('127.0.0.1', 12345)), smtp.ESMTP)
    def testPortals(self):
        """lookupPortal finds per-domain portals; '' is the default portal."""
        o1 = object()
        o2 = object()
        self.service.portals['domain'] = o1
        self.service.portals[''] = o2
        self.assertTrue(self.service.lookupPortal('domain') is o1)
        self.assertTrue(self.service.defaultPortal() is o2)
class StringListMailboxTests(unittest.TestCase):
    """
    Tests for L{StringListMailbox}, an in-memory only implementation of
    L{pop3.IMailbox}.
    """
    def test_listOneMessage(self):
        """
        L{StringListMailbox.listMessages} returns the length of the message at
        the offset into the mailbox passed to it.
        """
        mailbox = mail.maildir.StringListMailbox(["abc", "ab", "a"])
        self.assertEqual(mailbox.listMessages(0), 3)
        self.assertEqual(mailbox.listMessages(1), 2)
        self.assertEqual(mailbox.listMessages(2), 1)
    def test_listAllMessages(self):
        """
        L{StringListMailbox.listMessages} returns a list of the lengths of all
        messages if not passed an index.
        """
        mailbox = mail.maildir.StringListMailbox(["a", "abc", "ab"])
        self.assertEqual(mailbox.listMessages(), [1, 3, 2])
    def test_getMessage(self):
        """
        L{StringListMailbox.getMessage} returns a file-like object from which
        the contents of the message at the given offset into the mailbox can be
        read.
        """
        mailbox = mail.maildir.StringListMailbox(["foo", "real contents"])
        self.assertEqual(mailbox.getMessage(1).read(), "real contents")
    def test_getUidl(self):
        """
        L{StringListMailbox.getUidl} returns a unique identifier for the
        message at the given offset into the mailbox.
        """
        mailbox = mail.maildir.StringListMailbox(["foo", "bar"])
        self.assertNotEqual(mailbox.getUidl(0), mailbox.getUidl(1))
    def test_deleteMessage(self):
        """
        L{StringListMailbox.deleteMessage} marks a message for deletion causing
        further requests for its length to return 0.
        """
        # Deleted messages keep their index but report zero length.
        mailbox = mail.maildir.StringListMailbox(["foo"])
        mailbox.deleteMessage(0)
        self.assertEqual(mailbox.listMessages(0), 0)
        self.assertEqual(mailbox.listMessages(), [0])
    def test_undeleteMessages(self):
        """
        L{StringListMailbox.undeleteMessages} causes any messages marked for
        deletion to be returned to their original state.
        """
        mailbox = mail.maildir.StringListMailbox(["foo"])
        mailbox.deleteMessage(0)
        mailbox.undeleteMessages()
        self.assertEqual(mailbox.listMessages(0), 3)
        self.assertEqual(mailbox.listMessages(), [3])
    def test_sync(self):
        """
        L{StringListMailbox.sync} causes any messages as marked for deletion to
        be permanently deleted.
        """
        # After sync, undelete is a no-op: the message is gone for good.
        mailbox = mail.maildir.StringListMailbox(["foo"])
        mailbox.deleteMessage(0)
        mailbox.sync()
        mailbox.undeleteMessages()
        self.assertEqual(mailbox.listMessages(0), 0)
        self.assertEqual(mailbox.listMessages(), [0])
class FailingMaildirMailboxAppendMessageTask(mail.maildir._MaildirMailboxAppendMessageTask):
    """
    Fault-injecting append task: each C{_*state} flag, when set to False,
    makes the corresponding os-level call raise an C{OSError} instead of
    performing the operation.
    """
    # When False, osopen raises EPERM.
    _openstate = True
    # When False, oswrite raises ENOSPC.
    _writestate = True
    # When False, osrename raises EPERM.
    _renamestate = True
    def osopen(self, fn, attr, mode):
        if self._openstate:
            return os.open(fn, attr, mode)
        else:
            raise OSError(errno.EPERM, "Faked Permission Problem")
    def oswrite(self, fh, data):
        if self._writestate:
            return os.write(fh, data)
        else:
            raise OSError(errno.ENOSPC, "Faked Space problem")
    def osrename(self, oldname, newname):
        if self._renamestate:
            return os.rename(oldname, newname)
        else:
            raise OSError(errno.EPERM, "Faked Permission Problem")
class _AppendTestMixin(object):
    """
    Mixin for L{MaildirMailbox.appendMessage} test cases which defines a helper
    for serially appending multiple messages to a mailbox.
    """
    def _appendMessages(self, mbox, messages):
        """
        Deliver the given messages one at a time.  Delivery is serialized to
        guarantee a predictable order in the mailbox (overlapped message
        delivery makes no guarantees about which message will appear first).
        @return: a L{Deferred} firing with the list of per-append results.
        """
        results = []
        def append():
            for m in messages:
                d = mbox.appendMessage(m)
                d.addCallback(results.append)
                yield d
        d = task.cooperate(append()).whenDone()
        d.addCallback(lambda ignored: results)
        return d
class MaildirAppendStringTests(unittest.TestCase, _AppendTestMixin):
    """
    Tests for L{MaildirMailbox.appendMessage} when invoked with a C{str}.
    """
    def setUp(self):
        self.d = self.mktemp()
        mail.maildir.initializeMaildir(self.d)
    def _append(self, ignored, mbox):
        """Append 'TEST' and assert the append fails (fault injection active)."""
        d = mbox.appendMessage('TEST')
        return self.assertFailure(d, Exception)
    def _setState(self, ignored, mbox, rename=None, write=None, open=None):
        """
        Change the behavior of future C{rename}, C{write}, or C{open} calls made
        by the mailbox C{mbox}.
        @param rename: If not L{None}, a new value for the C{_renamestate}
            attribute of the mailbox's append factory.  The original value will
            be restored at the end of the test.
        @param write: Like C{rename}, but for the C{_writestate} attribute.
        @param open: Like C{rename}, but for the C{_openstate} attribute.
        """
        if rename is not None:
            self.addCleanup(
                setattr, mbox.AppendFactory, '_renamestate',
                mbox.AppendFactory._renamestate)
            mbox.AppendFactory._renamestate = rename
        if write is not None:
            self.addCleanup(
                setattr, mbox.AppendFactory, '_writestate',
                mbox.AppendFactory._writestate)
            mbox.AppendFactory._writestate = write
        if open is not None:
            self.addCleanup(
                setattr, mbox.AppendFactory, '_openstate',
                mbox.AppendFactory._openstate)
            mbox.AppendFactory._openstate = open
    def test_append(self):
        """
        L{MaildirMailbox.appendMessage} returns a L{Deferred} which fires when
        the message has been added to the end of the mailbox.
        """
        mbox = mail.maildir.MaildirMailbox(self.d)
        mbox.AppendFactory = FailingMaildirMailboxAppendMessageTask
        d = self._appendMessages(mbox, ["X" * i for i in range(1, 11)])
        d.addCallback(self.assertEqual, [None] * 10)
        d.addCallback(self._cbTestAppend, mbox)
        return d
    def _cbTestAppend(self, ignored, mbox):
        """
        Check that the mailbox has the expected number (ten) of messages in it,
        and that each has the expected contents, and that they are in the same
        order as that in which they were appended.
        """
        self.assertEqual(len(mbox.listMessages()), 10)
        self.assertEqual(
            [len(mbox.getMessage(i).read()) for i in range(10)],
            range(1, 11))
        # test in the right order: last to first error location.
        self._setState(None, mbox, rename=False)
        d = self._append(None, mbox)
        d.addCallback(self._setState, mbox, rename=True, write=False)
        d.addCallback(self._append, mbox)
        d.addCallback(self._setState, mbox, write=True, open=False)
        d.addCallback(self._append, mbox)
        d.addCallback(self._setState, mbox, open=True)
        return d
class MaildirAppendFileTests(unittest.TestCase, _AppendTestMixin):
    """
    Tests for L{MaildirMailbox.appendMessage} when invoked with a file-like
    object rather than a C{str}.
    """
    def setUp(self):
        self.d = self.mktemp()
        mail.maildir.initializeMaildir(self.d)
    def test_append(self):
        """
        L{MaildirMailbox.appendMessage} returns a L{Deferred} which fires when
        the message has been added to the end of the mailbox.
        """
        mbox = mail.maildir.MaildirMailbox(self.d)
        messages = []
        for i in xrange(1, 11):
            # Each message is a seekable temporary file of i 'X' bytes.
            temp = tempfile.TemporaryFile()
            temp.write("X" * i)
            temp.seek(0, 0)
            messages.append(temp)
            self.addCleanup(temp.close)
        d = self._appendMessages(mbox, messages)
        d.addCallback(self._cbTestAppend, mbox)
        return d
    def _cbTestAppend(self, result, mbox):
        """
        Check that the mailbox has the expected number (ten) of messages in it,
        and that each has the expected contents, and that they are in the same
        order as that in which they were appended.
        """
        self.assertEqual(len(mbox.listMessages()), 10)
        self.assertEqual(
            [len(mbox.getMessage(i).read()) for i in range(10)],
            range(1, 11))
class MaildirTests(unittest.TestCase):
    """
    Tests for maildir directory initialization, name generation and the
    L{MaildirMailbox} implementation of L{IMailbox}.
    """
    def setUp(self):
        self.d = self.mktemp()
        mail.maildir.initializeMaildir(self.d)
    def tearDown(self):
        shutil.rmtree(self.d)
    def testInitializer(self):
        """initializeMaildir creates new/cur/tmp and a .Trash with the same layout."""
        d = self.d
        trash = os.path.join(d, '.Trash')
        self.assertTrue(os.path.exists(d) and os.path.isdir(d))
        self.assertTrue(os.path.exists(os.path.join(d, 'new')))
        self.assertTrue(os.path.exists(os.path.join(d, 'cur')))
        self.assertTrue(os.path.exists(os.path.join(d, 'tmp')))
        self.assertTrue(os.path.isdir(os.path.join(d, 'new')))
        self.assertTrue(os.path.isdir(os.path.join(d, 'cur')))
        self.assertTrue(os.path.isdir(os.path.join(d, 'tmp')))
        self.assertTrue(os.path.exists(os.path.join(trash, 'new')))
        self.assertTrue(os.path.exists(os.path.join(trash, 'cur')))
        self.assertTrue(os.path.exists(os.path.join(trash, 'tmp')))
        self.assertTrue(os.path.isdir(os.path.join(trash, 'new')))
        self.assertTrue(os.path.isdir(os.path.join(trash, 'cur')))
        self.assertTrue(os.path.isdir(os.path.join(trash, 'tmp')))
    def test_nameGenerator(self):
        """
        Each call to L{_MaildirNameGenerator.generate} returns a unique
        string suitable for use as the basename of a new message file.  The
        names are ordered such that those generated earlier sort less than
        those generated later.
        """
        clock = task.Clock()
        clock.advance(0.05)
        generator = mail.maildir._MaildirNameGenerator(clock)
        firstName = generator.generate()
        clock.advance(0.05)
        secondName = generator.generate()
        self.assertTrue(firstName < secondName)
    def test_mailbox(self):
        """
        Exercise the methods of L{IMailbox} as implemented by
        L{MaildirMailbox}.
        """
        j = os.path.join
        n = mail.maildir._generateMaildirName
        msgs = [j(b, n()) for b in ('cur', 'new') for x in range(5)]
        # Toss a few files into the mailbox
        i = 1
        for f in msgs:
            with open(j(self.d, f), 'w') as fObj:
                fObj.write('x' * i)
            i = i + 1
        mb = mail.maildir.MaildirMailbox(self.d)
        self.assertEqual(mb.listMessages(), range(1, 11))
        self.assertEqual(mb.listMessages(1), 2)
        self.assertEqual(mb.listMessages(5), 6)
        self.assertEqual(mb.getMessage(6).read(), 'x' * 7)
        self.assertEqual(mb.getMessage(1).read(), 'x' * 2)
        # All UIDLs must be distinct.
        d = {}
        for i in range(10):
            u = mb.getUidl(i)
            self.assertFalse(u in d)
            d[u] = None
        # Deleting moves the file into .Trash; undelete restores it.
        p, f = os.path.split(msgs[5])
        mb.deleteMessage(5)
        self.assertEqual(mb.listMessages(5), 0)
        self.assertTrue(os.path.exists(j(self.d, '.Trash', 'cur', f)))
        self.assertFalse(os.path.exists(j(self.d, msgs[5])))
        mb.undeleteMessages()
        self.assertEqual(mb.listMessages(5), 6)
        self.assertFalse(os.path.exists(j(self.d, '.Trash', 'cur', f)))
        self.assertTrue(os.path.exists(j(self.d, msgs[5])))
class AbstractMaildirDomainTests(unittest.TestCase):
    """
    Tests for L{twisted.mail.maildir.AbstractMaildirDomain}.
    """
    def test_interface(self):
        """
        L{maildir.AbstractMaildirDomain} implements L{mail.IAliasableDomain}.
        """
        # verifyClass raises if the interface is not correctly implemented.
        verifyClass(mail.mail.IAliasableDomain,
                    mail.maildir.AbstractMaildirDomain)
class MaildirDirdbmDomainTests(unittest.TestCase):
"""
Tests for L{MaildirDirdbmDomain}.
"""
    def setUp(self):
        """
        Create a temporary L{MaildirDirdbmDomain} and parent
        L{MailService} before running each test.
        """
        # The domain stores credentials in a dbm file rooted at self.P.
        self.P = self.mktemp()
        self.S = mail.mail.MailService()
        self.D = mail.maildir.MaildirDirdbmDomain(self.S, self.P)
    def tearDown(self):
        """
        Remove the temporary C{maildir} directory when the test has
        finished.
        """
        # Recursive removal: the domain creates per-user subdirectories.
        shutil.rmtree(self.P)
    def test_addUser(self):
        """
        L{MaildirDirdbmDomain.addUser} accepts a user and password
        argument. It stores those in a C{dbm} dictionary
        attribute and creates a directory for each user.
        """
        toAdd = (('user1', 'pwd1'), ('user2', 'pwd2'), ('user3', 'pwd3'))
        for (u, p) in toAdd:
            self.D.addUser(u, p)
        for (u, p) in toAdd:
            self.assertTrue(u in self.D.dbm)
            # Passwords are stored as-is in the dbm mapping.
            self.assertEqual(self.D.dbm[u], p)
            self.assertTrue(os.path.exists(os.path.join(self.P, u)))
def test_credentials(self):
"""
L{MaildirDirdbmDomain.getCredentialsCheckers} initializes and
returns one L{ICredentialsChecker} checker by default.
"""
creds = self.D.getCredentialsCheckers()
self.assertEqual(len(creds), 1)
self.assertTrue(cred.checkers.ICredentialsChecker.providedBy(creds[0]))
self.assertTrue(cred.credentials.IUsernamePassword in creds[0].credentialInterfaces)
def test_requestAvatar(self):
"""
L{MaildirDirdbmDomain.requestAvatar} raises L{NotImplementedError}
unless it is supplied with an L{pop3.IMailbox} interface.
When called with an L{pop3.IMailbox}, it returns a 3-tuple
containing L{pop3.IMailbox}, an implementation of that interface
and a NOOP callable.
"""
class ISomething(Interface):
pass
self.D.addUser('user', 'password')
self.assertRaises(
NotImplementedError,
self.D.requestAvatar, 'user', None, ISomething
)
t = self.D.requestAvatar('user', None, pop3.IMailbox)
self.assertEqual(len(t), 3)
self.assertTrue(t[0] is pop3.IMailbox)
self.assertTrue(pop3.IMailbox.providedBy(t[1]))
t[2]()
def test_requestAvatarId(self):
"""
L{DirdbmDatabase.requestAvatarId} raises L{UnauthorizedLogin} if
supplied with invalid user credentials.
When called with valid credentials, L{requestAvatarId} returns
the username associated with the supplied credentials.
"""
self.D.addUser('user', 'password')
database = self.D.getCredentialsCheckers()[0]
creds = cred.credentials.UsernamePassword('user', 'wrong password')
self.assertRaises(
cred.error.UnauthorizedLogin,
database.requestAvatarId, creds
)
creds = cred.credentials.UsernamePassword('user', 'password')
self.assertEqual(database.requestAvatarId(creds), 'user')
def test_userDirectory(self):
"""
L{MaildirDirdbmDomain.userDirectory} is supplied with a user name
and returns the path to that user's maildir subdirectory.
Calling L{MaildirDirdbmDomain.userDirectory} with a
non-existent user returns the 'postmaster' directory if there
is a postmaster or returns L{None} if there is no postmaster.
"""
self.D.addUser('user', 'password')
self.assertEqual(self.D.userDirectory('user'),
os.path.join(self.D.root, 'user'))
self.D.postmaster = False
self.assertIdentical(self.D.userDirectory('nouser'), None)
self.D.postmaster = True
self.assertEqual(self.D.userDirectory('nouser'),
os.path.join(self.D.root, 'postmaster'))
@implementer(mail.mail.IAliasableDomain)
class StubAliasableDomain(object):
    """
    Minimal testable implementation of IAliasableDomain.
    """
    def exists(self, user):
        """
        No test coverage for invocations of this method on domain objects,
        so we just won't implement it.
        """
        raise NotImplementedError()

    def addUser(self, user, password):
        """
        No test coverage for invocations of this method on domain objects,
        so we just won't implement it.
        """
        raise NotImplementedError()

    def getCredentialsCheckers(self):
        """
        This needs to succeed in order for other tests to complete
        successfully, but we don't actually assert anything about its
        behavior.  Return an empty list.  Sometime later we should return
        something else and assert that a portal got set up properly.
        """
        return []

    def setAliasGroup(self, aliases):
        """
        Just record the value so the test can check it later.
        """
        self.aliasGroup = aliases
class ServiceDomainTests(unittest.TestCase):
    """
    Tests for L{mail.protocols.DomainDeliveryBase}: header generation and
    sender/recipient validation against a L{MailService}.
    """
    def setUp(self):
        # A delivery object wired to a service with one maildir domain
        # ('test.domain') containing a single user.
        self.S = mail.mail.MailService()
        self.D = mail.protocols.DomainDeliveryBase(self.S, None)
        self.D.service = self.S
        self.D.protocolName = 'TEST'
        self.D.host = 'hostname'
        self.tmpdir = self.mktemp()
        domain = mail.maildir.MaildirDirdbmDomain(self.S, self.tmpdir)
        domain.addUser('user', 'password')
        self.S.addDomain('test.domain', domain)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def testAddAliasableDomain(self):
        """
        Test that adding an IAliasableDomain to a mail service properly sets
        up alias group references and such.
        """
        aliases = object()
        domain = StubAliasableDomain()
        self.S.aliases = aliases
        self.S.addDomain('example.com', domain)
        self.assertIdentical(domain.aliasGroup, aliases)

    def testReceivedHeader(self):
        # The generated Received: header must parse as exactly one valid
        # RFC 2822 header.
        hdr = self.D.receivedHeader(
            ('remotehost', '123.232.101.234'),
            smtp.Address('<someguy@someplace>'),
            ['user@host.name']
        )
        fp = StringIO.StringIO(hdr)
        emailParser = email.parser.Parser()
        m = emailParser.parse(fp)
        self.assertEqual(len(m.items()), 1)
        self.assertIn('Received', m)

    def testValidateTo(self):
        # A recipient in a known domain validates to a callable.
        user = smtp.User('user@test.domain', 'helo', None, 'wherever@whatever')
        return defer.maybeDeferred(self.D.validateTo, user
            ).addCallback(self._cbValidateTo
            )

    def _cbValidateTo(self, result):
        self.assertTrue(callable(result))

    def testValidateToBadUsername(self):
        # Unknown user in a known domain is rejected with SMTPBadRcpt.
        user = smtp.User('resu@test.domain', 'helo', None, 'wherever@whatever')
        return self.assertFailure(
            defer.maybeDeferred(self.D.validateTo, user),
            smtp.SMTPBadRcpt)

    def testValidateToBadDomain(self):
        # Unknown domain is rejected with SMTPBadRcpt.
        user = smtp.User('user@domain.test', 'helo', None, 'wherever@whatever')
        return self.assertFailure(
            defer.maybeDeferred(self.D.validateTo, user),
            smtp.SMTPBadRcpt)

    def testValidateFrom(self):
        # A plain sender passes validation unchanged, regardless of the
        # HELO peer address; an empty origin is also accepted.
        helo = ('hostname', '127.0.0.1')
        origin = smtp.Address('<user@hostname>')
        self.assertTrue(self.D.validateFrom(helo, origin) is origin)
        helo = ('hostname', '1.2.3.4')
        origin = smtp.Address('<user@hostname>')
        self.assertTrue(self.D.validateFrom(helo, origin) is origin)
        helo = ('hostname', '1.2.3.4')
        origin = smtp.Address('<>')
        self.assertTrue(self.D.validateFrom(helo, origin) is origin)
        # Without a HELO, the sender must be rejected.
        self.assertRaises(
            smtp.SMTPBadSender,
            self.D.validateFrom, None, origin
        )
class VirtualPOP3Tests(unittest.TestCase):
    """
    Tests for L{mail.protocols.VirtualPOP3} authentication via APOP and
    PASS against a maildir-backed domain.
    """
    def setUp(self):
        self.tmpdir = self.mktemp()
        self.S = mail.mail.MailService()
        self.D = mail.maildir.MaildirDirdbmDomain(self.S, self.tmpdir)
        self.D.addUser('user', 'password')
        self.S.addDomain('test.domain', self.D)

        portal = cred.portal.Portal(self.D)
        # Python 2 idiom: map() used eagerly for its side effect of
        # registering every checker with the portal.
        map(portal.registerChecker, self.D.getCredentialsCheckers())
        self.S.portals[''] = self.S.portals['test.domain'] = portal

        self.P = mail.protocols.VirtualPOP3()
        self.P.service = self.S
        # Fixed APOP challenge so digests are predictable in the tests.
        self.P.magic = '<unit test magic>'

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def testAuthenticateAPOP(self):
        # Correct APOP response: md5(challenge + password).
        resp = md5(self.P.magic + 'password').hexdigest()
        return self.P.authenticateUserAPOP('user', resp
            ).addCallback(self._cbAuthenticateAPOP
            )

    def _cbAuthenticateAPOP(self, result):
        # Successful login yields the usual (interface, avatar, logout)
        # 3-tuple.
        self.assertEqual(len(result), 3)
        self.assertEqual(result[0], pop3.IMailbox)
        self.assertTrue(pop3.IMailbox.providedBy(result[1]))
        result[2]()

    def testAuthenticateIncorrectUserAPOP(self):
        resp = md5(self.P.magic + 'password').hexdigest()
        return self.assertFailure(
            self.P.authenticateUserAPOP('resu', resp),
            cred.error.UnauthorizedLogin)

    def testAuthenticateIncorrectResponseAPOP(self):
        resp = md5('wrong digest').hexdigest()
        return self.assertFailure(
            self.P.authenticateUserAPOP('user', resp),
            cred.error.UnauthorizedLogin)

    def testAuthenticatePASS(self):
        return self.P.authenticateUserPASS('user', 'password'
            ).addCallback(self._cbAuthenticatePASS
            )

    def _cbAuthenticatePASS(self, result):
        self.assertEqual(len(result), 3)
        self.assertEqual(result[0], pop3.IMailbox)
        self.assertTrue(pop3.IMailbox.providedBy(result[1]))
        result[2]()

    def testAuthenticateBadUserPASS(self):
        return self.assertFailure(
            self.P.authenticateUserPASS('resu', 'password'),
            cred.error.UnauthorizedLogin)

    def testAuthenticateBadPasswordPASS(self):
        return self.assertFailure(
            self.P.authenticateUserPASS('user', 'wrong password'),
            cred.error.UnauthorizedLogin)
class empty(smtp.User):
    """
    A blank L{smtp.User} whose attributes the tests fill in by hand;
    the no-op __init__ skips the parent's required constructor arguments.
    """
    def __init__(self):
        pass
class RelayTests(unittest.TestCase):
    """
    Tests for L{mail.relay.DomainQueuer}'s relaying policy based on the
    peer address.
    """
    def testExists(self):
        service = mail.mail.MailService()
        domain = mail.relay.DomainQueuer(service)
        # Peers allowed to relay: local UNIX sockets and loopback TCP.
        doRelay = [
            address.UNIXAddress('/var/run/mail-relay'),
            address.IPv4Address('TCP', '127.0.0.1', 12345),
        ]
        # Peers that must be refused: non-loopback TCP addresses.
        dontRelay = [
            address.IPv4Address('TCP', '192.168.2.1', 62),
            address.IPv4Address('TCP', '1.2.3.4', 1943),
        ]
        for peer in doRelay:
            user = empty()
            user.orig = 'user@host'
            user.dest = 'tsoh@resu'
            user.protocol = empty()
            user.protocol.transport = empty()
            # The lambda is called before the next loop iteration rebinds
            # ``peer``, so late binding is not a problem here.
            user.protocol.transport.getPeer = lambda: peer
            self.assertTrue(callable(domain.exists(user)))
        for peer in dontRelay:
            user = empty()
            user.orig = 'some@place'
            user.protocol = empty()
            user.protocol.transport = empty()
            user.protocol.transport.getPeer = lambda: peer
            user.dest = 'who@cares'
            self.assertRaises(smtp.SMTPBadRcpt, domain.exists, user)
class RelayerTests(unittest.TestCase):
    """
    Tests for L{mail.relay.RelayerMixin}'s SMTP envelope accessors
    (C{getMailFrom}/C{getMailTo}/C{getMailData}) as messages are consumed
    by C{sentMail}.
    """
    def setUp(self):
        """
        Create ten header/data message file pairs on disk and load them
        into a L{RelayerMixin}.
        """
        self.tmpdir = self.mktemp()
        os.mkdir(self.tmpdir)
        self.messageFiles = []
        for i in range(10):
            name = os.path.join(self.tmpdir, 'body-%d' % (i,))
            # Header file ('-H'): pickled [sender, recipient] envelope.
            with open(name + '-H', 'w') as f:
                pickle.dump(['from-%d' % (i,), 'to-%d' % (i,)], f)
            # Data file ('-D'): the message body -- just the path itself.
            # This handle was previously left open (only seek(0, 0)'d);
            # use a context manager so it is flushed and closed
            # deterministically before loadMessages() reads by name.
            with open(name + '-D', 'w') as f:
                f.write(name)
            self.messageFiles.append(name)

        self.R = mail.relay.RelayerMixin()
        self.R.loadMessages(self.messageFiles)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def testMailFrom(self):
        # Each sentMail() call advances to the next queued message;
        # after the last one the accessor returns None.
        for i in range(10):
            self.assertEqual(self.R.getMailFrom(), 'from-%d' % (i,))
            self.R.sentMail(250, None, None, None, None)
        self.assertEqual(self.R.getMailFrom(), None)

    def testMailTo(self):
        for i in range(10):
            self.assertEqual(self.R.getMailTo(), ['to-%d' % (i,)])
            self.R.sentMail(250, None, None, None, None)
        self.assertEqual(self.R.getMailTo(), None)

    def testMailData(self):
        for i in range(10):
            name = os.path.join(self.tmpdir, 'body-%d' % (i,))
            self.assertEqual(self.R.getMailData().read(), name)
            self.R.sentMail(250, None, None, None, None)
        self.assertEqual(self.R.getMailData(), None)
class Manager:
    """
    Fake relay manager that simply records every notification it
    receives so a test can inspect the log afterwards.
    """
    def __init__(self):
        # One independent log per notification type.
        self.success, self.failure, self.done = [], [], []

    def notifySuccess(self, factory, message):
        """
        Record a successfully relayed (factory, message) pair.
        """
        self.success.append((factory, message))

    def notifyFailure(self, factory, message):
        """
        Record a (factory, message) pair whose relaying failed.
        """
        self.failure.append((factory, message))

    def notifyDone(self, factory):
        """
        Record a factory whose relaying attempt has completed.
        """
        self.done.append(factory)
class ManagedRelayerTests(unittest.TestCase):
    """
    Tests for L{mail.relaymanager.ManagedRelayerMixin}: each sentMail()
    outcome must be reported to the manager, and a lost connection must be
    reported as done.
    """
    def setUp(self):
        self.manager = Manager()
        # NOTE(review): range() returns a list only on Python 2; the [:]
        # copies below rely on that.
        self.messages = range(0, 20, 2)
        self.factory = object()
        self.relay = mail.relaymanager.ManagedRelayerMixin(self.manager)
        self.relay.messages = self.messages[:]
        self.relay.names = self.messages[:]
        self.relay.factory = self.factory

    def testSuccessfulSentMail(self):
        # 250 => every message is reported as a success, in order.
        for i in self.messages:
            self.relay.sentMail(250, None, None, None, None)

        self.assertEqual(
            self.manager.success,
            [(self.factory, m) for m in self.messages]
        )

    def testFailedSentMail(self):
        # 550 => every message is reported as a failure, in order.
        for i in self.messages:
            self.relay.sentMail(550, None, None, None, None)

        self.assertEqual(
            self.manager.failure,
            [(self.factory, m) for m in self.messages]
        )

    def testConnectionLost(self):
        self.relay.connectionLost(failure.Failure(Exception()))
        self.assertEqual(self.manager.done, [self.factory])
class DirectoryQueueTests(unittest.TestCase):
    """
    Tests for L{mail.relaymanager.Queue}: waiting/relaying state
    transitions, message completion, and envelope retrieval.
    """
    def setUp(self):
        # This is almost a test case itself.
        self.tmpdir = self.mktemp()
        os.mkdir(self.tmpdir)
        self.queue = mail.relaymanager.Queue(self.tmpdir)
        self.queue.noisy = False
        # Populate the queue with 25 messages, each with a pickled header
        # and a one-line body.
        for m in range(25):
            hdrF, msgF = self.queue.createNewMessage()
            with hdrF:
                pickle.dump(['header', m], hdrF)
            msgF.lineReceived('body: %d' % (m,))
            msgF.eomReceived()
        self.queue.readDirectory()

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def testWaiting(self):
        # setRelaying removes a message from the waiting set; setWaiting
        # puts it back.
        self.assertTrue(self.queue.hasWaiting())
        self.assertEqual(len(self.queue.getWaiting()), 25)

        waiting = self.queue.getWaiting()
        self.queue.setRelaying(waiting[0])
        self.assertEqual(len(self.queue.getWaiting()), 24)

        self.queue.setWaiting(waiting[0])
        self.assertEqual(len(self.queue.getWaiting()), 25)

    def testRelaying(self):
        # Waiting and relayed counts are complementary while moving every
        # message over.
        for m in self.queue.getWaiting():
            self.queue.setRelaying(m)
            self.assertEqual(
                len(self.queue.getRelayed()),
                25 - len(self.queue.getWaiting())
            )

        self.assertFalse(self.queue.hasWaiting())

        relayed = self.queue.getRelayed()
        self.queue.setWaiting(relayed[0])
        self.assertEqual(len(self.queue.getWaiting()), 1)
        self.assertEqual(len(self.queue.getRelayed()), 24)

    def testDone(self):
        # done() removes the message from both sets permanently.
        msg = self.queue.getWaiting()[0]
        self.queue.setRelaying(msg)
        self.queue.done(msg)

        self.assertEqual(len(self.queue.getWaiting()), 24)
        self.assertEqual(len(self.queue.getRelayed()), 0)

        self.assertFalse(msg in self.queue.getWaiting())
        self.assertFalse(msg in self.queue.getRelayed())

    def testEnvelope(self):
        # Each stored envelope must round-trip back through getEnvelope.
        envelopes = []
        for msg in self.queue.getWaiting():
            envelopes.append(self.queue.getEnvelope(msg))

        envelopes.sort()
        for i in range(25):
            self.assertEqual(
                envelopes.pop(0),
                ['header', i]
            )
from twisted.names import server
from twisted.names import client
from twisted.names import common
class TestAuthority(common.ResolverBase):
    """
    In-memory DNS authority: answers MX queries from the C{addresses}
    dict (hostname -> list of exchange names, all with preference 0) and
    fails everything else with L{dns.DomainError}.
    """
    def __init__(self):
        common.ResolverBase.__init__(self)
        self.addresses = {}

    def _lookup(self, name, cls, type, timeout = None):
        if name in self.addresses and type == dns.MX:
            results = []
            for a in self.addresses[name]:
                hdr = dns.RRHeader(
                    name, dns.MX, dns.IN, 60, dns.Record_MX(0, a)
                )
                results.append(hdr)
            # (answers, authority, additional) with empty auth/additional.
            return defer.succeed((results, [], []))
        return defer.fail(failure.Failure(dns.DomainError(name)))
def setUpDNS(self):
    """
    Start a local DNS server backed by a L{TestAuthority} and point a
    resolver at it.  Listens on an ephemeral TCP port and retries until
    the matching UDP port is also free, so both transports share one
    port number.  Sets C{self.auth}, C{self.port}, C{self.udpPort} and
    C{self.resolver} on the test case passed in as C{self}.
    """
    self.auth = TestAuthority()
    factory = server.DNSServerFactory([self.auth])
    protocol = dns.DNSDatagramProtocol(factory)
    while 1:
        # Let the OS pick a free TCP port, then try to grab the same
        # number for UDP; on collision release TCP and try again.
        self.port = reactor.listenTCP(0, factory, interface='127.0.0.1')
        portNumber = self.port.getHost().port

        try:
            self.udpPort = reactor.listenUDP(portNumber, protocol, interface='127.0.0.1')
        except CannotListenError:
            self.port.stopListening()
        else:
            break
    self.resolver = client.Resolver(servers=[('127.0.0.1', portNumber)])
def tearDownDNS(self):
    """
    Tear down the DNS fixture created by L{setUpDNS}: stop the TCP and
    UDP listeners and best-effort cancel any pending resolver timeout.

    @return: a L{defer.DeferredList} firing when both ports have stopped
        listening.
    """
    dl = []
    dl.append(defer.maybeDeferred(self.port.stopListening))
    dl.append(defer.maybeDeferred(self.udpPort.stopListening))
    try:
        self.resolver._parseCall.cancel()
    except Exception:
        # Best-effort: the resolver may have no pending _parseCall
        # (AttributeError) or the call may already have fired or been
        # cancelled.  Previously a bare ``except:``, which would also
        # have swallowed SystemExit/KeyboardInterrupt.
        pass
    return defer.DeferredList(dl)
class MXTests(unittest.TestCase):
    """
    Tests for L{mail.relaymanager.MXCalculator}.
    """
    def setUp(self):
        setUpDNS(self)
        # A deterministic clock so bad-MX expiry can be advanced manually.
        self.clock = task.Clock()
        self.mx = mail.relaymanager.MXCalculator(self.resolver, self.clock)

    def tearDown(self):
        return tearDownDNS(self)

    def test_defaultClock(self):
        """
        L{MXCalculator}'s default clock is C{twisted.internet.reactor}.
        """
        self.assertIdentical(
            mail.relaymanager.MXCalculator(self.resolver).clock,
            reactor)

    def testSimpleSuccess(self):
        self.auth.addresses['test.domain'] = ['the.email.test.domain']
        return self.mx.getMX('test.domain').addCallback(self._cbSimpleSuccess)

    def _cbSimpleSuccess(self, mx):
        # TestAuthority always answers with preference 0.
        self.assertEqual(mx.preference, 0)
        self.assertEqual(str(mx.name), 'the.email.test.domain')

    def testSimpleFailure(self):
        # With fallback disabled, a lookup miss surfaces as IOError.
        self.mx.fallbackToDomain = False
        return self.assertFailure(self.mx.getMX('test.domain'), IOError)

    def testSimpleFailureWithFallback(self):
        # With fallback enabled, the A-record fallback also fails, giving
        # DNSLookupError.
        return self.assertFailure(self.mx.getMX('test.domain'), DNSLookupError)

    def _exchangeTest(self, domain, records, correctMailExchange):
        """
        Issue an MX request for the given domain and arrange for it to be
        responded to with the given records.  Verify that the resulting mail
        exchange is the indicated host.
        @type domain: C{str}
        @type records: C{list} of L{RRHeader}
        @type correctMailExchange: C{str}
        @rtype: L{Deferred}
        """
        class DummyResolver(object):
            def lookupMailExchange(self, name):
                if name == domain:
                    return defer.succeed((
                        records,
                        [],
                        []))
                return defer.fail(DNSNameError(domain))

        self.mx.resolver = DummyResolver()
        d = self.mx.getMX(domain)
        def gotMailExchange(record):
            self.assertEqual(str(record.name), correctMailExchange)
        d.addCallback(gotMailExchange)
        return d

    def test_mailExchangePreference(self):
        """
        The MX record with the lowest preference is returned by
        L{MXCalculator.getMX}.
        """
        domain = "example.com"
        good = "good.example.com"
        bad = "bad.example.com"

        records = [
            RRHeader(name=domain,
                     type=Record_MX.TYPE,
                     payload=Record_MX(1, bad)),
            RRHeader(name=domain,
                     type=Record_MX.TYPE,
                     payload=Record_MX(0, good)),
            RRHeader(name=domain,
                     type=Record_MX.TYPE,
                     payload=Record_MX(2, bad))]
        return self._exchangeTest(domain, records, good)

    def test_badExchangeExcluded(self):
        """
        L{MXCalculator.getMX} returns the MX record with the lowest preference
        which is not also marked as bad.
        """
        domain = "example.com"
        good = "good.example.com"
        bad = "bad.example.com"

        records = [
            RRHeader(name=domain,
                     type=Record_MX.TYPE,
                     payload=Record_MX(0, bad)),
            RRHeader(name=domain,
                     type=Record_MX.TYPE,
                     payload=Record_MX(1, good))]
        self.mx.markBad(bad)
        return self._exchangeTest(domain, records, good)

    def test_fallbackForAllBadExchanges(self):
        """
        L{MXCalculator.getMX} returns the MX record with the lowest preference
        if all the MX records in the response have been marked bad.
        """
        domain = "example.com"
        bad = "bad.example.com"
        worse = "worse.example.com"

        records = [
            RRHeader(name=domain,
                     type=Record_MX.TYPE,
                     payload=Record_MX(0, bad)),
            RRHeader(name=domain,
                     type=Record_MX.TYPE,
                     payload=Record_MX(1, worse))]
        self.mx.markBad(bad)
        self.mx.markBad(worse)
        return self._exchangeTest(domain, records, bad)

    def test_badExchangeExpires(self):
        """
        L{MXCalculator.getMX} returns the MX record with the lowest preference
        if it was last marked bad longer than L{MXCalculator.timeOutBadMX}
        seconds ago.
        """
        domain = "example.com"
        good = "good.example.com"
        previouslyBad = "bad.example.com"

        records = [
            RRHeader(name=domain,
                     type=Record_MX.TYPE,
                     payload=Record_MX(0, previouslyBad)),
            RRHeader(name=domain,
                     type=Record_MX.TYPE,
                     payload=Record_MX(1, good))]
        self.mx.markBad(previouslyBad)
        # Advance past the bad-MX timeout so the mark expires and the
        # lowest-preference exchange becomes eligible again.
        self.clock.advance(self.mx.timeOutBadMX)
        return self._exchangeTest(domain, records, previouslyBad)

    def test_goodExchangeUsed(self):
        """
        L{MXCalculator.getMX} returns the MX record with the lowest preference
        if it was marked good after it was marked bad.
        """
        domain = "example.com"
        good = "good.example.com"
        previouslyBad = "bad.example.com"

        records = [
            RRHeader(name=domain,
                     type=Record_MX.TYPE,
                     payload=Record_MX(0, previouslyBad)),
            RRHeader(name=domain,
                     type=Record_MX.TYPE,
                     payload=Record_MX(1, good))]
        self.mx.markBad(previouslyBad)
        self.mx.markGood(previouslyBad)
        self.clock.advance(self.mx.timeOutBadMX)
        return self._exchangeTest(domain, records, previouslyBad)

    def test_successWithoutResults(self):
        """
        If an MX lookup succeeds but the result set is empty,
        L{MXCalculator.getMX} should try to look up an I{A} record for the
        requested name and call back its returned Deferred with that
        address.
        """
        ip = '1.2.3.4'
        domain = 'example.org'

        class DummyResolver(object):
            """
            Fake resolver which will respond to an MX lookup with an empty
            result set.
            @ivar mx: A dictionary mapping hostnames to three-tuples of
                results to be returned from I{MX} lookups.
            @ivar a: A dictionary mapping hostnames to addresses to be
                returned from I{A} lookups.
            """
            mx = {domain: ([], [], [])}
            a = {domain: ip}

            def lookupMailExchange(self, domain):
                return defer.succeed(self.mx[domain])

            def getHostByName(self, domain):
                return defer.succeed(self.a[domain])

        self.mx.resolver = DummyResolver()
        d = self.mx.getMX(domain)
        d.addCallback(self.assertEqual, Record_MX(name=ip))
        return d

    def test_failureWithSuccessfulFallback(self):
        """
        Test that if the MX record lookup fails, fallback is enabled, and an A
        record is available for the name, then the Deferred returned by
        L{MXCalculator.getMX} ultimately fires with a Record_MX instance which
        gives the address in the A record for the name.
        """
        class DummyResolver(object):
            """
            Fake resolver which will fail an MX lookup but then succeed a
            getHostByName call.
            """
            def lookupMailExchange(self, domain):
                return defer.fail(DNSNameError())

            def getHostByName(self, domain):
                return defer.succeed("1.2.3.4")

        self.mx.resolver = DummyResolver()
        d = self.mx.getMX("domain")
        d.addCallback(self.assertEqual, Record_MX(name="1.2.3.4"))
        return d

    def test_cnameWithoutGlueRecords(self):
        """
        If an MX lookup returns a single CNAME record as a result, MXCalculator
        will perform an MX lookup for the canonical name indicated and return
        the MX record which results.
        """
        alias = "alias.example.com"
        canonical = "canonical.example.com"
        exchange = "mail.example.com"

        class DummyResolver(object):
            """
            Fake resolver which will return a CNAME for an MX lookup of a name
            which is an alias and an MX for an MX lookup of the canonical name.
            """
            def lookupMailExchange(self, domain):
                if domain == alias:
                    return defer.succeed((
                        [RRHeader(name=domain,
                                  type=Record_CNAME.TYPE,
                                  payload=Record_CNAME(canonical))],
                        [], []))
                elif domain == canonical:
                    return defer.succeed((
                        [RRHeader(name=domain,
                                  type=Record_MX.TYPE,
                                  payload=Record_MX(0, exchange))],
                        [], []))
                else:
                    return defer.fail(DNSNameError(domain))

        self.mx.resolver = DummyResolver()
        d = self.mx.getMX(alias)
        d.addCallback(self.assertEqual, Record_MX(name=exchange))
        return d

    def test_cnameChain(self):
        """
        If L{MXCalculator.getMX} encounters a CNAME chain which is longer than
        the length specified, the returned L{Deferred} should errback with
        L{CanonicalNameChainTooLong}.
        """
        class DummyResolver(object):
            """
            Fake resolver which generates a CNAME chain of infinite length in
            response to MX lookups.
            """
            # Class attribute; the first += creates the instance attribute.
            chainCounter = 0

            def lookupMailExchange(self, domain):
                self.chainCounter += 1
                name = 'x-%d.example.com' % (self.chainCounter,)
                return defer.succeed((
                    [RRHeader(name=domain,
                              type=Record_CNAME.TYPE,
                              payload=Record_CNAME(name))],
                    [], []))

        cnameLimit = 3
        self.mx.resolver = DummyResolver()
        d = self.mx.getMX("mail.example.com", cnameLimit)
        self.assertFailure(
            d, twisted.mail.relaymanager.CanonicalNameChainTooLong)
        # assertFailure converts the failure into its exception value, so
        # this callback receives the exception instance itself.
        def cbChainTooLong(error):
            self.assertEqual(error.args[0], Record_CNAME("x-%d.example.com" % (cnameLimit + 1,)))
            self.assertEqual(self.mx.resolver.chainCounter, cnameLimit + 1)
        d.addCallback(cbChainTooLong)
        return d

    def test_cnameWithGlueRecords(self):
        """
        If an MX lookup returns a CNAME and the MX record for the CNAME, the
        L{Deferred} returned by L{MXCalculator.getMX} should be called back
        with the name from the MX record without further lookups being
        attempted.
        """
        lookedUp = []
        alias = "alias.example.com"
        canonical = "canonical.example.com"
        exchange = "mail.example.com"

        class DummyResolver(object):
            def lookupMailExchange(self, domain):
                if domain != alias or lookedUp:
                    # Don't give back any results for anything except the alias
                    # or on any request after the first.
                    return ([], [], [])
                return defer.succeed((
                    [RRHeader(name=alias,
                              type=Record_CNAME.TYPE,
                              payload=Record_CNAME(canonical)),
                     RRHeader(name=canonical,
                              type=Record_MX.TYPE,
                              payload=Record_MX(name=exchange))],
                    [], []))

        self.mx.resolver = DummyResolver()
        d = self.mx.getMX(alias)
        d.addCallback(self.assertEqual, Record_MX(name=exchange))
        return d

    def test_cnameLoopWithGlueRecords(self):
        """
        If an MX lookup returns two CNAME records which point to each other,
        the loop should be detected and the L{Deferred} returned by
        L{MXCalculator.getMX} should be errbacked with L{CanonicalNameLoop}.
        """
        firstAlias = "cname1.example.com"
        secondAlias = "cname2.example.com"

        class DummyResolver(object):
            def lookupMailExchange(self, domain):
                return defer.succeed((
                    [RRHeader(name=firstAlias,
                              type=Record_CNAME.TYPE,
                              payload=Record_CNAME(secondAlias)),
                     RRHeader(name=secondAlias,
                              type=Record_CNAME.TYPE,
                              payload=Record_CNAME(firstAlias))],
                    [], []))

        self.mx.resolver = DummyResolver()
        d = self.mx.getMX(firstAlias)
        self.assertFailure(d, twisted.mail.relaymanager.CanonicalNameLoop)
        return d

    def testManyRecords(self):
        self.auth.addresses['test.domain'] = [
            'mx1.test.domain', 'mx2.test.domain', 'mx3.test.domain'
        ]
        return self.mx.getMX('test.domain'
            ).addCallback(self._cbManyRecordsSuccessfulLookup
            )

    def _cbManyRecordsSuccessfulLookup(self, mx):
        # Successively mark returned exchanges bad and verify a different
        # one is chosen each time (all three have equal preference).
        self.assertTrue(str(mx.name).split('.', 1)[0] in ('mx1', 'mx2', 'mx3'))
        self.mx.markBad(str(mx.name))
        return self.mx.getMX('test.domain'
            ).addCallback(self._cbManyRecordsDifferentResult, mx
            )

    def _cbManyRecordsDifferentResult(self, nextMX, mx):
        self.assertNotEqual(str(mx.name), str(nextMX.name))
        self.mx.markBad(str(nextMX.name))

        return self.mx.getMX('test.domain'
            ).addCallback(self._cbManyRecordsLastResult, mx, nextMX
            )

    def _cbManyRecordsLastResult(self, lastMX, mx, nextMX):
        self.assertNotEqual(str(mx.name), str(lastMX.name))
        self.assertNotEqual(str(nextMX.name), str(lastMX.name))

        self.mx.markBad(str(lastMX.name))
        # Re-marking one exchange good makes it eligible again.
        self.mx.markGood(str(nextMX.name))

        return self.mx.getMX('test.domain'
            ).addCallback(self._cbManyRecordsRepeatSpecificResult, nextMX
            )

    def _cbManyRecordsRepeatSpecificResult(self, againMX, nextMX):
        self.assertEqual(str(againMX.name), str(nextMX.name))
class LiveFireExerciseTests(unittest.TestCase):
    """
    End-to-end delivery tests that run real SMTP servers over loopback
    TCP, including a relay hop resolved via the fixture DNS server.
    """
    if interfaces.IReactorUDP(reactor, None) is None:
        skip = "UDP support is required to determining MX records"

    def setUp(self):
        setUpDNS(self)
        # Directories created by the tests; removed in tearDown.
        self.tmpdirs = [
            'domainDir', 'insertionDomain', 'insertionQueue',
            'destinationDomain', 'destinationQueue'
        ]

    def tearDown(self):
        for d in self.tmpdirs:
            if os.path.exists(d):
                shutil.rmtree(d)
        return tearDownDNS(self)

    def testLocalDelivery(self):
        service = mail.mail.MailService()
        service.smtpPortal.registerChecker(cred.checkers.AllowAnonymousAccess())
        domain = mail.maildir.MaildirDirdbmDomain(service, 'domainDir')
        domain.addUser('user', 'password')
        service.addDomain('test.domain', domain)
        service.portals[''] = service.portals['test.domain']
        # Python 2 idiom: eager map() for its registration side effect.
        map(service.portals[''].registerChecker, domain.getCredentialsCheckers())

        service.setQueue(mail.relay.DomainQueuer(service))

        f = service.getSMTPFactory()

        self.smtpServer = reactor.listenTCP(0, f, interface='127.0.0.1')

        client = LineSendingProtocol([
            'HELO meson',
            'MAIL FROM: <user@hostname>',
            'RCPT TO: <user@test.domain>',
            'DATA',
            'This is the message',
            '.',
            'QUIT'
        ])

        done = Deferred()
        f = protocol.ClientFactory()
        f.protocol = lambda: client
        f.clientConnectionLost = lambda *args: done.callback(None)
        reactor.connectTCP('127.0.0.1', self.smtpServer.getHost().port, f)

        def finished(ign):
            # The canned SMTP session is over; the message must now be in
            # the user's mailbox.
            mbox = domain.requestAvatar('user', None, pop3.IMailbox)[1]
            msg = mbox.getMessage(0).read()
            self.assertNotEqual(msg.find('This is the message'), -1)

            return self.smtpServer.stopListening()
        done.addCallback(finished)
        return done

    def testRelayDelivery(self):
        # Here is the service we will connect to and send mail from
        insServ = mail.mail.MailService()
        insServ.smtpPortal.registerChecker(cred.checkers.AllowAnonymousAccess())
        domain = mail.maildir.MaildirDirdbmDomain(insServ, 'insertionDomain')
        insServ.addDomain('insertion.domain', domain)
        os.mkdir('insertionQueue')
        insServ.setQueue(mail.relaymanager.Queue('insertionQueue'))
        insServ.domains.setDefaultDomain(mail.relay.DomainQueuer(insServ))
        manager = mail.relaymanager.SmartHostSMTPRelayingManager(insServ.queue)
        manager.fArgs += ('test.identity.hostname',)
        # NOTE(review): this first RelayStateHelper is never started and is
        # rebound below -- looks like a dead assignment; confirm.
        helper = mail.relaymanager.RelayStateHelper(manager, 1)
        # Yoink!  Now the internet obeys OUR every whim!
        manager.mxcalc = mail.relaymanager.MXCalculator(self.resolver)
        # And this is our whim.
        self.auth.addresses['destination.domain'] = ['127.0.0.1']

        f = insServ.getSMTPFactory()
        self.insServer = reactor.listenTCP(0, f, interface='127.0.0.1')

        # Here is the service the previous one will connect to for final
        # delivery
        destServ = mail.mail.MailService()
        destServ.smtpPortal.registerChecker(cred.checkers.AllowAnonymousAccess())
        domain = mail.maildir.MaildirDirdbmDomain(destServ, 'destinationDomain')
        domain.addUser('user', 'password')
        destServ.addDomain('destination.domain', domain)
        os.mkdir('destinationQueue')
        destServ.setQueue(mail.relaymanager.Queue('destinationQueue'))
        helper = mail.relaymanager.RelayStateHelper(manager, 1)
        helper.startService()

        f = destServ.getSMTPFactory()
        self.destServer = reactor.listenTCP(0, f, interface='127.0.0.1')

        # Update the port number the *first* relay will connect to, because we can't use
        # port 25
        manager.PORT = self.destServer.getHost().port

        client = LineSendingProtocol([
            'HELO meson',
            'MAIL FROM: <user@wherever>',
            'RCPT TO: <user@destination.domain>',
            'DATA',
            'This is the message',
            '.',
            'QUIT'
        ])

        done = Deferred()
        f = protocol.ClientFactory()
        f.protocol = lambda: client
        f.clientConnectionLost = lambda *args: done.callback(None)
        reactor.connectTCP('127.0.0.1', self.insServer.getHost().port, f)

        def finished(ign):
            # First part of the delivery is done.  Poke the queue manually now
            # so we don't have to wait for the queue to be flushed.
            delivery = manager.checkState()
            def delivered(ign):
                mbox = domain.requestAvatar('user', None, pop3.IMailbox)[1]
                msg = mbox.getMessage(0).read()
                self.assertNotEqual(msg.find('This is the message'), -1)

                self.insServer.stopListening()
                self.destServer.stopListening()
                helper.stopService()
            delivery.addCallback(delivered)
            return delivery
        done.addCallback(finished)
        return done
aliasFile = StringIO.StringIO("""\
# Here's a comment
# woop another one
testuser: address1,address2, address3,
continuation@address, |/bin/process/this
usertwo:thisaddress,thataddress, lastaddress
lastuser: :/includable, /filename, |/program, address
""")
class LineBufferMessage:
    """
    Message receiver test double: buffers delivered lines in memory and
    records whether end-of-message or connection loss was seen.
    """
    def __init__(self):
        self.lines = []     # every line passed to lineReceived, in order
        self.eom = False    # True once eomReceived has been called
        self.lost = False   # True once connectionLost has been called

    def lineReceived(self, line):
        self.lines.append(line)

    def eomReceived(self):
        self.eom = True
        # Fires immediately; the value itself is never inspected.
        return defer.succeed('<Whatever>')

    def connectionLost(self):
        self.lost = True
class AliasTests(unittest.TestCase):
    """
    Tests for the alias-file parsing helpers and message fan-out in
    L{mail.alias}.
    """
    # Sample message body shared by the delivery tests below.
    lines = [
        'First line',
        'Next line',
        '',
        'After a blank line',
        'Last line'
    ]

    def setUp(self):
        # Rewind the shared module-level alias file between tests.
        aliasFile.seek(0)

    def testHandle(self):
        result = {}
        lines = [
            'user: another@host\n',
            'nextuser: |/bin/program\n',
            'user: me@again\n',
            'moreusers: :/etc/include/filename\n',
            'multiuser: first@host, second@host,last@anotherhost',
        ]

        for l in lines:
            mail.alias.handle(result, l, 'TestCase', None)

        # Repeated keys accumulate; comma-separated targets are split.
        self.assertEqual(result['user'], ['another@host', 'me@again'])
        self.assertEqual(result['nextuser'], ['|/bin/program'])
        self.assertEqual(result['moreusers'], [':/etc/include/filename'])
        self.assertEqual(result['multiuser'], ['first@host', 'second@host', 'last@anotherhost'])

    def testFileLoader(self):
        domains = {'': object()}
        result = mail.alias.loadAliasFile(domains, fp=aliasFile)

        self.assertEqual(len(result), 3)

        group = result['testuser']
        s = str(group)
        for a in ('address1', 'address2', 'address3', 'continuation@address', '/bin/process/this'):
            self.assertNotEqual(s.find(a), -1)
        self.assertEqual(len(group), 5)

        group = result['usertwo']
        s = str(group)
        for a in ('thisaddress', 'thataddress', 'lastaddress'):
            self.assertNotEqual(s.find(a), -1)
        self.assertEqual(len(group), 3)

        group = result['lastuser']
        s = str(group)
        # The ':/includable' entry is dropped by the loader.
        self.assertEqual(s.find('/includable'), -1)
        for a in ('/filename', 'program', 'address'):
            self.assertNotEqual(s.find(a), -1, '%s not found' % a)
        self.assertEqual(len(group), 3)

    def testMultiWrapper(self):
        # A MultiWrapper must fan every line and the EOM out to all of its
        # wrapped receivers.
        msgs = LineBufferMessage(), LineBufferMessage(), LineBufferMessage()
        msg = mail.alias.MultiWrapper(msgs)

        for L in self.lines:
            msg.lineReceived(L)
        return msg.eomReceived().addCallback(self._cbMultiWrapper, msgs)

    def _cbMultiWrapper(self, ignored, msgs):
        for m in msgs:
            self.assertTrue(m.eom)
            self.assertFalse(m.lost)
            self.assertEqual(self.lines, m.lines)

    def testFileAlias(self):
        tmpfile = self.mktemp()
        a = mail.alias.FileAlias(tmpfile, None, None)
        m = a.createMessageReceiver()

        for l in self.lines:
            m.lineReceived(l)
        return m.eomReceived().addCallback(self._cbTestFileAlias, tmpfile)

    def _cbTestFileAlias(self, ignored, tmpfile):
        # The delivered file contains the message lines, newline-terminated.
        with open(tmpfile) as f:
            lines = f.readlines()
        self.assertEqual([L[:-1] for L in lines], self.lines)
class DummyDomain(object):
    """
    Test domain for L{AddressAliasTests}.
    """
    def __init__(self, address):
        self.address = address

    def exists(self, user, memo=None):
        """
        @returns: When a C{memo} is passed in this will raise a
            L{smtp.SMTPBadRcpt} exception, otherwise a boolean
            indicating if the C{user} and string version of
            L{self.address} are equal or not.
        @rtype: C{bool}
        """
        # NOTE(review): despite the docstring, the non-memo path returns a
        # zero-argument callable (which evaluates the comparison when
        # invoked), not a bool -- callers appear to rely on truthiness of
        # the callable itself; confirm before changing either side.
        if memo:
            raise mail.smtp.SMTPBadRcpt('ham')

        return lambda: user == str(self.address)
class AddressAliasTests(unittest.TestCase):
    """
    Tests for L{twisted.mail.alias.AddressAlias}.
    """
    def setUp(self):
        """
        Setup an L{AddressAlias}.
        """
        self.address = mail.smtp.Address('foo@bar')
        # One domain keyed by the address's own domain part ('bar').
        domains = {self.address.domain: DummyDomain(self.address)}
        self.alias = mail.alias.AddressAlias(self.address, domains,
                                             self.address)

    def test_createMessageReceiver(self):
        """
        L{createMessageReceiever} calls C{exists()} on the domain object
        which key matches the C{alias} passed to L{AddressAlias}.
        """
        # DummyDomain.exists returns a (truthy) callable here.
        self.assertTrue(self.alias.createMessageReceiver())

    def test_str(self):
        """
        The string presentation of L{AddressAlias} includes the alias.
        """
        self.assertEqual(str(self.alias), '<Address foo@bar>')

    def test_resolve(self):
        """
        L{resolve} will look for additional aliases when an C{aliasmap}
        dictionary is passed, and returns L{None} if none were found.
        """
        self.assertEqual(self.alias.resolve({self.address: 'bar'}), None)

    def test_resolveWithoutAliasmap(self):
        """
        L{resolve} returns L{None} when the alias could not be found in the
        C{aliasmap} and no L{mail.smtp.User} with this alias exists either.
        """
        self.assertEqual(self.alias.resolve({}), None)
class DummyProcess(object):
    """
    Minimal stand-in for a process object: it carries only an C{onEnd}
    slot and nothing else (no C{__dict__}, so no other attributes).
    """
    __slots__ = ['onEnd']
class MockProcessAlias(mail.alias.ProcessAlias):
    """
    An alias processor that doesn't actually launch processes.
    """
    def spawnProcess(self, proto, program, path):
        """
        Don't spawn a process.
        """
        # Deliberate no-op override so tests can exercise alias resolution
        # without touching the reactor's process machinery.
class MockAliasGroup(mail.alias.AliasGroup):
    """
    An alias group using C{MockProcessAlias}.
    """
    # Swap in the non-spawning alias factory so '|command' entries in a
    # group are safe to resolve during tests.
    processAliasFactory = MockProcessAlias
class StubProcess(object):
    """
    Fake implementation of L{IProcessTransport}.

    @ivar signals: Every signal name passed to L{signalProcess}, in order,
        for later inspection by tests.
    """
    def __init__(self):
        self.signals = []

    def loseConnection(self):
        """
        Disconnection is a no-op for the stub.
        """

    def signalProcess(self, signal):
        """
        Record the signal sent to this fake process instead of delivering it.
        """
        self.signals.append(signal)
class ProcessAliasTests(unittest.TestCase):
    """
    Tests for alias resolution.
    """
    if interfaces.IReactorProcess(reactor, None) is None:
        skip = "IReactorProcess not supported"

    # Fixture message body shared by the delivery tests below.
    lines = [
        'First line',
        'Next line',
        '',
        'After a blank line',
        'Last line'
    ]

    def exitStatus(self, code):
        """
        Construct a status from the given exit code.
        @type code: L{int} between 0 and 255 inclusive.
        @param code: The exit status which the code will represent.
        @rtype: L{int}
        @return: A status integer for the given exit code.
        """
        # /* Macros for constructing status values. */
        # #define __W_EXITCODE(ret, sig) ((ret) << 8 | (sig))
        status = (code << 8) | 0
        # Sanity check
        self.assertTrue(os.WIFEXITED(status))
        self.assertEqual(os.WEXITSTATUS(status), code)
        self.assertFalse(os.WIFSIGNALED(status))
        return status

    def signalStatus(self, signal):
        """
        Construct a status from the given signal.
        @type signal: L{int} between 0 and 255 inclusive.
        @param signal: The signal number which the status will represent.
        @rtype: L{int}
        @return: A status integer for the given signal.
        """
        # /* If WIFSIGNALED(STATUS), the terminating signal. */
        # #define __WTERMSIG(status) ((status) & 0x7f)
        # /* Nonzero if STATUS indicates termination by a signal. */
        # #define __WIFSIGNALED(status) \
        # (((signed char) (((status) & 0x7f) + 1) >> 1) > 0)
        status = signal
        # Sanity check
        self.assertTrue(os.WIFSIGNALED(status))
        self.assertEqual(os.WTERMSIG(status), signal)
        self.assertFalse(os.WIFEXITED(status))
        return status

    def setUp(self):
        """
        Replace L{smtp.DNSNAME} with a well-known value.
        """
        self.DNSNAME = smtp.DNSNAME
        smtp.DNSNAME = ''

    def tearDown(self):
        """
        Restore the original value of L{smtp.DNSNAME}.
        """
        smtp.DNSNAME = self.DNSNAME

    def test_processAlias(self):
        """
        Standard call to C{mail.alias.ProcessAlias}: check that the specified
        script is called, and that the input is correctly transferred to it.
        """
        sh = FilePath(self.mktemp())
        # Shell script that echoes its stdin, line by line, into a file.
        sh.setContent("""\
#!/bin/sh
rm -f process.alias.out
while read i; do
    echo $i >> process.alias.out
done""")
        os.chmod(sh.path, 0o700)
        a = mail.alias.ProcessAlias(sh.path, None, None)
        m = a.createMessageReceiver()
        for l in self.lines:
            m.lineReceived(l)
        def _cbProcessAlias(ignored):
            with open('process.alias.out') as f:
                lines = f.readlines()
            self.assertEqual([L[:-1] for L in lines], self.lines)
        return m.eomReceived().addCallback(_cbProcessAlias)

    def test_processAliasTimeout(self):
        """
        If the alias child process does not exit within a particular period of
        time, the L{Deferred} returned by L{MessageWrapper.eomReceived} should
        fail with L{ProcessAliasTimeout} and send the I{KILL} signal to the
        child process..
        """
        reactor = task.Clock()
        transport = StubProcess()
        proto = mail.alias.ProcessAliasProtocol()
        proto.makeConnection(transport)
        receiver = mail.alias.MessageWrapper(proto, None, reactor)
        d = receiver.eomReceived()
        # Advancing past completionTimeout must trigger the KILL.
        reactor.advance(receiver.completionTimeout)
        def timedOut(ignored):
            self.assertEqual(transport.signals, ['KILL'])
            # Now that it has been killed, disconnect the protocol associated
            # with it.
            proto.processEnded(
                ProcessTerminated(self.signalStatus(signal.SIGKILL)))
        self.assertFailure(d, mail.alias.ProcessAliasTimeout)
        d.addCallback(timedOut)
        return d

    def test_earlyProcessTermination(self):
        """
        If the process associated with an L{mail.alias.MessageWrapper} exits
        before I{eomReceived} is called, the L{Deferred} returned by
        I{eomReceived} should fail.
        """
        transport = StubProcess()
        protocol = mail.alias.ProcessAliasProtocol()
        protocol.makeConnection(transport)
        receiver = mail.alias.MessageWrapper(protocol, None, None)
        protocol.processEnded(failure.Failure(ProcessDone(0)))
        return self.assertFailure(receiver.eomReceived(), ProcessDone)

    def _terminationTest(self, status):
        """
        Verify that if the process associated with an
        L{mail.alias.MessageWrapper} exits with the given status, the
        L{Deferred} returned by I{eomReceived} fails with L{ProcessTerminated}.
        """
        transport = StubProcess()
        protocol = mail.alias.ProcessAliasProtocol()
        protocol.makeConnection(transport)
        receiver = mail.alias.MessageWrapper(protocol, None, None)
        protocol.processEnded(
            failure.Failure(ProcessTerminated(status)))
        return self.assertFailure(receiver.eomReceived(), ProcessTerminated)

    def test_errorProcessTermination(self):
        """
        If the process associated with an L{mail.alias.MessageWrapper} exits
        with a non-zero exit code, the L{Deferred} returned by I{eomReceived}
        should fail.
        """
        return self._terminationTest(self.exitStatus(1))

    def test_signalProcessTermination(self):
        """
        If the process associated with an L{mail.alias.MessageWrapper} exits
        because it received a signal, the L{Deferred} returned by
        I{eomReceived} should fail.
        """
        return self._terminationTest(self.signalStatus(signal.SIGHUP))

    def test_aliasResolution(self):
        """
        Check that the C{resolve} method of alias processors produce the correct
        set of objects:
        - direct alias with L{mail.alias.AddressAlias} if a simple input is passed
        - aliases in a file with L{mail.alias.FileWrapper} if an input in the format
        '/file' is given
        - aliases resulting of a process call wrapped by L{mail.alias.MessageWrapper}
        if the format is '|process'
        """
        aliases = {}
        domain = {'': TestDomain(aliases, ['user1', 'user2', 'user3'])}
        A1 = MockAliasGroup(['user1', '|echo', '/file'], domain, 'alias1')
        A2 = MockAliasGroup(['user2', 'user3'], domain, 'alias2')
        A3 = mail.alias.AddressAlias('alias1', domain, 'alias3')
        aliases.update({
            'alias1': A1,
            'alias2': A2,
            'alias3': A3,
        })
        # NOTE: map() returns a list here (Python 2); the in-place sort
        # makes the comparison order-independent.
        res1 = A1.resolve(aliases)
        r1 = map(str, res1.objs)
        r1.sort()
        expected = map(str, [
            mail.alias.AddressAlias('user1', None, None),
            mail.alias.MessageWrapper(DummyProcess(), 'echo'),
            mail.alias.FileWrapper('/file'),
        ])
        expected.sort()
        self.assertEqual(r1, expected)
        res2 = A2.resolve(aliases)
        r2 = map(str, res2.objs)
        r2.sort()
        expected = map(str, [
            mail.alias.AddressAlias('user2', None, None),
            mail.alias.AddressAlias('user3', None, None)
        ])
        expected.sort()
        self.assertEqual(r2, expected)
        # alias3 points at alias1, so it resolves to alias1's targets.
        res3 = A3.resolve(aliases)
        r3 = map(str, res3.objs)
        r3.sort()
        expected = map(str, [
            mail.alias.AddressAlias('user1', None, None),
            mail.alias.MessageWrapper(DummyProcess(), 'echo'),
            mail.alias.FileWrapper('/file'),
        ])
        expected.sort()
        self.assertEqual(r3, expected)

    def test_cyclicAlias(self):
        """
        Check that a cycle in alias resolution is correctly handled.
        """
        aliases = {}
        domain = {'': TestDomain(aliases, [])}
        # alias1 -> alias2 -> alias3 -> alias1: a pure cycle resolves to None.
        A1 = mail.alias.AddressAlias('alias2', domain, 'alias1')
        A2 = mail.alias.AddressAlias('alias3', domain, 'alias2')
        A3 = mail.alias.AddressAlias('alias1', domain, 'alias3')
        aliases.update({
            'alias1': A1,
            'alias2': A2,
            'alias3': A3
        })
        self.assertEqual(aliases['alias1'].resolve(aliases), None)
        self.assertEqual(aliases['alias2'].resolve(aliases), None)
        self.assertEqual(aliases['alias3'].resolve(aliases), None)
        # A group containing the cycle still yields its non-cyclic members.
        A4 = MockAliasGroup(['|echo', 'alias1'], domain, 'alias4')
        aliases['alias4'] = A4
        res = A4.resolve(aliases)
        r = map(str, res.objs)
        r.sort()
        expected = map(str, [
            mail.alias.MessageWrapper(DummyProcess(), 'echo')
        ])
        expected.sort()
        self.assertEqual(r, expected)
class TestDomain:
    """
    In-memory mail domain used by the alias-resolution tests.

    @ivar aliases: mapping of local-part name to alias processor.
    @ivar users: list of local-part names that exist directly.
    """
    def __init__(self, aliases, users):
        self.aliases = aliases
        self.users = users

    def exists(self, user, memo=None):
        """
        Return a callable producing the delivery target for C{user}.

        @raise smtp.SMTPBadRcpt: if the user is neither a direct user nor an
            alias that resolves to at least one target.
        """
        user = user.dest.local
        if user in self.users:
            return lambda: mail.alias.AddressAlias(user, None, None)
        try:
            a = self.aliases[user]
        except KeyError:
            # Fix: was a bare `except:`.  Only a missing alias entry means
            # "no such recipient"; any other error should propagate instead
            # of being masked as a bad-recipient bounce.
            raise smtp.SMTPBadRcpt(user)
        aliases = a.resolve(self.aliases, memo)
        if aliases:
            return lambda: aliases
        raise smtp.SMTPBadRcpt(user)
class DummyQueue(object):
    """
    A fake relay queue to use for testing.
    This queue doesn't keep track of which messages are waiting to be relayed
    or are in the process of being relayed.
    @ivar directory: See L{__init__}.
    """
    # On-disk layout: each message is a pair of files named
    # '<base>-D' (message data) and '<base>-H' (pickled envelope header);
    # '<base>-C' is the temporary name used while the data is being written.
    def __init__(self, directory):
        """
        @type directory: L{bytes}
        @param directory: The pathname of the directory holding messages in the
            queue.
        """
        self.directory = directory
    def done(self, message):
        """
        Remove a message from the queue.
        @type message: L{bytes}
        @param message: The base filename of a message.
        """
        message = os.path.basename(message)
        os.remove(self.getPath(message) + '-D')
        os.remove(self.getPath(message) + '-H')
    def getEnvelopeFile(self, message):
        """
        Get the envelope file for a message in the queue.
        @type message: L{bytes}
        @param message: The base filename of a message.
        @rtype: L{file}
        @return: The envelope file for the message.
        """
        return open(os.path.join(self.directory, message+'-H'), 'rb')
    def getPath(self, message):
        """
        Return the full base pathname of a message in the queue.
        @type message: L{bytes}
        @param message: The base filename of a message.
        @rtype: L{bytes}
        @return: The full base pathname of the message.
        """
        return os.path.join(self.directory, message)
    def createNewMessage(self):
        """
        Create a new message in the queue.
        @rtype: 2-L{tuple} of (E{1}) L{file}, (E{2}) L{FileMessage}
        @return: The envelope file and a message receiver for a new message in
            the queue.
        """
        # Timestamp + object id gives a name unique enough for tests.
        fname = "%s_%s" % (time.time(), id(self))
        headerFile = open(os.path.join(self.directory, fname+'-H'), 'wb')
        tempFilename = os.path.join(self.directory, fname+'-C')
        finalFilename = os.path.join(self.directory, fname+'-D')
        messageFile = open(tempFilename, 'wb')
        return headerFile, mail.mail.FileMessage(messageFile, tempFilename,
            finalFilename)
    def setWaiting(self, message):
        """
        Ignore the request to mark a message as waiting to be relayed.
        @type message: L{bytes}
        @param message: The base filename of a message.
        """
        pass
class DummySmartHostSMTPRelayingManager(object):
    """
    A fake smart host to use for testing.

    @type managed: L{dict} of L{bytes} -> L{list} of L{list} of L{bytes}
    @ivar managed: Maps an identifier for a managed relayer to the base
        filenames of the messages that relayer is responsible for.
    @ivar queue: See L{__init__}.
    """
    def __init__(self, queue):
        """
        Initialize the minimum necessary members of a smart host.

        @type queue: L{DummyQueue}
        @param queue: A queue that can be used for testing purposes.
        """
        # Start with no managed relayers at all.
        self.managed = {}
        self.queue = queue
class _AttemptManagerTests(unittest.TestCase):
    """
    Test the behavior of L{_AttemptManager}.
    @type tmpdir: L{bytes}
    @ivar tmpdir: The path to a temporary directory holding the message files.
    @type reactor: L{MemoryReactorClock}
    @ivar reactor: The reactor used for test purposes.
    @type eventLog: L{None} or L{dict} of L{bytes} -> L{object}
    @ivar eventLog: Information about the last informational log message
        generated or none if no log message has been generated.
    @type noisyAttemptMgr: L{_AttemptManager}
    @ivar noisyAttemptMgr: An attempt manager which generates informational
        log messages.
    @type quietAttemptMgr: L{_AttemptManager}
    @ivar quietAttemptMgr: An attempt manager which does not generate
        informational log messages.
    @type noisyMessage: L{bytes}
    @ivar noisyMessage: The full base pathname of the message to be used with
        the noisy attempt manager.
    @type quietMessage: L{bytes}
    @ivar quietMessage: The full base pathname of the message to be used with
        the quiet.
    """
    def setUp(self):
        """
        Set up a temporary directory for the queue, attempt managers with the
        noisy flag on and off, message files for use with each attempt manager,
        and a reactor.  Also, register to be notified when log messages are
        generated.
        """
        self.tmpdir = self.mktemp()
        os.mkdir(self.tmpdir)
        self.reactor = MemoryReactorClock()
        self.eventLog = None
        log.addObserver(self._logObserver)
        self.noisyAttemptMgr = _AttemptManager(
            DummySmartHostSMTPRelayingManager(DummyQueue(self.tmpdir)),
            True, self.reactor)
        self.quietAttemptMgr = _AttemptManager(
            DummySmartHostSMTPRelayingManager(DummyQueue(self.tmpdir)),
            False, self.reactor)
        noisyBaseName = "noisyMessage"
        quietBaseName = "quietMessage"
        self.noisyMessage = os.path.join(self.tmpdir, noisyBaseName)
        self.quietMessage = os.path.join(self.tmpdir, quietBaseName)
        # Create empty '-D' (data) files; the '-H' (envelope) files are
        # pickled below, matching DummyQueue's on-disk layout.
        open(self.noisyMessage+'-D', "w").close()
        open(self.quietMessage+'-D', "w").close()
        self.noisyAttemptMgr.manager.managed['noisyRelayer'] = [
            noisyBaseName]
        self.quietAttemptMgr.manager.managed['quietRelayer'] = [
            quietBaseName]
        with open(self.noisyMessage+'-H', 'w') as envelope:
            pickle.dump(['from-noisy@domain', 'to-noisy@domain'], envelope)
        with open(self.quietMessage+'-H', 'w') as envelope:
            pickle.dump(['from-quiet@domain', 'to-quiet@domain'], envelope)
    def tearDown(self):
        """
        Unregister for log events and remove the temporary directory.
        """
        log.removeObserver(self._logObserver)
        shutil.rmtree(self.tmpdir)
    def _logObserver(self, eventDict):
        """
        A log observer.
        @type eventDict: L{dict} of L{bytes} -> L{object}
        @param eventDict: Information about the last informational log message
            generated.
        """
        self.eventLog = eventDict
    def test_initNoisyDefault(self):
        """
        When an attempt manager is created without the noisy parameter, the
        noisy instance variable should default to true.
        """
        am = _AttemptManager(DummySmartHostSMTPRelayingManager(
            DummyQueue(self.tmpdir)))
        self.assertTrue(am.noisy)
    def test_initNoisy(self):
        """
        When an attempt manager is created with the noisy parameter set to
        true, the noisy instance variable should be set to true.
        """
        self.assertTrue(self.noisyAttemptMgr.noisy)
    def test_initQuiet(self):
        """
        When an attempt manager is created with the noisy parameter set to
        false, the noisy instance variable should be set to false.
        """
        self.assertFalse(self.quietAttemptMgr.noisy)
    def test_initReactorDefault(self):
        """
        When an attempt manager is created without the reactor parameter, the
        reactor instance variable should default to the global reactor.
        """
        am = _AttemptManager(DummySmartHostSMTPRelayingManager(
            DummyQueue(self.tmpdir)))
        self.assertEqual(am.reactor, reactor)
    def test_initReactor(self):
        """
        When an attempt manager is created with a reactor provided, the
        reactor instance variable should default to that reactor.
        """
        self.assertEqual(self.noisyAttemptMgr.reactor, self.reactor)
    def test_notifySuccessNoisy(self):
        """
        For an attempt manager with the noisy flag set, notifySuccess should
        result in a log message.
        """
        self.noisyAttemptMgr.notifySuccess('noisyRelayer', self.noisyMessage)
        self.assertTrue(self.eventLog)
    def test_notifySuccessQuiet(self):
        """
        For an attempt manager with the noisy flag not set, notifySuccess
        should result in no log message.
        """
        self.quietAttemptMgr.notifySuccess('quietRelayer', self.quietMessage)
        self.assertFalse(self.eventLog)
    def test_notifyFailureNoisy(self):
        """
        For an attempt manager with the noisy flag set, notifyFailure should
        result in a log message.
        """
        self.noisyAttemptMgr.notifyFailure('noisyRelayer', self.noisyMessage)
        self.assertTrue(self.eventLog)
    def test_notifyFailureQuiet(self):
        """
        For an attempt manager with the noisy flag not set, notifyFailure
        should result in no log message.
        """
        self.quietAttemptMgr.notifyFailure('quietRelayer', self.quietMessage)
        self.assertFalse(self.eventLog)
    def test_notifyDoneNoisy(self):
        """
        For an attempt manager with the noisy flag set, notifyDone should
        result in a log message.
        """
        self.noisyAttemptMgr.notifyDone('noisyRelayer')
        self.assertTrue(self.eventLog)
    def test_notifyDoneQuiet(self):
        """
        For an attempt manager with the noisy flag not set, notifyDone
        should result in no log message.
        """
        self.quietAttemptMgr.notifyDone('quietRelayer')
        self.assertFalse(self.eventLog)
    def test_notifyNoConnectionNoisy(self):
        """
        For an attempt manager with the noisy flag set, notifyNoConnection
        should result in a log message.
        """
        self.noisyAttemptMgr.notifyNoConnection('noisyRelayer')
        self.assertTrue(self.eventLog)
        # Flush the retry timer notifyNoConnection schedules.
        self.reactor.advance(60)
    def test_notifyNoConnectionQuiet(self):
        """
        For an attempt manager with the noisy flag not set, notifyNoConnection
        should result in no log message.
        """
        self.quietAttemptMgr.notifyNoConnection('quietRelayer')
        self.assertFalse(self.eventLog)
        # Flush the retry timer notifyNoConnection schedules.
        self.reactor.advance(60)
# Skip every TestCase in this module on non-POSIX platforms, since
# twisted.mail relies on POSIX process and file semantics.
from twisted.python.runtime import platformType
import types
if platformType != "posix":
    for o in locals().values():
        # types.ClassType covers old-style (Python 2) classes as well.
        if isinstance(o, (types.ClassType, type)) and issubclass(o, unittest.TestCase):
            o.skip = "twisted.mail only works on posix"
| EricMuller/mynotes-backend | requirements/twisted/Twisted-17.1.0/src/twisted/mail/test/test_mail.py | Python | mit | 84,944 |
"""Test inter-conversion of different polynomial classes.
This tests the convert and cast methods of all the polynomial classes.
"""
from __future__ import division, absolute_import, print_function
import operator as op
from numbers import Number
import pytest
import numpy as np
from numpy.polynomial import (
Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE)
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
)
from numpy.compat import long
#
# fixtures
#
# All concrete polynomial convenience classes under test, plus readable
# ids so pytest parametrization reports are self-describing.
classes = (
    Polynomial,
    Legendre,
    Chebyshev,
    Laguerre,
    Hermite,
    HermiteE,
)
classids = tuple(c.__name__ for c in classes)
@pytest.fixture(params=classes, ids=classids)
def Poly(request):
    # Parametrized fixture: each test requesting ``Poly`` runs once per
    # polynomial convenience class.
    return request.param
#
# helper functions
#
# Shorthand for uniform samples in [0, 1), used to perturb domains/windows.
random = np.random.random
def assert_poly_almost_equal(p1, p2, msg=""):
    """
    Assert that two polynomial instances are almost equal.

    Equality requires identical domain and window and almost-equal
    coefficients.  On failure an AssertionError is raised whose message
    shows both operands.
    """
    try:
        assert_(np.all(p1.domain == p2.domain))
        assert_(np.all(p1.window == p2.window))
        assert_almost_equal(p1.coef, p2.coef)
    except AssertionError:
        # Bug fix: the original used a comma ("fmt", (p1, p2)) which builds
        # a tuple instead of interpolating, yielding an unreadable message.
        msg = "Result: %s\nTarget: %s" % (p1, p2)
        raise AssertionError(msg)
#
# Test conversion methods that depend on combinations of two classes.
#
# Aliases of the ``Poly`` fixture so a test can request two independently
# parametrized polynomial classes in the same signature.
Poly1 = Poly
Poly2 = Poly
def test_conversion(Poly1, Poly2):
    """convert() maps onto the requested class, domain and window."""
    pts = np.linspace(0, 1, 10)
    c = random((3,))
    dom1 = Poly1.domain + random((2,))*.25
    win1 = Poly1.window + random((2,))*.25
    src = Poly1(c, domain=dom1, window=win1)
    dom2 = Poly2.domain + random((2,))*.25
    win2 = Poly2.window + random((2,))*.25
    dst = src.convert(kind=Poly2, domain=dom2, window=win2)
    assert_almost_equal(dst.domain, dom2)
    assert_almost_equal(dst.window, win2)
    assert_almost_equal(dst(pts), src(pts))
def test_cast(Poly1, Poly2):
    """cast() re-expresses an instance in the target class unchanged as a map."""
    pts = np.linspace(0, 1, 10)
    c = random((3,))
    dom1 = Poly1.domain + random((2,))*.25
    win1 = Poly1.window + random((2,))*.25
    src = Poly1(c, domain=dom1, window=win1)
    dom2 = Poly2.domain + random((2,))*.25
    win2 = Poly2.window + random((2,))*.25
    dst = Poly2.cast(src, domain=dom2, window=win2)
    assert_almost_equal(dst.domain, dom2)
    assert_almost_equal(dst.window, win2)
    assert_almost_equal(dst(pts), src(pts))
#
# test methods that depend on one class
#
def test_identity(Poly):
    """identity() evaluates to x over the requested domain."""
    dom = Poly.domain + random((2,))*.25
    win = Poly.window + random((2,))*.25
    pts = np.linspace(dom[0], dom[1], 11)
    ident = Poly.identity(domain=dom, window=win)
    assert_equal(ident.domain, dom)
    assert_equal(ident.window, win)
    assert_almost_equal(ident(pts), pts)
def test_basis(Poly):
    """basis(5) is the degree-5 basis element: a single trailing 1."""
    dom = Poly.domain + random((2,))*.25
    win = Poly.window + random((2,))*.25
    b = Poly.basis(5, domain=dom, window=win)
    assert_equal(b.domain, dom)
    assert_equal(b.window, win)
    assert_equal(b.coef, [0]*5 + [1])
def test_fromroots(Poly):
    """fromroots() yields a polynomial with the requested zeros, monic in the
    power basis."""
    dom = Poly.domain + random((2,))*.25
    win = Poly.window + random((2,))*.25
    roots = random((5,))
    poly = Poly.fromroots(roots, domain=dom, window=win)
    assert_equal(poly.degree(), len(roots))
    assert_equal(poly.domain, dom)
    assert_equal(poly.window, win)
    assert_almost_equal(poly(roots), 0)
    # The power-basis image of the result must be monic.
    as_power = Polynomial.cast(poly, domain=Polynomial.domain,
                               window=Polynomial.window)
    assert_almost_equal(as_power.coef[-1], 1)
def test_fit(Poly):
    """fit() recovers a cubic, honors domain/window, and accepts weights."""
    def curve(t):
        return t*(t - 1)*(t - 2)
    x = np.linspace(0, 3)
    y = curve(x)
    # Default domain/window.
    p = Poly.fit(x, y, 3)
    assert_almost_equal(p.domain, [0, 3])
    assert_almost_equal(p(x), y)
    assert_equal(p.degree(), 3)
    # Explicit domain and window; degree given as int and as index list.
    dom = Poly.domain + random((2,))*.25
    win = Poly.window + random((2,))*.25
    p = Poly.fit(x, y, 3, domain=dom, window=win)
    assert_almost_equal(p(x), y)
    assert_almost_equal(p.domain, dom)
    assert_almost_equal(p.window, win)
    p = Poly.fit(x, y, [0, 1, 2, 3], domain=dom, window=win)
    assert_almost_equal(p(x), y)
    assert_almost_equal(p.domain, dom)
    assert_almost_equal(p.window, win)
    # An empty domain list selects the class default domain/window.
    p = Poly.fit(x, y, 3, [])
    assert_equal(p.domain, Poly.domain)
    assert_equal(p.window, Poly.window)
    p = Poly.fit(x, y, [0, 1, 2, 3], [])
    assert_equal(p.domain, Poly.domain)
    assert_equal(p.window, Poly.window)
    # Zero-weighted noisy points must not influence the fit.
    wgt = np.zeros_like(x)
    noisy = y + random(y.shape)*.25
    wgt[::2] = 1
    p1 = Poly.fit(x[::2], noisy[::2], 3)
    p2 = Poly.fit(x, noisy, 3, w=wgt)
    p3 = Poly.fit(x, noisy, [0, 1, 2, 3], w=wgt)
    assert_almost_equal(p1(x), p2(x))
    assert_almost_equal(p2(x), p3(x))
def test_equal(Poly):
p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2])
assert_(p1 == p1)
assert_(not p1 == p2)
assert_(not p1 == p3)
assert_(not p1 == p4)
def test_not_equal(Poly):
p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2])
assert_(not p1 != p1)
assert_(p1 != p2)
assert_(p1 != p3)
assert_(p1 != p4)
def test_add(Poly):
    """+ commutes and accepts list/tuple/ndarray operands on either side;
    mismatched domain/window or a different class raises TypeError."""
    c1 = list(random((4,)) + .5)
    c2 = list(random((3,)) + .5)
    p1 = Poly(c1)
    p2 = Poly(c2)
    total = p1 + p2
    assert_poly_almost_equal(p2 + p1, total)
    for other in (c2, tuple(c2), np.array(c2)):
        assert_poly_almost_equal(p1 + other, total)
        assert_poly_almost_equal(other + p1, total)
    assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1))
    assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1))
    other_cls = Chebyshev if Poly is Polynomial else Polynomial
    assert_raises(TypeError, op.add, p1, other_cls([0]))
def test_sub(Poly):
    """- anti-commutes and accepts list/tuple/ndarray operands; mismatched
    domain/window or a different class raises TypeError."""
    c1 = list(random((4,)) + .5)
    c2 = list(random((3,)) + .5)
    p1 = Poly(c1)
    p2 = Poly(c2)
    diff = p1 - p2
    assert_poly_almost_equal(p2 - p1, -diff)
    for other in (c2, tuple(c2), np.array(c2)):
        assert_poly_almost_equal(p1 - other, diff)
        assert_poly_almost_equal(other - p1, -diff)
    assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1))
    assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1))
    other_cls = Chebyshev if Poly is Polynomial else Polynomial
    assert_raises(TypeError, op.sub, p1, other_cls([0]))
def test_mul(Poly):
    """* commutes, accepts coercible operands and scalars; mismatched
    domain/window or a different class raises TypeError."""
    c1 = list(random((4,)) + .5)
    c2 = list(random((3,)) + .5)
    p1 = Poly(c1)
    p2 = Poly(c2)
    prod = p1 * p2
    assert_poly_almost_equal(p2 * p1, prod)
    for other in (c2, tuple(c2), np.array(c2)):
        assert_poly_almost_equal(p1 * other, prod)
        assert_poly_almost_equal(other * p1, prod)
    # Scalar multiplication matches multiplying by a constant polynomial.
    assert_poly_almost_equal(p1 * 2, p1 * Poly([2]))
    assert_poly_almost_equal(2 * p1, p1 * Poly([2]))
    assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1))
    assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1))
    other_cls = Chebyshev if Poly is Polynomial else Polynomial
    assert_raises(TypeError, op.mul, p1, other_cls([0]))
def test_floordiv(Poly):
    """// recovers the quotient for every coercible operand combination."""
    c1 = list(random((4,)) + .5)
    c2 = list(random((3,)) + .5)
    c3 = list(random((2,)) + .5)
    p1 = Poly(c1)
    p2 = Poly(c2)
    p3 = Poly(c3)
    p4 = p1 * p2 + p3
    c4 = list(p4.coef)
    # p4 = p1*p2 + p3 with deg(p3) < deg(p2), so p4 // p2 == p1.
    for num, den in ((p4, p2), (p4, c2), (c4, p2),
                     (p4, tuple(c2)), (tuple(c4), p2),
                     (p4, np.array(c2)), (np.array(c4), p2)):
        assert_poly_almost_equal(num // den, p1)
    assert_poly_almost_equal(2 // p2, Poly([0]))
    assert_poly_almost_equal(p2 // 2, 0.5*p2)
    assert_raises(
        TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1))
    assert_raises(
        TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1))
    other_cls = Chebyshev if Poly is Polynomial else Polynomial
    assert_raises(TypeError, op.floordiv, p1, other_cls([0]))
def test_truediv(Poly):
    """/ is defined only for division by a non-bool numeric scalar."""
    p1 = Poly([1, 2, 3])
    p2 = p1 * 5
    # Every numeric scalar type works as a divisor, never as a dividend.
    for stype in np.ScalarType:
        if not issubclass(stype, Number) or issubclass(stype, bool):
            continue
        divisor = stype(5)
        assert_poly_almost_equal(op.truediv(p2, divisor), p1)
        assert_raises(TypeError, op.truediv, divisor, p2)
    for stype in (int, long, float):
        divisor = stype(5)
        assert_poly_almost_equal(op.truediv(p2, divisor), p1)
        assert_raises(TypeError, op.truediv, divisor, p2)
    divisor = complex(5, 0)
    assert_poly_almost_equal(op.truediv(p2, divisor), p1)
    assert_raises(TypeError, op.truediv, divisor, p2)
    # Non-scalar operands are rejected on both sides.
    for bad in [tuple(), list(), dict(), bool(), np.array([1])]:
        assert_raises(TypeError, op.truediv, p2, bad)
        assert_raises(TypeError, op.truediv, bad, p2)
    # Division by another polynomial instance is never allowed.
    for ptype in classes:
        assert_raises(TypeError, op.truediv, p2, ptype(1))
def test_mod(Poly):
    """% recovers the remainder for every coercible operand combination."""
    c1 = list(random((4,)) + .5)
    c2 = list(random((3,)) + .5)
    c3 = list(random((2,)) + .5)
    p1 = Poly(c1)
    p2 = Poly(c2)
    p3 = Poly(c3)
    p4 = p1 * p2 + p3
    c4 = list(p4.coef)
    # p4 = p1*p2 + p3 with deg(p3) < deg(p2), so p4 % p2 == p3.
    for num, den in ((p4, p2), (p4, c2), (c4, p2),
                     (p4, tuple(c2)), (tuple(c4), p2),
                     (p4, np.array(c2)), (np.array(c4), p2)):
        assert_poly_almost_equal(num % den, p3)
    assert_poly_almost_equal(2 % p2, Poly([2]))
    assert_poly_almost_equal(p2 % 2, Poly([0]))
    assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1))
    assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1))
    other_cls = Chebyshev if Poly is Polynomial else Polynomial
    assert_raises(TypeError, op.mod, p1, other_cls([0]))
def test_divmod(Poly):
    """divmod() yields (quotient, remainder) for all coercible operands."""
    c1 = list(random((4,)) + .5)
    c2 = list(random((3,)) + .5)
    c3 = list(random((2,)) + .5)
    p1 = Poly(c1)
    p2 = Poly(c2)
    p3 = Poly(c3)
    p4 = p1 * p2 + p3
    c4 = list(p4.coef)
    # p4 = p1*p2 + p3 with deg(p3) < deg(p2): quotient p1, remainder p3.
    for num, den in ((p4, p2), (p4, c2), (c4, p2),
                     (p4, tuple(c2)), (tuple(c4), p2),
                     (p4, np.array(c2)), (np.array(c4), p2)):
        quo, rem = divmod(num, den)
        assert_poly_almost_equal(quo, p1)
        assert_poly_almost_equal(rem, p3)
    quo, rem = divmod(p2, 2)
    assert_poly_almost_equal(quo, 0.5*p2)
    assert_poly_almost_equal(rem, Poly([0]))
    quo, rem = divmod(2, p2)
    assert_poly_almost_equal(quo, Poly([0]))
    assert_poly_almost_equal(rem, Poly([2]))
    assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1))
    assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1))
    other_cls = Chebyshev if Poly is Polynomial else Polynomial
    assert_raises(TypeError, divmod, p1, other_cls([0]))
def test_roots(Poly):
d = Poly.domain * 1.25 + .25
w = Poly.window
tgt = np.linspace(d[0], d[1], 5)
res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots())
assert_almost_equal(res, tgt)
# default domain and window
res = np.sort(Poly.fromroots(tgt).roots())
assert_almost_equal(res, tgt)
def test_degree(Poly):
p = Poly.basis(5)
assert_equal(p.degree(), 5)
def test_copy(Poly):
p1 = Poly.basis(5)
p2 = p1.copy()
assert_(p1 == p2)
assert_(p1 is not p2)
assert_(p1.coef is not p2.coef)
assert_(p1.domain is not p2.domain)
assert_(p1.window is not p2.window)
def test_integ(Poly):
    """integ() matches known antiderivatives, with k, lbnd and scaling."""
    P = Polynomial
    # Defaults: antiderivatives of 2 + 6x + 12x**2 in the power basis.
    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
    assert_poly_almost_equal(P.cast(p0.integ()), P([0, 2, 3, 4]))
    assert_poly_almost_equal(P.cast(p0.integ(2)), P([0, 0, 1, 1, 1]))
    # Integration constants k.
    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
    assert_poly_almost_equal(P.cast(p0.integ(k=1)), P([1, 2, 3, 4]))
    assert_poly_almost_equal(P.cast(p0.integ(2, k=[1, 1])),
                             P([1, 1, 1, 1, 1]))
    # Lower bound lbnd.
    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
    assert_poly_almost_equal(P.cast(p0.integ(lbnd=1)), P([-9, 2, 3, 4]))
    assert_poly_almost_equal(P.cast(p0.integ(2, lbnd=1)),
                             P([6, -9, 1, 1, 1]))
    # A scaled domain must not change the power-basis result.
    p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=2*Poly.domain)
    assert_poly_almost_equal(P.cast(p0.integ()), P([0, 2, 3, 4]))
    assert_poly_almost_equal(P.cast(p0.integ(2)), P([0, 0, 1, 1, 1]))
def test_deriv(Poly):
    """deriv() inverts integ() for custom and default domains.  Integration
    itself is assumed to be verified elsewhere."""
    dom = Poly.domain + random((2,))*.25
    win = Poly.window + random((2,))*.25
    base = Poly([1, 2, 3], domain=dom, window=win)
    dbl = base.integ(2, k=[1, 2])
    sgl = base.integ(1, k=[1])
    assert_almost_equal(dbl.deriv(1).coef, sgl.coef)
    assert_almost_equal(dbl.deriv(2).coef, base.coef)
    # default domain and window
    base = Poly([1, 2, 3])
    dbl = base.integ(2, k=[1, 2])
    sgl = base.integ(1, k=[1])
    assert_almost_equal(dbl.deriv(1).coef, sgl.coef)
    assert_almost_equal(dbl.deriv(2).coef, base.coef)
def test_linspace(Poly):
    """linspace() samples the polynomial over the default or a given domain."""
    dom = Poly.domain + random((2,))*.25
    win = Poly.window + random((2,))*.25
    p = Poly([1, 2, 3], domain=dom, window=win)
    # Default domain: samples come from the instance's own domain.
    xt = np.linspace(dom[0], dom[1], 20)
    xr, yr = p.linspace(20)
    assert_almost_equal(xr, xt)
    assert_almost_equal(yr, p(xt))
    # Explicit domain overrides the instance's domain.
    xt = np.linspace(0, 2, 20)
    xr, yr = p.linspace(20, domain=[0, 2])
    assert_almost_equal(xr, xt)
    assert_almost_equal(yr, p(xt))
def test_pow(Poly):
    """** matches repeated multiplication; non-int/negative exponents raise."""
    dom = Poly.domain + random((2,))*.25
    win = Poly.window + random((2,))*.25
    expected = Poly([1], domain=dom, window=win)
    base = Poly([1, 2, 3], domain=dom, window=win)
    for i in range(5):
        assert_poly_almost_equal(base**i, expected)
        expected = expected * base
    # default domain and window
    expected = Poly([1])
    base = Poly([1, 2, 3])
    for i in range(5):
        assert_poly_almost_equal(base**i, expected)
        expected = expected * base
    # check error for invalid powers
    assert_raises(ValueError, op.pow, expected, 1.5)
    assert_raises(ValueError, op.pow, expected, -1)
def test_call(Poly):
P = Polynomial
d = Poly.domain
x = np.linspace(d[0], d[1], 11)
# Check defaults
p = Poly.cast(P([1, 2, 3]))
tgt = 1 + x*(2 + 3*x)
res = p(x)
assert_almost_equal(res, tgt)
def test_cutdeg(Poly):
p = Poly([1, 2, 3])
assert_raises(ValueError, p.cutdeg, .5)
assert_raises(ValueError, p.cutdeg, -1)
assert_equal(len(p.cutdeg(3)), 3)
assert_equal(len(p.cutdeg(2)), 3)
assert_equal(len(p.cutdeg(1)), 2)
assert_equal(len(p.cutdeg(0)), 1)
def test_truncate(Poly):
p = Poly([1, 2, 3])
assert_raises(ValueError, p.truncate, .5)
assert_raises(ValueError, p.truncate, 0)
assert_equal(len(p.truncate(4)), 3)
assert_equal(len(p.truncate(3)), 3)
assert_equal(len(p.truncate(2)), 2)
assert_equal(len(p.truncate(1)), 1)
def test_trim(Poly):
c = [1, 1e-6, 1e-12, 0]
p = Poly(c)
assert_equal(p.trim().coef, c[:3])
assert_equal(p.trim(1e-10).coef, c[:2])
assert_equal(p.trim(1e-5).coef, c[:1])
def test_mapparms(Poly):
# check with defaults. Should be identity.
d = Poly.domain
w = Poly.window
p = Poly([1], domain=d, window=w)
assert_almost_equal([0, 1], p.mapparms())
#
w = 2*d + 1
p = Poly([1], domain=d, window=w)
assert_almost_equal([1, 2], p.mapparms())
def test_ufunc_override(Poly):
p = Poly([1, 2, 3])
x = np.ones(3)
assert_raises(TypeError, np.add, p, x)
assert_raises(TypeError, np.add, x, p)
class TestLatexRepr(object):
    """Test the latex repr used by ipython """

    def as_latex(self, obj):
        """Return ``obj._repr_latex_()`` with scalar formatting stubbed to
        plain ``str`` so expected strings stay short."""
        # right now we ignore the formatting of scalars in our tests, since
        # it makes them too verbose. Ideally, the formatting of scalars will
        # be fixed such that tests below continue to pass
        obj._repr_latex_scalar = lambda x: str(x)
        try:
            return obj._repr_latex_()
        finally:
            del obj._repr_latex_scalar

    def test_simple_polynomial(self):
        """Power-basis repr for identity, translated, scaled and affine maps."""
        # default input
        p = Polynomial([1, 2, 3])
        assert_equal(self.as_latex(p),
            r'$x \mapsto 1.0 + 2.0\,x + 3.0\,x^{2}$')

        # translated input
        p = Polynomial([1, 2, 3], domain=[-2, 0])
        assert_equal(self.as_latex(p),
            r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$')

        # scaled input
        p = Polynomial([1, 2, 3], domain=[-0.5, 0.5])
        assert_equal(self.as_latex(p),
            r'$x \mapsto 1.0 + 2.0\,\left(2.0x\right) + 3.0\,\left(2.0x\right)^{2}$')

        # affine input
        p = Polynomial([1, 2, 3], domain=[-1, 0])
        assert_equal(self.as_latex(p),
            r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$')

    def test_basis_func(self):
        """Basis-function repr (Chebyshev T_n), with and without an affine map."""
        p = Chebyshev([1, 2, 3])
        assert_equal(self.as_latex(p),
            r'$x \mapsto 1.0\,{T}_{0}(x) + 2.0\,{T}_{1}(x) + 3.0\,{T}_{2}(x)$')
        # affine input - check no surplus parens are added
        p = Chebyshev([1, 2, 3], domain=[-1, 0])
        assert_equal(self.as_latex(p),
            r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$')

    def test_multichar_basis_func(self):
        """Multi-character basis symbols (HermiteE's He) render unbroken."""
        p = HermiteE([1, 2, 3])
        assert_equal(self.as_latex(p),
            r'$x \mapsto 1.0\,{He}_{0}(x) + 2.0\,{He}_{1}(x) + 3.0\,{He}_{2}(x)$')
#
# Test class method that only exists for some classes
#
class TestInterpolate(object):
def f(self, x):
return x * (x - 1) * (x - 2)
def test_raises(self):
assert_raises(ValueError, Chebyshev.interpolate, self.f, -1)
assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.)
def test_dimensions(self):
for deg in range(1, 5):
assert_(Chebyshev.interpolate(self.f, deg).degree() == deg)
def test_approximation(self):
def powx(x, p):
return x**p
x = np.linspace(0, 2, 10)
for deg in range(0, 10):
for t in range(0, deg + 1):
p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,))
assert_almost_equal(p(x), powx(x, t), decimal=12)
| kubaszostak/gdal-dragndrop | osgeo/apps/Python27/Lib/site-packages/numpy/polynomial/tests/test_classes.py | Python | mit | 20,056 |
"""Prepare rendering of popular smart grid actions widget"""
from apps.widgets.smartgrid import smartgrid
def supply(request, page_name):
    """Supply view_objects content, which are the popular actions from the smart grid game."""
    _ = request  # the request object is not used by this widget
    # The status page lists every result; all other pages show the top 5.
    num_results = None if page_name == "status" else 5
    # Map each task type to a list of its most popular smart grid tasks.
    popular_tasks = {
        "Activity": smartgrid.get_popular_actions("activity", "approved", num_results),
        "Commitment": smartgrid.get_popular_actions("commitment", "approved", num_results),
        "Event": smartgrid.get_popular_actions("event", "pending", num_results),
        "Excursion": smartgrid.get_popular_actions("excursion", "pending", num_results),
    }
    return {
        "popular_tasks": popular_tasks,
        "no_carousel": page_name == "status",
        "range": len(popular_tasks),
    }
| yongwen/makahiki | makahiki/apps/widgets/popular_tasks/views.py | Python | mit | 997 |
#!/usr/bin/env python
import string
import logging
from .HTMLElement import HTMLElement
log = logging.getLogger("Thug")
class TAnimateColor(HTMLElement):
    """Emulation of Internet Explorer's t:ANIMATECOLOR element.

    The ``values`` attribute is instrumented: every assignment is scanned
    for shellcode, and non-printable content is logged as an exploitation
    attempt of CVE-2012-4792 (the message passed to the logger below).
    """
    def __init__(self, doc, tag):
        self.doc = doc
        self.tag = tag
        self._values = ""

    def get_values(self):
        # Plain accessor backing the ``values`` property.
        return self._values

    def set_values(self, values):
        # Non-printable characters in ``values`` trigger an exploit-event
        # log entry for the IE CButton use-after-free (CVE-2012-4792).
        if all(c in string.printable for c in values) is False:
            log.ThugLogging.log_exploit_event(self.doc.window.url,
                                              "Microsoft Internet Explorer",
                                              "Microsoft Internet Explorer CButton Object Use-After-Free Vulnerability (CVE-2012-4792)",
                                              cve = 'CVE-2012-4792',
                                              forward = True)

        # Every assignment (printable or not) is scanned for shellcode.
        log.DFT.check_shellcode(values)

        self._values = values

    # Expose get/set through an attribute-style ``values`` property.
    values = property(get_values, set_values)
| tweemeterjop/thug | thug/DOM/W3C/HTML/TAnimateColor.py | Python | gpl-2.0 | 948 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# progressbar - Text progress bar library for Python.
# Copyright (c) 2005 Nilton Volpato
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''Default ProgressBar widgets'''
from __future__ import division, absolute_import, with_statement
import datetime
import math
import abc
class AbstractWidget(object):
    """Abstract base for all progress bar widgets."""
    # Python 2 syntax for declaring the class abstract.
    __metaclass__ = abc.ABCMeta
def format_updatable(updatable, pbar):
    """Render ``updatable``: delegate to its ``update(pbar)`` hook when it
    has one, otherwise return it unchanged (e.g. a plain string)."""
    if not hasattr(updatable, 'update'):
        return updatable
    return updatable.update(pbar)
class Widget(AbstractWidget):
    '''The base class for all widgets

    The ProgressBar will call the widget's update method when the widget should
    be updated. The widget's size may change between calls, but the widget may
    display incorrectly if the size changes drastically and repeatedly.

    The boolean TIME_SENSITIVE informs the ProgressBar that it should be
    updated more often because it is time sensitive.
    '''

    # Time-dependent subclasses set this True so the bar refreshes them
    # even when the progress value has not changed.
    TIME_SENSITIVE = False

    @abc.abstractmethod
    def update(self, pbar):
        '''Updates the widget.

        pbar - a reference to the calling ProgressBar
        '''
class WidgetHFill(Widget):
    '''The base class for all variable width widgets.

    This widget is much like the \\hfill command in TeX, it will expand to
    fill the line. You can use more than one in the same line, and they will
    all have the same width, and together will fill the line.
    '''

    # Unlike Widget.update, update here also receives the width to fill.
    @abc.abstractmethod
    def update(self, pbar, width):
        '''Updates the widget providing the total width the widget must fill.

        pbar - a reference to the calling ProgressBar
        width - The total width the widget must fill
        '''
class Timer(Widget):
    """Widget which displays the elapsed seconds."""

    TIME_SENSITIVE = True

    def __init__(self, format='Elapsed Time: %s'):
        self.format = format

    @staticmethod
    def format_time(seconds):
        """Format ``seconds`` as an "HH:MM:SS" string."""
        return str(datetime.timedelta(seconds=int(seconds)))

    def update(self, pbar):
        """Render the elapsed time of ``pbar`` using the configured format."""
        elapsed = self.format_time(pbar.seconds_elapsed)
        return self.format % elapsed
class ETA(Timer):
    """Widget which attempts to estimate the time of arrival."""

    TIME_SENSITIVE = True

    def _eta(self, pbar):
        """Linearly extrapolate the remaining time from progress so far."""
        done = pbar.seconds_elapsed
        return done * pbar.maxval / pbar.currval - done

    def update(self, pbar):
        """Show a placeholder before any progress, the total time when
        finished, and the extrapolated ETA otherwise."""
        if pbar.currval == 0:
            return 'ETA: --:--:--'
        if pbar.finished:
            return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
        return 'ETA: %s' % self.format_time(self._eta(pbar))
class AdaptiveETA(ETA):
    '''Widget which attempts to estimate the time of arrival.

    Uses a sampled average of the speed based on the 10 last updates.
    Very convenient for resuming the progress halfway.
    '''

    TIME_SENSITIVE = True

    def __init__(self, num_samples=10, **kwargs):
        ETA.__init__(self, **kwargs)
        self.num_samples = num_samples    # size of the sliding sample window
        self.samples = []                 # elapsed-time samples (seconds)
        self.sample_vals = []             # currval at the time of each sample
        self.last_sample_val = None       # currval when the last sample was taken

    def _eta(self, pbar):
        # Estimate remaining time from the recent-sample window; falls back
        # to the plain linear ETA until at least two samples exist.
        samples = self.samples
        sample_vals = self.sample_vals
        if pbar.currval != self.last_sample_val:
            # Update the last sample counter, we only update if currval has
            # changed
            self.last_sample_val = pbar.currval

            # Add a sample but limit the size to `num_samples`
            samples.append(pbar.seconds_elapsed)
            sample_vals.append(pbar.currval)
            if len(samples) > self.num_samples:
                samples.pop(0)
                sample_vals.pop(0)

        if len(samples) <= 1:
            # No samples so just return the normal ETA calculation
            return ETA._eta(self, pbar)

        todo = pbar.maxval - pbar.currval
        items = sample_vals[-1] - sample_vals[0]
        duration = float(samples[-1] - samples[0])
        per_item = duration / items   # average seconds per item in the window
        return todo * per_item
class FileTransferSpeed(Widget):
    """Widget for showing the transfer speed (useful for file transfers)."""

    format = '%6.2f %s%s/s'
    prefixes = ' kMGTPEZY'

    def __init__(self, unit='B'):
        self.unit = unit

    def _speed(self, pbar):
        """Return (scaled_speed, si_power) for the average transfer rate."""
        rate = pbar.currval / pbar.seconds_elapsed
        si_power = int(math.log(rate, 1000))
        return rate / 1000. ** si_power, si_power

    def update(self, pbar):
        """Render the current speed with an SI prefix."""
        # Guard against dividing by (almost) zero before any progress exists.
        negligible = pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6
        if negligible:
            scaled = power = 0
        else:
            scaled, power = self._speed(pbar)
        return self.format % (scaled, self.prefixes[power], self.unit)
class AdaptiveTransferSpeed(FileTransferSpeed):
    'Widget for showing the transfer speed, based on the last X samples'

    def __init__(self, num_samples=10):
        FileTransferSpeed.__init__(self)
        self.num_samples = num_samples    # size of the sliding sample window
        self.samples = []                 # elapsed-time samples (seconds)
        self.sample_vals = []             # currval at the time of each sample
        self.last_sample_val = None       # currval when the last sample was taken

    def _speed(self, pbar):
        # Compute speed over the recent-sample window; falls back to the
        # whole-run average until at least two samples exist.
        samples = self.samples
        sample_vals = self.sample_vals
        if pbar.currval != self.last_sample_val:
            # Update the last sample counter, we only update if currval has
            # changed
            self.last_sample_val = pbar.currval

            # Add a sample but limit the size to `num_samples`
            samples.append(pbar.seconds_elapsed)
            sample_vals.append(pbar.currval)

            if len(samples) > self.num_samples:
                samples.pop(0)
                sample_vals.pop(0)

        if len(samples) <= 1:
            # No samples so just return the parent's calculation
            return FileTransferSpeed._speed(self, pbar)

        items = sample_vals[-1] - sample_vals[0]
        duration = float(samples[-1] - samples[0])
        speed = items / duration
        power = int(math.log(speed, 1000))
        scaled = speed / 1000. ** power
        return scaled, power
class AnimatedMarker(Widget):
    """An animated marker for the progress bar which defaults to appear as
    if it were rotating."""

    def __init__(self, markers='|/-\\'):
        self.markers = markers
        self.curmark = -1

    def update(self, pbar):
        """Advance to the next marker, or reset to the first once finished."""
        if pbar.finished:
            return self.markers[0]

        self.curmark = (self.curmark + 1) % len(self.markers)
        return self.markers[self.curmark]


# Alias for backwards compatibility
RotatingMarker = AnimatedMarker
class Counter(Widget):
    """Displays the current count."""

    def __init__(self, format='%d'):
        self.format = format

    def update(self, pbar):
        """Render ``pbar.currval`` using the configured format string."""
        return self.format % pbar.currval
class Percentage(Widget):
    """Displays the current percentage as a number with a percent sign."""

    def update(self, pbar):
        """Render e.g. ' 42%'; width 3 keeps the column aligned."""
        return '%3d%%' % pbar.percentage()
class FormatLabel(Timer):
    """Displays a formatted label.

    The ``format`` string may reference any key of ``mapping``, e.g.
    ``FormatLabel('%(value)d of %(max)d')``.
    """

    # template key -> (ProgressBar attribute name, optional transform)
    mapping = {
        'elapsed': ('seconds_elapsed', Timer.format_time),
        'finished': ('finished', None),
        'last_update': ('last_update_time', None),
        'max': ('maxval', None),
        'seconds': ('seconds_elapsed', None),
        'start': ('start_time', None),
        'value': ('currval', None)
    }

    def __init__(self, format):
        self.format = format

    def update(self, pbar):
        """Render the label, skipping attributes the bar cannot supply."""
        context = {}
        for name, (key, transform) in self.mapping.items():
            try:
                value = getattr(pbar, key)

                if transform is None:
                    context[name] = value
                else:
                    context[name] = transform(value)
            except Exception:  # pragma: no cover
                # Was a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit; narrowed to Exception.
                pass

        return self.format % context
class SimpleProgress(Widget):
    """Returns progress as a count of the total (e.g.: "5 of 47")."""

    def __init__(self, sep=' of '):
        self.sep = sep

    def update(self, pbar):
        """Render "<current><separator><total>"."""
        return '%d%s%d' % (pbar.currval, self.sep, pbar.maxval)
class Bar(WidgetHFill):
    'A progress bar which stretches to fill the line.'

    def __init__(self, marker='#', left='|', right='|', fill=' ',
                 fill_left=True):
        '''Creates a customizable progress bar.

        marker - string or updatable object to use as a marker
        left - string or updatable object to use as a left border
        right - string or updatable object to use as a right border
        fill - character to use for the empty part of the progress bar
        fill_left - whether to fill from the left or the right
        '''
        self.marker = marker
        self.left = left
        self.right = right
        self.fill = fill
        self.fill_left = fill_left

    def update(self, pbar, width):
        'Updates the progress bar and its subcomponents'
        # Resolve each component, which may be a string or an updatable.
        left, marked, right = (format_updatable(i, pbar) for i in
                               (self.left, self.marker, self.right))

        # The marker area is whatever remains after the two borders.
        width -= len(left) + len(right)
        # Marked must *always* have length of 1
        if pbar.maxval:
            # Repeat the marker in proportion to the fraction completed.
            marked *= int(pbar.currval / pbar.maxval * width)
        else: # pragma: no cover
            marked = ''

        # Pad the marker run with the fill character on the chosen side.
        if self.fill_left:
            return '%s%s%s' % (left, marked.ljust(width, self.fill), right)
        else:
            return '%s%s%s' % (left, marked.rjust(width, self.fill), right)
class ReverseBar(Bar):
    'A bar which has a marker which bounces from side to side.'

    def __init__(self, marker='#', left='|', right='|', fill=' ',
                 fill_left=False):
        '''Creates a customizable progress bar.

        marker - string or updatable object to use as a marker
        left - string or updatable object to use as a left border
        right - string or updatable object to use as a right border
        fill - character to use for the empty part of the progress bar
        fill_left - whether to fill from the left or the right
        '''
        # The previous body duplicated Bar.__init__ line for line; the only
        # real difference is fill_left defaulting to False (fill from the
        # right), so delegate to the parent constructor instead.
        Bar.__init__(self, marker=marker, left=left, right=right, fill=fill,
                     fill_left=fill_left)
class BouncingBar(Bar):
    """A bar whose marker bounces from side to side while running and
    fills the whole line once the bar is finished."""

    def update(self, pbar, width):
        'Updates the progress bar and its subcomponents'
        left, marker, right = (format_updatable(i, pbar) for i in
                               (self.left, self.marker, self.right))

        # The bouncing area is whatever remains after the two borders.
        width -= len(left) + len(right)

        if pbar.finished:
            # Completely fill the bar with the marker when done.
            return '%s%s%s' % (left, width * marker, right)

        # Position oscillates over 2*width-1 steps: forward, then back.
        position = int(pbar.currval % (width * 2 - 1))
        if position > width:
            position = width * 2 - position
        lpad = self.fill * (position - 1)
        rpad = self.fill * (width - len(marker) - len(lpad))

        # Swap if we want to bounce the other way
        if not self.fill_left:
            rpad, lpad = lpad, rpad

        return '%s%s%s%s%s' % (left, lpad, marker, rpad, right)
| cms-btv-pog/rootpy | rootpy/extern/progressbar/widgets.py | Python | gpl-3.0 | 11,900 |
from safe.engine.interpolation import make_circular_polygon
from safe.storage.core import read_layer
# Load the Marapi volcano hazard point layer and show its geometry.
H = read_layer('/data_area/InaSAFE/public_data/hazard/Marapi.shp')
print H.get_geometry()

# Generate evacuation circle (as a polygon):
radius = 3000  # evacuation zone radius in metres
# NOTE(review): assumes the first geometry is the volcano location — confirm.
center = H.get_geometry()[0]
Z = make_circular_polygon(center, radius)
# Radius is embedded in the output filename, e.g. Marapi_evac_zone_3000m.shp.
Z.write_to_file('Marapi_evac_zone_%im.shp' % radius)
| fivejjs/inasafe | scripts/generate_volcano_evac_zone.py | Python | gpl-3.0 | 376 |
from yowsup.stacks import YowStack
from .layer import EchoLayer
from yowsup.layers import YowLayerEvent
from yowsup.layers.auth import YowCryptLayer, YowAuthenticationProtocolLayer, AuthError
from yowsup.layers.coder import YowCoderLayer
from yowsup.layers.network import YowNetworkLayer
from yowsup.layers.protocol_messages import YowMessagesProtocolLayer
from yowsup.layers.protocol_media import YowMediaProtocolLayer
from yowsup.layers.stanzaregulator import YowStanzaRegulator
from yowsup.layers.protocol_receipts import YowReceiptProtocolLayer
from yowsup.layers.protocol_acks import YowAckProtocolLayer
from yowsup.layers.logger import YowLoggerLayer
from yowsup.layers.protocol_iq import YowIqProtocolLayer
from yowsup.layers.protocol_calls import YowCallsProtocolLayer
from yowsup.layers import YowParallelLayer
class YowsupEchoStack(object):
    """Wire up the yowsup layer stack for the echo demo client."""

    def __init__(self, credentials, encryptionEnabled = False):
        """Build the layer stack.

        credentials       -- credentials tuple handed to the stack
        encryptionEnabled -- when True, insert the axolotl (end-to-end
                             encryption) layer below the protocol layers
        """
        # The previous version kept two near-identical layer tuples that
        # differed only by YowAxolotlLayer; build the list once instead.
        layers = [
            EchoLayer,
            YowParallelLayer([YowAuthenticationProtocolLayer, YowMessagesProtocolLayer, YowReceiptProtocolLayer, YowAckProtocolLayer, YowMediaProtocolLayer, YowIqProtocolLayer, YowCallsProtocolLayer]),
        ]
        if encryptionEnabled:
            # Imported lazily so the axolotl dependency is only required
            # when encryption is actually requested.
            from yowsup.layers.axolotl import YowAxolotlLayer
            layers.append(YowAxolotlLayer)
        # Transport-side layers, outermost (network) last.
        layers += [
            YowLoggerLayer,
            YowCoderLayer,
            YowCryptLayer,
            YowStanzaRegulator,
            YowNetworkLayer,
        ]
        self.stack = YowStack(tuple(layers))
        self.stack.setCredentials(credentials)

    def start(self):
        """Connect and run the event loop until it exits or auth fails."""
        self.stack.broadcastEvent(YowLayerEvent(YowNetworkLayer.EVENT_STATE_CONNECT))
        try:
            self.stack.loop()
        except AuthError as e:
            print("Authentication Error: %s" % e.message)
| bassijtsma/chatbot | yowsup/demos/echoclient/stack.py | Python | gpl-3.0 | 2,467 |
import unittest, random, sys, time, os
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i
import codecs, unicodedata
print "create some specific small datasets with exp row/col combinations"
print "This injects the full set of single byte UTF8 in a col, except for some special h2o chars"

# All single-byte code points 0x00-0xff are candidates for injection.
toDoList = range(0x00, 0x100)

def removeIfThere(d):
    # Drop code point ``d`` from the candidate list if it is present.
    if d in toDoList:
        toDoList.remove(d)

H2O_COL_SEPARATOR = 0x2c # comma
# H2O_COL_SEPARATOR = 0x1 # hive separator
# removeIfThere(0x1) # hive separator okay if we force comma below

# Exclude bytes that would break row/column structure or are known issues.
removeIfThere(0x0) # nul. known issue
removeIfThere(0xa) # LF. causes EOL
removeIfThere(0xd) # CR. causes EOL
removeIfThere(0x22) # double quote. known issue
removeIfThere(0x2c) # comma. don't mess up my expected col count

# Build one (dataset, rows, cols, naCnts, types, codepoint) tuple per
# remaining byte; the byte is embedded inside column 4 of every row.
tryList = []
for unicodeNum in toDoList:
    unicodeSymbol = unichr(unicodeNum)
    tryList.append(
        ((
        'a,b,c,d' + unicodeSymbol + 's,n\n'
        'a,b,c,d' + unicodeSymbol + 's,n\n'
        'a,b,c,d' + unicodeSymbol + 's,n\n'
        'a,b,c,d' + unicodeSymbol + 's,n\n'
        'a,b,c,d' + unicodeSymbol + 's,n\n'
        'a,b,c,d' + unicodeSymbol + 's,n\n'
        'a,b,c,d' + unicodeSymbol + 's,n\n'
        'a,b,c,d' + unicodeSymbol + 's,n\n'
        'a,b,c,d' + unicodeSymbol + 's,n\n'
        'a,b,c,d' + unicodeSymbol + 's,n\n'
        ), 10, 5, [0,0,0,0,0], ['Enum', 'Enum', 'Enum', 'Enum', 'Enum'], unicodeNum)
    )
def write_syn_dataset(csvPathname, dataset):
    # Write ``dataset`` (a unicode string) to ``csvPathname`` encoded as
    # UTF-8; the prints show both the encoded and the raw representation.
    dsf = codecs.open(csvPathname, encoding='utf-8', mode='w+')
    encoded = dataset.encode('utf-8')
    print "utf8:" , repr(encoded), type(encoded)
    print "str or utf8:" , repr(dataset), type(dataset)
    dsf.write(dataset)
    dsf.close()
class Basic(unittest.TestCase):
    # Parses each generated dataset (one per injected byte) and checks the
    # resulting row/column counts, NA counts and column types.

    def tearDown(self):
        # Fail the test if h2o logged errors during the run.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        # Build a one-node h2o cloud with a fixed random seed.
        global SEED
        SEED = h2o.setup_random_seed()
        h2o.init(java_heap_GB=1)

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_parse_specific_case4(self):
        # For every candidate byte, write the dataset, parse it with a
        # forced comma separator, and verify the parse summary.
        SYNDATASETS_DIR = h2o.make_syn_dir()
        hex_key = "a.hex"

        for (dataset, expNumRows, expNumCols, expNaCnt, expType, unicodeNum) in tryList:
            csvFilename = 'specific_' + str(expNumRows) + str(expNumCols) + '.csv'
            csvPathname = SYNDATASETS_DIR + '/' + csvFilename
            write_syn_dataset(csvPathname, dataset)

            parseResult = h2i.import_parse(path=csvPathname, schema='put', header=0,
                # force column separator
                hex_key=hex_key, timeoutSecs=10, doSummary=False, separator=H2O_COL_SEPARATOR)
            inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=60)
            print "Parsed with special unichr(%s):" % unicodeNum
            # print "inspect:", h2o.dump_json(inspect)

            numRows = inspect['numRows']
            self.assertEqual(numRows, expNumRows, msg='Using unichr(0x%x) Wrong numRows: %s Expected: %s' % \
                (unicodeNum, numRows, expNumRows))
            numCols = inspect['numCols']
            self.assertEqual(numCols, expNumCols, msg='Using unichr(0x%x) Wrong numCols: %s Expected: %s' % \
                (unicodeNum, numCols, expNumCols))

            # this is required for the test setup
            assert(len(expNaCnt)>=expNumCols)
            assert(len(expType)>=expNumCols)

            for k in range(expNumCols):
                naCnt = inspect['cols'][k]['naCnt']
                self.assertEqual(expNaCnt[k], naCnt, msg='Using unichr(0x%x) col: %s naCnt: %d should be: %s' % \
                    (unicodeNum, k, naCnt, expNaCnt[k]))
                stype = inspect['cols'][k]['type']
                self.assertEqual(expType[k], stype, msg='Using unichr(0x%x) col: %s type: %s should be: %s' % \
                    (unicodeNum, k, stype, expType[k]))
| rowhit/h2o-2 | py/testdir_single_jvm/test_parse_specific_case4.py | Python | apache-2.0 | 3,961 |
# Natural Language Toolkit: Stemmer Interface
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
class StemmerI(object):
    """
    A processing interface for stripping morphological affixes from
    words, a process known as X{stemming}.
    """
    def stem(self, token):
        """
        Remove affixes from the token and return the resulting stem.

        @param token: The token that should be stemmed.
        @type token: L{string}
        """
        raise NotImplementedError()
| hectormartinez/rougexstem | taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/stem/api.py | Python | apache-2.0 | 715 |
# pylint: disable=E1101
from datetime import datetime, timedelta
from functools import partial
from pandas.compat import range, lrange, zip, product
import numpy as np
from pandas import (Series, TimeSeries, DataFrame, Panel, Index,
isnull, notnull, Timestamp)
from pandas.core.groupby import DataError
from pandas.tseries.index import date_range
from pandas.tseries.tdi import timedelta_range
from pandas.tseries.offsets import Minute, BDay
from pandas.tseries.period import period_range, PeriodIndex, Period
from pandas.tseries.resample import DatetimeIndex, TimeGrouper
from pandas.tseries.frequencies import MONTHS, DAYS
import pandas.tseries.offsets as offsets
import pandas as pd
import nose
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal)
import pandas.util.testing as tm
bday = BDay()
class TestResample(tm.TestCase):
_multiprocess_can_split_ = True
    def setUp(self):
        """Build a minutely random series spanning 2005-01-01..2005-01-10."""
        dti = DatetimeIndex(start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10), freq='Min')

        self.series = Series(np.random.rand(len(dti)), dti)
    def test_custom_grouper(self):
        """Group a minutely series with TimeGrouper(5min) and check the
        cython aggregations, group count, and summed values."""
        dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10))

        s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')

        b = TimeGrouper(Minute(5))
        g = s.groupby(b)

        # check all cython functions work
        funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
        for f in funcs:
            g._cython_agg_general(f)

        b = TimeGrouper(Minute(5), closed='right', label='right')
        g = s.groupby(b)
        # check all cython functions work
        funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
        for f in funcs:
            g._cython_agg_general(f)

        self.assertEqual(g.ngroups, 2593)
        self.assertTrue(notnull(g.mean()).all())

        # construct expected val
        arr = [1] + [5] * 2592
        idx = dti[0:-1:5]
        idx = idx.append(dti[-1:])
        expect = Series(arr, index=idx)

        # GH2763 - return in put dtype if we can
        result = g.agg(np.sum)
        assert_series_equal(result, expect)

        df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype='float64')
        r = df.groupby(b).agg(np.sum)

        self.assertEqual(len(r.columns), 10)
        self.assertEqual(len(r.index), 2593)
    def test_resample_basic(self):
        """5-minute mean resampling with both closed sides, and 'last'."""
        rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
                         name='index')
        s = Series(np.random.randn(14), index=rng)
        result = s.resample('5min', how='mean', closed='right', label='right')
        exp_idx = date_range('1/1/2000', periods=4, freq='5min', name='index')
        expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
                          index=exp_idx)
        assert_series_equal(result, expected)
        self.assertEqual(result.index.name, 'index')

        result = s.resample('5min', how='mean', closed='left', label='right')
        exp_idx = date_range('1/1/2000 00:05', periods=3, freq='5min', name='index')
        expected = Series([s[:5].mean(), s[5:10].mean(), s[10:].mean()], index=exp_idx)
        assert_series_equal(result, expected)

        s = self.series
        result = s.resample('5Min', how='last')
        grouper = TimeGrouper(Minute(5), closed='left', label='left')
        expect = s.groupby(grouper).agg(lambda x: x[-1])
        assert_series_equal(result, expect)
    def test_resample_how(self):
        """Every named aggregation must match a manual groupby over the
        same 5-minute bucketing (ohlc compared as a frame)."""
        rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00',
                         freq='min', name='index')
        s = Series(np.random.randn(14), index=rng)
        # Bucket labels matching closed='right', label='right' 5-min bins.
        grouplist = np.ones_like(s)
        grouplist[0] = 0
        grouplist[1:6] = 1
        grouplist[6:11] = 2
        grouplist[11:] = 3
        args = ['sum', 'mean', 'std', 'sem', 'max', 'min',
                'median', 'first', 'last', 'ohlc']

        def _ohlc(group):
            # Reference open/high/low/close for an all-NaN-safe group.
            if isnull(group).all():
                return np.repeat(np.nan, 4)
            return [group[0], group.max(), group.min(), group[-1]]
        inds = date_range('1/1/2000', periods=4, freq='5min', name='index')

        for arg in args:
            if arg == 'ohlc':
                func = _ohlc
            else:
                func = arg
            try:
                result = s.resample('5min', how=arg,
                                    closed='right', label='right')

                expected = s.groupby(grouplist).agg(func)
                self.assertEqual(result.index.name, 'index')
                if arg == 'ohlc':
                    expected = DataFrame(expected.values.tolist())
                    expected.columns = ['open', 'high', 'low', 'close']
                    expected.index = Index(inds, name='index')
                    assert_frame_equal(result, expected)
                else:
                    expected.index = inds
                    assert_series_equal(result, expected)
            except BaseException as exc:
                # Tag the failing aggregation name onto the exception.
                exc.args += ('how=%s' % arg,)
                raise
    def test_resample_how_callables(self):
        """GH 7929: plain functions, lambdas, partials and callable objects
        must all be accepted as ``how`` and give the same result."""
        data = np.arange(5, dtype=np.int64)
        ind = pd.DatetimeIndex(start='2014-01-01', periods=len(data), freq='d')
        df = pd.DataFrame({"A": data, "B": data}, index=ind)

        def fn(x, a=1):
            return str(type(x))

        class fn_class:

            def __call__(self, x):
                return str(type(x))

        df_standard = df.resample("M", how=fn)
        df_lambda = df.resample("M", how=lambda x: str(type(x)))
        df_partial = df.resample("M", how=partial(fn))
        df_partial2 = df.resample("M", how=partial(fn, a=2))
        df_class = df.resample("M", how=fn_class())

        assert_frame_equal(df_standard, df_lambda)
        assert_frame_equal(df_standard, df_partial)
        assert_frame_equal(df_standard, df_partial2)
        assert_frame_equal(df_standard, df_class)
    def test_resample_with_timedeltas(self):
        """Summing a TimedeltaIndex-ed frame in 30-minute buckets."""
        expected = DataFrame({'A' : np.arange(1480)})
        expected = expected.groupby(expected.index // 30).sum()
        expected.index = pd.timedelta_range('0 days',freq='30T',periods=50)

        df = DataFrame({'A' : np.arange(1480)},index=pd.to_timedelta(np.arange(1480),unit='T'))
        result = df.resample('30T',how='sum')

        assert_frame_equal(result, expected)
    def test_resample_rounding(self):
        """GH 8371: bucket boundaries must be exact for odd frequencies
        (6s/7s/11s/13s/17s) over sub-second timestamps."""
        data = """date,time,value
11-08-2014,00:00:01.093,1
11-08-2014,00:00:02.159,1
11-08-2014,00:00:02.667,1
11-08-2014,00:00:03.175,1
11-08-2014,00:00:07.058,1
11-08-2014,00:00:07.362,1
11-08-2014,00:00:08.324,1
11-08-2014,00:00:08.830,1
11-08-2014,00:00:08.982,1
11-08-2014,00:00:09.815,1
11-08-2014,00:00:10.540,1
11-08-2014,00:00:11.061,1
11-08-2014,00:00:11.617,1
11-08-2014,00:00:13.607,1
11-08-2014,00:00:14.535,1
11-08-2014,00:00:15.525,1
11-08-2014,00:00:17.960,1
11-08-2014,00:00:20.674,1
11-08-2014,00:00:21.191,1"""

        from pandas.compat import StringIO
        df = pd.read_csv(StringIO(data), parse_dates={'timestamp': ['date', 'time']}, index_col='timestamp')
        df.index.name = None
        result = df.resample('6s', how='sum')
        expected = DataFrame({'value' : [4,9,4,2]},index=date_range('2014-11-08',freq='6s',periods=4))
        assert_frame_equal(result,expected)

        result = df.resample('7s', how='sum')
        expected = DataFrame({'value' : [4,10,4,1]},index=date_range('2014-11-08',freq='7s',periods=4))
        assert_frame_equal(result,expected)

        result = df.resample('11s', how='sum')
        expected = DataFrame({'value' : [11,8]},index=date_range('2014-11-08',freq='11s',periods=2))
        assert_frame_equal(result,expected)

        result = df.resample('13s', how='sum')
        expected = DataFrame({'value' : [13,6]},index=date_range('2014-11-08',freq='13s',periods=2))
        assert_frame_equal(result,expected)

        result = df.resample('17s', how='sum')
        expected = DataFrame({'value' : [16,3]},index=date_range('2014-11-08',freq='17s',periods=2))
        assert_frame_equal(result,expected)
    def test_resample_basic_from_daily(self):
        """Downsample daily data to each weekly anchor and to business days,
        checking the anchored weekday and the picked values."""
        # from daily
        dti = DatetimeIndex(
            start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
            freq='D', name='index')

        s = Series(np.random.rand(len(dti)), dti)

        # to weekly
        result = s.resample('w-sun', how='last')

        self.assertEqual(len(result), 3)
        self.assertTrue((result.index.dayofweek == [6, 6, 6]).all())
        self.assertEqual(result.iloc[0], s['1/2/2005'])
        self.assertEqual(result.iloc[1], s['1/9/2005'])
        self.assertEqual(result.iloc[2], s.iloc[-1])

        result = s.resample('W-MON', how='last')
        self.assertEqual(len(result), 2)
        self.assertTrue((result.index.dayofweek == [0, 0]).all())
        self.assertEqual(result.iloc[0], s['1/3/2005'])
        self.assertEqual(result.iloc[1], s['1/10/2005'])

        result = s.resample('W-TUE', how='last')
        self.assertEqual(len(result), 2)
        self.assertTrue((result.index.dayofweek == [1, 1]).all())
        self.assertEqual(result.iloc[0], s['1/4/2005'])
        self.assertEqual(result.iloc[1], s['1/10/2005'])

        result = s.resample('W-WED', how='last')
        self.assertEqual(len(result), 2)
        self.assertTrue((result.index.dayofweek == [2, 2]).all())
        self.assertEqual(result.iloc[0], s['1/5/2005'])
        self.assertEqual(result.iloc[1], s['1/10/2005'])

        result = s.resample('W-THU', how='last')
        self.assertEqual(len(result), 2)
        self.assertTrue((result.index.dayofweek == [3, 3]).all())
        self.assertEqual(result.iloc[0], s['1/6/2005'])
        self.assertEqual(result.iloc[1], s['1/10/2005'])

        result = s.resample('W-FRI', how='last')
        self.assertEqual(len(result), 2)
        self.assertTrue((result.index.dayofweek == [4, 4]).all())
        self.assertEqual(result.iloc[0], s['1/7/2005'])
        self.assertEqual(result.iloc[1], s['1/10/2005'])

        # to biz day
        result = s.resample('B', how='last')
        self.assertEqual(len(result), 7)
        self.assertTrue((result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all())
        self.assertEqual(result.iloc[0], s['1/2/2005'])
        self.assertEqual(result.iloc[1], s['1/3/2005'])
        self.assertEqual(result.iloc[5], s['1/9/2005'])
        self.assertEqual(result.index.name, 'index')
    def test_resample_upsampling_picked_but_not_correct(self):
        """GH 3020 / GH 5955: resampling at the index's own frequency must
        not be treated as upsampling."""
        # Test for issue #3020
        dates = date_range('01-Jan-2014','05-Jan-2014', freq='D')
        series = Series(1, index=dates)

        result = series.resample('D')
        self.assertEqual(result.index[0], dates[0])

        # GH 5955
        # incorrect deciding to upsample when the axis frequency matches the resample frequency
        import datetime
        s = Series(np.arange(1.,6),index=[datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)])
        expected = Series(np.arange(1.,6),index=date_range('19750101',periods=5,freq='D'))

        result = s.resample('D',how='count')
        assert_series_equal(result,Series(1,index=expected.index))

        result1 = s.resample('D',how='sum')
        result2 = s.resample('D',how='mean')
        result3 = s.resample('D')
        assert_series_equal(result1,expected)
        assert_series_equal(result2,expected)
        assert_series_equal(result3,expected)
    def test_resample_frame_basic(self):
        """DataFrame resampling: cython aggregations, column consistency
        with Series resampling, and the kind='period' path."""
        df = tm.makeTimeDataFrame()

        b = TimeGrouper('M')
        g = df.groupby(b)

        # check all cython functions work
        funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
        for f in funcs:
            g._cython_agg_general(f)

        result = df.resample('A')
        assert_series_equal(result['A'], df['A'].resample('A'))

        result = df.resample('M')
        assert_series_equal(result['A'], df['A'].resample('M'))

        df.resample('M', kind='period')
        df.resample('W-WED', kind='period')
    def test_resample_loffset(self):
        """``loffset`` given as timedelta, string or offset must shift the
        result labels identically."""
        rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
        s = Series(np.random.randn(14), index=rng)

        result = s.resample('5min', how='mean', closed='right', label='right',
                            loffset=timedelta(minutes=1))
        idx = date_range('1/1/2000', periods=4, freq='5min')
        expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
                          index=idx + timedelta(minutes=1))
        assert_series_equal(result, expected)

        expected = s.resample(
            '5min', how='mean', closed='right', label='right',
            loffset='1min')
        assert_series_equal(result, expected)

        expected = s.resample(
            '5min', how='mean', closed='right', label='right',
            loffset=Minute(1))
        assert_series_equal(result, expected)

        self.assertEqual(result.index.freq, Minute(5))

        # from daily
        dti = DatetimeIndex(
            start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
            freq='D')
        ser = Series(np.random.rand(len(dti)), dti)

        # to weekly
        result = ser.resample('w-sun', how='last')
        expected = ser.resample('w-sun', how='last', loffset=-bday)
        self.assertEqual(result.index[0] - bday, expected.index[0])
def test_resample_upsample(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample('Min', fill_method='pad')
self.assertEqual(len(result), 12961)
self.assertEqual(result[0], s[0])
self.assertEqual(result[-1], s[-1])
self.assertEqual(result.index.name, 'index')
    def test_resample_extra_index_point(self):
        # GH 9756: resampling business-daily data to 'BM' must not emit an
        # extra bin beyond the last business-month end.
        index = DatetimeIndex(start='20150101', end='20150331', freq='BM')
        expected = DataFrame({'A' : Series([21,41,63], index=index)})
        # same span at business-day resolution, values 0..n-1
        index = DatetimeIndex(start='20150101', end='20150331', freq='B')
        df = DataFrame({'A' : Series(range(len(index)),index=index)},dtype='int64')
        result = df.resample('BM', how='last')
        assert_frame_equal(result, expected)
def test_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t', fill_method='ffill', limit=2)
expected = ts.reindex(result.index, method='ffill', limit=2)
assert_series_equal(result, expected)
    def test_resample_ohlc(self):
        # OHLC resampling: four columns (open/high/low/close), one row per bin.
        s = self.series
        grouper = TimeGrouper(Minute(5))
        # only the length of this is used -- it fixes the expected bin count
        expect = s.groupby(grouper).agg(lambda x: x[-1])
        result = s.resample('5Min', how='ohlc')
        self.assertEqual(len(result), len(expect))
        self.assertEqual(len(result.columns), 4)
        # second-to-last bin: asserted against the s[-6:-1] slice
        xs = result.iloc[-2]
        self.assertEqual(xs['open'], s[-6])
        self.assertEqual(xs['high'], s[-6:-1].max())
        self.assertEqual(xs['low'], s[-6:-1].min())
        self.assertEqual(xs['close'], s[-2])
        # first bin: asserted against the first five observations
        xs = result.iloc[0]
        self.assertEqual(xs['open'], s[0])
        self.assertEqual(xs['high'], s[:5].max())
        self.assertEqual(xs['low'], s[:5].min())
        self.assertEqual(xs['close'], s[4])
def test_resample_ohlc_dataframe(self):
df = (pd.DataFrame({'PRICE': {Timestamp('2011-01-06 10:59:05', tz=None): 24990,
Timestamp('2011-01-06 12:43:33', tz=None): 25499,
Timestamp('2011-01-06 12:54:09', tz=None): 25499},
'VOLUME': {Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
).reindex_axis(['VOLUME', 'PRICE'], axis=1)
res = df.resample('H', how='ohlc')
exp = pd.concat([df['VOLUME'].resample('H', how='ohlc'),
df['PRICE'].resample('H', how='ohlc')],
axis=1,
keys=['VOLUME', 'PRICE'])
assert_frame_equal(exp, res)
df.columns = [['a', 'b'], ['c', 'd']]
res = df.resample('H', how='ohlc')
exp.columns = pd.MultiIndex.from_tuples([('a', 'c', 'open'), ('a', 'c', 'high'),
('a', 'c', 'low'), ('a', 'c', 'close'), ('b', 'd', 'open'),
('b', 'd', 'high'), ('b', 'd', 'low'), ('b', 'd', 'close')])
assert_frame_equal(exp, res)
# dupe columns fail atm
# df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index(self):
# GH 4812
# dup columns with resample raising
df = DataFrame(np.random.randn(4,12),index=[2000,2000,2000,2000],columns=[ Period(year=2000,month=i+1,freq='M') for i in range(12) ])
df.iloc[3,:] = np.nan
result = df.resample('Q',axis=1)
expected = df.groupby(lambda x: int((x.month-1)/3),axis=1).mean()
expected.columns = [ Period(year=2000,quarter=i+1,freq='Q') for i in range(4) ]
assert_frame_equal(result, expected)
def test_resample_reresample(self):
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample('B', closed='right', label='right')
result = bs.resample('8H')
self.assertEqual(len(result), 22)
tm.assertIsInstance(result.index.freq, offsets.DateOffset)
self.assertEqual(result.index.freq, offsets.Hour(8))
def test_resample_timestamp_to_period(self):
ts = _simple_ts('1/1/1990', '1/1/2000')
result = ts.resample('A-DEC', kind='period')
expected = ts.resample('A-DEC')
expected.index = period_range('1990', '2000', freq='a-dec')
assert_series_equal(result, expected)
result = ts.resample('A-JUN', kind='period')
expected = ts.resample('A-JUN')
expected.index = period_range('1990', '2000', freq='a-jun')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
    def test_ohlc_5min(self):
        # Compare built-in 'ohlc' resampling against a reference implementation.
        def _ohlc(group):
            # reference OHLC: [first, max, min, last]; an all-NaN group
            # collapses to four NaNs
            if isnull(group).all():
                return np.repeat(np.nan, 4)
            return [group[0], group.max(), group.min(), group[-1]]
        rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50',
                         freq='10s')
        ts = Series(np.random.randn(len(rng)), index=rng)
        resampled = ts.resample('5min', how='ohlc', closed='right',
                                label='right')
        # right-closed / right-labelled: the first bin contains only ts[0]
        self.assertTrue((resampled.ix['1/1/2000 00:00'] == ts[0]).all())
        # second bin is asserted over ts[1:31] (30 ten-second observations)
        exp = _ohlc(ts[1:31])
        self.assertTrue((resampled.ix['1/1/2000 00:05'] == exp).all())
        # final, partial bin
        exp = _ohlc(ts['1/1/2000 5:55:01':])
        self.assertTrue((resampled.ix['1/1/2000 6:00:00'] == exp).all())
    def test_downsample_non_unique(self):
        # Downsampling an index with repeated timestamps must aggregate all
        # duplicate entries into their containing bin.
        rng = date_range('1/1/2000', '2/29/2000')
        rng2 = rng.repeat(5).values        # every timestamp appears 5 times
        ts = Series(np.random.randn(len(rng2)), index=rng2)
        result = ts.resample('M', how='mean')
        # reference: group by calendar month number (Jan=1, Feb=2)
        expected = ts.groupby(lambda x: x.month).mean()
        self.assertEqual(len(result), 2)
        assert_almost_equal(result[0], expected[1])
        assert_almost_equal(result[1], expected[2])
def test_asfreq_non_unique(self):
# GH #1077
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(2).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
self.assertRaises(Exception, ts.asfreq, 'B')
def test_resample_axis1(self):
rng = date_range('1/1/2000', '2/29/2000')
df = DataFrame(np.random.randn(3, len(rng)), columns=rng,
index=['a', 'b', 'c'])
result = df.resample('M', axis=1)
expected = df.T.resample('M').T
tm.assert_frame_equal(result, expected)
def test_resample_panel(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', axis=1)
def p_apply(panel, f):
result = {}
for item in panel.items:
result[item] = f(panel[item])
return Panel(result, items=panel.items)
expected = p_apply(panel, lambda x: x.resample('M'))
tm.assert_panel_equal(result, expected)
panel2 = panel.swapaxes(1, 2)
result = panel2.resample('M', axis=2)
expected = p_apply(panel2, lambda x: x.resample('M', axis=1))
tm.assert_panel_equal(result, expected)
def test_resample_panel_numpy(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', how=lambda x: x.mean(1), axis=1)
expected = panel.resample('M', how='mean', axis=1)
tm.assert_panel_equal(result, expected)
panel = panel.swapaxes(1, 2)
result = panel.resample('M', how=lambda x: x.mean(2), axis=2)
expected = panel.resample('M', how='mean', axis=2)
tm.assert_panel_equal(result, expected)
def test_resample_anchored_ticks(self):
# If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
# "anchor" the origin at midnight so we get regular intervals rather
# than starting from the first timestamp which might start in the middle
# of a desired interval
rng = date_range('1/1/2000 04:00:00', periods=86400, freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
freqs = ['t', '5t', '15t', '30t', '4h', '12h']
for freq in freqs:
result = ts[2:].resample(freq, closed='left', label='left')
expected = ts.resample(freq, closed='left', label='left')
assert_series_equal(result, expected)
def test_resample_single_group(self):
mysum = lambda x: x.sum()
rng = date_range('2000-1-1', '2000-2-10', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
assert_series_equal(ts.resample('M', how='sum'),
ts.resample('M', how=mysum))
rng = date_range('2000-1-1', '2000-1-10', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
assert_series_equal(ts.resample('M', how='sum'),
ts.resample('M', how=mysum))
# GH 3849
s = Series([30.1, 31.6], index=[Timestamp('20070915 15:30:00'),
Timestamp('20070915 15:40:00')])
expected = Series([0.75], index=[Timestamp('20070915')])
result = s.resample('D', how=lambda x: np.std(x))
assert_series_equal(result, expected)
def test_resample_base(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 02:00', freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', base=2)
exp_rng = date_range('12/31/1999 23:57:00', '1/1/2000 01:57',
freq='5min')
self.assertTrue(resampled.index.equals(exp_rng))
def test_resample_base_with_timedeltaindex(self):
# GH 10530
rng = timedelta_range(start = '0s', periods = 25, freq = 's')
ts = Series(np.random.randn(len(rng)), index = rng)
with_base = ts.resample('2s', base = 5)
without_base = ts.resample('2s')
exp_without_base = timedelta_range(start = '0s', end = '25s', freq = '2s')
exp_with_base = timedelta_range(start = '5s', end = '29s', freq = '2s')
self.assertTrue(without_base.index.equals(exp_without_base))
self.assertTrue(with_base.index.equals(exp_with_base))
def test_resample_daily_anchored(self):
rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
result = ts[2:].resample('D', closed='left', label='left')
expected = ts.resample('D', closed='left', label='left')
assert_series_equal(result, expected)
def test_resample_to_period_monthly_buglet(self):
# GH #1259
rng = date_range('1/1/2000', '12/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('M', kind='period')
exp_index = period_range('Jan-2000', 'Dec-2000', freq='M')
self.assertTrue(result.index.equals(exp_index))
def test_resample_empty(self):
ts = _simple_ts('1/1/2000', '2/1/2000')[:0]
result = ts.resample('A')
self.assertEqual(len(result), 0)
self.assertEqual(result.index.freqstr, 'A-DEC')
result = ts.resample('A', kind='period')
self.assertEqual(len(result), 0)
self.assertEqual(result.index.freqstr, 'A-DEC')
xp = DataFrame()
rs = xp.resample('A')
assert_frame_equal(xp, rs)
# Empty series were sometimes causing a segfault (for the functions
# with Cython bounds-checking disabled) or an IndexError. We just run
# them to ensure they no longer do. (GH #10228)
for index in tm.all_timeseries_index_generator(0):
for dtype in (np.float, np.int, np.object, 'datetime64[ns]'):
for how in ('count', 'mean', 'min', 'ohlc', 'last', 'prod'):
empty_series = pd.Series([], index, dtype)
try:
empty_series.resample('d', how)
except DataError:
# Ignore these since some combinations are invalid
# (ex: doing mean with dtype of np.object)
pass
def test_weekly_resample_buglet(self):
# #1327
rng = date_range('1/1/2000', freq='B', periods=20)
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('W')
expected = ts.resample('W-SUN')
assert_series_equal(resampled, expected)
    def test_monthly_resample_error(self):
        # GH #1451: monthly resampling of this hourly range used to raise.
        dates = date_range('4/16/2012 20:00', periods=5000, freq='h')
        ts = Series(np.random.randn(len(dates)), index=dates)
        # it works!  (smoke test only -- the result value is deliberately
        # not inspected)
        result = ts.resample('M')
def test_resample_anchored_intraday(self):
# #1471, #1458
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('M')
expected = df.resample('M', kind='period').to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
result = df.resample('M', closed='left')
exp = df.tshift(1, freq='D').resample('M', kind='period')
exp = exp.to_timestamp(how='end')
tm.assert_frame_equal(result, exp)
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('Q')
expected = df.resample('Q', kind='period').to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
result = df.resample('Q', closed='left')
expected = df.tshift(1, freq='D').resample('Q', kind='period',
closed='left')
expected = expected.to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
ts = _simple_ts('2012-04-29 23:00', '2012-04-30 5:00', freq='h')
resampled = ts.resample('M')
self.assertEqual(len(resampled), 1)
def test_resample_anchored_monthstart(self):
ts = _simple_ts('1/1/2000', '12/31/2002')
freqs = ['MS', 'BMS', 'QS-MAR', 'AS-DEC', 'AS-JUN']
for freq in freqs:
result = ts.resample(freq, how='mean')
def test_resample_anchored_multiday(self):
# When resampling a range spanning multiple days, ensure that the
# start date gets used to determine the offset. Fixes issue where
# a one day period is not a multiple of the frequency.
#
# See: https://github.com/pydata/pandas/issues/8683
s = pd.Series(np.random.randn(5),
index=pd.date_range('2014-10-14 23:06:23.206',
periods=3, freq='400L')
| pd.date_range('2014-10-15 23:00:00',
periods=2, freq='2200L'))
# Ensure left closing works
result = s.resample('2200L', 'mean')
self.assertEqual(result.index[-1],
pd.Timestamp('2014-10-15 23:00:02.000'))
# Ensure right closing works
result = s.resample('2200L', 'mean', label='right')
self.assertEqual(result.index[-1],
pd.Timestamp('2014-10-15 23:00:04.200'))
def test_corner_cases(self):
# miscellaneous test coverage
rng = date_range('1/1/2000', periods=12, freq='t')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('5t', closed='right', label='left')
ex_index = date_range('1999-12-31 23:55', periods=4, freq='5t')
self.assertTrue(result.index.equals(ex_index))
len0pts = _simple_pts('2007-01', '2010-05', freq='M')[:0]
# it works
result = len0pts.resample('A-DEC')
self.assertEqual(len(result), 0)
# resample to periods
ts = _simple_ts('2000-04-28', '2000-04-30 11:00', freq='h')
result = ts.resample('M', kind='period')
self.assertEqual(len(result), 1)
self.assertEqual(result.index[0], Period('2000-04', freq='M'))
def test_anchored_lowercase_buglet(self):
dates = date_range('4/16/2012 20:00', periods=50000, freq='s')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample('d')
def test_upsample_apply_functions(self):
# #1596
rng = pd.date_range('2012-06-12', periods=4, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('20min', how=['mean', 'sum'])
tm.assertIsInstance(result, DataFrame)
def test_resample_not_monotonic(self):
rng = pd.date_range('2012-06-12', periods=200, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
ts = ts.take(np.random.permutation(len(ts)))
result = ts.resample('D', how='sum')
exp = ts.sort_index().resample('D', how='sum')
assert_series_equal(result, exp)
def test_resample_median_bug_1688(self):
for dtype in ['int64','int32','float64','float32']:
df = DataFrame([1, 2], index=[datetime(2012, 1, 1, 0, 0, 0),
datetime(2012, 1, 1, 0, 5, 0)],
dtype = dtype)
result = df.resample("T", how=lambda x: x.mean())
exp = df.asfreq('T')
tm.assert_frame_equal(result, exp)
result = df.resample("T", how="median")
exp = df.asfreq('T')
tm.assert_frame_equal(result, exp)
def test_how_lambda_functions(self):
ts = _simple_ts('1/1/2000', '4/1/2000')
result = ts.resample('M', how=lambda x: x.mean())
exp = ts.resample('M', how='mean')
tm.assert_series_equal(result, exp)
self.assertRaises(Exception, ts.resample, 'M',
how=[lambda x: x.mean(), lambda x: x.std(ddof=1)])
result = ts.resample('M', how={'foo': lambda x: x.mean(),
'bar': lambda x: x.std(ddof=1)})
foo_exp = ts.resample('M', how='mean')
foo_exp.name = 'foo'
bar_exp = ts.resample('M', how='std')
bar_exp.name = 'bar'
tm.assert_series_equal(result['foo'], foo_exp)
tm.assert_series_equal(result['bar'], bar_exp)
def test_resample_unequal_times(self):
# #1772
start = datetime(1999, 3, 1, 5)
# end hour is less than start
end = datetime(2012, 7, 31, 4)
bad_ind = date_range(start, end, freq="30min")
df = DataFrame({'close': 1}, index=bad_ind)
# it works!
df.resample('AS', 'sum')
def test_resample_consistency(self):
# GH 6418
# resample with bfill / limit / reindex consistency
i30 = index=pd.date_range('2002-02-02', periods=4, freq='30T')
s=pd.Series(np.arange(4.), index=i30)
s[2] = np.NaN
# Upsample by factor 3 with reindex() and resample() methods:
i10 = pd.date_range(i30[0], i30[-1], freq='10T')
s10 = s.reindex(index=i10, method='bfill')
s10_2 = s.reindex(index=i10, method='bfill', limit=2)
rl = s.reindex_like(s10, method='bfill', limit=2)
r10_2 = s.resample('10Min', fill_method='bfill', limit=2)
r10 = s.resample('10Min', fill_method='bfill')
# s10_2, r10, r10_2, rl should all be equal
assert_series_equal(s10_2, r10)
assert_series_equal(s10_2, r10_2)
assert_series_equal(s10_2, rl)
def test_resample_timegrouper(self):
# GH 7227
dates1 = [datetime(2014, 10, 1), datetime(2014, 9, 3),
datetime(2014, 11, 5), datetime(2014, 9, 5),
datetime(2014, 10, 8), datetime(2014, 7, 15)]
dates2 = dates1[:2] + [pd.NaT] + dates1[2:4] + [pd.NaT] + dates1[4:]
dates3 = [pd.NaT] + dates1 + [pd.NaT]
for dates in [dates1, dates2, dates3]:
df = DataFrame(dict(A=dates, B=np.arange(len(dates))))
result = df.set_index('A').resample('M', how='count')
exp_idx = pd.DatetimeIndex(['2014-07-31', '2014-08-31', '2014-09-30',
'2014-10-31', '2014-11-30'], freq='M', name='A')
expected = DataFrame({'B': [1, 0, 2, 2, 1]}, index=exp_idx)
assert_frame_equal(result, expected)
result = df.groupby(pd.Grouper(freq='M', key='A')).count()
assert_frame_equal(result, expected)
df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange(len(dates))))
result = df.set_index('A').resample('M', how='count')
expected = DataFrame({'B': [1, 0, 2, 2, 1], 'C': [1, 0, 2, 2, 1]},
index=exp_idx, columns=['B', 'C'])
assert_frame_equal(result, expected)
result = df.groupby(pd.Grouper(freq='M', key='A')).count()
assert_frame_equal(result, expected)
def test_resample_group_info(self): # GH10914
for n, k in product((10000, 100000), (10, 100, 1000)):
dr = date_range(start='2015-08-27', periods=n // 10, freq='T')
ts = Series(np.random.randint(0, n // k, n).astype('int64'),
index=np.random.choice(dr, n))
left = ts.resample('30T', how='nunique')
ix = date_range(start=ts.index.min(),
end=ts.index.max(),
freq='30T')
vals = ts.values
bins = np.searchsorted(ix.values, ts.index, side='right')
sorter = np.lexsort((vals, bins))
vals, bins = vals[sorter], bins[sorter]
mask = np.r_[True, vals[1:] != vals[:-1]]
mask |= np.r_[True, bins[1:] != bins[:-1]]
arr = np.bincount(bins[mask] - 1, minlength=len(ix)).astype('int64',copy=False)
right = Series(arr, index=ix)
assert_series_equal(left, right)
def test_resample_size(self):
n = 10000
dr = date_range('2015-09-19', periods=n, freq='T')
ts = Series(np.random.randn(n), index=np.random.choice(dr, n))
left = ts.resample('7T', how='size')
ix = date_range(start=left.index.min(), end=ts.index.max(), freq='7T')
bins = np.searchsorted(ix.values, ts.index.values, side='right')
val = np.bincount(bins, minlength=len(ix) + 1)[1:].astype('int64',copy=False)
right = Series(val, index=ix)
assert_series_equal(left, right)
def test_resmaple_dst_anchor(self):
# 5172
dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz='US/Eastern')
df = DataFrame([5], index=dti)
assert_frame_equal(df.resample(rule='D', how='sum'),
DataFrame([5], index=df.index.normalize()))
df.resample(rule='MS', how='sum')
assert_frame_equal(df.resample(rule='MS', how='sum'),
DataFrame([5], index=DatetimeIndex([datetime(2012, 11, 1)],
tz='US/Eastern')))
dti = date_range('2013-09-30', '2013-11-02', freq='30Min', tz='Europe/Paris')
values = range(dti.size)
df = DataFrame({"a": values, "b": values, "c": values}, index=dti, dtype='int64')
how = {"a": "min", "b": "max", "c": "count"}
assert_frame_equal(df.resample("W-MON", how=how)[["a", "b", "c"]],
DataFrame({"a": [0, 48, 384, 720, 1056, 1394],
"b": [47, 383, 719, 1055, 1393, 1586],
"c": [48, 336, 336, 336, 338, 193]},
index=date_range('9/30/2013', '11/4/2013',
freq='W-MON', tz='Europe/Paris')),
'W-MON Frequency')
assert_frame_equal(df.resample("2W-MON", how=how)[["a", "b", "c"]],
DataFrame({"a": [0, 48, 720, 1394],
"b": [47, 719, 1393, 1586],
"c": [48, 672, 674, 193]},
index=date_range('9/30/2013', '11/11/2013',
freq='2W-MON', tz='Europe/Paris')),
'2W-MON Frequency')
assert_frame_equal(df.resample("MS", how=how)[["a", "b", "c"]],
DataFrame({"a": [0, 48, 1538],
"b": [47, 1537, 1586],
"c": [48, 1490, 49]},
index=date_range('9/1/2013', '11/1/2013',
freq='MS', tz='Europe/Paris')),
'MS Frequency')
assert_frame_equal(df.resample("2MS", how=how)[["a", "b", "c"]],
DataFrame({"a": [0, 1538],
"b": [1537, 1586],
"c": [1538, 49]},
index=date_range('9/1/2013', '11/1/2013',
freq='2MS', tz='Europe/Paris')),
'2MS Frequency')
df_daily = df['10/26/2013':'10/29/2013']
assert_frame_equal(df_daily.resample("D", how={"a": "min", "b": "max", "c": "count"})[["a", "b", "c"]],
DataFrame({"a": [1248, 1296, 1346, 1394],
"b": [1295, 1345, 1393, 1441],
"c": [48, 50, 48, 48]},
index=date_range('10/26/2013', '10/29/2013',
freq='D', tz='Europe/Paris')),
'D Frequency')
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
def _simple_pts(start, end, freq='D'):
rng = period_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestResamplePeriodIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_annual_upsample_D_s_f(self):
self._check_annual_upsample_cases('D', 'start', 'ffill')
def test_annual_upsample_D_e_f(self):
self._check_annual_upsample_cases('D', 'end', 'ffill')
def test_annual_upsample_D_s_b(self):
self._check_annual_upsample_cases('D', 'start', 'bfill')
def test_annual_upsample_D_e_b(self):
self._check_annual_upsample_cases('D', 'end', 'bfill')
def test_annual_upsample_B_s_f(self):
self._check_annual_upsample_cases('B', 'start', 'ffill')
def test_annual_upsample_B_e_f(self):
self._check_annual_upsample_cases('B', 'end', 'ffill')
def test_annual_upsample_B_s_b(self):
self._check_annual_upsample_cases('B', 'start', 'bfill')
def test_annual_upsample_B_e_b(self):
self._check_annual_upsample_cases('B', 'end', 'bfill')
def test_annual_upsample_M_s_f(self):
self._check_annual_upsample_cases('M', 'start', 'ffill')
def test_annual_upsample_M_e_f(self):
self._check_annual_upsample_cases('M', 'end', 'ffill')
def test_annual_upsample_M_s_b(self):
self._check_annual_upsample_cases('M', 'start', 'bfill')
def test_annual_upsample_M_e_b(self):
self._check_annual_upsample_cases('M', 'end', 'bfill')
def _check_annual_upsample_cases(self, targ, conv, meth, end='12/31/1991'):
for month in MONTHS:
ts = _simple_pts('1/1/1990', end, freq='A-%s' % month)
result = ts.resample(targ, fill_method=meth,
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, meth).to_period()
assert_series_equal(result, expected)
def test_basic_downsample(self):
ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec')
expected = ts.groupby(ts.index.year).mean()
expected.index = period_range('1/1/1990', '6/30/1995',
freq='a-dec')
assert_series_equal(result, expected)
# this is ok
assert_series_equal(ts.resample('a-dec'), result)
assert_series_equal(ts.resample('a'), result)
def test_not_subperiod(self):
# These are incompatible period rules for resampling
ts = _simple_pts('1/1/1990', '6/30/1995', freq='w-wed')
self.assertRaises(ValueError, ts.resample, 'a-dec')
self.assertRaises(ValueError, ts.resample, 'q-mar')
self.assertRaises(ValueError, ts.resample, 'M')
self.assertRaises(ValueError, ts.resample, 'w-thu')
def test_basic_upsample(self):
ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec')
resampled = result.resample('D', fill_method='ffill', convention='end')
expected = result.to_timestamp('D', how='end')
expected = expected.asfreq('D', 'ffill').to_period()
assert_series_equal(resampled, expected)
def test_upsample_with_limit(self):
rng = period_range('1/1/2000', periods=5, freq='A')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('M', fill_method='ffill', limit=2,
convention='end')
expected = ts.asfreq('M').reindex(result.index, method='ffill',
limit=2)
assert_series_equal(result, expected)
def test_annual_upsample(self):
ts = _simple_pts('1/1/1990', '12/31/1995', freq='A-DEC')
df = DataFrame({'a': ts})
rdf = df.resample('D', fill_method='ffill')
exp = df['a'].resample('D', fill_method='ffill')
assert_series_equal(rdf['a'], exp)
rng = period_range('2000', '2003', freq='A-DEC')
ts = Series([1, 2, 3, 4], index=rng)
result = ts.resample('M', fill_method='ffill')
ex_index = period_range('2000-01', '2003-12', freq='M')
expected = ts.asfreq('M', how='start').reindex(ex_index,
method='ffill')
assert_series_equal(result, expected)
def test_quarterly_upsample(self):
targets = ['D', 'B', 'M']
for month in MONTHS:
ts = _simple_pts('1/1/1990', '12/31/1995', freq='Q-%s' % month)
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, fill_method='ffill',
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_monthly_upsample(self):
targets = ['D', 'B']
ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, fill_method='ffill',
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_fill_method_and_how_upsample(self):
# GH2073
s = Series(np.arange(9,dtype='int64'),
index=date_range('2010-01-01', periods=9, freq='Q'))
last = s.resample('M', fill_method='ffill')
both = s.resample('M', how='last', fill_method='ffill').astype('int64')
assert_series_equal(last, both)
def test_weekly_upsample(self):
targets = ['D', 'B']
for day in DAYS:
ts = _simple_pts('1/1/1990', '12/31/1995', freq='W-%s' % day)
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, fill_method='ffill',
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_resample_to_timestamps(self):
ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')
result = ts.resample('A-DEC', kind='timestamp')
expected = ts.to_timestamp(how='end').resample('A-DEC')
assert_series_equal(result, expected)
def test_resample_to_quarterly(self):
for month in MONTHS:
ts = _simple_pts('1990', '1992', freq='A-%s' % month)
quar_ts = ts.resample('Q-%s' % month, fill_method='ffill')
stamps = ts.to_timestamp('D', how='start')
qdates = period_range(ts.index[0].asfreq('D', 'start'),
ts.index[-1].asfreq('D', 'end'),
freq='Q-%s' % month)
expected = stamps.reindex(qdates.to_timestamp('D', 's'),
method='ffill')
expected.index = qdates
assert_series_equal(quar_ts, expected)
# conforms, but different month
ts = _simple_pts('1990', '1992', freq='A-JUN')
for how in ['start', 'end']:
result = ts.resample('Q-MAR', convention=how, fill_method='ffill')
expected = ts.asfreq('Q-MAR', how=how)
expected = expected.reindex(result.index, method='ffill')
# .to_timestamp('D')
# expected = expected.resample('Q-MAR', fill_method='ffill')
assert_series_equal(result, expected)
def test_resample_fill_missing(self):
rng = PeriodIndex([2000, 2005, 2007, 2009], freq='A')
s = Series(np.random.randn(4), index=rng)
stamps = s.to_timestamp()
filled = s.resample('A')
expected = stamps.resample('A').to_period('A')
assert_series_equal(filled, expected)
filled = s.resample('A', fill_method='ffill')
expected = stamps.resample('A', fill_method='ffill').to_period('A')
assert_series_equal(filled, expected)
def test_cant_fill_missing_dups(self):
rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A')
s = Series(np.random.randn(5), index=rng)
self.assertRaises(Exception, s.resample, 'A')
def test_resample_5minute(self):
rng = period_range('1/1/2000', '1/5/2000', freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('5min')
expected = ts.to_timestamp().resample('5min')
assert_series_equal(result, expected)
def test_upsample_daily_business_daily(self):
ts = _simple_pts('1/1/2000', '2/1/2000', freq='B')
result = ts.resample('D')
expected = ts.asfreq('D').reindex(period_range('1/3/2000', '2/1/2000'))
assert_series_equal(result, expected)
ts = _simple_pts('1/1/2000', '2/1/2000')
result = ts.resample('H', convention='s')
exp_rng = period_range('1/1/2000', '2/1/2000 23:00', freq='H')
expected = ts.asfreq('H', how='s').reindex(exp_rng)
assert_series_equal(result, expected)
def test_resample_empty(self):
ts = _simple_pts('1/1/2000', '2/1/2000')[:0]
result = ts.resample('A')
self.assertEqual(len(result), 0)
def test_resample_irregular_sparse(self):
dr = date_range(start='1/1/2012', freq='5min', periods=1000)
s = Series(np.array(100), index=dr)
# subset the data.
subset = s[:'2012-01-04 06:55']
result = subset.resample('10min', how=len)
expected = s.resample('10min', how=len).ix[result.index]
assert_series_equal(result, expected)
def test_resample_weekly_all_na(self):
rng = date_range('1/1/2000', periods=10, freq='W-WED')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('W-THU')
self.assertTrue(result.isnull().all())
result = ts.resample('W-THU', fill_method='ffill')[:-1]
expected = ts.asfreq('W-THU', method='ffill')
assert_series_equal(result, expected)
def test_resample_tz_localized(self):
dr = date_range(start='2012-4-13', end='2012-5-1')
ts = Series(lrange(len(dr)), dr)
ts_utc = ts.tz_localize('UTC')
ts_local = ts_utc.tz_convert('America/Los_Angeles')
result = ts_local.resample('W')
ts_local_naive = ts_local.copy()
ts_local_naive.index = [x.replace(tzinfo=None)
for x in ts_local_naive.index.to_pydatetime()]
exp = ts_local_naive.resample('W').tz_localize('America/Los_Angeles')
assert_series_equal(result, exp)
# it works
result = ts_local.resample('D')
# #2245
idx = date_range('2001-09-20 15:59', '2001-09-20 16:00', freq='T',
tz='Australia/Sydney')
s = Series([1, 2], index=idx)
result = s.resample('D', closed='right', label='right')
ex_index = date_range('2001-09-21', periods=1, freq='D',
tz='Australia/Sydney')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
# for good measure
result = s.resample('D', kind='period')
ex_index = period_range('2001-09-20', periods=1, freq='D')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
# GH 6397
# comparing an offset that doesn't propogate tz's
rng = date_range('1/1/2011', periods=20000, freq='H')
rng = rng.tz_localize('EST')
ts = DataFrame(index=rng)
ts['first']=np.random.randn(len(rng))
ts['second']=np.cumsum(np.random.randn(len(rng)))
expected = DataFrame({ 'first' : ts.resample('A',how=np.sum)['first'],
'second' : ts.resample('A',how=np.mean)['second'] },columns=['first','second'])
result = ts.resample('A', how={'first':np.sum, 'second':np.mean}).reindex(columns=['first','second'])
assert_frame_equal(result,expected)
def test_closed_left_corner(self):
# #1465
s = Series(np.random.randn(21),
index=date_range(start='1/1/2012 9:30',
freq='1min', periods=21))
s[0] = np.nan
result = s.resample('10min', how='mean', closed='left', label='right')
exp = s[1:].resample('10min', how='mean', closed='left', label='right')
assert_series_equal(result, exp)
result = s.resample('10min', how='mean', closed='left', label='left')
exp = s[1:].resample('10min', how='mean', closed='left', label='left')
ex_index = date_range(start='1/1/2012 9:30', freq='10min', periods=3)
self.assertTrue(result.index.equals(ex_index))
assert_series_equal(result, exp)
def test_quarterly_resampling(self):
rng = period_range('2000Q1', periods=10, freq='Q-DEC')
ts = Series(np.arange(10), index=rng)
result = ts.resample('A')
exp = ts.to_timestamp().resample('A').to_period()
assert_series_equal(result, exp)
def test_resample_weekly_bug_1726(self):
# 8/6/12 is a Monday
ind = DatetimeIndex(start="8/6/2012", end="8/26/2012", freq="D")
n = len(ind)
data = [[x] * 5 for x in range(n)]
df = DataFrame(data, columns=['open', 'high', 'low', 'close', 'vol'],
index=ind)
# it works!
df.resample('W-MON', how='first', closed='left', label='left')
def test_resample_bms_2752(self):
# GH2753
foo = pd.Series(index=pd.bdate_range('20000101','20000201'))
res1 = foo.resample("BMS")
res2 = foo.resample("BMS").resample("B")
self.assertEqual(res1.index[0], Timestamp('20000103'))
self.assertEqual(res1.index[0], res2.index[0])
# def test_monthly_convention_span(self):
# rng = period_range('2000-01', periods=3, freq='M')
# ts = Series(np.arange(3), index=rng)
# # hacky way to get same thing
# exp_index = period_range('2000-01-01', '2000-03-31', freq='D')
# expected = ts.asfreq('D', how='end').reindex(exp_index)
# expected = expected.fillna(method='bfill')
# result = ts.resample('D', convention='span')
# assert_series_equal(result, expected)
def test_default_right_closed_label(self):
end_freq = ['D', 'Q', 'M', 'D']
end_types = ['M', 'A', 'Q', 'W']
for from_freq, to_freq in zip(end_freq, end_types):
idx = DatetimeIndex(start='8/15/2012', periods=100,
freq=from_freq)
df = DataFrame(np.random.randn(len(idx), 2), idx)
resampled = df.resample(to_freq)
assert_frame_equal(resampled, df.resample(to_freq, closed='right',
label='right'))
def test_default_left_closed_label(self):
others = ['MS', 'AS', 'QS', 'D', 'H']
others_freq = ['D', 'Q', 'M', 'H', 'T']
for from_freq, to_freq in zip(others_freq, others):
idx = DatetimeIndex(start='8/15/2012', periods=100,
freq=from_freq)
df = DataFrame(np.random.randn(len(idx), 2), idx)
resampled = df.resample(to_freq)
assert_frame_equal(resampled, df.resample(to_freq, closed='left',
label='left'))
def test_all_values_single_bin(self):
# 2070
index = period_range(start="2012-01-01", end="2012-12-31", freq="M")
s = Series(np.random.randn(len(index)), index=index)
result = s.resample("A", how='mean')
tm.assert_almost_equal(result[0], s.mean())
    def test_evenly_divisible_with_no_extra_bins(self):
        # GH 4076: when the resample frequency divides the index span
        # evenly, no spurious extra bin should appear at the end.
        # 9 daily rows at '5D' -> exactly two bins (5 rows, then 4 rows).
        df = DataFrame(np.random.randn(9, 3), index=date_range('2000-1-1', periods=9))
        result = df.resample('5D')
        expected = pd.concat([df.iloc[0:5].mean(),df.iloc[5:].mean()],axis=1).T
        expected.index = [Timestamp('2000-1-1'),Timestamp('2000-1-6')]
        assert_frame_equal(result,expected)
        # Larger fixture: 28 days with two records per day (two REST_KEYs),
        # resampled weekly -> exactly four 7-day bins.
        index = date_range(start='2001-5-4', periods=28)
        df = DataFrame(
            [{'REST_KEY': 1, 'DLY_TRN_QT': 80, 'DLY_SLS_AMT': 90,
              'COOP_DLY_TRN_QT': 30, 'COOP_DLY_SLS_AMT': 20}] * 28 +
            [{'REST_KEY': 2, 'DLY_TRN_QT': 70, 'DLY_SLS_AMT': 10,
              'COOP_DLY_TRN_QT': 50, 'COOP_DLY_SLS_AMT': 20}] * 28,
            index=index.append(index)).sort_index()
        index = date_range('2001-5-4',periods=4,freq='7D')
        # Each 7-day bin holds 14 rows (2 records/day * 7 days).
        expected = DataFrame(
            [{'REST_KEY': 14, 'DLY_TRN_QT': 14, 'DLY_SLS_AMT': 14,
              'COOP_DLY_TRN_QT': 14, 'COOP_DLY_SLS_AMT': 14}] * 4,
            index=index)
        result = df.resample('7D', how='count')
        assert_frame_equal(result,expected)
        # Weekly sums must likewise land in exactly four bins.
        expected = DataFrame(
            [{'REST_KEY': 21, 'DLY_TRN_QT': 1050, 'DLY_SLS_AMT': 700,
              'COOP_DLY_TRN_QT': 560, 'COOP_DLY_SLS_AMT': 280}] * 4,
            index=index)
        result = df.resample('7D', how='sum')
        assert_frame_equal(result,expected)
class TestTimeGrouper(tm.TestCase):
    """Tests that grouping via ``TimeGrouper`` produces the same results
    as the equivalent plain ``groupby``/``resample`` operations."""
    def setUp(self):
        # 1000 daily observations starting at 1/1/2000; several tests
        # mutate this series in place.
        self.ts = Series(np.random.randn(1000),
                         index=date_range('1/1/2000', periods=1000))
    def test_apply(self):
        # apply() through a TimeGrouper should match grouping by year.
        grouper = TimeGrouper('A', label='right', closed='right')
        grouped = self.ts.groupby(grouper)
        f = lambda x: x.sort_values()[-3:]
        applied = grouped.apply(f)
        expected = self.ts.groupby(lambda x: x.year).apply(f)
        # Drop the group level so only the original timestamps remain.
        applied.index = applied.index.droplevel(0)
        expected.index = expected.index.droplevel(0)
        assert_series_equal(applied, expected)
    def test_count(self):
        # count() must skip NaNs the same way a year-based groupby does.
        self.ts[::3] = np.nan
        grouper = TimeGrouper('A', label='right', closed='right')
        result = self.ts.resample('A', how='count')
        expected = self.ts.groupby(lambda x: x.year).count()
        expected.index = result.index
        assert_series_equal(result, expected)
    def test_numpy_reduction(self):
        # A numpy reduction name ('prod') routed through resample should
        # match aggregating with the numpy function directly.
        result = self.ts.resample('A', how='prod', closed='right')
        expected = self.ts.groupby(lambda x: x.year).agg(np.prod)
        expected.index = result.index
        assert_series_equal(result, expected)
    def test_apply_iteration(self):
        # #2300
        N = 1000
        ind = pd.date_range(start="2000-01-01", freq="D", periods=N)
        df = DataFrame({'open': 1, 'close': 2}, index=ind)
        tg = TimeGrouper('M')
        _, grouper, _ = tg._get_grouper(df)
        # Errors
        grouped = df.groupby(grouper, group_keys=False)
        f = lambda df: df['close'] / df['open']
        # it works!
        result = grouped.apply(f)
        # With group_keys=False the original index must be preserved.
        self.assertTrue(result.index.equals(df.index))
    def test_panel_aggregation(self):
        # TimeGrouper along axis=1 of a Panel: agg() with a mean-equivalent
        # function should reproduce GroupBy.mean().
        ind = pd.date_range('1/1/2000', periods=100)
        data = np.random.randn(2, len(ind), 4)
        wp = pd.Panel(data, items=['Item1', 'Item2'], major_axis=ind,
                      minor_axis=['A', 'B', 'C', 'D'])
        tg = TimeGrouper('M', axis=1)
        _, grouper, _ = tg._get_grouper(wp)
        bingrouped = wp.groupby(grouper)
        binagg = bingrouped.mean()
        def f(x):
            # Each group handed to agg() is itself a Panel.
            assert(isinstance(x, Panel))
            return x.mean(1)
        result = bingrouped.agg(f)
        tm.assert_panel_equal(result, binagg)
    def test_fails_on_no_datetime_index(self):
        # TimeGrouper requires a DatetimeIndex; every other index type must
        # raise a TypeError naming the offending index class.
        index_names = ('Int64Index', 'PeriodIndex', 'Index', 'Float64Index',
                       'MultiIndex')
        index_funcs = (tm.makeIntIndex, tm.makePeriodIndex,
                       tm.makeUnicodeIndex, tm.makeFloatIndex,
                       lambda m: tm.makeCustomIndex(m, 2))
        n = 2
        for name, func in zip(index_names, index_funcs):
            index = func(n)
            df = DataFrame({'a': np.random.randn(n)}, index=index)
            with tm.assertRaisesRegexp(TypeError,
                                       "axis must be a DatetimeIndex, "
                                       "but got an instance of %r" % name):
                df.groupby(TimeGrouper('D'))
    def test_aggregate_normal(self):
        # check TimeGrouper's aggregation is identical as normal groupby
        n = 20
        data = np.random.randn(n, 4)
        # Same data grouped two ways: by an integer key, and by a datetime
        # key through a daily TimeGrouper -- results must agree.
        normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
        normal_df['key'] = [1, 2, 3, 4, 5] * 4
        dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
        dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3),
                        datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4
        normal_grouped = normal_df.groupby('key')
        dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))
        for func in ['min', 'max', 'prod', 'var', 'std', 'mean']:
            expected = getattr(normal_grouped, func)()
            dt_result = getattr(dt_grouped, func)()
            expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
            assert_frame_equal(expected, dt_result)
        for func in ['count', 'sum']:
            expected = getattr(normal_grouped, func)()
            expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
            dt_result = getattr(dt_grouped, func)()
            assert_frame_equal(expected, dt_result)
        # GH 7453
        for func in ['size']:
            expected = getattr(normal_grouped, func)()
            expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
            dt_result = getattr(dt_grouped, func)()
            assert_series_equal(expected, dt_result)
        # The block below is deliberately disabled (kept as a string), see
        # the note after it.
        """
        for func in ['first', 'last']:
            expected = getattr(normal_grouped, func)()
            expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
            dt_result = getattr(dt_grouped, func)()
            assert_frame_equal(expected, dt_result)
        for func in ['nth']:
            expected = getattr(normal_grouped, func)(3)
            expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
            dt_result = getattr(dt_grouped, func)(3)
            assert_frame_equal(expected, dt_result)
        """
        # if TimeGrouper is used included, 'first','last' and 'nth' doesn't work yet
    def test_aggregate_with_nat(self):
        # check TimeGrouper's aggregation is identical as normal groupby
        n = 20
        data = np.random.randn(n, 4).astype('int64')
        # One key in five is missing (np.nan / pd.NaT); the TimeGrouper
        # result keeps a bin for the missing day, so the normal-groupby
        # expectation is padded with an all-NaN/zero row at position 3.
        normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
        normal_df['key'] = [1, 2, np.nan, 4, 5] * 4
        dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
        dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT,
                        datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4
        normal_grouped = normal_df.groupby('key')
        dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))
        for func in ['min', 'max', 'sum', 'prod']:
            normal_result = getattr(normal_grouped, func)()
            dt_result = getattr(dt_grouped, func)()
            pad = DataFrame([[np.nan, np.nan, np.nan, np.nan]],
                            index=[3], columns=['A', 'B', 'C', 'D'])
            expected = normal_result.append(pad)
            expected = expected.sort_index()
            expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
            assert_frame_equal(expected, dt_result)
        for func in ['count']:
            normal_result = getattr(normal_grouped, func)()
            pad = DataFrame([[0, 0, 0, 0]], index=[3], columns=['A', 'B', 'C', 'D'])
            expected = normal_result.append(pad)
            expected = expected.sort_index()
            expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
            dt_result = getattr(dt_grouped, func)()
            assert_frame_equal(expected, dt_result)
        for func in ['size']:
            normal_result = getattr(normal_grouped, func)()
            pad = Series([0], index=[3])
            expected = normal_result.append(pad)
            expected = expected.sort_index()
            expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
            dt_result = getattr(dt_grouped, func)()
            assert_series_equal(expected, dt_result)
        # GH 9925
        self.assertEqual(dt_result.index.name, 'key')
        # if NaT is included, 'var', 'std', 'mean', 'first','last' and 'nth' doesn't work yet
if __name__ == '__main__':
    # Run this module's tests directly under nose, dropping into pdb on
    # errors and failures and stopping at the first failure (-x).
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/pandas/tseries/tests/test_resample.py | Python | artistic-2.0 | 65,806 |
# Copyright (c) 2008, Humanized, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The Win32 clipboard uses a special format for handling HTML. The basic
problem that the special format is trying to solve is that the user can
select an arbitrary chunk of formatted text that might not be valid HTML.
For instance selecting half-way through a bolded word would contain no </b>
tag. The solution is to encase the fragment in a valid HTML document.
You can read more about this at:
http://msdn.microsoft.com/workshop/networking/clipboard/htmlclipboard.asp
This module deals with converting between the clipboard HTML format and
standard HTML format.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
import re
# ----------------------------------------------------------------------------
# Private Functions
# ----------------------------------------------------------------------------
def _findFirst( pattern, src ):
"""
A helper function that simplifies the logic of using regex to find
the first match in a string.
"""
results = re.findall( pattern, src )
if len(results) > 0:
return results[0]
return None
# ----------------------------------------------------------------------------
# HtmlClipboardFormat Object
# ----------------------------------------------------------------------------
class HtmlClipboardFormat:
    """
    Encapsulates the conversion between the Win32 clipboard HTML
    format and standard HTML format.

    Internally the document is stored as platonic unicode; the
    clipboard representation is always UTF-8 encoded bytes.
    """

    # The 1.0 HTML clipboard header format.  The fixed 09d field width
    # keeps the header length constant no matter what the offsets are,
    # so the byte offsets can be computed before the header is rendered.
    HEADER_FORMAT = \
        "Version:1.0\r\n" \
        "StartHTML:%(htmlStart)09d\r\n" \
        "EndHTML:%(htmlEnd)09d\r\n" \
        "StartFragment:%(fragmentStart)09d\r\n" \
        "EndFragment:%(fragmentEnd)09d\r\n" \
        "StartSelection:%(fragmentStart)09d\r\n" \
        "EndSelection:%(fragmentEnd)09d\r\n" \
        "SourceURL:Enso\r\n"

    # A generic HTML page used to wrap bare fragments.
    HTML_PAGE = \
        "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 3.2//EN\">\n" \
        "<html>\n<head><title></title></head>\n" \
        "<body>%s</body>\n" \
        "</html>"

    # These regexps find the character offsets of the fragment strings (see
    # below) from the HTML clipboard format header.
    START_RE = r"StartFragment:(\d+)"
    END_RE = r"EndFragment:(\d+)"

    # The Clipboard HTML format uses the following comment strings to mark
    # the beginning and end of the text fragment which represents the user's
    # actual selection; everything else is envelope.
    START_FRAG = "<!-- StartFragment -->"
    END_FRAG = "<!-- EndFragment -->"

    def __init__( self, html ):
        """
        Initializes the class to represent html (a unicode string).
        """
        # Preconditions:
        assert( type( html ) == unicode )
        # The internal storage format is platonic unicode.
        self.html = html

    @classmethod
    def fromClipboardHtml( cls, clipboardHtml ):
        """
        Instantiates the class given a string containing the Win32 Html
        Clipboard format.  The given clipboardHtml is expected to be in
        utf-8 and is expected to contain the special start-fragment and
        end-fragment markers as defined in the class constants.  If it's
        not utf-8 or if it doesn't have the right delimiters, this function
        logs a warning message and creates an instance empty of text.
        """
        # BUGFIX: this module used the logging module without ever
        # importing it, so both warning paths below raised NameError.
        import logging

        # Preconditions:
        assert( type( clipboardHtml ) == str )
        try:
            # Validate that the whole payload is well-formed UTF-8 before
            # slicing it by the byte offsets taken from the header.
            clipboardHtml.decode( "utf-8" )
        except UnicodeDecodeError:
            # input can't be decoded from utf-8:
            logging.warn( "Non-Utf-8 string in fromClipboardHtml." )
            return cls( u"" )
        # The header's StartFragment/EndFragment fields are byte offsets
        # into the complete clipboard payload (header included).
        start = _findFirst( cls.START_RE, clipboardHtml )
        end = _findFirst( cls.END_RE, clipboardHtml )
        if start and end:
            html = clipboardHtml[ int(start): int(end) ]
            html = html.decode( "utf-8" )
            return cls( html )
        else:
            # Start and end not found in input:
            logging.warn( "Missing delimiters in fromClipboardHtml." )
            return cls( u"" )

    @classmethod
    def fromHtml( cls, html ):
        """
        Instantiates the class given a unicode string containing plain Html.
        """
        # Preconditions:
        assert( isinstance( html, unicode ) )
        return cls( html )

    def toClipboardHtml( self ):
        """
        Returns the contents in the Win32 Html clipboard format, as a
        UTF-8 encoded string.
        """
        return self._encodeHtmlFragment( self.html )

    def toHtml( self ):
        """
        Returns the contents in the plain Html format (unicode).
        """
        return self.html

    def _createHtmlPage( self, fragment ):
        """
        Takes an Html fragment and encloses it in a full Html page.
        """
        return self.HTML_PAGE % fragment

    def _encodeHtmlFragment(self, sourceHtml):
        """
        Join all our bits of information into a string formatted as per the
        clipboard HTML format spec.

        The return value of this function is a Python string
        encoded in UTF-8.
        """
        # Preconditions:
        assert( type( sourceHtml ) == unicode )

        # NOTE: Every time we construct a string, we must encode it to
        # UTF-8 *before* we do any position-sensitive operations on
        # it, such as taking its length or finding a substring
        # position -- the header offsets are byte offsets, not
        # character offsets.
        if "<body>" in sourceHtml:
            # Splice the fragment markers just inside the existing body.
            htmlheader, fragment = sourceHtml.split( "<body>" )
            fragment, footer = fragment.split( "</body>" )
            htmlheader = htmlheader + "<body>"
            footer = "</body>" + footer
            fragment = "".join( [self.START_FRAG,
                                 fragment,
                                 self.END_FRAG] )
            html = "".join([ htmlheader, fragment, footer ])
        else:
            # No <body>: wrap the bare fragment in a generic page.
            fragment = sourceHtml
            html = self._createHtmlPage( fragment )
        fragment = fragment.encode( "utf-8" )
        html = html.encode( "utf-8" )
        assert html == html.decode( "utf-8" ).encode( "utf-8" ), \
            "Encoding got out of whack in HtmlClipboardFormat."

        # How long is the header going to be?  Render it once with dummy
        # offsets; the fixed-width fields make the length independent of
        # the actual values.
        dummyHeader = self.HEADER_FORMAT % dict( htmlStart = 0,
                                                 htmlEnd = 0,
                                                 fragmentStart = 0,
                                                 fragmentEnd = 0 )
        dummyHeader = dummyHeader.encode( "utf-8" )
        headerLen = len(dummyHeader)
        fragmentStart = html.find( fragment )
        fragmentEnd = fragmentStart + len( fragment )
        positions = dict( htmlStart = headerLen,
                          htmlEnd = headerLen + len(html),
                          fragmentStart = headerLen + fragmentStart,
                          fragmentEnd = headerLen + fragmentEnd )
        header = self.HEADER_FORMAT % positions
        header = header.encode( "utf-8" )
        result = header + html
        # Postconditions:
        assert( type( result ) == str )
        assert( result == result.decode( "utf-8" ).encode( "utf-8" ) )
        return result
| berkus/enso | enso/platform/win32/selection/HtmlClipboardFormat.py | Python | bsd-3-clause | 9,263 |
import numpy as np
from scipy import ndimage as ndi
from scipy import stats
from ..util import img_as_float, pad
from ..feature import peak_local_max
from ..feature.util import _prepare_grayscale_input_2D
from ..feature.corner_cy import _corner_fast
from ._hessian_det_appx import _hessian_matrix_det
from ..transform import integral_image
from .._shared.utils import safe_as_int
def _compute_derivatives(image, mode='constant', cval=0):
"""Compute derivatives in x and y direction using the Sobel operator.
Parameters
----------
image : ndarray
Input image.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
imx : ndarray
Derivative in x-direction.
imy : ndarray
Derivative in y-direction.
"""
imy = ndi.sobel(image, axis=0, mode=mode, cval=cval)
imx = ndi.sobel(image, axis=1, mode=mode, cval=cval)
return imx, imy
def structure_tensor(image, sigma=1, mode='constant', cval=0):
    """Compute the structure tensor using sums of squared differences.

    The structure tensor::

        A = [Axx Axy]
            [Axy Ayy]

    is approximated by Gaussian-weighted sums of derivative products in a
    local window around each pixel.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation of the Gaussian weighting window.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How values outside the image borders are handled.
    cval : float, optional
        Fill value used when `mode` is 'constant'.

    Returns
    -------
    Axx, Axy, Ayy : ndarray
        The three distinct elements of the (symmetric) structure tensor,
        one per pixel.

    Examples
    --------
    >>> from skimage.feature import structure_tensor
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
    >>> Axx
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  4.,  0.,  4.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    """
    image = _prepare_grayscale_input_2D(image)
    imx, imy = _compute_derivatives(image, mode=mode, cval=cval)

    def local_sum(derivative_product):
        # The Gaussian filter acts as the weighted local summation.
        return ndi.gaussian_filter(derivative_product, sigma,
                                   mode=mode, cval=cval)

    return local_sum(imx * imx), local_sum(imx * imy), local_sum(imy * imy)
def hessian_matrix(image, sigma=1, mode='constant', cval=0):
    """Compute the Hessian matrix ``H = [[Hxx, Hxy], [Hxy, Hyy]]``.

    Each element is obtained by convolving the image with the matching
    second derivative of a Gaussian kernel in the x- and y-directions.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation of the Gaussian kernel.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How values outside the image borders are handled.
    cval : float, optional
        Fill value used when `mode` is 'constant'.

    Returns
    -------
    Hxx, Hxy, Hyy : ndarray
        The three distinct elements of the (symmetric) Hessian, one per
        pixel.

    Examples
    --------
    >>> from skimage.feature import hessian_matrix
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = -1.0 / 1591.54943092
    >>> Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)
    >>> Hxx
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    """
    image = _prepare_grayscale_input_2D(image)
    # Kernel half-width covering > 99% of the Gaussian mass.
    half_width = max(1, np.ceil(3 * sigma))
    ky, kx = np.mgrid[-half_width:half_width + 1, -half_width:half_width + 1]
    # Second-derivative-of-Gaussian kernels.
    gaussian_exp = np.exp(-(kx ** 2 + ky ** 2) / (2 * sigma ** 2))
    kernel_xx = 1 / (2 * np.pi * sigma ** 4) * (kx ** 2 / sigma ** 2 - 1)
    kernel_xx *= gaussian_exp
    kernel_xy = 1 / (2 * np.pi * sigma ** 6) * (kx * ky)
    kernel_xy *= gaussian_exp
    # NOTE: transpose() returns a *view* sharing kernel_xx's data, so the
    # in-place zeroing below deliberately follows the original order.
    kernel_yy = kernel_xx.transpose()
    # Suppress numerically negligible kernel entries.
    eps = np.finfo(kernel_xx.dtype).eps
    for kernel in (kernel_xx, kernel_xy, kernel_yy):
        kernel[np.abs(kernel) < eps * np.abs(kernel).max()] = 0
    Hxx, Hxy, Hyy = (ndi.convolve(image, kernel, mode=mode, cval=cval)
                     for kernel in (kernel_xx, kernel_xy, kernel_yy))
    return Hxx, Hxy, Hyy
def hessian_matrix_det(image, sigma):
    """Approximate the determinant of the Hessian over an image.

    Uses box filters over an integral image, as in SURF [1]_.  Runtime
    depends only on the image size, not on `sigma`, but the result is
    inaccurate for ``sigma < 3`` compared with an exact Hessian
    determinant.

    Parameters
    ----------
    image : array
        The image over which to compute the Hessian determinant.
    sigma : float
        Standard deviation of the Gaussian kernel being approximated.

    Returns
    -------
    out : array
        The array of determinants of Hessians.

    References
    ----------
    .. [1] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool,
           "SURF: Speeded Up Robust Features"
           ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf
    """
    summed_area = integral_image(img_as_float(image))
    return np.array(_hessian_matrix_det(summed_area, sigma))
def _image_orthogonal_matrix22_eigvals(M00, M01, M11):
l1 = (M00 + M11) / 2 + np.sqrt(4 * M01 ** 2 + (M00 - M11) ** 2) / 2
l2 = (M00 + M11) / 2 - np.sqrt(4 * M01 ** 2 + (M00 - M11) ** 2) / 2
return l1, l2
def structure_tensor_eigvals(Axx, Axy, Ayy):
    """Compute the eigenvalues of the structure tensor, elementwise.

    Parameters
    ----------
    Axx : ndarray
        Element of the structure tensor for each pixel in the input image.
    Axy : ndarray
        Element of the structure tensor for each pixel in the input image.
    Ayy : ndarray
        Element of the structure tensor for each pixel in the input image.

    Returns
    -------
    l1 : ndarray
        Larger eigenvalue for each input matrix.
    l2 : ndarray
        Smaller eigenvalue for each input matrix.

    Examples
    --------
    >>> from skimage.feature import structure_tensor, structure_tensor_eigvals
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
    >>> structure_tensor_eigvals(Axx, Axy, Ayy)[0]
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  2.,  4.,  2.,  0.],
           [ 0.,  4.,  0.,  4.,  0.],
           [ 0.,  2.,  4.,  2.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    """
    # Closed-form eigenvalues of the symmetric 2x2 matrix
    # [[Axx, Axy], [Axy, Ayy]], computed elementwise.
    half_trace = (Axx + Ayy) / 2
    half_gap = np.sqrt(4 * Axy ** 2 + (Axx - Ayy) ** 2) / 2
    return half_trace + half_gap, half_trace - half_gap
def hessian_matrix_eigvals(Hxx, Hxy, Hyy):
    """Compute the eigenvalues of the Hessian matrix, elementwise.

    Parameters
    ----------
    Hxx : ndarray
        Element of the Hessian matrix for each pixel in the input image.
    Hxy : ndarray
        Element of the Hessian matrix for each pixel in the input image.
    Hyy : ndarray
        Element of the Hessian matrix for each pixel in the input image.

    Returns
    -------
    l1 : ndarray
        Larger eigenvalue for each input matrix.
    l2 : ndarray
        Smaller eigenvalue for each input matrix.

    Examples
    --------
    >>> from skimage.feature import hessian_matrix, hessian_matrix_eigvals
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = -1 / 1591.54943092
    >>> Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)
    >>> hessian_matrix_eigvals(Hxx, Hxy, Hyy)[0]
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    """
    # Closed-form eigenvalues of the symmetric 2x2 matrix
    # [[Hxx, Hxy], [Hxy, Hyy]], computed elementwise.
    half_trace = (Hxx + Hyy) / 2
    half_gap = np.sqrt(4 * Hxy ** 2 + (Hxx - Hyy) ** 2) / 2
    return half_trace + half_gap, half_trace - half_gap
def corner_kitchen_rosenfeld(image, mode='constant', cval=0):
    """Compute the Kitchen and Rosenfeld corner measure response image.

    The corner measure is::

        (imxx * imy**2 + imyy * imx**2 - 2 * imxy * imx * imy)
            / (imx**2 + imy**2)

    where ``imx``/``imy`` are first and ``imxx``/``imxy``/``imyy`` second
    derivatives; pixels where the gradient magnitude is zero get a zero
    response.

    Parameters
    ----------
    image : ndarray
        Input image.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How values outside the image borders are handled.
    cval : float, optional
        Fill value used when `mode` is 'constant'.

    Returns
    -------
    response : ndarray
        Kitchen and Rosenfeld response image.
    """
    imx, imy = _compute_derivatives(image, mode=mode, cval=cval)
    imxx, imxy = _compute_derivatives(imx, mode=mode, cval=cval)
    # The x-derivative of imy equals imxy by symmetry and is unused.
    _, imyy = _compute_derivatives(imy, mode=mode, cval=cval)
    curvature = imxx * imy ** 2 + imyy * imx ** 2 - 2 * imxy * imx * imy
    gradient_sq = imx ** 2 + imy ** 2
    response = np.zeros_like(image, dtype=np.double)
    nonflat = gradient_sq != 0
    response[nonflat] = curvature[nonflat] / gradient_sq[nonflat]
    return response
def corner_harris(image, method='k', k=0.05, eps=1e-6, sigma=1):
    """Compute the Harris corner measure response image.

    The response is derived from the auto-correlation (structure) matrix::

        A = [(imx**2)   (imx*imy)] = [Axx Axy]
            [(imx*imy)  (imy**2)]    [Axy Ayy]

    with ``imx``/``imy`` Gaussian-averaged first derivatives, as
    ``det(A) - k * trace(A)**2`` for ``method='k'`` or
    ``2 * det(A) / (trace(A) + eps)`` otherwise.

    Parameters
    ----------
    image : ndarray
        Input image.
    method : {'k', 'eps'}, optional
        Which response formula to apply.
    k : float, optional
        Sensitivity factor to separate corners from edges, typically in
        range `[0, 0.2]`.  Small values of k result in detection of sharp
        corners.
    eps : float, optional
        Normalisation factor (Noble's corner measure).
    sigma : float, optional
        Standard deviation of the Gaussian weighting the auto-correlation
        matrix.

    Returns
    -------
    response : ndarray
        Harris response image.

    References
    ----------
    .. [1] http://kiwi.cs.dal.ca/~dparks/CornerDetection/harris.htm
    .. [2] http://en.wikipedia.org/wiki/Corner_detection

    Examples
    --------
    >>> from skimage.feature import corner_harris, corner_peaks
    >>> square = np.zeros([10, 10])
    >>> square[2:8, 2:8] = 1
    >>> corner_peaks(corner_harris(square), min_distance=1)
    array([[2, 2],
           [2, 7],
           [7, 2],
           [7, 7]])
    """
    Axx, Axy, Ayy = structure_tensor(image, sigma)
    determinant = Axx * Ayy - Axy ** 2
    trace = Axx + Ayy
    if method == 'k':
        return determinant - k * trace ** 2
    return 2 * determinant / (trace + eps)
def corner_shi_tomasi(image, sigma=1):
    """Compute the Shi-Tomasi (Kanade-Tomasi) corner measure response image.

    The response at each pixel is the smaller eigenvalue of the
    auto-correlation (structure) matrix::

        A = [(imx**2)   (imx*imy)] = [Axx Axy]
            [(imx*imy)  (imy**2)]    [Axy Ayy]

    i.e. ``((Axx + Ayy) - sqrt((Axx - Ayy)**2 + 4 * Axy**2)) / 2``.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float, optional
        Standard deviation of the Gaussian weighting the auto-correlation
        matrix.

    Returns
    -------
    response : ndarray
        Shi-Tomasi response image.

    References
    ----------
    .. [1] http://kiwi.cs.dal.ca/~dparks/CornerDetection/harris.htm
    .. [2] http://en.wikipedia.org/wiki/Corner_detection

    Examples
    --------
    >>> from skimage.feature import corner_shi_tomasi, corner_peaks
    >>> square = np.zeros([10, 10])
    >>> square[2:8, 2:8] = 1
    >>> corner_peaks(corner_shi_tomasi(square), min_distance=1)
    array([[2, 2],
           [2, 7],
           [7, 2],
           [7, 7]])
    """
    Axx, Axy, Ayy = structure_tensor(image, sigma)
    # Smaller eigenvalue of the structure tensor.
    return ((Axx + Ayy) - np.sqrt((Axx - Ayy) ** 2 + 4 * Axy ** 2)) / 2
def corner_foerstner(image, sigma=1):
    """Compute the Foerstner corner measure response images.

    From the auto-correlation (structure) matrix::

        A = [(imx**2)   (imx*imy)] = [Axx Axy]
            [(imx*imy)  (imy**2)]    [Axy Ayy]

    two measures are derived::

        w = det(A) / trace(A)           (size of error ellipse)
        q = 4 * det(A) / trace(A)**2    (roundness of error ellipse)

    Pixels with a zero trace get zero in both outputs.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float, optional
        Standard deviation of the Gaussian weighting the auto-correlation
        matrix.

    Returns
    -------
    w : ndarray
        Error ellipse sizes.
    q : ndarray
        Roundness of error ellipse.

    References
    ----------
    .. [1] http://www.ipb.uni-bonn.de/uploads/tx_ikgpublication/foerstner87.fast.pdf
    .. [2] http://en.wikipedia.org/wiki/Corner_detection

    Examples
    --------
    >>> from skimage.feature import corner_foerstner, corner_peaks
    >>> square = np.zeros([10, 10])
    >>> square[2:8, 2:8] = 1
    >>> w, q = corner_foerstner(square)
    >>> accuracy_thresh = 0.5
    >>> roundness_thresh = 0.3
    >>> foerstner = (q > roundness_thresh) * (w > accuracy_thresh) * w
    >>> corner_peaks(foerstner, min_distance=1)
    array([[2, 2],
           [2, 7],
           [7, 2],
           [7, 7]])
    """
    Axx, Axy, Ayy = structure_tensor(image, sigma)
    determinant = Axx * Ayy - Axy ** 2
    trace = Axx + Ayy
    w = np.zeros_like(image, dtype=np.double)
    q = np.zeros_like(image, dtype=np.double)
    nondegenerate = trace != 0
    w[nondegenerate] = determinant[nondegenerate] / trace[nondegenerate]
    q[nondegenerate] = (4 * determinant[nondegenerate]
                        / trace[nondegenerate] ** 2)
    return w, q
def corner_fast(image, n=12, threshold=0.15):
    """Extract FAST corners for a given image.

    Parameters
    ----------
    image : 2D ndarray
        Input image.
    n : int
        Minimum number of consecutive pixels out of the 16 pixels on the
        circle that should all be either brighter or darker w.r.t. the
        test pixel.  A circle point c is darker w.r.t. test pixel p if
        `Ic < Ip - threshold` and brighter if `Ic > Ip + threshold`.
        This is the n in the `FAST-n` corner detector.
    threshold : float
        Threshold used in deciding whether the pixels on the circle are
        brighter, darker or similar w.r.t. the test pixel.  Decrease the
        threshold when more corners are desired and vice-versa.

    Returns
    -------
    response : ndarray
        FAST corner response image.

    References
    ----------
    .. [1] Edward Rosten and Tom Drummond
           "Machine Learning for high-speed corner detection",
           http://www.edwardrosten.com/work/rosten_2006_machine.pdf
    .. [2] Wikipedia, "Features from accelerated segment test",
           https://en.wikipedia.org/wiki/Features_from_accelerated_segment_test
    """
    # The compiled kernel expects a C-contiguous greyscale float image.
    prepared = np.ascontiguousarray(_prepare_grayscale_input_2D(image))
    return _corner_fast(prepared, n, threshold)
def corner_subpix(image, corners, window_size=11, alpha=0.99):
    """Determine subpixel position of corners.
    A statistical test decides whether the corner is defined as the
    intersection of two edges or a single peak. Depending on the classification
    result, the subpixel corner location is determined based on the local
    covariance of the grey-values. If the significance level for either
    statistical test is not sufficient, the corner cannot be classified, and
    the output subpixel position is set to NaN.
    Parameters
    ----------
    image : ndarray
        Input image.
    corners : (N, 2) ndarray
        Corner coordinates `(row, col)`.
    window_size : int, optional
        Search window size for subpixel estimation.
    alpha : float, optional
        Significance level for corner classification.
    Returns
    -------
    positions : (N, 2) ndarray
        Subpixel corner positions. NaN for "not classified" corners.
    References
    ----------
    .. [1] http://www.ipb.uni-bonn.de/uploads/tx_ikgpublication/\
           foerstner87.fast.pdf
    .. [2] http://en.wikipedia.org/wiki/Corner_detection
    Examples
    --------
    >>> from skimage.feature import corner_harris, corner_peaks, corner_subpix
    >>> img = np.zeros((10, 10))
    >>> img[:5, :5] = 1
    >>> img[5:, 5:] = 1
    >>> img.astype(int)
    array([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]])
    >>> coords = corner_peaks(corner_harris(img), min_distance=2)
    >>> coords_subpix = corner_subpix(img, coords, window_size=7)
    >>> coords_subpix
    array([[ 4.5,  4.5]])
    """
    # window extent in one direction
    wext = (window_size - 1) // 2
    # pad so windows around border corners stay fully inside the image
    image = pad(image, pad_width=wext, mode='constant', constant_values=0)
    # add pad width, make sure to not modify the input values in-place
    corners = safe_as_int(corners + wext)
    # normal equation arrays (2x2 systems; reused in-place per corner)
    N_dot = np.zeros((2, 2), dtype=np.double)
    N_edge = np.zeros((2, 2), dtype=np.double)
    b_dot = np.zeros((2, ), dtype=np.double)
    b_edge = np.zeros((2, ), dtype=np.double)
    # critical statistical test values (F-distribution quantiles used to
    # classify each corner as "dot" or "edge"; redundancy = degrees of
    # freedom of the residuals in the window)
    redundancy = window_size ** 2 - 2
    t_crit_dot = stats.f.isf(1 - alpha, redundancy, redundancy)
    t_crit_edge = stats.f.isf(alpha, redundancy, redundancy)
    # coordinates of pixels within window, relative to the window centre
    y, x = np.mgrid[- wext:wext + 1, - wext:wext + 1]
    corners_subpix = np.zeros_like(corners, dtype=np.double)
    for i, (y0, x0) in enumerate(corners):
        # crop window around corner + border for sobel operator
        miny = y0 - wext - 1
        maxy = y0 + wext + 2
        minx = x0 - wext - 1
        maxx = x0 + wext + 2
        window = image[miny:maxy, minx:maxx]
        winx, winy = _compute_derivatives(window, mode='constant', cval=0)
        # compute gradient squares and remove border
        winx_winx = (winx * winx)[1:-1, 1:-1]
        winx_winy = (winx * winy)[1:-1, 1:-1]
        winy_winy = (winy * winy)[1:-1, 1:-1]
        # sum of squared differences (mean instead of gaussian filter)
        Axx = np.sum(winx_winx)
        Axy = np.sum(winx_winy)
        Ayy = np.sum(winy_winy)
        # sum of squared differences weighted with coordinates
        # (mean instead of gaussian filter)
        bxx_x = np.sum(winx_winx * x)
        bxx_y = np.sum(winx_winx * y)
        bxy_x = np.sum(winx_winy * x)
        bxy_y = np.sum(winx_winy * y)
        byy_x = np.sum(winy_winy * x)
        byy_y = np.sum(winy_winy * y)
        # normal equations for subpixel position: one system for the
        # "dot" (peak) model and one for the "edge intersection" model
        N_dot[0, 0] = Axx
        N_dot[0, 1] = N_dot[1, 0] = - Axy
        N_dot[1, 1] = Ayy
        N_edge[0, 0] = Ayy
        N_edge[0, 1] = N_edge[1, 0] = Axy
        N_edge[1, 1] = Axx
        b_dot[:] = bxx_y - bxy_x, byy_x - bxy_y
        b_edge[:] = byy_y + bxy_x, bxx_x + bxy_y
        # estimated positions
        try:
            est_dot = np.linalg.solve(N_dot, b_dot)
            est_edge = np.linalg.solve(N_edge, b_edge)
        except np.linalg.LinAlgError:
            # if image is constant the system is singular
            corners_subpix[i, :] = np.nan, np.nan
            continue
        # residuals of both model fits over the window
        ry_dot = y - est_dot[0]
        rx_dot = x - est_dot[1]
        ry_edge = y - est_edge[0]
        rx_edge = x - est_edge[1]
        # squared residuals
        rxx_dot = rx_dot * rx_dot
        rxy_dot = rx_dot * ry_dot
        ryy_dot = ry_dot * ry_dot
        rxx_edge = rx_edge * rx_edge
        rxy_edge = rx_edge * ry_edge
        ryy_edge = ry_edge * ry_edge
        # determine corner class (dot or edge)
        # variance for different models
        var_dot = np.sum(winx_winx * ryy_dot - 2 * winx_winy * rxy_dot
                         + winy_winy * rxx_dot)
        var_edge = np.sum(winy_winy * ryy_edge + 2 * winx_winy * rxy_edge
                          + winx_winx * rxx_edge)
        # test value (F-distributed); guard the degenerate all-zero and
        # divide-by-zero cases explicitly
        if var_dot < np.spacing(1) and var_edge < np.spacing(1):
            t = np.nan
        elif var_dot == 0:
            t = np.inf
        else:
            t = var_edge / var_dot
        # 1 for edge, -1 for dot, 0 for "not classified"
        corner_class = int(t < t_crit_edge) - int(t > t_crit_dot)
        if corner_class == -1:
            corners_subpix[i, :] = y0 + est_dot[0], x0 + est_dot[1]
        elif corner_class == 0:
            corners_subpix[i, :] = np.nan, np.nan
        elif corner_class == 1:
            corners_subpix[i, :] = y0 + est_edge[0], x0 + est_edge[1]
    # subtract pad width to return to input-image coordinates
    corners_subpix -= wext
    return corners_subpix
def corner_peaks(image, min_distance=1, threshold_abs=None, threshold_rel=0.1,
                 exclude_border=True, indices=True, num_peaks=np.inf,
                 footprint=None, labels=None):
    """Find corners in corner measure response image.

    Thin wrapper around :py:meth:`skimage.feature.peak_local_max` that
    additionally suppresses multiple connected peaks with the same
    accumulator value: within each ``min_distance`` neighbourhood only
    the first peak encountered (in row-major order) is kept.

    Parameters
    ----------
    * : *
        See :py:meth:`skimage.feature.peak_local_max`.

    Returns
    -------
    output : ndarray
        Peak coordinates if `indices` is True, otherwise a boolean mask
        of the same shape as `image`.
    """
    peaks = peak_local_max(image, min_distance=min_distance,
                           threshold_abs=threshold_abs,
                           threshold_rel=threshold_rel,
                           exclude_border=exclude_border,
                           indices=False, num_peaks=num_peaks,
                           footprint=footprint, labels=labels)
    if min_distance > 0:
        # Scan candidate peaks in row-major order.  Every surviving peak
        # clears its whole neighbourhood (itself included) and is then
        # restored, so later candidates inside it are discarded.
        rows, cols = peaks.nonzero()
        for r, c in zip(rows, cols):
            if not peaks[r, c]:
                continue
            peaks[r - min_distance:r + min_distance + 1,
                  c - min_distance:c + min_distance + 1] = False
            peaks[r, c] = True
    if indices is True:
        return np.transpose(peaks.nonzero())
    return peaks
| Hiyorimi/scikit-image | skimage/feature/corner.py | Python | bsd-3-clause | 27,202 |